Magellan Linux

Annotation of /trunk/kernel-magellan/patches-3.8/0101-3.8.2-all-fixes.patch

Revision 2106
Mon Mar 11 10:13:46 2013 UTC by niro
File size: 100781 bytes
-patches up to linux-3.8.2
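For reference, a cumulative fix patch like this is normally applied from the root of the matching vanilla kernel tree before building. A minimal sketch, assuming the sources were unpacked to a directory named linux-3.8.1 and the patch sits one directory up (both paths are illustrative, not taken from this page):

    cd linux-3.8.1
    # dry-run first to confirm the patch applies cleanly
    patch -p1 --dry-run < ../0101-3.8.2-all-fixes.patch
    # apply for real, then configure and build the kernel as usual
    patch -p1 < ../0101-3.8.2-all-fixes.patch

The annotated patch content follows below; the leading columns on each line are the ViewVC annotation (patch line number, author, revision).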
1 niro 2106 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
2     index 6c72381..986614d 100644
3     --- a/Documentation/kernel-parameters.txt
4     +++ b/Documentation/kernel-parameters.txt
5     @@ -564,6 +564,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
6     UART at the specified I/O port or MMIO address,
7     switching to the matching ttyS device later. The
8     options are the same as for ttyS, above.
9     + hvc<n> Use the hypervisor console device <n>. This is for
10     + both Xen and PowerPC hypervisors.
11    
12     If the device connected to the port is not a TTY but a braille
13     device, prepend "brl," before the device type, for instance
14     @@ -754,6 +756,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
15    
16     earlyprintk= [X86,SH,BLACKFIN]
17     earlyprintk=vga
18     + earlyprintk=xen
19     earlyprintk=serial[,ttySn[,baudrate]]
20     earlyprintk=ttySn[,baudrate]
21     earlyprintk=dbgp[debugController#]
22     @@ -771,6 +774,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
23     The VGA output is eventually overwritten by the real
24     console.
25    
26     + The xen output can only be used by Xen PV guests.
27     +
28     ekgdboc= [X86,KGDB] Allow early kernel console debugging
29     ekgdboc=kbd
30    
31     diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
32     index f8fa411..c205035 100644
33     --- a/arch/x86/boot/compressed/eboot.c
34     +++ b/arch/x86/boot/compressed/eboot.c
35     @@ -19,23 +19,28 @@
36    
37     static efi_system_table_t *sys_table;
38    
39     +static void efi_char16_printk(efi_char16_t *str)
40     +{
41     + struct efi_simple_text_output_protocol *out;
42     +
43     + out = (struct efi_simple_text_output_protocol *)sys_table->con_out;
44     + efi_call_phys2(out->output_string, out, str);
45     +}
46     +
47     static void efi_printk(char *str)
48     {
49     char *s8;
50    
51     for (s8 = str; *s8; s8++) {
52     - struct efi_simple_text_output_protocol *out;
53     efi_char16_t ch[2] = { 0 };
54    
55     ch[0] = *s8;
56     - out = (struct efi_simple_text_output_protocol *)sys_table->con_out;
57     -
58     if (*s8 == '\n') {
59     efi_char16_t nl[2] = { '\r', 0 };
60     - efi_call_phys2(out->output_string, out, nl);
61     + efi_char16_printk(nl);
62     }
63    
64     - efi_call_phys2(out->output_string, out, ch);
65     + efi_char16_printk(ch);
66     }
67     }
68    
69     @@ -709,7 +714,12 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
70     if ((u8 *)p >= (u8 *)filename_16 + sizeof(filename_16))
71     break;
72    
73     - *p++ = *str++;
74     + if (*str == '/') {
75     + *p++ = '\\';
76     + *str++;
77     + } else {
78     + *p++ = *str++;
79     + }
80     }
81    
82     *p = '\0';
83     @@ -737,7 +747,9 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
84     status = efi_call_phys5(fh->open, fh, &h, filename_16,
85     EFI_FILE_MODE_READ, (u64)0);
86     if (status != EFI_SUCCESS) {
87     - efi_printk("Failed to open initrd file\n");
88     + efi_printk("Failed to open initrd file: ");
89     + efi_char16_printk(filename_16);
90     + efi_printk("\n");
91     goto close_handles;
92     }
93    
94     diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
95     index b994cc8..cbf5121 100644
96     --- a/arch/x86/kernel/apic/apic.c
97     +++ b/arch/x86/kernel/apic/apic.c
98     @@ -131,7 +131,7 @@ static int __init parse_lapic(char *arg)
99     {
100     if (config_enabled(CONFIG_X86_32) && !arg)
101     force_enable_local_apic = 1;
102     - else if (!strncmp(arg, "notscdeadline", 13))
103     + else if (arg && !strncmp(arg, "notscdeadline", 13))
104     setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
105     return 0;
106     }
107     diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c
108     index 48d9d4e..992f442 100644
109     --- a/arch/x86/kernel/head.c
110     +++ b/arch/x86/kernel/head.c
111     @@ -5,8 +5,6 @@
112     #include <asm/setup.h>
113     #include <asm/bios_ebda.h>
114    
115     -#define BIOS_LOWMEM_KILOBYTES 0x413
116     -
117     /*
118     * The BIOS places the EBDA/XBDA at the top of conventional
119     * memory, and usually decreases the reported amount of
120     @@ -16,17 +14,30 @@
121     * chipset: reserve a page before VGA to prevent PCI prefetch
122     * into it (errata #56). Usually the page is reserved anyways,
123     * unless you have no PS/2 mouse plugged in.
124     + *
125     + * This functions is deliberately very conservative. Losing
126     + * memory in the bottom megabyte is rarely a problem, as long
127     + * as we have enough memory to install the trampoline. Using
128     + * memory that is in use by the BIOS or by some DMA device
129     + * the BIOS didn't shut down *is* a big problem.
130     */
131     +
132     +#define BIOS_LOWMEM_KILOBYTES 0x413
133     +#define LOWMEM_CAP 0x9f000U /* Absolute maximum */
134     +#define INSANE_CUTOFF 0x20000U /* Less than this = insane */
135     +
136     void __init reserve_ebda_region(void)
137     {
138     unsigned int lowmem, ebda_addr;
139    
140     - /* To determine the position of the EBDA and the */
141     - /* end of conventional memory, we need to look at */
142     - /* the BIOS data area. In a paravirtual environment */
143     - /* that area is absent. We'll just have to assume */
144     - /* that the paravirt case can handle memory setup */
145     - /* correctly, without our help. */
146     + /*
147     + * To determine the position of the EBDA and the
148     + * end of conventional memory, we need to look at
149     + * the BIOS data area. In a paravirtual environment
150     + * that area is absent. We'll just have to assume
151     + * that the paravirt case can handle memory setup
152     + * correctly, without our help.
153     + */
154     if (paravirt_enabled())
155     return;
156    
157     @@ -37,19 +48,23 @@ void __init reserve_ebda_region(void)
158     /* start of EBDA area */
159     ebda_addr = get_bios_ebda();
160    
161     - /* Fixup: bios puts an EBDA in the top 64K segment */
162     - /* of conventional memory, but does not adjust lowmem. */
163     - if ((lowmem - ebda_addr) <= 0x10000)
164     - lowmem = ebda_addr;
165     + /*
166     + * Note: some old Dells seem to need 4k EBDA without
167     + * reporting so, so just consider the memory above 0x9f000
168     + * to be off limits (bugzilla 2990).
169     + */
170     +
171     + /* If the EBDA address is below 128K, assume it is bogus */
172     + if (ebda_addr < INSANE_CUTOFF)
173     + ebda_addr = LOWMEM_CAP;
174    
175     - /* Fixup: bios does not report an EBDA at all. */
176     - /* Some old Dells seem to need 4k anyhow (bugzilla 2990) */
177     - if ((ebda_addr == 0) && (lowmem >= 0x9f000))
178     - lowmem = 0x9f000;
179     + /* If lowmem is less than 128K, assume it is bogus */
180     + if (lowmem < INSANE_CUTOFF)
181     + lowmem = LOWMEM_CAP;
182    
183     - /* Paranoia: should never happen, but... */
184     - if ((lowmem == 0) || (lowmem >= 0x100000))
185     - lowmem = 0x9f000;
186     + /* Use the lower of the lowmem and EBDA markers as the cutoff */
187     + lowmem = min(lowmem, ebda_addr);
188     + lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */
189    
190     /* reserve all memory between lowmem and the 1MB mark */
191     memblock_reserve(lowmem, 0x100000 - lowmem);
192     diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
193     index 928bf83..e2cd38f 100644
194     --- a/arch/x86/platform/efi/efi.c
195     +++ b/arch/x86/platform/efi/efi.c
196     @@ -85,9 +85,10 @@ int efi_enabled(int facility)
197     }
198     EXPORT_SYMBOL(efi_enabled);
199    
200     +static bool disable_runtime = false;
201     static int __init setup_noefi(char *arg)
202     {
203     - clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
204     + disable_runtime = true;
205     return 0;
206     }
207     early_param("noefi", setup_noefi);
208     @@ -734,7 +735,7 @@ void __init efi_init(void)
209     if (!efi_is_native())
210     pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
211     else {
212     - if (efi_runtime_init())
213     + if (disable_runtime || efi_runtime_init())
214     return;
215     set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
216     }
217     diff --git a/block/genhd.c b/block/genhd.c
218     index 3993ebf..7dcfdd8 100644
219     --- a/block/genhd.c
220     +++ b/block/genhd.c
221     @@ -25,7 +25,7 @@ static DEFINE_MUTEX(block_class_lock);
222     struct kobject *block_depr;
223    
224     /* for extended dynamic devt allocation, currently only one major is used */
225     -#define MAX_EXT_DEVT (1 << MINORBITS)
226     +#define NR_EXT_DEVT (1 << MINORBITS)
227    
228     /* For extended devt allocation. ext_devt_mutex prevents look up
229     * results from going away underneath its user.
230     @@ -422,17 +422,18 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
231     do {
232     if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
233     return -ENOMEM;
234     + mutex_lock(&ext_devt_mutex);
235     rc = idr_get_new(&ext_devt_idr, part, &idx);
236     + if (!rc && idx >= NR_EXT_DEVT) {
237     + idr_remove(&ext_devt_idr, idx);
238     + rc = -EBUSY;
239     + }
240     + mutex_unlock(&ext_devt_mutex);
241     } while (rc == -EAGAIN);
242    
243     if (rc)
244     return rc;
245    
246     - if (idx > MAX_EXT_DEVT) {
247     - idr_remove(&ext_devt_idr, idx);
248     - return -EBUSY;
249     - }
250     -
251     *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
252     return 0;
253     }
254     @@ -646,7 +647,6 @@ void del_gendisk(struct gendisk *disk)
255     disk_part_iter_exit(&piter);
256    
257     invalidate_partition(disk, 0);
258     - blk_free_devt(disk_to_dev(disk)->devt);
259     set_capacity(disk, 0);
260     disk->flags &= ~GENHD_FL_UP;
261    
262     @@ -664,6 +664,7 @@ void del_gendisk(struct gendisk *disk)
263     if (!sysfs_deprecated)
264     sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
265     device_del(disk_to_dev(disk));
266     + blk_free_devt(disk_to_dev(disk)->devt);
267     }
268     EXPORT_SYMBOL(del_gendisk);
269    
270     diff --git a/block/partition-generic.c b/block/partition-generic.c
271     index f1d1451..1cb4dec 100644
272     --- a/block/partition-generic.c
273     +++ b/block/partition-generic.c
274     @@ -249,11 +249,11 @@ void delete_partition(struct gendisk *disk, int partno)
275     if (!part)
276     return;
277    
278     - blk_free_devt(part_devt(part));
279     rcu_assign_pointer(ptbl->part[partno], NULL);
280     rcu_assign_pointer(ptbl->last_lookup, NULL);
281     kobject_put(part->holder_dir);
282     device_del(part_to_dev(part));
283     + blk_free_devt(part_devt(part));
284    
285     hd_struct_put(part);
286     }
287     diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
288     index 38c5078..f5ae996 100644
289     --- a/drivers/acpi/Kconfig
290     +++ b/drivers/acpi/Kconfig
291     @@ -268,7 +268,8 @@ config ACPI_CUSTOM_DSDT
292     default ACPI_CUSTOM_DSDT_FILE != ""
293    
294     config ACPI_INITRD_TABLE_OVERRIDE
295     - bool "ACPI tables can be passed via uncompressed cpio in initrd"
296     + bool "ACPI tables override via initrd"
297     + depends on BLK_DEV_INITRD && X86
298     default n
299     help
300     This option provides functionality to override arbitrary ACPI tables
301     diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
302     index 2fcc67d..df85051 100644
303     --- a/drivers/acpi/sleep.c
304     +++ b/drivers/acpi/sleep.c
305     @@ -177,6 +177,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
306     },
307     {
308     .callback = init_nvs_nosave,
309     + .ident = "Sony Vaio VGN-FW41E_H",
310     + .matches = {
311     + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
312     + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
313     + },
314     + },
315     + {
316     + .callback = init_nvs_nosave,
317     .ident = "Sony Vaio VGN-FW21E",
318     .matches = {
319     DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
320     diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
321     index 4979127..72e3e12 100644
322     --- a/drivers/ata/ahci.c
323     +++ b/drivers/ata/ahci.c
324     @@ -265,6 +265,30 @@ static const struct pci_device_id ahci_pci_tbl[] = {
325     { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
326     { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
327     { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
328     + { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
329     + { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
330     + { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
331     + { PCI_VDEVICE(INTEL, 0x1f25), board_ahci }, /* Avoton RAID */
332     + { PCI_VDEVICE(INTEL, 0x1f26), board_ahci }, /* Avoton RAID */
333     + { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
334     + { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
335     + { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
336     + { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */
337     + { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */
338     + { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */
339     + { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */
340     + { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */
341     + { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
342     + { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
343     + { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
344     + { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
345     + { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
346     + { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
347     + { PCI_VDEVICE(INTEL, 0x8d0e), board_ahci }, /* Wellsburg RAID */
348     + { PCI_VDEVICE(INTEL, 0x8d62), board_ahci }, /* Wellsburg AHCI */
349     + { PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */
350     + { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
351     + { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
352    
353     /* JMicron 360/1/3/5/6, match class to avoid IDE function */
354     { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
355     diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
356     index 174eca6..d2ba439 100644
357     --- a/drivers/ata/ata_piix.c
358     +++ b/drivers/ata/ata_piix.c
359     @@ -317,6 +317,23 @@ static const struct pci_device_id piix_pci_tbl[] = {
360     { 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
361     /* SATA Controller IDE (DH89xxCC) */
362     { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
363     + /* SATA Controller IDE (Avoton) */
364     + { 0x8086, 0x1f20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
365     + /* SATA Controller IDE (Avoton) */
366     + { 0x8086, 0x1f21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
367     + /* SATA Controller IDE (Avoton) */
368     + { 0x8086, 0x1f30, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
369     + /* SATA Controller IDE (Avoton) */
370     + { 0x8086, 0x1f31, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
371     + /* SATA Controller IDE (Wellsburg) */
372     + { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
373     + /* SATA Controller IDE (Wellsburg) */
374     + { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
375     + /* SATA Controller IDE (Wellsburg) */
376     + { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
377     + /* SATA Controller IDE (Wellsburg) */
378     + { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
379     +
380     { } /* terminate list */
381     };
382    
383     diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
384     index 043ddcc..eb591fb 100644
385     --- a/drivers/block/nbd.c
386     +++ b/drivers/block/nbd.c
387     @@ -595,12 +595,20 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
388     struct request sreq;
389    
390     dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
391     + if (!nbd->sock)
392     + return -EINVAL;
393    
394     + mutex_unlock(&nbd->tx_lock);
395     + fsync_bdev(bdev);
396     + mutex_lock(&nbd->tx_lock);
397     blk_rq_init(NULL, &sreq);
398     sreq.cmd_type = REQ_TYPE_SPECIAL;
399     nbd_cmd(&sreq) = NBD_CMD_DISC;
400     +
401     + /* Check again after getting mutex back. */
402     if (!nbd->sock)
403     return -EINVAL;
404     +
405     nbd_send_req(nbd, &sreq);
406     return 0;
407     }
408     @@ -614,6 +622,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
409     nbd_clear_que(nbd);
410     BUG_ON(!list_empty(&nbd->queue_head));
411     BUG_ON(!list_empty(&nbd->waiting_queue));
412     + kill_bdev(bdev);
413     if (file)
414     fput(file);
415     return 0;
416     @@ -702,6 +711,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
417     nbd->file = NULL;
418     nbd_clear_que(nbd);
419     dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
420     + kill_bdev(bdev);
421     queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
422     if (file)
423     fput(file);
424     diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
425     index 5ac841f..de1f319 100644
426     --- a/drivers/block/xen-blkback/blkback.c
427     +++ b/drivers/block/xen-blkback/blkback.c
428     @@ -46,6 +46,7 @@
429     #include <xen/xen.h>
430     #include <asm/xen/hypervisor.h>
431     #include <asm/xen/hypercall.h>
432     +#include <xen/balloon.h>
433     #include "common.h"
434    
435     /*
436     @@ -239,6 +240,7 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
437     ret = gnttab_unmap_refs(unmap, NULL, pages,
438     segs_to_unmap);
439     BUG_ON(ret);
440     + free_xenballooned_pages(segs_to_unmap, pages);
441     segs_to_unmap = 0;
442     }
443    
444     @@ -527,8 +529,8 @@ static int xen_blkbk_map(struct blkif_request *req,
445     GFP_KERNEL);
446     if (!persistent_gnt)
447     return -ENOMEM;
448     - persistent_gnt->page = alloc_page(GFP_KERNEL);
449     - if (!persistent_gnt->page) {
450     + if (alloc_xenballooned_pages(1, &persistent_gnt->page,
451     + false)) {
452     kfree(persistent_gnt);
453     return -ENOMEM;
454     }
455     @@ -879,7 +881,6 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
456     goto fail_response;
457     }
458    
459     - preq.dev = req->u.rw.handle;
460     preq.sector_number = req->u.rw.sector_number;
461     preq.nr_sects = 0;
462    
463     diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
464     index 6398072..5e237f6 100644
465     --- a/drivers/block/xen-blkback/xenbus.c
466     +++ b/drivers/block/xen-blkback/xenbus.c
467     @@ -367,6 +367,7 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
468     be->blkif = NULL;
469     }
470    
471     + kfree(be->mode);
472     kfree(be);
473     dev_set_drvdata(&dev->dev, NULL);
474     return 0;
475     @@ -502,6 +503,7 @@ static void backend_changed(struct xenbus_watch *watch,
476     = container_of(watch, struct backend_info, backend_watch);
477     struct xenbus_device *dev = be->dev;
478     int cdrom = 0;
479     + unsigned long handle;
480     char *device_type;
481    
482     DPRINTK("");
483     @@ -521,10 +523,10 @@ static void backend_changed(struct xenbus_watch *watch,
484     return;
485     }
486    
487     - if ((be->major || be->minor) &&
488     - ((be->major != major) || (be->minor != minor))) {
489     - pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
490     - be->major, be->minor, major, minor);
491     + if (be->major | be->minor) {
492     + if (be->major != major || be->minor != minor)
493     + pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
494     + be->major, be->minor, major, minor);
495     return;
496     }
497    
498     @@ -542,36 +544,33 @@ static void backend_changed(struct xenbus_watch *watch,
499     kfree(device_type);
500     }
501    
502     - if (be->major == 0 && be->minor == 0) {
503     - /* Front end dir is a number, which is used as the handle. */
504     -
505     - char *p = strrchr(dev->otherend, '/') + 1;
506     - long handle;
507     - err = strict_strtoul(p, 0, &handle);
508     - if (err)
509     - return;
510     + /* Front end dir is a number, which is used as the handle. */
511     + err = strict_strtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
512     + if (err)
513     + return;
514    
515     - be->major = major;
516     - be->minor = minor;
517     + be->major = major;
518     + be->minor = minor;
519    
520     - err = xen_vbd_create(be->blkif, handle, major, minor,
521     - (NULL == strchr(be->mode, 'w')), cdrom);
522     - if (err) {
523     - be->major = 0;
524     - be->minor = 0;
525     - xenbus_dev_fatal(dev, err, "creating vbd structure");
526     - return;
527     - }
528     + err = xen_vbd_create(be->blkif, handle, major, minor,
529     + !strchr(be->mode, 'w'), cdrom);
530    
531     + if (err)
532     + xenbus_dev_fatal(dev, err, "creating vbd structure");
533     + else {
534     err = xenvbd_sysfs_addif(dev);
535     if (err) {
536     xen_vbd_free(&be->blkif->vbd);
537     - be->major = 0;
538     - be->minor = 0;
539     xenbus_dev_fatal(dev, err, "creating sysfs entries");
540     - return;
541     }
542     + }
543    
544     + if (err) {
545     + kfree(be->mode);
546     + be->mode = NULL;
547     + be->major = 0;
548     + be->minor = 0;
549     + } else {
550     /* We're potentially connected now */
551     xen_update_blkif_status(be->blkif);
552     }
553     diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
554     index 11043c1..c3dae2e 100644
555     --- a/drivers/block/xen-blkfront.c
556     +++ b/drivers/block/xen-blkfront.c
557     @@ -791,7 +791,7 @@ static void blkif_restart_queue(struct work_struct *work)
558     static void blkif_free(struct blkfront_info *info, int suspend)
559     {
560     struct llist_node *all_gnts;
561     - struct grant *persistent_gnt;
562     + struct grant *persistent_gnt, *tmp;
563     struct llist_node *n;
564    
565     /* Prevent new requests being issued until we fix things up. */
566     @@ -805,10 +805,17 @@ static void blkif_free(struct blkfront_info *info, int suspend)
567     /* Remove all persistent grants */
568     if (info->persistent_gnts_c) {
569     all_gnts = llist_del_all(&info->persistent_gnts);
570     - llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) {
571     + persistent_gnt = llist_entry(all_gnts, typeof(*(persistent_gnt)), node);
572     + while (persistent_gnt) {
573     gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
574     __free_page(pfn_to_page(persistent_gnt->pfn));
575     - kfree(persistent_gnt);
576     + tmp = persistent_gnt;
577     + n = persistent_gnt->node.next;
578     + if (n)
579     + persistent_gnt = llist_entry(n, typeof(*(persistent_gnt)), node);
580     + else
581     + persistent_gnt = NULL;
582     + kfree(tmp);
583     }
584     info->persistent_gnts_c = 0;
585     }
586     diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
587     index 3873d53..af3e8aa 100644
588     --- a/drivers/firewire/core-device.c
589     +++ b/drivers/firewire/core-device.c
590     @@ -1020,6 +1020,10 @@ static void fw_device_init(struct work_struct *work)
591     ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
592     idr_get_new(&fw_device_idr, device, &minor) :
593     -ENOMEM;
594     + if (minor >= 1 << MINORBITS) {
595     + idr_remove(&fw_device_idr, minor);
596     + minor = -ENOSPC;
597     + }
598     up_write(&fw_device_rwsem);
599    
600     if (ret < 0)
601     diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
602     index f5596db..bcb201c 100644
603     --- a/drivers/firmware/efivars.c
604     +++ b/drivers/firmware/efivars.c
605     @@ -79,6 +79,7 @@
606     #include <linux/device.h>
607     #include <linux/slab.h>
608     #include <linux/pstore.h>
609     +#include <linux/ctype.h>
610    
611     #include <linux/fs.h>
612     #include <linux/ramfs.h>
613     @@ -900,6 +901,48 @@ static struct inode *efivarfs_get_inode(struct super_block *sb,
614     return inode;
615     }
616    
617     +/*
618     + * Return true if 'str' is a valid efivarfs filename of the form,
619     + *
620     + * VariableName-12345678-1234-1234-1234-1234567891bc
621     + */
622     +static bool efivarfs_valid_name(const char *str, int len)
623     +{
624     + static const char dashes[GUID_LEN] = {
625     + [8] = 1, [13] = 1, [18] = 1, [23] = 1
626     + };
627     + const char *s = str + len - GUID_LEN;
628     + int i;
629     +
630     + /*
631     + * We need a GUID, plus at least one letter for the variable name,
632     + * plus the '-' separator
633     + */
634     + if (len < GUID_LEN + 2)
635     + return false;
636     +
637     + /* GUID should be right after the first '-' */
638     + if (s - 1 != strchr(str, '-'))
639     + return false;
640     +
641     + /*
642     + * Validate that 's' is of the correct format, e.g.
643     + *
644     + * 12345678-1234-1234-1234-123456789abc
645     + */
646     + for (i = 0; i < GUID_LEN; i++) {
647     + if (dashes[i]) {
648     + if (*s++ != '-')
649     + return false;
650     + } else {
651     + if (!isxdigit(*s++))
652     + return false;
653     + }
654     + }
655     +
656     + return true;
657     +}
658     +
659     static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
660     {
661     guid->b[0] = hex_to_bin(str[6]) << 4 | hex_to_bin(str[7]);
662     @@ -928,11 +971,7 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
663     struct efivar_entry *var;
664     int namelen, i = 0, err = 0;
665    
666     - /*
667     - * We need a GUID, plus at least one letter for the variable name,
668     - * plus the '-' separator
669     - */
670     - if (dentry->d_name.len < GUID_LEN + 2)
671     + if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
672     return -EINVAL;
673    
674     inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
675     @@ -1004,6 +1043,84 @@ static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
676     return -EINVAL;
677     };
678    
679     +/*
680     + * Compare two efivarfs file names.
681     + *
682     + * An efivarfs filename is composed of two parts,
683     + *
684     + * 1. A case-sensitive variable name
685     + * 2. A case-insensitive GUID
686     + *
687     + * So we need to perform a case-sensitive match on part 1 and a
688     + * case-insensitive match on part 2.
689     + */
690     +static int efivarfs_d_compare(const struct dentry *parent, const struct inode *pinode,
691     + const struct dentry *dentry, const struct inode *inode,
692     + unsigned int len, const char *str,
693     + const struct qstr *name)
694     +{
695     + int guid = len - GUID_LEN;
696     +
697     + if (name->len != len)
698     + return 1;
699     +
700     + /* Case-sensitive compare for the variable name */
701     + if (memcmp(str, name->name, guid))
702     + return 1;
703     +
704     + /* Case-insensitive compare for the GUID */
705     + return strncasecmp(name->name + guid, str + guid, GUID_LEN);
706     +}
707     +
708     +static int efivarfs_d_hash(const struct dentry *dentry,
709     + const struct inode *inode, struct qstr *qstr)
710     +{
711     + unsigned long hash = init_name_hash();
712     + const unsigned char *s = qstr->name;
713     + unsigned int len = qstr->len;
714     +
715     + if (!efivarfs_valid_name(s, len))
716     + return -EINVAL;
717     +
718     + while (len-- > GUID_LEN)
719     + hash = partial_name_hash(*s++, hash);
720     +
721     + /* GUID is case-insensitive. */
722     + while (len--)
723     + hash = partial_name_hash(tolower(*s++), hash);
724     +
725     + qstr->hash = end_name_hash(hash);
726     + return 0;
727     +}
728     +
729     +/*
730     + * Retaining negative dentries for an in-memory filesystem just wastes
731     + * memory and lookup time: arrange for them to be deleted immediately.
732     + */
733     +static int efivarfs_delete_dentry(const struct dentry *dentry)
734     +{
735     + return 1;
736     +}
737     +
738     +static struct dentry_operations efivarfs_d_ops = {
739     + .d_compare = efivarfs_d_compare,
740     + .d_hash = efivarfs_d_hash,
741     + .d_delete = efivarfs_delete_dentry,
742     +};
743     +
744     +static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
745     +{
746     + struct qstr q;
747     +
748     + q.name = name;
749     + q.len = strlen(name);
750     +
751     + if (efivarfs_d_hash(NULL, NULL, &q))
752     + return NULL;
753     +
754     + return d_alloc(parent, &q);
755     +}
756     +
757     static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
758     {
759     struct inode *inode = NULL;
760     @@ -1019,6 +1136,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
761     sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
762     sb->s_magic = EFIVARFS_MAGIC;
763     sb->s_op = &efivarfs_ops;
764     + sb->s_d_op = &efivarfs_d_ops;
765     sb->s_time_gran = 1;
766    
767     inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
768     @@ -1059,7 +1177,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
769     if (!inode)
770     goto fail_name;
771    
772     - dentry = d_alloc_name(root, name);
773     + dentry = efivarfs_alloc_dentry(root, name);
774     if (!dentry)
775     goto fail_inode;
776    
777     @@ -1109,8 +1227,20 @@ static struct file_system_type efivarfs_type = {
778     .kill_sb = efivarfs_kill_sb,
779     };
780    
781     +/*
782     + * Handle negative dentry.
783     + */
784     +static struct dentry *efivarfs_lookup(struct inode *dir, struct dentry *dentry,
785     + unsigned int flags)
786     +{
787     + if (dentry->d_name.len > NAME_MAX)
788     + return ERR_PTR(-ENAMETOOLONG);
789     + d_add(dentry, NULL);
790     + return NULL;
791     +}
792     +
793     static const struct inode_operations efivarfs_dir_inode_operations = {
794     - .lookup = simple_lookup,
795     + .lookup = efivarfs_lookup,
796     .unlink = efivarfs_unlink,
797     .create = efivarfs_create,
798     };
799     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
800     index eb2ee11..ceb3040 100644
801     --- a/drivers/hid/hid-core.c
802     +++ b/drivers/hid/hid-core.c
803     @@ -1697,6 +1697,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
804     { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
805     { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
806     { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
807     + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
808     { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
809     { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
810     { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
811     @@ -2070,6 +2071,7 @@ static const struct hid_device_id hid_ignore_list[] = {
812     { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
813     { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
814     { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
815     + { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) },
816     { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
817     { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
818     { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
819     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
820     index 34e2547..266e2ae 100644
821     --- a/drivers/hid/hid-ids.h
822     +++ b/drivers/hid/hid-ids.h
823     @@ -554,6 +554,9 @@
824     #define USB_VENDOR_ID_MADCATZ 0x0738
825     #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
826    
827     +#define USB_VENDOR_ID_MASTERKIT 0x16c0
828     +#define USB_DEVICE_ID_MASTERKIT_MA901RADIO 0x05df
829     +
830     #define USB_VENDOR_ID_MCC 0x09db
831     #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
832     #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
833     @@ -709,6 +712,7 @@
834    
835     #define USB_VENDOR_ID_SONY 0x054c
836     #define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b
837     +#define USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE 0x0374
838     #define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306
839     #define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
840     #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
841     diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
842     index 7f33ebf..126d6ae 100644
843     --- a/drivers/hid/hid-sony.c
844     +++ b/drivers/hid/hid-sony.c
845     @@ -43,9 +43,19 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
846     {
847     struct sony_sc *sc = hid_get_drvdata(hdev);
848    
849     - if ((sc->quirks & VAIO_RDESC_CONSTANT) &&
850     - *rsize >= 56 && rdesc[54] == 0x81 && rdesc[55] == 0x07) {
851     - hid_info(hdev, "Fixing up Sony Vaio VGX report descriptor\n");
852     + /*
853     + * Some Sony RF receivers wrongly declare the mouse pointer as a
854     + * a constant non-data variable.
855     + */
856     + if ((sc->quirks & VAIO_RDESC_CONSTANT) && *rsize >= 56 &&
857     + /* usage page: generic desktop controls */
858     + /* rdesc[0] == 0x05 && rdesc[1] == 0x01 && */
859     + /* usage: mouse */
860     + rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
861     + /* input (usage page for x,y axes): constant, variable, relative */
862     + rdesc[54] == 0x81 && rdesc[55] == 0x07) {
863     + hid_info(hdev, "Fixing up Sony RF Receiver report descriptor\n");
864     + /* input: data, variable, relative */
865     rdesc[55] = 0x06;
866     }
867    
868     @@ -217,6 +227,8 @@ static const struct hid_device_id sony_devices[] = {
869     .driver_data = SIXAXIS_CONTROLLER_BT },
870     { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE),
871     .driver_data = VAIO_RDESC_CONSTANT },
872     + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE),
873     + .driver_data = VAIO_RDESC_CONSTANT },
874     { }
875     };
876     MODULE_DEVICE_TABLE(hid, sony_devices);
877     diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
878     index d5088ce..7ccf328 100644
879     --- a/drivers/infiniband/ulp/srp/ib_srp.c
880     +++ b/drivers/infiniband/ulp/srp/ib_srp.c
881     @@ -700,23 +700,24 @@ static int srp_reconnect_target(struct srp_target_port *target)
882     struct Scsi_Host *shost = target->scsi_host;
883     int i, ret;
884    
885     - if (target->state != SRP_TARGET_LIVE)
886     - return -EAGAIN;
887     -
888     scsi_target_block(&shost->shost_gendev);
889    
890     srp_disconnect_target(target);
891     /*
892     - * Now get a new local CM ID so that we avoid confusing the
893     - * target in case things are really fouled up.
894     + * Now get a new local CM ID so that we avoid confusing the target in
895     + * case things are really fouled up. Doing so also ensures that all CM
896     + * callbacks will have finished before a new QP is allocated.
897     */
898     ret = srp_new_cm_id(target);
899     - if (ret)
900     - goto unblock;
901     -
902     - ret = srp_create_target_ib(target);
903     - if (ret)
904     - goto unblock;
905     + /*
906     + * Whether or not creating a new CM ID succeeded, create a new
907     + * QP. This guarantees that all completion callback function
908     + * invocations have finished before request resetting starts.
909     + */
910     + if (ret == 0)
911     + ret = srp_create_target_ib(target);
912     + else
913     + srp_create_target_ib(target);
914    
915     for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
916     struct srp_request *req = &target->req_ring[i];
917     @@ -728,11 +729,12 @@ static int srp_reconnect_target(struct srp_target_port *target)
918     for (i = 0; i < SRP_SQ_SIZE; ++i)
919     list_add(&target->tx_ring[i]->list, &target->free_tx);
920    
921     - ret = srp_connect_target(target);
922     + if (ret == 0)
923     + ret = srp_connect_target(target);
924    
925     -unblock:
926     scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING :
927     SDEV_TRANSPORT_OFFLINE);
928     + target->transport_offline = !!ret;
929    
930     if (ret)
931     goto err;
932     @@ -1352,6 +1354,12 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
933     unsigned long flags;
934     int len;
935    
936     + if (unlikely(target->transport_offline)) {
937     + scmnd->result = DID_NO_CONNECT << 16;
938     + scmnd->scsi_done(scmnd);
939     + return 0;
940     + }
941     +
942     spin_lock_irqsave(&target->lock, flags);
943     iu = __srp_get_tx_iu(target, SRP_IU_CMD);
944     if (!iu)
945     @@ -1695,6 +1703,9 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
946     struct srp_iu *iu;
947     struct srp_tsk_mgmt *tsk_mgmt;
948    
949     + if (!target->connected || target->qp_in_error)
950     + return -1;
951     +
952     init_completion(&target->tsk_mgmt_done);
953    
954     spin_lock_irq(&target->lock);
955     @@ -1736,7 +1747,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
956    
957     shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
958    
959     - if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
960     + if (!req || !srp_claim_req(target, req, scmnd))
961     return FAILED;
962     srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
963     SRP_TSK_ABORT_TASK);
964     @@ -1754,8 +1765,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
965    
966     shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
967    
968     - if (target->qp_in_error)
969     - return FAILED;
970     if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
971     SRP_TSK_LUN_RESET))
972     return FAILED;
973     @@ -1972,7 +1981,6 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
974     spin_unlock(&host->target_lock);
975    
976     target->state = SRP_TARGET_LIVE;
977     - target->connected = false;
978    
979     scsi_scan_target(&target->scsi_host->shost_gendev,
980     0, target->scsi_id, SCAN_WILD_CARD, 0);
981     diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
982     index de2d0b3..66fbedd 100644
983     --- a/drivers/infiniband/ulp/srp/ib_srp.h
984     +++ b/drivers/infiniband/ulp/srp/ib_srp.h
985     @@ -140,6 +140,7 @@ struct srp_target_port {
986     unsigned int cmd_sg_cnt;
987     unsigned int indirect_size;
988     bool allow_ext_sg;
989     + bool transport_offline;
990    
991     /* Everything above this point is used in the hot path of
992     * command processing. Try to keep them packed into cachelines.
993     diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
994     index faf10ba..b6ecddb 100644
995     --- a/drivers/iommu/amd_iommu_init.c
996     +++ b/drivers/iommu/amd_iommu_init.c
997     @@ -1876,11 +1876,6 @@ static int amd_iommu_init_dma(void)
998     struct amd_iommu *iommu;
999     int ret;
1000    
1001     - init_device_table_dma();
1002     -
1003     - for_each_iommu(iommu)
1004     - iommu_flush_all_caches(iommu);
1005     -
1006     if (iommu_pass_through)
1007     ret = amd_iommu_init_passthrough();
1008     else
1009     @@ -1889,6 +1884,11 @@ static int amd_iommu_init_dma(void)
1010     if (ret)
1011     return ret;
1012    
1013     + init_device_table_dma();
1014     +
1015     + for_each_iommu(iommu)
1016     + iommu_flush_all_caches(iommu);
1017     +
1018     amd_iommu_init_api();
1019    
1020     amd_iommu_init_notifier();
1021     diff --git a/drivers/media/pci/cx18/cx18-alsa-main.c b/drivers/media/pci/cx18/cx18-alsa-main.c
1022     index 8e971ff..b2c8c34 100644
1023     --- a/drivers/media/pci/cx18/cx18-alsa-main.c
1024     +++ b/drivers/media/pci/cx18/cx18-alsa-main.c
1025     @@ -197,7 +197,7 @@ err_exit:
1026     return ret;
1027     }
1028    
1029     -static int __init cx18_alsa_load(struct cx18 *cx)
1030     +static int cx18_alsa_load(struct cx18 *cx)
1031     {
1032     struct v4l2_device *v4l2_dev = &cx->v4l2_dev;
1033     struct cx18_stream *s;
1034     diff --git a/drivers/media/pci/cx18/cx18-alsa-pcm.h b/drivers/media/pci/cx18/cx18-alsa-pcm.h
1035     index d26e51f..e2b2c5b 100644
1036     --- a/drivers/media/pci/cx18/cx18-alsa-pcm.h
1037     +++ b/drivers/media/pci/cx18/cx18-alsa-pcm.h
1038     @@ -20,7 +20,7 @@
1039     * 02111-1307 USA
1040     */
1041    
1042     -int __init snd_cx18_pcm_create(struct snd_cx18_card *cxsc);
1043     +int snd_cx18_pcm_create(struct snd_cx18_card *cxsc);
1044    
1045     /* Used by cx18-mailbox to announce the PCM data to the module */
1046     void cx18_alsa_announce_pcm_data(struct snd_cx18_card *card, u8 *pcm_data,
1047     diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c
1048     index 4a221c6..e970cfa 100644
1049     --- a/drivers/media/pci/ivtv/ivtv-alsa-main.c
1050     +++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c
1051     @@ -205,7 +205,7 @@ err_exit:
1052     return ret;
1053     }
1054    
1055     -static int __init ivtv_alsa_load(struct ivtv *itv)
1056     +static int ivtv_alsa_load(struct ivtv *itv)
1057     {
1058     struct v4l2_device *v4l2_dev = &itv->v4l2_dev;
1059     struct ivtv_stream *s;
1060     diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.h b/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
1061     index 23dfe0d..186814e 100644
1062     --- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
1063     +++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
1064     @@ -20,4 +20,4 @@
1065     * 02111-1307 USA
1066     */
1067    
1068     -int __init snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc);
1069     +int snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc);
1070     diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
1071     index 35cc526..8e9a668 100644
1072     --- a/drivers/media/platform/omap/omap_vout.c
1073     +++ b/drivers/media/platform/omap/omap_vout.c
1074     @@ -205,19 +205,21 @@ static u32 omap_vout_uservirt_to_phys(u32 virtp)
1075     struct vm_area_struct *vma;
1076     struct mm_struct *mm = current->mm;
1077    
1078     - vma = find_vma(mm, virtp);
1079     /* For kernel direct-mapped memory, take the easy way */
1080     - if (virtp >= PAGE_OFFSET) {
1081     - physp = virt_to_phys((void *) virtp);
1082     - } else if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
1083     + if (virtp >= PAGE_OFFSET)
1084     + return virt_to_phys((void *) virtp);
1085     +
1086     + down_read(&current->mm->mmap_sem);
1087     + vma = find_vma(mm, virtp);
1088     + if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
1089     /* this will catch, kernel-allocated, mmaped-to-usermode
1090     addresses */
1091     physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
1092     + up_read(&current->mm->mmap_sem);
1093     } else {
1094     /* otherwise, use get_user_pages() for general userland pages */
1095     int res, nr_pages = 1;
1096     struct page *pages;
1097     - down_read(&current->mm->mmap_sem);
1098    
1099     res = get_user_pages(current, current->mm, virtp, nr_pages, 1,
1100     0, &pages, NULL);
1101     diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
1102     index 601d1ac1..d593bc6 100644
1103     --- a/drivers/media/rc/rc-main.c
1104     +++ b/drivers/media/rc/rc-main.c
1105     @@ -789,8 +789,10 @@ static ssize_t show_protocols(struct device *device,
1106     } else if (dev->raw) {
1107     enabled = dev->raw->enabled_protocols;
1108     allowed = ir_raw_get_allowed_protocols();
1109     - } else
1110     + } else {
1111     + mutex_unlock(&dev->lock);
1112     return -ENODEV;
1113     + }
1114    
1115     IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n",
1116     (long long)allowed,
1117     diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
1118     index 513969f..98a7f5e 100644
1119     --- a/drivers/media/v4l2-core/v4l2-device.c
1120     +++ b/drivers/media/v4l2-core/v4l2-device.c
1121     @@ -159,31 +159,21 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
1122     sd->v4l2_dev = v4l2_dev;
1123     if (sd->internal_ops && sd->internal_ops->registered) {
1124     err = sd->internal_ops->registered(sd);
1125     - if (err) {
1126     - module_put(sd->owner);
1127     - return err;
1128     - }
1129     + if (err)
1130     + goto error_module;
1131     }
1132    
1133     /* This just returns 0 if either of the two args is NULL */
1134     err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, NULL);
1135     - if (err) {
1136     - if (sd->internal_ops && sd->internal_ops->unregistered)
1137     - sd->internal_ops->unregistered(sd);
1138     - module_put(sd->owner);
1139     - return err;
1140     - }
1141     + if (err)
1142     + goto error_unregister;
1143    
1144     #if defined(CONFIG_MEDIA_CONTROLLER)
1145     /* Register the entity. */
1146     if (v4l2_dev->mdev) {
1147     err = media_device_register_entity(v4l2_dev->mdev, entity);
1148     - if (err < 0) {
1149     - if (sd->internal_ops && sd->internal_ops->unregistered)
1150     - sd->internal_ops->unregistered(sd);
1151     - module_put(sd->owner);
1152     - return err;
1153     - }
1154     + if (err < 0)
1155     + goto error_unregister;
1156     }
1157     #endif
1158    
1159     @@ -192,6 +182,14 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
1160     spin_unlock(&v4l2_dev->lock);
1161    
1162     return 0;
1163     +
1164     +error_unregister:
1165     + if (sd->internal_ops && sd->internal_ops->unregistered)
1166     + sd->internal_ops->unregistered(sd);
1167     +error_module:
1168     + module_put(sd->owner);
1169     + sd->v4l2_dev = NULL;
1170     + return err;
1171     }
1172     EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);
1173    
1174     diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
1175     index 806e34c..0568273 100644
1176     --- a/drivers/net/wireless/b43/main.c
1177     +++ b/drivers/net/wireless/b43/main.c
1178     @@ -4214,7 +4214,6 @@ redo:
1179     mutex_unlock(&wl->mutex);
1180     cancel_delayed_work_sync(&dev->periodic_work);
1181     cancel_work_sync(&wl->tx_work);
1182     - cancel_work_sync(&wl->firmware_load);
1183     mutex_lock(&wl->mutex);
1184     dev = wl->current_dev;
1185     if (!dev || b43_status(dev) < B43_STAT_STARTED) {
1186     @@ -5434,6 +5433,7 @@ static void b43_bcma_remove(struct bcma_device *core)
1187     /* We must cancel any work here before unregistering from ieee80211,
1188     * as the ieee80211 unreg will destroy the workqueue. */
1189     cancel_work_sync(&wldev->restart_work);
1190     + cancel_work_sync(&wl->firmware_load);
1191    
1192     B43_WARN_ON(!wl);
1193     if (!wldev->fw.ucode.data)
1194     @@ -5510,6 +5510,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
1195     /* We must cancel any work here before unregistering from ieee80211,
1196     * as the ieee80211 unreg will destroy the workqueue. */
1197     cancel_work_sync(&wldev->restart_work);
1198     + cancel_work_sync(&wl->firmware_load);
1199    
1200     B43_WARN_ON(!wl);
1201     if (!wldev->fw.ucode.data)
1202     diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
1203     index 20e2a7d..056222e 100644
1204     --- a/drivers/power/ab8500_btemp.c
1205     +++ b/drivers/power/ab8500_btemp.c
1206     @@ -1123,7 +1123,7 @@ static void __exit ab8500_btemp_exit(void)
1207     platform_driver_unregister(&ab8500_btemp_driver);
1208     }
1209    
1210     -subsys_initcall_sync(ab8500_btemp_init);
1211     +device_initcall(ab8500_btemp_init);
1212     module_exit(ab8500_btemp_exit);
1213    
1214     MODULE_LICENSE("GPL v2");
1215     diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c
1216     index 2970891..eb7b4a6 100644
1217     --- a/drivers/power/abx500_chargalg.c
1218     +++ b/drivers/power/abx500_chargalg.c
1219     @@ -1698,7 +1698,7 @@ static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj,
1220     static struct attribute abx500_chargalg_en_charger = \
1221     {
1222     .name = "chargalg",
1223     - .mode = S_IWUGO,
1224     + .mode = S_IWUSR,
1225     };
1226    
1227     static struct attribute *abx500_chargalg_chg[] = {
1228     diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
1229     index 36b34ef..7087d0d 100644
1230     --- a/drivers/power/bq27x00_battery.c
1231     +++ b/drivers/power/bq27x00_battery.c
1232     @@ -448,7 +448,6 @@ static void bq27x00_update(struct bq27x00_device_info *di)
1233     cache.temperature = bq27x00_battery_read_temperature(di);
1234     if (!is_bq27425)
1235     cache.cycle_count = bq27x00_battery_read_cyct(di);
1236     - cache.cycle_count = bq27x00_battery_read_cyct(di);
1237     cache.power_avg =
1238     bq27x00_battery_read_pwr_avg(di, BQ27x00_POWER_AVG);
1239    
1240     @@ -696,7 +695,6 @@ static int bq27x00_powersupply_init(struct bq27x00_device_info *di)
1241     int ret;
1242    
1243     di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
1244     - di->chip = BQ27425;
1245     if (di->chip == BQ27425) {
1246     di->bat.properties = bq27425_battery_props;
1247     di->bat.num_properties = ARRAY_SIZE(bq27425_battery_props);
1248     diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
1249     index 8f14c42..6894b3e 100644
1250     --- a/drivers/staging/comedi/comedi_fops.c
1251     +++ b/drivers/staging/comedi/comedi_fops.c
1252     @@ -1779,7 +1779,7 @@ static unsigned int comedi_poll(struct file *file, poll_table *wait)
1253    
1254     mask = 0;
1255     read_subdev = comedi_get_read_subdevice(dev_file_info);
1256     - if (read_subdev) {
1257     + if (read_subdev && read_subdev->async) {
1258     poll_wait(file, &read_subdev->async->wait_head, wait);
1259     if (!read_subdev->busy
1260     || comedi_buf_read_n_available(read_subdev->async) > 0
1261     @@ -1789,7 +1789,7 @@ static unsigned int comedi_poll(struct file *file, poll_table *wait)
1262     }
1263     }
1264     write_subdev = comedi_get_write_subdevice(dev_file_info);
1265     - if (write_subdev) {
1266     + if (write_subdev && write_subdev->async) {
1267     poll_wait(file, &write_subdev->async->wait_head, wait);
1268     comedi_buf_write_alloc(write_subdev->async,
1269     write_subdev->async->prealloc_bufsz);
1270     @@ -1831,7 +1831,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
1271     }
1272    
1273     s = comedi_get_write_subdevice(dev_file_info);
1274     - if (s == NULL) {
1275     + if (s == NULL || s->async == NULL) {
1276     retval = -EIO;
1277     goto done;
1278     }
1279     @@ -1942,7 +1942,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
1280     }
1281    
1282     s = comedi_get_read_subdevice(dev_file_info);
1283     - if (s == NULL) {
1284     + if (s == NULL || s->async == NULL) {
1285     retval = -EIO;
1286     goto done;
1287     }
1288     diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
1289     index f2aa754..96f4981 100644
1290     --- a/drivers/target/target_core_device.c
1291     +++ b/drivers/target/target_core_device.c
1292     @@ -1182,24 +1182,18 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
1293    
1294     struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
1295     struct se_portal_group *tpg,
1296     + struct se_node_acl *nacl,
1297     u32 mapped_lun,
1298     - char *initiatorname,
1299     int *ret)
1300     {
1301     struct se_lun_acl *lacl;
1302     - struct se_node_acl *nacl;
1303    
1304     - if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
1305     + if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
1306     pr_err("%s InitiatorName exceeds maximum size.\n",
1307     tpg->se_tpg_tfo->get_fabric_name());
1308     *ret = -EOVERFLOW;
1309     return NULL;
1310     }
1311     - nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
1312     - if (!nacl) {
1313     - *ret = -EINVAL;
1314     - return NULL;
1315     - }
1316     lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
1317     if (!lacl) {
1318     pr_err("Unable to allocate memory for struct se_lun_acl.\n");
1319     @@ -1210,7 +1204,8 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
1320     INIT_LIST_HEAD(&lacl->lacl_list);
1321     lacl->mapped_lun = mapped_lun;
1322     lacl->se_lun_nacl = nacl;
1323     - snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
1324     + snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
1325     + nacl->initiatorname);
1326    
1327     return lacl;
1328     }
1329     diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
1330     index c57bbbc..04c775c 100644
1331     --- a/drivers/target/target_core_fabric_configfs.c
1332     +++ b/drivers/target/target_core_fabric_configfs.c
1333     @@ -354,9 +354,17 @@ static struct config_group *target_fabric_make_mappedlun(
1334     ret = -EINVAL;
1335     goto out;
1336     }
1337     + if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1338     + pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG"
1339     + "-1: %u for Target Portal Group: %u\n", mapped_lun,
1340     + TRANSPORT_MAX_LUNS_PER_TPG-1,
1341     + se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
1342     + ret = -EINVAL;
1343     + goto out;
1344     + }
1345    
1346     - lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
1347     - config_item_name(acl_ci), &ret);
1348     + lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl,
1349     + mapped_lun, &ret);
1350     if (!lacl) {
1351     ret = -EINVAL;
1352     goto out;
1353     diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
1354     index 93e9c1f..396e1eb 100644
1355     --- a/drivers/target/target_core_internal.h
1356     +++ b/drivers/target/target_core_internal.h
1357     @@ -45,7 +45,7 @@ struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u3
1358     int core_dev_del_lun(struct se_portal_group *, u32);
1359     struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
1360     struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
1361     - u32, char *, int *);
1362     + struct se_node_acl *, u32, int *);
1363     int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
1364     struct se_lun_acl *, u32, u32);
1365     int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
1366     diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
1367     index 5192ac0..9169d6a 100644
1368     --- a/drivers/target/target_core_tpg.c
1369     +++ b/drivers/target/target_core_tpg.c
1370     @@ -111,16 +111,10 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
1371     struct se_node_acl *acl;
1372    
1373     spin_lock_irq(&tpg->acl_node_lock);
1374     - list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
1375     - if (!strcmp(acl->initiatorname, initiatorname) &&
1376     - !acl->dynamic_node_acl) {
1377     - spin_unlock_irq(&tpg->acl_node_lock);
1378     - return acl;
1379     - }
1380     - }
1381     + acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
1382     spin_unlock_irq(&tpg->acl_node_lock);
1383    
1384     - return NULL;
1385     + return acl;
1386     }
1387    
1388     /* core_tpg_add_node_to_devs():
1389     diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
1390     index 4999563..1dae91d 100644
1391     --- a/drivers/usb/dwc3/core.h
1392     +++ b/drivers/usb/dwc3/core.h
1393     @@ -405,7 +405,6 @@ struct dwc3_event_buffer {
1394     * @number: endpoint number (1 - 15)
1395     * @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
1396     * @resource_index: Resource transfer index
1397     - * @current_uf: Current uf received through last event parameter
1398     * @interval: the intervall on which the ISOC transfer is started
1399     * @name: a human readable name e.g. ep1out-bulk
1400     * @direction: true for TX, false for RX
1401     @@ -439,7 +438,6 @@ struct dwc3_ep {
1402     u8 number;
1403     u8 type;
1404     u8 resource_index;
1405     - u16 current_uf;
1406     u32 interval;
1407    
1408     char name[20];
1409     diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
1410     index 2fdd767..09835b6 100644
1411     --- a/drivers/usb/dwc3/gadget.c
1412     +++ b/drivers/usb/dwc3/gadget.c
1413     @@ -754,21 +754,18 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
1414     struct dwc3 *dwc = dep->dwc;
1415     struct dwc3_trb *trb;
1416    
1417     - unsigned int cur_slot;
1418     -
1419     dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
1420     dep->name, req, (unsigned long long) dma,
1421     length, last ? " last" : "",
1422     chain ? " chain" : "");
1423    
1424     - trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
1425     - cur_slot = dep->free_slot;
1426     - dep->free_slot++;
1427     -
1428     /* Skip the LINK-TRB on ISOC */
1429     - if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
1430     + if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
1431     usb_endpoint_xfer_isoc(dep->endpoint.desc))
1432     - return;
1433     + dep->free_slot++;
1434     +
1435     + trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
1436     + dep->free_slot++;
1437    
1438     if (!req->trb) {
1439     dwc3_gadget_move_request_queued(req);
1440     @@ -1091,7 +1088,10 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1441     * notion of current microframe.
1442     */
1443     if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1444     - dwc3_stop_active_transfer(dwc, dep->number);
1445     + if (list_empty(&dep->req_queued)) {
1446     + dwc3_stop_active_transfer(dwc, dep->number);
1447     + dep->flags = DWC3_EP_ENABLED;
1448     + }
1449     return 0;
1450     }
1451    
1452     @@ -1117,16 +1117,6 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1453     dep->name);
1454     }
1455    
1456     - /*
1457     - * 3. Missed ISOC Handling. We need to start isoc transfer on the saved
1458     - * uframe number.
1459     - */
1460     - if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1461     - (dep->flags & DWC3_EP_MISSED_ISOC)) {
1462     - __dwc3_gadget_start_isoc(dwc, dep, dep->current_uf);
1463     - dep->flags &= ~DWC3_EP_MISSED_ISOC;
1464     - }
1465     -
1466     return 0;
1467     }
1468    
1469     @@ -1689,14 +1679,29 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1470     if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1471     dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1472     dep->name);
1473     - dep->current_uf = event->parameters &
1474     - ~(dep->interval - 1);
1475     + /*
1476     + * If missed isoc occurred and there is
1477     + * no request queued then issue END
1478     + * TRANSFER, so that core generates
1479     + * next xfernotready and we will issue
1480     + * a fresh START TRANSFER.
1481     + * If there are still queued requests
1482     + * then wait, do not issue either END
1483     + * or UPDATE TRANSFER, just attach the next
1484     + * request in request_list during
1485     + * giveback. If any future queued request
1486     + * is successfully transferred then we
1487     + * will issue UPDATE TRANSFER for all
1488     + * requests in the request_list.
1489     + */
1490     dep->flags |= DWC3_EP_MISSED_ISOC;
1491     } else {
1492     dev_err(dwc->dev, "incomplete IN transfer %s\n",
1493     dep->name);
1494     status = -ECONNRESET;
1495     }
1496     + } else {
1497     + dep->flags &= ~DWC3_EP_MISSED_ISOC;
1498     }
1499     } else {
1500     if (count && (event->status & DEPEVT_STATUS_SHORT))
1501     @@ -1723,6 +1728,23 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1502     break;
1503     } while (1);
1504    
1505     + if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1506     + list_empty(&dep->req_queued)) {
1507     + if (list_empty(&dep->request_list)) {
1508     + /*
1509     + * If there is no entry in request list then do
1510     + * not issue END TRANSFER now. Just set PENDING
1511     + * flag, so that END TRANSFER is issued when an
1512     + * entry is added into request list.
1513     + */
1514     + dep->flags = DWC3_EP_PENDING_REQUEST;
1515     + } else {
1516     + dwc3_stop_active_transfer(dwc, dep->number);
1517     + dep->flags = DWC3_EP_ENABLED;
1518     + }
1519     + return 1;
1520     + }
1521     +
1522     if ((event->status & DEPEVT_STATUS_IOC) &&
1523     (trb->ctrl & DWC3_TRB_CTRL_IOC))
1524     return 0;
1525     @@ -2157,6 +2179,26 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
1526     break;
1527     }
1528    
1529     + /* Enable USB2 LPM Capability */
1530     +
1531     + if ((dwc->revision > DWC3_REVISION_194A)
1532     + && (speed != DWC3_DCFG_SUPERSPEED)) {
1533     + reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1534     + reg |= DWC3_DCFG_LPM_CAP;
1535     + dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1536     +
1537     + reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1538     + reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
1539     +
1540     + /*
1541     + * TODO: This should be configurable. For now using
1542     + * maximum allowed HIRD threshold value of 0b1100
1543     + */
1544     + reg |= DWC3_DCTL_HIRD_THRES(12);
1545     +
1546     + dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1547     + }
1548     +
1549     /* Recent versions support automatic phy suspend and don't need this */
1550     if (dwc->revision < DWC3_REVISION_194A) {
1551     /* Suspend unneeded PHY */
1552     @@ -2463,20 +2505,8 @@ int dwc3_gadget_init(struct dwc3 *dwc)
1553     DWC3_DEVTEN_DISCONNEVTEN);
1554     dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1555    
1556     - /* Enable USB2 LPM and automatic phy suspend only on recent versions */
1557     + /* automatic phy suspend only on recent versions */
1558     if (dwc->revision >= DWC3_REVISION_194A) {
1559     - reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1560     - reg |= DWC3_DCFG_LPM_CAP;
1561     - dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1562     -
1563     - reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1564     - reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
1565     -
1566     - /* TODO: This should be configurable */
1567     - reg |= DWC3_DCTL_HIRD_THRES(28);
1568     -
1569     - dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1570     -
1571     dwc3_gadget_usb2_phy_suspend(dwc, false);
1572     dwc3_gadget_usb3_phy_suspend(dwc, false);
1573     }
1574     diff --git a/fs/direct-io.c b/fs/direct-io.c
1575     index cf5b44b..f853263 100644
1576     --- a/fs/direct-io.c
1577     +++ b/fs/direct-io.c
1578     @@ -261,9 +261,9 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is
1579     dio->end_io(dio->iocb, offset, transferred,
1580     dio->private, ret, is_async);
1581     } else {
1582     + inode_dio_done(dio->inode);
1583     if (is_async)
1584     aio_complete(dio->iocb, ret, 0);
1585     - inode_dio_done(dio->inode);
1586     }
1587    
1588     return ret;
1589     diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
1590     index cf18217..2f2e0da 100644
1591     --- a/fs/ext4/balloc.c
1592     +++ b/fs/ext4/balloc.c
1593     @@ -358,7 +358,7 @@ void ext4_validate_block_bitmap(struct super_block *sb,
1594     }
1595    
1596     /**
1597     - * ext4_read_block_bitmap()
1598     + * ext4_read_block_bitmap_nowait()
1599     * @sb: super block
1600     * @block_group: given block group
1601     *
1602     @@ -457,6 +457,8 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
1603     struct buffer_head *bh;
1604    
1605     bh = ext4_read_block_bitmap_nowait(sb, block_group);
1606     + if (!bh)
1607     + return NULL;
1608     if (ext4_wait_block_bitmap(sb, block_group, bh)) {
1609     put_bh(bh);
1610     return NULL;
1611     @@ -482,11 +484,16 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
1612    
1613     free_clusters = percpu_counter_read_positive(fcc);
1614     dirty_clusters = percpu_counter_read_positive(dcc);
1615     - root_clusters = EXT4_B2C(sbi, ext4_r_blocks_count(sbi->s_es));
1616     +
1617     + /*
1618     + * r_blocks_count should always be a multiple of the cluster ratio so
1619     + * we are safe to do a plain bit shift only.
1620     + */
1621     + root_clusters = ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;
1622    
1623     if (free_clusters - (nclusters + root_clusters + dirty_clusters) <
1624     EXT4_FREECLUSTERS_WATERMARK) {
1625     - free_clusters = EXT4_C2B(sbi, percpu_counter_sum_positive(fcc));
1626     + free_clusters = percpu_counter_sum_positive(fcc);
1627     dirty_clusters = percpu_counter_sum_positive(dcc);
1628     }
1629     /* Check whether we have space after accounting for current
1630     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
1631     index 5ae1674..d42a8c4 100644
1632     --- a/fs/ext4/extents.c
1633     +++ b/fs/ext4/extents.c
1634     @@ -725,6 +725,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
1635     struct ext4_extent_header *eh;
1636     struct buffer_head *bh;
1637     short int depth, i, ppos = 0, alloc = 0;
1638     + int ret;
1639    
1640     eh = ext_inode_hdr(inode);
1641     depth = ext_depth(inode);
1642     @@ -752,12 +753,15 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
1643     path[ppos].p_ext = NULL;
1644    
1645     bh = sb_getblk(inode->i_sb, path[ppos].p_block);
1646     - if (unlikely(!bh))
1647     + if (unlikely(!bh)) {
1648     + ret = -ENOMEM;
1649     goto err;
1650     + }
1651     if (!bh_uptodate_or_lock(bh)) {
1652     trace_ext4_ext_load_extent(inode, block,
1653     path[ppos].p_block);
1654     - if (bh_submit_read(bh) < 0) {
1655     + ret = bh_submit_read(bh);
1656     + if (ret < 0) {
1657     put_bh(bh);
1658     goto err;
1659     }
1660     @@ -768,13 +772,15 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
1661     put_bh(bh);
1662     EXT4_ERROR_INODE(inode,
1663     "ppos %d > depth %d", ppos, depth);
1664     + ret = -EIO;
1665     goto err;
1666     }
1667     path[ppos].p_bh = bh;
1668     path[ppos].p_hdr = eh;
1669     i--;
1670    
1671     - if (ext4_ext_check_block(inode, eh, i, bh))
1672     + ret = ext4_ext_check_block(inode, eh, i, bh);
1673     + if (ret < 0)
1674     goto err;
1675     }
1676    
1677     @@ -796,7 +802,7 @@ err:
1678     ext4_ext_drop_refs(path);
1679     if (alloc)
1680     kfree(path);
1681     - return ERR_PTR(-EIO);
1682     + return ERR_PTR(ret);
1683     }
1684    
1685     /*
1686     @@ -951,7 +957,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
1687     }
1688     bh = sb_getblk(inode->i_sb, newblock);
1689     if (!bh) {
1690     - err = -EIO;
1691     + err = -ENOMEM;
1692     goto cleanup;
1693     }
1694     lock_buffer(bh);
1695     @@ -1024,7 +1030,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
1696     newblock = ablocks[--a];
1697     bh = sb_getblk(inode->i_sb, newblock);
1698     if (!bh) {
1699     - err = -EIO;
1700     + err = -ENOMEM;
1701     goto cleanup;
1702     }
1703     lock_buffer(bh);
1704     @@ -1136,11 +1142,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
1705     return err;
1706    
1707     bh = sb_getblk(inode->i_sb, newblock);
1708     - if (!bh) {
1709     - err = -EIO;
1710     - ext4_std_error(inode->i_sb, err);
1711     - return err;
1712     - }
1713     + if (!bh)
1714     + return -ENOMEM;
1715     lock_buffer(bh);
1716    
1717     err = ext4_journal_get_create_access(handle, bh);
1718     diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
1719     index 20862f9..8d83d1e 100644
1720     --- a/fs/ext4/indirect.c
1721     +++ b/fs/ext4/indirect.c
1722     @@ -146,6 +146,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
1723     struct super_block *sb = inode->i_sb;
1724     Indirect *p = chain;
1725     struct buffer_head *bh;
1726     + int ret = -EIO;
1727    
1728     *err = 0;
1729     /* i_data is not going away, no lock needed */
1730     @@ -154,8 +155,10 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
1731     goto no_block;
1732     while (--depth) {
1733     bh = sb_getblk(sb, le32_to_cpu(p->key));
1734     - if (unlikely(!bh))
1735     + if (unlikely(!bh)) {
1736     + ret = -ENOMEM;
1737     goto failure;
1738     + }
1739    
1740     if (!bh_uptodate_or_lock(bh)) {
1741     if (bh_submit_read(bh) < 0) {
1742     @@ -177,7 +180,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
1743     return NULL;
1744    
1745     failure:
1746     - *err = -EIO;
1747     + *err = ret;
1748     no_block:
1749     return p;
1750     }
1751     @@ -471,7 +474,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
1752     */
1753     bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
1754     if (unlikely(!bh)) {
1755     - err = -EIO;
1756     + err = -ENOMEM;
1757     goto failed;
1758     }
1759    
1760     diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
1761     index 387c47c..93a3408 100644
1762     --- a/fs/ext4/inline.c
1763     +++ b/fs/ext4/inline.c
1764     @@ -1188,7 +1188,7 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
1765    
1766     data_bh = sb_getblk(inode->i_sb, map.m_pblk);
1767     if (!data_bh) {
1768     - error = -EIO;
1769     + error = -ENOMEM;
1770     goto out_restore;
1771     }
1772    
1773     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1774     index cbfe13b..39f1fa7 100644
1775     --- a/fs/ext4/inode.c
1776     +++ b/fs/ext4/inode.c
1777     @@ -714,7 +714,7 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
1778    
1779     bh = sb_getblk(inode->i_sb, map.m_pblk);
1780     if (!bh) {
1781     - *errp = -EIO;
1782     + *errp = -ENOMEM;
1783     return NULL;
1784     }
1785     if (map.m_flags & EXT4_MAP_NEW) {
1786     @@ -2977,9 +2977,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
1787     if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
1788     ext4_free_io_end(io_end);
1789     out:
1790     + inode_dio_done(inode);
1791     if (is_async)
1792     aio_complete(iocb, ret, 0);
1793     - inode_dio_done(inode);
1794     return;
1795     }
1796    
1797     @@ -3660,11 +3660,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
1798     iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
1799    
1800     bh = sb_getblk(sb, block);
1801     - if (!bh) {
1802     - EXT4_ERROR_INODE_BLOCK(inode, block,
1803     - "unable to read itable block");
1804     - return -EIO;
1805     - }
1806     + if (!bh)
1807     + return -ENOMEM;
1808     if (!buffer_uptodate(bh)) {
1809     lock_buffer(bh);
1810    
1811     diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
1812     index 1bf6fe7..061727a 100644
1813     --- a/fs/ext4/mballoc.c
1814     +++ b/fs/ext4/mballoc.c
1815     @@ -4136,7 +4136,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
1816     /* The max size of hash table is PREALLOC_TB_SIZE */
1817     order = PREALLOC_TB_SIZE - 1;
1818     /* Add the prealloc space to lg */
1819     - rcu_read_lock();
1820     + spin_lock(&lg->lg_prealloc_lock);
1821     list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
1822     pa_inode_list) {
1823     spin_lock(&tmp_pa->pa_lock);
1824     @@ -4160,12 +4160,12 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
1825     if (!added)
1826     list_add_tail_rcu(&pa->pa_inode_list,
1827     &lg->lg_prealloc_list[order]);
1828     - rcu_read_unlock();
1829     + spin_unlock(&lg->lg_prealloc_lock);
1830    
1831     /* Now trim the list to be not more than 8 elements */
1832     if (lg_prealloc_count > 8) {
1833     ext4_mb_discard_lg_preallocations(sb, lg,
1834     - order, lg_prealloc_count);
1835     + order, lg_prealloc_count);
1836     return;
1837     }
1838     return ;
1839     diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
1840     index fe7c63f..44734f1 100644
1841     --- a/fs/ext4/mmp.c
1842     +++ b/fs/ext4/mmp.c
1843     @@ -80,6 +80,8 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
1844     * is not blocked in the elevator. */
1845     if (!*bh)
1846     *bh = sb_getblk(sb, mmp_block);
1847     + if (!*bh)
1848     + return -ENOMEM;
1849     if (*bh) {
1850     get_bh(*bh);
1851     lock_buffer(*bh);
1852     diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
1853     index 0016fbc..b42d04f 100644
1854     --- a/fs/ext4/page-io.c
1855     +++ b/fs/ext4/page-io.c
1856     @@ -103,14 +103,13 @@ static int ext4_end_io(ext4_io_end_t *io)
1857     "(inode %lu, offset %llu, size %zd, error %d)",
1858     inode->i_ino, offset, size, ret);
1859     }
1860     - if (io->iocb)
1861     - aio_complete(io->iocb, io->result, 0);
1862     -
1863     - if (io->flag & EXT4_IO_END_DIRECT)
1864     - inode_dio_done(inode);
1865     /* Wake up anyone waiting on unwritten extent conversion */
1866     if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
1867     wake_up_all(ext4_ioend_wq(inode));
1868     + if (io->flag & EXT4_IO_END_DIRECT)
1869     + inode_dio_done(inode);
1870     + if (io->iocb)
1871     + aio_complete(io->iocb, io->result, 0);
1872     return ret;
1873     }
1874    
1875     diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
1876     index d99387b..02824dc 100644
1877     --- a/fs/ext4/resize.c
1878     +++ b/fs/ext4/resize.c
1879     @@ -334,7 +334,7 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
1880    
1881     bh = sb_getblk(sb, blk);
1882     if (!bh)
1883     - return ERR_PTR(-EIO);
1884     + return ERR_PTR(-ENOMEM);
1885     if ((err = ext4_journal_get_write_access(handle, bh))) {
1886     brelse(bh);
1887     bh = ERR_PTR(err);
1888     @@ -411,7 +411,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
1889    
1890     bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
1891     if (!bh)
1892     - return -EIO;
1893     + return -ENOMEM;
1894    
1895     err = ext4_journal_get_write_access(handle, bh);
1896     if (err)
1897     @@ -501,7 +501,7 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
1898    
1899     gdb = sb_getblk(sb, block);
1900     if (!gdb) {
1901     - err = -EIO;
1902     + err = -ENOMEM;
1903     goto out;
1904     }
1905    
1906     @@ -1065,7 +1065,7 @@ static void update_backups(struct super_block *sb, int blk_off, char *data,
1907    
1908     bh = sb_getblk(sb, backup_block);
1909     if (!bh) {
1910     - err = -EIO;
1911     + err = -ENOMEM;
1912     break;
1913     }
1914     ext4_debug("update metadata backup %llu(+%llu)\n",
1915     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1916     index 3d4fb81..0465f36 100644
1917     --- a/fs/ext4/super.c
1918     +++ b/fs/ext4/super.c
1919     @@ -4008,7 +4008,7 @@ no_journal:
1920     !(sb->s_flags & MS_RDONLY)) {
1921     err = ext4_enable_quotas(sb);
1922     if (err)
1923     - goto failed_mount7;
1924     + goto failed_mount8;
1925     }
1926     #endif /* CONFIG_QUOTA */
1927    
1928     @@ -4035,6 +4035,10 @@ cantfind_ext4:
1929     ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
1930     goto failed_mount;
1931    
1932     +#ifdef CONFIG_QUOTA
1933     +failed_mount8:
1934     + kobject_del(&sbi->s_kobj);
1935     +#endif
1936     failed_mount7:
1937     ext4_unregister_li_request(sb);
1938     failed_mount6:
1939     @@ -5005,9 +5009,9 @@ static int ext4_enable_quotas(struct super_block *sb)
1940     DQUOT_USAGE_ENABLED);
1941     if (err) {
1942     ext4_warning(sb,
1943     - "Failed to enable quota (type=%d) "
1944     - "tracking. Please run e2fsck to fix.",
1945     - type);
1946     + "Failed to enable quota tracking "
1947     + "(type=%d, err=%d). Please run "
1948     + "e2fsck to fix.", type, err);
1949     return err;
1950     }
1951     }
1952     diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
1953     index 3a91ebc..b93846b 100644
1954     --- a/fs/ext4/xattr.c
1955     +++ b/fs/ext4/xattr.c
1956     @@ -549,7 +549,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
1957     error = ext4_handle_dirty_xattr_block(handle, inode, bh);
1958     if (IS_SYNC(inode))
1959     ext4_handle_sync(handle);
1960     - dquot_free_block(inode, 1);
1961     + dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
1962     ea_bdebug(bh, "refcount now=%d; releasing",
1963     le32_to_cpu(BHDR(bh)->h_refcount));
1964     }
1965     @@ -832,7 +832,8 @@ inserted:
1966     else {
1967     /* The old block is released after updating
1968     the inode. */
1969     - error = dquot_alloc_block(inode, 1);
1970     + error = dquot_alloc_block(inode,
1971     + EXT4_C2B(EXT4_SB(sb), 1));
1972     if (error)
1973     goto cleanup;
1974     error = ext4_journal_get_write_access(handle,
1975     @@ -887,16 +888,17 @@ inserted:
1976    
1977     new_bh = sb_getblk(sb, block);
1978     if (!new_bh) {
1979     + error = -ENOMEM;
1980     getblk_failed:
1981     ext4_free_blocks(handle, inode, NULL, block, 1,
1982     EXT4_FREE_BLOCKS_METADATA);
1983     - error = -EIO;
1984     goto cleanup;
1985     }
1986     lock_buffer(new_bh);
1987     error = ext4_journal_get_create_access(handle, new_bh);
1988     if (error) {
1989     unlock_buffer(new_bh);
1990     + error = -EIO;
1991     goto getblk_failed;
1992     }
1993     memcpy(new_bh->b_data, s->base, new_bh->b_size);
1994     @@ -928,7 +930,7 @@ cleanup:
1995     return error;
1996    
1997     cleanup_dquot:
1998     - dquot_free_block(inode, 1);
1999     + dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));
2000     goto cleanup;
2001    
2002     bad_block:
2003     diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
2004     index b7c09f9..315e1f8 100644
2005     --- a/fs/fuse/dir.c
2006     +++ b/fs/fuse/dir.c
2007     @@ -682,7 +682,14 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
2008    
2009     spin_lock(&fc->lock);
2010     fi->attr_version = ++fc->attr_version;
2011     - drop_nlink(inode);
2012     + /*
2013     + * If i_nlink == 0 then unlink doesn't make sense, yet this can
2014     + * happen if userspace filesystem is careless. It would be
2015     + * difficult to enforce correct nlink usage so just ignore this
2016     + * condition here
2017     + */
2018     + if (inode->i_nlink > 0)
2019     + drop_nlink(inode);
2020     spin_unlock(&fc->lock);
2021     fuse_invalidate_attr(inode);
2022     fuse_invalidate_attr(dir);
2023     diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
2024     index ac8ed96c..a8309c6 100644
2025     --- a/fs/nfsd/nfs4state.c
2026     +++ b/fs/nfsd/nfs4state.c
2027     @@ -1060,6 +1060,8 @@ free_client(struct nfs4_client *clp)
2028     }
2029     free_svc_cred(&clp->cl_cred);
2030     kfree(clp->cl_name.data);
2031     + idr_remove_all(&clp->cl_stateids);
2032     + idr_destroy(&clp->cl_stateids);
2033     kfree(clp);
2034     }
2035    
2036     diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
2037     index 6577432..340bd02 100644
2038     --- a/fs/ocfs2/aops.c
2039     +++ b/fs/ocfs2/aops.c
2040     @@ -593,9 +593,9 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
2041     level = ocfs2_iocb_rw_locked_level(iocb);
2042     ocfs2_rw_unlock(inode, level);
2043    
2044     + inode_dio_done(inode);
2045     if (is_async)
2046     aio_complete(iocb, ret, 0);
2047     - inode_dio_done(inode);
2048     }
2049    
2050     /*
2051     diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
2052     index f169da4..b7e74b5 100644
2053     --- a/fs/ocfs2/suballoc.c
2054     +++ b/fs/ocfs2/suballoc.c
2055     @@ -642,7 +642,7 @@ ocfs2_block_group_alloc_discontig(handle_t *handle,
2056     * cluster groups will be staying in cache for the duration of
2057     * this operation.
2058     */
2059     - ac->ac_allow_chain_relink = 0;
2060     + ac->ac_disable_chain_relink = 1;
2061    
2062     /* Claim the first region */
2063     status = ocfs2_block_group_claim_bits(osb, handle, ac, min_bits,
2064     @@ -1823,7 +1823,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
2065     * Do this *after* figuring out how many bits we're taking out
2066     * of our target group.
2067     */
2068     - if (ac->ac_allow_chain_relink &&
2069     + if (!ac->ac_disable_chain_relink &&
2070     (prev_group_bh) &&
2071     (ocfs2_block_group_reasonably_empty(bg, res->sr_bits))) {
2072     status = ocfs2_relink_block_group(handle, alloc_inode,
2073     @@ -1928,7 +1928,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
2074    
2075     victim = ocfs2_find_victim_chain(cl);
2076     ac->ac_chain = victim;
2077     - ac->ac_allow_chain_relink = 1;
2078    
2079     status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
2080     res, &bits_left);
2081     @@ -1947,7 +1946,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
2082     * searching each chain in order. Don't allow chain relinking
2083     * because we only calculate enough journal credits for one
2084     * relink per alloc. */
2085     - ac->ac_allow_chain_relink = 0;
2086     + ac->ac_disable_chain_relink = 1;
2087     for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
2088     if (i == victim)
2089     continue;
2090     diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
2091     index b8afabf..a36d0aa 100644
2092     --- a/fs/ocfs2/suballoc.h
2093     +++ b/fs/ocfs2/suballoc.h
2094     @@ -49,7 +49,7 @@ struct ocfs2_alloc_context {
2095    
2096     /* these are used by the chain search */
2097     u16 ac_chain;
2098     - int ac_allow_chain_relink;
2099     + int ac_disable_chain_relink;
2100     group_search_t *ac_group_search;
2101    
2102     u64 ac_last_group;
2103     diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
2104     index 0ba9ea1..2e3ea30 100644
2105     --- a/fs/ocfs2/xattr.c
2106     +++ b/fs/ocfs2/xattr.c
2107     @@ -7189,7 +7189,7 @@ int ocfs2_init_security_and_acl(struct inode *dir,
2108     struct buffer_head *dir_bh = NULL;
2109    
2110     ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
2111     - if (!ret) {
2112     + if (ret) {
2113     mlog_errno(ret);
2114     goto leave;
2115     }
2116     diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
2117     index 5ea2e77..86d1038 100644
2118     --- a/fs/pstore/platform.c
2119     +++ b/fs/pstore/platform.c
2120     @@ -96,6 +96,27 @@ static const char *get_reason_str(enum kmsg_dump_reason reason)
2121     }
2122     }
2123    
2124     +bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
2125     +{
2126     + /*
2127     + * In case of NMI path, pstore shouldn't be blocked
2128     + * regardless of reason.
2129     + */
2130     + if (in_nmi())
2131     + return true;
2132     +
2133     + switch (reason) {
2134     + /* In panic case, other cpus are stopped by smp_send_stop(). */
2135     + case KMSG_DUMP_PANIC:
2136     + /* Emergency restart shouldn't be blocked by spin lock. */
2137     + case KMSG_DUMP_EMERG:
2138     + return true;
2139     + default:
2140     + return false;
2141     + }
2142     +}
2143     +EXPORT_SYMBOL_GPL(pstore_cannot_block_path);
2144     +
2145     /*
2146     * callback from kmsg_dump. (s2,l2) has the most recently
2147     * written bytes, older bytes are in (s1,l1). Save as much
2148     @@ -114,10 +135,12 @@ static void pstore_dump(struct kmsg_dumper *dumper,
2149    
2150     why = get_reason_str(reason);
2151    
2152     - if (in_nmi()) {
2153     - is_locked = spin_trylock(&psinfo->buf_lock);
2154     - if (!is_locked)
2155     - pr_err("pstore dump routine blocked in NMI, may corrupt error record\n");
2156     + if (pstore_cannot_block_path(reason)) {
2157     + is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags);
2158     + if (!is_locked) {
2159     + pr_err("pstore dump routine blocked in %s path, may corrupt error record\n"
2160     + , in_nmi() ? "NMI" : why);
2161     + }
2162     } else
2163     spin_lock_irqsave(&psinfo->buf_lock, flags);
2164     oopscount++;
2165     @@ -143,9 +166,9 @@ static void pstore_dump(struct kmsg_dumper *dumper,
2166     total += hsize + len;
2167     part++;
2168     }
2169     - if (in_nmi()) {
2170     + if (pstore_cannot_block_path(reason)) {
2171     if (is_locked)
2172     - spin_unlock(&psinfo->buf_lock);
2173     + spin_unlock_irqrestore(&psinfo->buf_lock, flags);
2174     } else
2175     spin_unlock_irqrestore(&psinfo->buf_lock, flags);
2176     }
2177     diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
2178     index 769701c..ba32da3 100644
2179     --- a/fs/ubifs/orphan.c
2180     +++ b/fs/ubifs/orphan.c
2181     @@ -126,13 +126,14 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum)
2182     else if (inum > o->inum)
2183     p = p->rb_right;
2184     else {
2185     - if (o->dnext) {
2186     + if (o->del) {
2187     spin_unlock(&c->orphan_lock);
2188     dbg_gen("deleted twice ino %lu",
2189     (unsigned long)inum);
2190     return;
2191     }
2192     - if (o->cnext) {
2193     + if (o->cmt) {
2194     + o->del = 1;
2195     o->dnext = c->orph_dnext;
2196     c->orph_dnext = o;
2197     spin_unlock(&c->orphan_lock);
2198     @@ -172,7 +173,9 @@ int ubifs_orphan_start_commit(struct ubifs_info *c)
2199     last = &c->orph_cnext;
2200     list_for_each_entry(orphan, &c->orph_new, new_list) {
2201     ubifs_assert(orphan->new);
2202     + ubifs_assert(!orphan->cmt);
2203     orphan->new = 0;
2204     + orphan->cmt = 1;
2205     *last = orphan;
2206     last = &orphan->cnext;
2207     }
2208     @@ -299,7 +302,9 @@ static int write_orph_node(struct ubifs_info *c, int atomic)
2209     cnext = c->orph_cnext;
2210     for (i = 0; i < cnt; i++) {
2211     orphan = cnext;
2212     + ubifs_assert(orphan->cmt);
2213     orph->inos[i] = cpu_to_le64(orphan->inum);
2214     + orphan->cmt = 0;
2215     cnext = orphan->cnext;
2216     orphan->cnext = NULL;
2217     }
2218     @@ -378,6 +383,7 @@ static int consolidate(struct ubifs_info *c)
2219     list_for_each_entry(orphan, &c->orph_list, list) {
2220     if (orphan->new)
2221     continue;
2222     + orphan->cmt = 1;
2223     *last = orphan;
2224     last = &orphan->cnext;
2225     cnt += 1;
2226     @@ -442,6 +448,7 @@ static void erase_deleted(struct ubifs_info *c)
2227     orphan = dnext;
2228     dnext = orphan->dnext;
2229     ubifs_assert(!orphan->new);
2230     + ubifs_assert(orphan->del);
2231     rb_erase(&orphan->rb, &c->orph_tree);
2232     list_del(&orphan->list);
2233     c->tot_orphans -= 1;
2234     @@ -531,6 +538,7 @@ static int insert_dead_orphan(struct ubifs_info *c, ino_t inum)
2235     rb_link_node(&orphan->rb, parent, p);
2236     rb_insert_color(&orphan->rb, &c->orph_tree);
2237     list_add_tail(&orphan->list, &c->orph_list);
2238     + orphan->del = 1;
2239     orphan->dnext = c->orph_dnext;
2240     c->orph_dnext = orphan;
2241     dbg_mnt("ino %lu, new %d, tot %d", (unsigned long)inum,
2242     diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
2243     index d133c27..b2babce 100644
2244     --- a/fs/ubifs/ubifs.h
2245     +++ b/fs/ubifs/ubifs.h
2246     @@ -904,6 +904,8 @@ struct ubifs_budget_req {
2247     * @dnext: next orphan to delete
2248     * @inum: inode number
2249     * @new: %1 => added since the last commit, otherwise %0
2250     + * @cmt: %1 => commit pending, otherwise %0
2251     + * @del: %1 => delete pending, otherwise %0
2252     */
2253     struct ubifs_orphan {
2254     struct rb_node rb;
2255     @@ -912,7 +914,9 @@ struct ubifs_orphan {
2256     struct ubifs_orphan *cnext;
2257     struct ubifs_orphan *dnext;
2258     ino_t inum;
2259     - int new;
2260     + unsigned new:1;
2261     + unsigned cmt:1;
2262     + unsigned del:1;
2263     };
2264    
2265     /**
2266     diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
2267     index cdb2d33..572a858 100644
2268     --- a/fs/xfs/xfs_bmap.c
2269     +++ b/fs/xfs/xfs_bmap.c
2270     @@ -147,7 +147,10 @@ xfs_bmap_local_to_extents(
2271     xfs_fsblock_t *firstblock, /* first block allocated in xaction */
2272     xfs_extlen_t total, /* total blocks needed by transaction */
2273     int *logflagsp, /* inode logging flags */
2274     - int whichfork); /* data or attr fork */
2275     + int whichfork, /* data or attr fork */
2276     + void (*init_fn)(struct xfs_buf *bp,
2277     + struct xfs_inode *ip,
2278     + struct xfs_ifork *ifp));
2279    
2280     /*
2281     * Search the extents list for the inode, for the extent containing bno.
2282     @@ -357,7 +360,42 @@ xfs_bmap_add_attrfork_extents(
2283     }
2284    
2285     /*
2286     - * Called from xfs_bmap_add_attrfork to handle local format files.
2287     + * Block initialisation functions for local to extent format conversion.
2288     + * As these get more complex, they will be moved to the relevant files,
2289     + * but for now they are too simple to worry about.
2290     + */
2291     +STATIC void
2292     +xfs_bmap_local_to_extents_init_fn(
2293     + struct xfs_buf *bp,
2294     + struct xfs_inode *ip,
2295     + struct xfs_ifork *ifp)
2296     +{
2297     + bp->b_ops = &xfs_bmbt_buf_ops;
2298     + memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
2299     +}
2300     +
2301     +STATIC void
2302     +xfs_symlink_local_to_remote(
2303     + struct xfs_buf *bp,
2304     + struct xfs_inode *ip,
2305     + struct xfs_ifork *ifp)
2306     +{
2307     + /* remote symlink blocks are not verifiable until CRCs come along */
2308     + bp->b_ops = NULL;
2309     + memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
2310     +}
2311     +
2312     +/*
2313     + * Called from xfs_bmap_add_attrfork to handle local format files. Each
2314     + * different data fork content type needs a different callout to do the
2315     + * conversion. Some are basic and only require special block initialisation
2316     + * callouts for the data formatting, others (directories) are so specialised they
2317     + * handle everything themselves.
2318     + *
2319     + * XXX (dgc): investigate whether directory conversion can use the generic
2320     + * formatting callout. It should be possible - it's just a very complex
2321     + * formatter. It would also require passing the transaction through to the init
2322     + * function.
2323     */
2324     STATIC int /* error */
2325     xfs_bmap_add_attrfork_local(
2326     @@ -368,25 +406,29 @@ xfs_bmap_add_attrfork_local(
2327     int *flags) /* inode logging flags */
2328     {
2329     xfs_da_args_t dargs; /* args for dir/attr code */
2330     - int error; /* error return value */
2331     - xfs_mount_t *mp; /* mount structure pointer */
2332    
2333     if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
2334     return 0;
2335     +
2336     if (S_ISDIR(ip->i_d.di_mode)) {
2337     - mp = ip->i_mount;
2338     memset(&dargs, 0, sizeof(dargs));
2339     dargs.dp = ip;
2340     dargs.firstblock = firstblock;
2341     dargs.flist = flist;
2342     - dargs.total = mp->m_dirblkfsbs;
2343     + dargs.total = ip->i_mount->m_dirblkfsbs;
2344     dargs.whichfork = XFS_DATA_FORK;
2345     dargs.trans = tp;
2346     - error = xfs_dir2_sf_to_block(&dargs);
2347     - } else
2348     - error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
2349     - XFS_DATA_FORK);
2350     - return error;
2351     + return xfs_dir2_sf_to_block(&dargs);
2352     + }
2353     +
2354     + if (S_ISLNK(ip->i_d.di_mode))
2355     + return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
2356     + flags, XFS_DATA_FORK,
2357     + xfs_symlink_local_to_remote);
2358     +
2359     + return xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
2360     + XFS_DATA_FORK,
2361     + xfs_bmap_local_to_extents_init_fn);
2362     }
2363    
2364     /*
2365     @@ -3221,7 +3263,10 @@ xfs_bmap_local_to_extents(
2366     xfs_fsblock_t *firstblock, /* first block allocated in xaction */
2367     xfs_extlen_t total, /* total blocks needed by transaction */
2368     int *logflagsp, /* inode logging flags */
2369     - int whichfork) /* data or attr fork */
2370     + int whichfork,
2371     + void (*init_fn)(struct xfs_buf *bp,
2372     + struct xfs_inode *ip,
2373     + struct xfs_ifork *ifp))
2374     {
2375     int error; /* error return value */
2376     int flags; /* logging flags returned */
2377     @@ -3241,12 +3286,12 @@ xfs_bmap_local_to_extents(
2378     xfs_buf_t *bp; /* buffer for extent block */
2379     xfs_bmbt_rec_host_t *ep;/* extent record pointer */
2380    
2381     + ASSERT((ifp->if_flags &
2382     + (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
2383     memset(&args, 0, sizeof(args));
2384     args.tp = tp;
2385     args.mp = ip->i_mount;
2386     args.firstblock = *firstblock;
2387     - ASSERT((ifp->if_flags &
2388     - (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
2389     /*
2390     * Allocate a block. We know we need only one, since the
2391     * file currently fits in an inode.
2392     @@ -3262,17 +3307,20 @@ xfs_bmap_local_to_extents(
2393     args.mod = args.minleft = args.alignment = args.wasdel =
2394     args.isfl = args.minalignslop = 0;
2395     args.minlen = args.maxlen = args.prod = 1;
2396     - if ((error = xfs_alloc_vextent(&args)))
2397     + error = xfs_alloc_vextent(&args);
2398     + if (error)
2399     goto done;
2400     - /*
2401     - * Can't fail, the space was reserved.
2402     - */
2403     +
2404     + /* Can't fail, the space was reserved. */
2405     ASSERT(args.fsbno != NULLFSBLOCK);
2406     ASSERT(args.len == 1);
2407     *firstblock = args.fsbno;
2408     bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
2409     - bp->b_ops = &xfs_bmbt_buf_ops;
2410     - memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
2411     +
2412     + /* initialise the block and copy the data */
2413     + init_fn(bp, ip, ifp);
2414     +
2415     + /* account for the change in fork size and log everything */
2416     xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
2417     xfs_bmap_forkoff_reset(args.mp, ip, whichfork);
2418     xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
2419     @@ -4919,8 +4967,32 @@ xfs_bmapi_write(
2420     XFS_STATS_INC(xs_blk_mapw);
2421    
2422     if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
2423     + /*
2424     + * XXX (dgc): This assumes we are only called for inodes that
2425     + * contain content neutral data in local format. Anything that
2426     + * contains caller-specific data in local format that needs
2427     + * transformation to move to a block format needs to do the
2428     + * conversion to extent format itself.
2429     + *
2430     + * Directory data forks and attribute forks handle this
2431     + * themselves, but with the addition of metadata verifiers every
2432     + * data fork in local format now contains caller specific data
2433     + * and as such conversion through this function is likely to be
2434     + * broken.
2435     + *
2436     + * The only likely user of this branch is for remote symlinks,
2437     + * but we cannot overwrite the data fork contents of the symlink
2438     + * (EEXIST occurs higher up the stack) and so it will never go
2439     + * from local format to extent format here. Hence I don't think
2440     + * this branch is ever executed intentionally and we should
2441     + * consider removing it and asserting that xfs_bmapi_write()
2442     + * cannot be called directly on local format forks. i.e. callers
2443     + * are completely responsible for local to extent format
2444     + * conversion, not xfs_bmapi_write().
2445     + */
2446     error = xfs_bmap_local_to_extents(tp, ip, firstblock, total,
2447     - &bma.logflags, whichfork);
2448     + &bma.logflags, whichfork,
2449     + xfs_bmap_local_to_extents_init_fn);
2450     if (error)
2451     goto error0;
2452     }
2453     diff --git a/include/linux/llist.h b/include/linux/llist.h
2454     index d0ab98f..a5199f6 100644
2455     --- a/include/linux/llist.h
2456     +++ b/include/linux/llist.h
2457     @@ -125,31 +125,6 @@ static inline void init_llist_head(struct llist_head *list)
2458     (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
2459    
2460     /**
2461     - * llist_for_each_entry_safe - iterate safely against remove over some entries
2462     - * of lock-less list of given type.
2463     - * @pos: the type * to use as a loop cursor.
2464     - * @n: another type * to use as a temporary storage.
2465     - * @node: the fist entry of deleted list entries.
2466     - * @member: the name of the llist_node with the struct.
2467     - *
2468     - * In general, some entries of the lock-less list can be traversed
2469     - * safely only after being removed from list, so start with an entry
2470     - * instead of list head. This variant allows removal of entries
2471     - * as we iterate.
2472     - *
2473     - * If being used on entries deleted from lock-less list directly, the
2474     - * traverse order is from the newest to the oldest added entry. If
2475     - * you want to traverse from the oldest to the newest, you must
2476     - * reverse the order by yourself before traversing.
2477     - */
2478     -#define llist_for_each_entry_safe(pos, n, node, member) \
2479     - for ((pos) = llist_entry((node), typeof(*(pos)), member), \
2480     - (n) = (pos)->member.next; \
2481     - &(pos)->member != NULL; \
2482     - (pos) = llist_entry(n, typeof(*(pos)), member), \
2483     - (n) = (&(pos)->member != NULL) ? (pos)->member.next : NULL)
2484     -
2485     -/**
2486     * llist_empty - tests whether a lock-less list is empty
2487     * @head: the list to test
2488     *
2489     diff --git a/include/linux/pstore.h b/include/linux/pstore.h
2490     index 1788909..75d0176 100644
2491     --- a/include/linux/pstore.h
2492     +++ b/include/linux/pstore.h
2493     @@ -68,12 +68,18 @@ struct pstore_info {
2494    
2495     #ifdef CONFIG_PSTORE
2496     extern int pstore_register(struct pstore_info *);
2497     +extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
2498     #else
2499     static inline int
2500     pstore_register(struct pstore_info *psi)
2501     {
2502     return -ENODEV;
2503     }
2504     +static inline bool
2505     +pstore_cannot_block_path(enum kmsg_dump_reason reason)
2506     +{
2507     + return false;
2508     +}
2509     #endif
2510    
2511     #endif /*_LINUX_PSTORE_H*/
2512     diff --git a/include/linux/quota.h b/include/linux/quota.h
2513     index 58fdef12..d133711 100644
2514     --- a/include/linux/quota.h
2515     +++ b/include/linux/quota.h
2516     @@ -405,6 +405,7 @@ struct quota_module_name {
2517     #define INIT_QUOTA_MODULE_NAMES {\
2518     {QFMT_VFS_OLD, "quota_v1"},\
2519     {QFMT_VFS_V0, "quota_v2"},\
2520     + {QFMT_VFS_V1, "quota_v2"},\
2521     {0, NULL}}
2522    
2523     #endif /* _QUOTA_ */
2524     diff --git a/kernel/cgroup.c b/kernel/cgroup.c
2525     index 4855892..1e23664 100644
2526     --- a/kernel/cgroup.c
2527     +++ b/kernel/cgroup.c
2528     @@ -426,12 +426,20 @@ static void __put_css_set(struct css_set *cg, int taskexit)
2529     struct cgroup *cgrp = link->cgrp;
2530     list_del(&link->cg_link_list);
2531     list_del(&link->cgrp_link_list);
2532     +
2533     + /*
2534     + * We may not be holding cgroup_mutex, and if cgrp->count is
2535     + * dropped to 0 the cgroup can be destroyed at any time, hence
2536     + * rcu_read_lock is used to keep it alive.
2537     + */
2538     + rcu_read_lock();
2539     if (atomic_dec_and_test(&cgrp->count) &&
2540     notify_on_release(cgrp)) {
2541     if (taskexit)
2542     set_bit(CGRP_RELEASABLE, &cgrp->flags);
2543     check_for_release(cgrp);
2544     }
2545     + rcu_read_unlock();
2546    
2547     kfree(link);
2548     }
2549     diff --git a/kernel/cpuset.c b/kernel/cpuset.c
2550     index 7bb63ee..5bb9bf1 100644
2551     --- a/kernel/cpuset.c
2552     +++ b/kernel/cpuset.c
2553     @@ -2511,8 +2511,16 @@ void cpuset_print_task_mems_allowed(struct task_struct *tsk)
2554    
2555     dentry = task_cs(tsk)->css.cgroup->dentry;
2556     spin_lock(&cpuset_buffer_lock);
2557     - snprintf(cpuset_name, CPUSET_NAME_LEN,
2558     - dentry ? (const char *)dentry->d_name.name : "/");
2559     +
2560     + if (!dentry) {
2561     + strcpy(cpuset_name, "/");
2562     + } else {
2563     + spin_lock(&dentry->d_lock);
2564     + strlcpy(cpuset_name, (const char *)dentry->d_name.name,
2565     + CPUSET_NAME_LEN);
2566     + spin_unlock(&dentry->d_lock);
2567     + }
2568     +
2569     nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
2570     tsk->mems_allowed);
2571     printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
2572     diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
2573     index 69185ae..e885be1 100644
2574     --- a/kernel/posix-timers.c
2575     +++ b/kernel/posix-timers.c
2576     @@ -639,6 +639,13 @@ static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
2577     {
2578     struct k_itimer *timr;
2579    
2580     + /*
2581     + * timer_t could be any type >= int and we want to make sure any
2582     + * @timer_id outside positive int range fails lookup.
2583     + */
2584     + if ((unsigned long long)timer_id > INT_MAX)
2585     + return NULL;
2586     +
2587     rcu_read_lock();
2588     timr = idr_find(&posix_timers_id, (int)timer_id);
2589     if (timr) {
2590     diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
2591     index 5a63844..0ddf3a0 100644
2592     --- a/kernel/sysctl_binary.c
2593     +++ b/kernel/sysctl_binary.c
2594     @@ -1194,9 +1194,10 @@ static ssize_t bin_dn_node_address(struct file *file,
2595    
2596     /* Convert the decnet address to binary */
2597     result = -EIO;
2598     - nodep = strchr(buf, '.') + 1;
2599     + nodep = strchr(buf, '.');
2600     if (!nodep)
2601     goto out;
2602     + ++nodep;
2603    
2604     area = simple_strtoul(buf, NULL, 10);
2605     node = simple_strtoul(nodep, NULL, 10);
2606     diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
2607     index 41473b4..43defd1 100644
2608     --- a/kernel/trace/ftrace.c
2609     +++ b/kernel/trace/ftrace.c
2610     @@ -3970,37 +3970,51 @@ static void ftrace_init_module(struct module *mod,
2611     ftrace_process_locs(mod, start, end);
2612     }
2613    
2614     -static int ftrace_module_notify(struct notifier_block *self,
2615     - unsigned long val, void *data)
2616     +static int ftrace_module_notify_enter(struct notifier_block *self,
2617     + unsigned long val, void *data)
2618     {
2619     struct module *mod = data;
2620    
2621     - switch (val) {
2622     - case MODULE_STATE_COMING:
2623     + if (val == MODULE_STATE_COMING)
2624     ftrace_init_module(mod, mod->ftrace_callsites,
2625     mod->ftrace_callsites +
2626     mod->num_ftrace_callsites);
2627     - break;
2628     - case MODULE_STATE_GOING:
2629     + return 0;
2630     +}
2631     +
2632     +static int ftrace_module_notify_exit(struct notifier_block *self,
2633     + unsigned long val, void *data)
2634     +{
2635     + struct module *mod = data;
2636     +
2637     + if (val == MODULE_STATE_GOING)
2638     ftrace_release_mod(mod);
2639     - break;
2640     - }
2641    
2642     return 0;
2643     }
2644     #else
2645     -static int ftrace_module_notify(struct notifier_block *self,
2646     - unsigned long val, void *data)
2647     +static int ftrace_module_notify_enter(struct notifier_block *self,
2648     + unsigned long val, void *data)
2649     +{
2650     + return 0;
2651     +}
2652     +static int ftrace_module_notify_exit(struct notifier_block *self,
2653     + unsigned long val, void *data)
2654     {
2655     return 0;
2656     }
2657     #endif /* CONFIG_MODULES */
2658    
2659     -struct notifier_block ftrace_module_nb = {
2660     - .notifier_call = ftrace_module_notify,
2661     +struct notifier_block ftrace_module_enter_nb = {
2662     + .notifier_call = ftrace_module_notify_enter,
2663     .priority = INT_MAX, /* Run before anything that can use kprobes */
2664     };
2665    
2666     +struct notifier_block ftrace_module_exit_nb = {
2667     + .notifier_call = ftrace_module_notify_exit,
2668     + .priority = INT_MIN, /* Run after anything that can remove kprobes */
2669     +};
2670     +
2671     extern unsigned long __start_mcount_loc[];
2672     extern unsigned long __stop_mcount_loc[];
2673    
2674     @@ -4032,9 +4046,13 @@ void __init ftrace_init(void)
2675     __start_mcount_loc,
2676     __stop_mcount_loc);
2677    
2678     - ret = register_module_notifier(&ftrace_module_nb);
2679     + ret = register_module_notifier(&ftrace_module_enter_nb);
2680     + if (ret)
2681     + pr_warning("Failed to register trace ftrace module enter notifier\n");
2682     +
2683     + ret = register_module_notifier(&ftrace_module_exit_nb);
2684     if (ret)
2685     - pr_warning("Failed to register trace ftrace module notifier\n");
2686     + pr_warning("Failed to register trace ftrace module exit notifier\n");
2687    
2688     set_ftrace_early_filters();
2689    
2690     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
2691     index 033ad5b..3a3a98f 100644
2692     --- a/kernel/workqueue.c
2693     +++ b/kernel/workqueue.c
2694     @@ -138,6 +138,7 @@ struct worker {
2695     };
2696    
2697     struct work_struct *current_work; /* L: work being processed */
2698     + work_func_t current_func; /* L: current_work's fn */
2699     struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
2700     struct list_head scheduled; /* L: scheduled works */
2701     struct task_struct *task; /* I: worker task */
2702     @@ -910,7 +911,8 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
2703     struct hlist_node *tmp;
2704    
2705     hlist_for_each_entry(worker, tmp, bwh, hentry)
2706     - if (worker->current_work == work)
2707     + if (worker->current_work == work &&
2708     + worker->current_func == work->func)
2709     return worker;
2710     return NULL;
2711     }
2712     @@ -920,9 +922,27 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
2713     * @gcwq: gcwq of interest
2714     * @work: work to find worker for
2715     *
2716     - * Find a worker which is executing @work on @gcwq. This function is
2717     - * identical to __find_worker_executing_work() except that this
2718     - * function calculates @bwh itself.
2719     + * Find a worker which is executing @work on @gcwq by searching
2720     + * @gcwq->busy_hash which is keyed by the address of @work. For a worker
2721     + * to match, its current execution should match the address of @work and
2722     + * its work function. This is to avoid unwanted dependency between
2723     + * unrelated work executions through a work item being recycled while still
2724     + * being executed.
2725     + *
2726     + * This is a bit tricky. A work item may be freed once its execution
2727     + * starts and nothing prevents the freed area from being recycled for
2728     + * another work item. If the same work item address ends up being reused
2729     + * before the original execution finishes, workqueue will identify the
2730     + * recycled work item as currently executing and make it wait until the
2731     + * current execution finishes, introducing an unwanted dependency.
2732     + *
2733     + * This function checks the work item address, work function and workqueue
2734     + * to avoid false positives. Note that this isn't complete as one may
2735     + * construct a work function which can introduce dependency onto itself
2736     + * through a recycled work item. Well, if somebody wants to shoot oneself
2737     + * in the foot that badly, there's only so much we can do, and if such
2738     + * deadlock actually occurs, it should be easy to locate the culprit work
2739     + * function.
2740     *
2741     * CONTEXT:
2742     * spin_lock_irq(gcwq->lock).
2743     @@ -2168,7 +2188,6 @@ __acquires(&gcwq->lock)
2744     struct global_cwq *gcwq = pool->gcwq;
2745     struct hlist_head *bwh = busy_worker_head(gcwq, work);
2746     bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
2747     - work_func_t f = work->func;
2748     int work_color;
2749     struct worker *collision;
2750     #ifdef CONFIG_LOCKDEP
2751     @@ -2208,6 +2227,7 @@ __acquires(&gcwq->lock)
2752     debug_work_deactivate(work);
2753     hlist_add_head(&worker->hentry, bwh);
2754     worker->current_work = work;
2755     + worker->current_func = work->func;
2756     worker->current_cwq = cwq;
2757     work_color = get_work_color(work);
2758    
2759     @@ -2240,7 +2260,7 @@ __acquires(&gcwq->lock)
2760     lock_map_acquire_read(&cwq->wq->lockdep_map);
2761     lock_map_acquire(&lockdep_map);
2762     trace_workqueue_execute_start(work);
2763     - f(work);
2764     + worker->current_func(work);
2765     /*
2766     * While we must be careful to not use "work" after this, the trace
2767     * point will only record its address.
2768     @@ -2252,7 +2272,8 @@ __acquires(&gcwq->lock)
2769     if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2770     pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2771     " last function: %pf\n",
2772     - current->comm, preempt_count(), task_pid_nr(current), f);
2773     + current->comm, preempt_count(), task_pid_nr(current),
2774     + worker->current_func);
2775     debug_show_held_locks(current);
2776     dump_stack();
2777     }
2778     @@ -2266,6 +2287,7 @@ __acquires(&gcwq->lock)
2779     /* we're done with it, release */
2780     hlist_del_init(&worker->hentry);
2781     worker->current_work = NULL;
2782     + worker->current_func = NULL;
2783     worker->current_cwq = NULL;
2784     cwq_dec_nr_in_flight(cwq, work_color);
2785     }
2786     diff --git a/lib/idr.c b/lib/idr.c
2787     index 6482390..ca5aa00 100644
2788     --- a/lib/idr.c
2789     +++ b/lib/idr.c
2790     @@ -625,7 +625,14 @@ void *idr_get_next(struct idr *idp, int *nextidp)
2791     return p;
2792     }
2793    
2794     - id += 1 << n;
2795     + /*
2796     + * Proceed to the next layer at the current level. Unlike
2797     + * idr_for_each(), @id isn't guaranteed to be aligned to
2798     + * layer boundary at this point and adding 1 << n may
2799     + * incorrectly skip IDs. Make sure we jump to the
2800     + * beginning of the next layer using round_up().
2801     + */
2802     + id = round_up(id + 1, 1 << n);
2803     while (n < fls(id)) {
2804     n += IDR_BITS;
2805     p = *--paa;
2806     diff --git a/mm/mmap.c b/mm/mmap.c
2807     index d1e4124..8832b87 100644
2808     --- a/mm/mmap.c
2809     +++ b/mm/mmap.c
2810     @@ -2169,9 +2169,28 @@ int expand_downwards(struct vm_area_struct *vma,
2811     return error;
2812     }
2813    
2814     +/*
2815     + * Note how expand_stack() refuses to expand the stack all the way to
2816     + * abut the next virtual mapping, *unless* that mapping itself is also
2817     + * a stack mapping. We want to leave room for a guard page, after all
2818     + * (the guard page itself is not added here, that is done by the
2819     + * actual page faulting logic)
2820     + *
2821     + * This matches the behavior of the guard page logic (see mm/memory.c:
2822     + * check_stack_guard_page()), which only allows the guard page to be
2823     + * removed under these circumstances.
2824     + */
2825     #ifdef CONFIG_STACK_GROWSUP
2826     int expand_stack(struct vm_area_struct *vma, unsigned long address)
2827     {
2828     + struct vm_area_struct *next;
2829     +
2830     + address &= PAGE_MASK;
2831     + next = vma->vm_next;
2832     + if (next && next->vm_start == address + PAGE_SIZE) {
2833     + if (!(next->vm_flags & VM_GROWSUP))
2834     + return -ENOMEM;
2835     + }
2836     return expand_upwards(vma, address);
2837     }
2838    
2839     @@ -2194,6 +2213,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
2840     #else
2841     int expand_stack(struct vm_area_struct *vma, unsigned long address)
2842     {
2843     + struct vm_area_struct *prev;
2844     +
2845     + address &= PAGE_MASK;
2846     + prev = vma->vm_prev;
2847     + if (prev && prev->vm_end == address) {
2848     + if (!(prev->vm_flags & VM_GROWSDOWN))
2849     + return -ENOMEM;
2850     + }
2851     return expand_downwards(vma, address);
2852     }
2853    
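
The comment block above describes the rule the two hunks implement: a stack is not allowed to grow flush against a neighbouring mapping unless that neighbour is itself a stack. A userspace sketch of the grow-down check, with simplified, hypothetical types standing in for vm_area_struct:

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define VM_GROWSDOWN	0x1u

	struct vma {
		unsigned long start, end;
		unsigned int flags;
		struct vma *prev;
	};

	/* Mirrors the new expand_stack() guard; false corresponds to -ENOMEM. */
	static bool may_expand_downwards(struct vma *vma, unsigned long address)
	{
		struct vma *prev = vma->prev;

		address &= PAGE_MASK;
		if (prev && prev->end == address && !(prev->flags & VM_GROWSDOWN))
			return false;	/* would swallow the guard gap next to a non-stack mapping */
		return true;
	}

	int main(void)
	{
		struct vma heap  = { 0x10000, 0x20000, 0 };
		struct vma stack = { 0x30000, 0x40000, VM_GROWSDOWN, &heap };

		printf("%d\n", may_expand_downwards(&stack, 0x20000));	/* 0: would abut a non-stack vma */
		printf("%d\n", may_expand_downwards(&stack, 0x21000));	/* 1: a page of gap remains */
		return 0;
	}
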
2854     diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
2855     index dbf12ac..2d34b6b 100644
2856     --- a/net/sunrpc/svc.c
2857     +++ b/net/sunrpc/svc.c
2858     @@ -515,15 +515,6 @@ EXPORT_SYMBOL_GPL(svc_create_pooled);
2859    
2860     void svc_shutdown_net(struct svc_serv *serv, struct net *net)
2861     {
2862     - /*
2863     - * The set of xprts (contained in the sv_tempsocks and
2864     - * sv_permsocks lists) is now constant, since it is modified
2865     - * only by accepting new sockets (done by service threads in
2866     - * svc_recv) or aging old ones (done by sv_temptimer), or
2867     - * configuration changes (excluded by whatever locking the
2868     - * caller is using--nfsd_mutex in the case of nfsd). So it's
2869     - * safe to traverse those lists and shut everything down:
2870     - */
2871     svc_close_net(serv, net);
2872    
2873     if (serv->sv_shutdown)
2874     diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
2875     index b8e47fa..ca71056 100644
2876     --- a/net/sunrpc/svc_xprt.c
2877     +++ b/net/sunrpc/svc_xprt.c
2878     @@ -856,7 +856,6 @@ static void svc_age_temp_xprts(unsigned long closure)
2879     struct svc_serv *serv = (struct svc_serv *)closure;
2880     struct svc_xprt *xprt;
2881     struct list_head *le, *next;
2882     - LIST_HEAD(to_be_aged);
2883    
2884     dprintk("svc_age_temp_xprts\n");
2885    
2886     @@ -877,25 +876,15 @@ static void svc_age_temp_xprts(unsigned long closure)
2887     if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
2888     test_bit(XPT_BUSY, &xprt->xpt_flags))
2889     continue;
2890     - svc_xprt_get(xprt);
2891     - list_move(le, &to_be_aged);
2892     + list_del_init(le);
2893     set_bit(XPT_CLOSE, &xprt->xpt_flags);
2894     set_bit(XPT_DETACHED, &xprt->xpt_flags);
2895     - }
2896     - spin_unlock_bh(&serv->sv_lock);
2897     -
2898     - while (!list_empty(&to_be_aged)) {
2899     - le = to_be_aged.next;
2900     - /* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
2901     - list_del_init(le);
2902     - xprt = list_entry(le, struct svc_xprt, xpt_list);
2903     -
2904     dprintk("queuing xprt %p for closing\n", xprt);
2905    
2906     /* a thread will dequeue and close it soon */
2907     svc_xprt_enqueue(xprt);
2908     - svc_xprt_put(xprt);
2909     }
2910     + spin_unlock_bh(&serv->sv_lock);
2911    
2912     mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
2913     }
2914     @@ -959,21 +948,24 @@ void svc_close_xprt(struct svc_xprt *xprt)
2915     }
2916     EXPORT_SYMBOL_GPL(svc_close_xprt);
2917    
2918     -static void svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
2919     +static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
2920     {
2921     struct svc_xprt *xprt;
2922     + int ret = 0;
2923    
2924     spin_lock(&serv->sv_lock);
2925     list_for_each_entry(xprt, xprt_list, xpt_list) {
2926     if (xprt->xpt_net != net)
2927     continue;
2928     + ret++;
2929     set_bit(XPT_CLOSE, &xprt->xpt_flags);
2930     - set_bit(XPT_BUSY, &xprt->xpt_flags);
2931     + svc_xprt_enqueue(xprt);
2932     }
2933     spin_unlock(&serv->sv_lock);
2934     + return ret;
2935     }
2936    
2937     -static void svc_clear_pools(struct svc_serv *serv, struct net *net)
2938     +static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
2939     {
2940     struct svc_pool *pool;
2941     struct svc_xprt *xprt;
2942     @@ -988,42 +980,46 @@ static void svc_clear_pools(struct svc_serv *serv, struct net *net)
2943     if (xprt->xpt_net != net)
2944     continue;
2945     list_del_init(&xprt->xpt_ready);
2946     + spin_unlock_bh(&pool->sp_lock);
2947     + return xprt;
2948     }
2949     spin_unlock_bh(&pool->sp_lock);
2950     }
2951     + return NULL;
2952     }
2953    
2954     -static void svc_clear_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
2955     +static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
2956     {
2957     struct svc_xprt *xprt;
2958     - struct svc_xprt *tmp;
2959     - LIST_HEAD(victims);
2960     -
2961     - spin_lock(&serv->sv_lock);
2962     - list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
2963     - if (xprt->xpt_net != net)
2964     - continue;
2965     - list_move(&xprt->xpt_list, &victims);
2966     - }
2967     - spin_unlock(&serv->sv_lock);
2968    
2969     - list_for_each_entry_safe(xprt, tmp, &victims, xpt_list)
2970     + while ((xprt = svc_dequeue_net(serv, net))) {
2971     + set_bit(XPT_CLOSE, &xprt->xpt_flags);
2972     svc_delete_xprt(xprt);
2973     + }
2974     }
2975    
2976     +/*
2977     + * Server threads may still be running (especially in the case where the
2978     + * service is still running in other network namespaces).
2979     + *
2980     + * So we shut down sockets the same way we would on a running server, by
2981     + * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
2982     + * the close. In the case there are no such other
2983     + * threads running, svc_clean_up_xprts() does a simple version of a
2984     + * server's main event loop, and in the case where there are other
2985     + * threads, we may need to wait a little while and then check again to
2986     + * see if they're done.
2987     + */
2988     void svc_close_net(struct svc_serv *serv, struct net *net)
2989     {
2990     - svc_close_list(serv, &serv->sv_tempsocks, net);
2991     - svc_close_list(serv, &serv->sv_permsocks, net);
2992     + int delay = 0;
2993    
2994     - svc_clear_pools(serv, net);
2995     - /*
2996     - * At this point the sp_sockets lists will stay empty, since
2997     - * svc_xprt_enqueue will not add new entries without taking the
2998     - * sp_lock and checking XPT_BUSY.
2999     - */
3000     - svc_clear_list(serv, &serv->sv_tempsocks, net);
3001     - svc_clear_list(serv, &serv->sv_permsocks, net);
3002     + while (svc_close_list(serv, &serv->sv_permsocks, net) +
3003     + svc_close_list(serv, &serv->sv_tempsocks, net)) {
3004     +
3005     + svc_clean_up_xprts(serv, net);
3006     + msleep(delay++);
3007     + }
3008     }
3009    
3010     /*
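
The rewritten svc_close_net() loops because server threads in other network namespaces may already be dequeuing and closing some of the transports it just marked: each pass re-marks whatever is still listed, cleans up what no other thread has claimed, and sleeps a little longer before checking again. A userspace sketch of that loop shape, with hypothetical helpers rather than the SUNRPC functions:

	#include <stdio.h>
	#include <unistd.h>

	/* Illustrative stand-ins for svc_close_list() / svc_clean_up_xprts(). */
	static int remaining = 5;

	static int mark_remaining_for_close(void)
	{
		return remaining;	/* how many transports are still on the lists */
	}

	static void clean_up_unclaimed(void)
	{
		if (remaining)
			remaining--;	/* another thread, or this one, finished a close */
	}

	int main(void)
	{
		int delay = 0;

		/* Same shape as the new svc_close_net(): retry with a growing pause. */
		while (mark_remaining_for_close()) {
			clean_up_unclaimed();
			usleep(1000 * delay++);	/* msleep(delay++) in the kernel version */
		}
		printf("all transports closed\n");
		return 0;
	}
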
3011     diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
3012     index cdd100d..9febe55 100644
3013     --- a/sound/pci/bt87x.c
3014     +++ b/sound/pci/bt87x.c
3015     @@ -836,6 +836,8 @@ static struct {
3016     {0x7063, 0x2000}, /* pcHDTV HD-2000 TV */
3017     };
3018    
3019     +static struct pci_driver driver;
3020     +
3021     /* return the id of the card, or a negative value if it's blacklisted */
3022     static int snd_bt87x_detect_card(struct pci_dev *pci)
3023     {
3024     @@ -962,11 +964,24 @@ static DEFINE_PCI_DEVICE_TABLE(snd_bt87x_default_ids) = {
3025     { }
3026     };
3027    
3028     -static struct pci_driver bt87x_driver = {
3029     +static struct pci_driver driver = {
3030     .name = KBUILD_MODNAME,
3031     .id_table = snd_bt87x_ids,
3032     .probe = snd_bt87x_probe,
3033     .remove = snd_bt87x_remove,
3034     };
3035    
3036     -module_pci_driver(bt87x_driver);
3037     +static int __init alsa_card_bt87x_init(void)
3038     +{
3039     + if (load_all)
3040     + driver.id_table = snd_bt87x_default_ids;
3041     + return pci_register_driver(&driver);
3042     +}
3043     +
3044     +static void __exit alsa_card_bt87x_exit(void)
3045     +{
3046     + pci_unregister_driver(&driver);
3047     +}
3048     +
3049     +module_init(alsa_card_bt87x_init)
3050     +module_exit(alsa_card_bt87x_exit)
3051     diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
3052     index a7c296a..e6b0166 100644
3053     --- a/sound/pci/emu10k1/emu10k1_main.c
3054     +++ b/sound/pci/emu10k1/emu10k1_main.c
3055     @@ -862,6 +862,12 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
3056     filename, emu->firmware->size);
3057     }
3058    
3059     + err = snd_emu1010_load_firmware(emu);
3060     + if (err != 0) {
3061     + snd_printk(KERN_INFO "emu1010: Loading Firmware failed\n");
3062     + return err;
3063     + }
3064     +
3065     /* ID, should read & 0x7f = 0x55 when FPGA programmed. */
3066     snd_emu1010_fpga_read(emu, EMU_HANA_ID, &reg);
3067     if ((reg & 0x3f) != 0x15) {
3068     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
3069     index b14813d..c690b2a 100644
3070     --- a/sound/pci/hda/patch_hdmi.c
3071     +++ b/sound/pci/hda/patch_hdmi.c
3072     @@ -1573,6 +1573,9 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pin_idx)
3073    
3074     if (pcmdev > 0)
3075     sprintf(hdmi_str + strlen(hdmi_str), ",pcm=%d", pcmdev);
3076     + if (!is_jack_detectable(codec, per_pin->pin_nid))
3077     + strncat(hdmi_str, " Phantom",
3078     + sizeof(hdmi_str) - strlen(hdmi_str) - 1);
3079    
3080     return snd_hda_jack_add_kctl(codec, per_pin->pin_nid, hdmi_str, 0);
3081     }