Magellan Linux

Annotation of /trunk/kernel-magellan/patches-3.14/0101-3.14.2-all-fixes.patch

Revision 2437
Thu Jun 5 08:04:56 2014 UTC by niro
File size: 40023 bytes
-linux-3.14.5
1 niro 2437 diff --git a/Makefile b/Makefile
2     index 7d0b6992d9ed..b2f7de81e9a2 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 3
7     PATCHLEVEL = 14
8     -SUBLEVEL = 1
9     +SUBLEVEL = 2
10     EXTRAVERSION =
11     NAME = Shuffling Zombie Juror
12    
13     diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
14     index 9f7ca266864a..832d05a914ba 100644
15     --- a/arch/x86/kernel/cpu/mshyperv.c
16     +++ b/arch/x86/kernel/cpu/mshyperv.c
17     @@ -26,6 +26,7 @@
18     #include <asm/irq_regs.h>
19     #include <asm/i8259.h>
20     #include <asm/apic.h>
21     +#include <asm/timer.h>
22    
23     struct ms_hyperv_info ms_hyperv;
24     EXPORT_SYMBOL_GPL(ms_hyperv);
25     @@ -105,6 +106,11 @@ static void __init ms_hyperv_init_platform(void)
26    
27     if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
28     clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
29     +
30     +#ifdef CONFIG_X86_IO_APIC
31     + no_timer_check = 1;
32     +#endif
33     +
34     }
35    
36     const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
37     diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
38     index bc4a088f9023..6d7d5a1260a6 100644
39     --- a/arch/x86/kernel/early-quirks.c
40     +++ b/arch/x86/kernel/early-quirks.c
41     @@ -203,18 +203,15 @@ static void __init intel_remapping_check(int num, int slot, int func)
42     revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);
43    
44     /*
45     - * Revision 13 of all triggering devices id in this quirk have
46     - * a problem draining interrupts when irq remapping is enabled,
47     - * and should be flagged as broken. Additionally revisions 0x12
48     - * and 0x22 of device id 0x3405 has this problem.
49     + * Revision <= 13 of all triggering devices id in this quirk
50     + * have a problem draining interrupts when irq remapping is
51     + * enabled, and should be flagged as broken. Additionally
52     + * revision 0x22 of device id 0x3405 has this problem.
53     */
54     - if (revision == 0x13)
55     + if (revision <= 0x13)
56     set_irq_remapping_broken();
57     - else if ((device == 0x3405) &&
58     - ((revision == 0x12) ||
59     - (revision == 0x22)))
60     + else if (device == 0x3405 && revision == 0x22)
61     set_irq_remapping_broken();
62     -
63     }
64    
65     /*
66     diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
67     index 714e957a871a..db35594d4df7 100644
68     --- a/drivers/acpi/button.c
69     +++ b/drivers/acpi/button.c
70     @@ -302,6 +302,10 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
71     input_sync(input);
72    
73     pm_wakeup_event(&device->dev, 0);
74     + acpi_bus_generate_netlink_event(
75     + device->pnp.device_class,
76     + dev_name(&device->dev),
77     + event, ++button->pushed);
78     }
79     break;
80     default:
81     diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
82     index f5e4cd7617f6..61e71616689b 100644
83     --- a/drivers/char/ipmi/ipmi_bt_sm.c
84     +++ b/drivers/char/ipmi/ipmi_bt_sm.c
85     @@ -352,7 +352,7 @@ static inline void write_all_bytes(struct si_sm_data *bt)
86    
87     static inline int read_all_bytes(struct si_sm_data *bt)
88     {
89     - unsigned char i;
90     + unsigned int i;
91    
92     /*
93     * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
94     diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
95     index 17ce88f79d2b..f173dd09fce4 100644
96     --- a/drivers/pci/host/pcie-designware.c
97     +++ b/drivers/pci/host/pcie-designware.c
98     @@ -522,13 +522,13 @@ static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
99     dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
100     PCIE_ATU_VIEWPORT);
101     dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
102     - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
103     dw_pcie_writel_rc(pp, pp->cfg1_base, PCIE_ATU_LOWER_BASE);
104     dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32), PCIE_ATU_UPPER_BASE);
105     dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1,
106     PCIE_ATU_LIMIT);
107     dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
108     dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
109     + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
110     }
111    
112     static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
113     @@ -537,7 +537,6 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
114     dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
115     PCIE_ATU_VIEWPORT);
116     dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
117     - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
118     dw_pcie_writel_rc(pp, pp->mem_base, PCIE_ATU_LOWER_BASE);
119     dw_pcie_writel_rc(pp, (pp->mem_base >> 32), PCIE_ATU_UPPER_BASE);
120     dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1,
121     @@ -545,6 +544,7 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
122     dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET);
123     dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
124     PCIE_ATU_UPPER_TARGET);
125     + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
126     }
127    
128     static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
129     @@ -553,7 +553,6 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
130     dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
131     PCIE_ATU_VIEWPORT);
132     dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
133     - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
134     dw_pcie_writel_rc(pp, pp->io_base, PCIE_ATU_LOWER_BASE);
135     dw_pcie_writel_rc(pp, (pp->io_base >> 32), PCIE_ATU_UPPER_BASE);
136     dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1,
137     @@ -561,6 +560,7 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
138     dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET);
139     dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
140     PCIE_ATU_UPPER_TARGET);
141     + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
142     }
143    
144     static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
145     @@ -800,7 +800,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
146    
147     /* setup RC BARs */
148     dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
149     - dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_1);
150     + dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);
151    
152     /* setup interrupt pins */
153     dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val);
154     diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
155     index 470954aba728..36d1a23f14be 100644
156     --- a/drivers/scsi/sd.c
157     +++ b/drivers/scsi/sd.c
158     @@ -1463,8 +1463,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
159     sd_print_sense_hdr(sdkp, &sshdr);
160     /* we need to evaluate the error return */
161     if (scsi_sense_valid(&sshdr) &&
162     - /* 0x3a is medium not present */
163     - sshdr.asc == 0x3a)
164     + (sshdr.asc == 0x3a || /* medium not present */
165     + sshdr.asc == 0x20)) /* invalid command */
166     /* this is no error here */
167     return 0;
168    
169     diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
170     index 924fce977985..257595016161 100644
171     --- a/drivers/staging/comedi/comedi_buf.c
172     +++ b/drivers/staging/comedi/comedi_buf.c
173     @@ -61,6 +61,8 @@ static void __comedi_buf_free(struct comedi_device *dev,
174     struct comedi_subdevice *s)
175     {
176     struct comedi_async *async = s->async;
177     + struct comedi_buf_map *bm;
178     + unsigned long flags;
179    
180     if (async->prealloc_buf) {
181     vunmap(async->prealloc_buf);
182     @@ -68,8 +70,11 @@ static void __comedi_buf_free(struct comedi_device *dev,
183     async->prealloc_bufsz = 0;
184     }
185    
186     - comedi_buf_map_put(async->buf_map);
187     + spin_lock_irqsave(&s->spin_lock, flags);
188     + bm = async->buf_map;
189     async->buf_map = NULL;
190     + spin_unlock_irqrestore(&s->spin_lock, flags);
191     + comedi_buf_map_put(bm);
192     }
193    
194     static void __comedi_buf_alloc(struct comedi_device *dev,
195     @@ -80,6 +85,7 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
196     struct page **pages = NULL;
197     struct comedi_buf_map *bm;
198     struct comedi_buf_page *buf;
199     + unsigned long flags;
200     unsigned i;
201    
202     if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
203     @@ -92,8 +98,10 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
204     if (!bm)
205     return;
206    
207     - async->buf_map = bm;
208     kref_init(&bm->refcount);
209     + spin_lock_irqsave(&s->spin_lock, flags);
210     + async->buf_map = bm;
211     + spin_unlock_irqrestore(&s->spin_lock, flags);
212     bm->dma_dir = s->async_dma_dir;
213     if (bm->dma_dir != DMA_NONE)
214     /* Need ref to hardware device to free buffer later. */
215     @@ -127,7 +135,9 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
216    
217     pages[i] = virt_to_page(buf->virt_addr);
218     }
219     + spin_lock_irqsave(&s->spin_lock, flags);
220     bm->n_pages = i;
221     + spin_unlock_irqrestore(&s->spin_lock, flags);
222    
223     /* vmap the prealloc_buf if all the pages were allocated */
224     if (i == n_pages)
225     @@ -150,6 +160,29 @@ int comedi_buf_map_put(struct comedi_buf_map *bm)
226     return 1;
227     }
228    
229     +/* returns s->async->buf_map and increments its kref refcount */
230     +struct comedi_buf_map *
231     +comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
232     +{
233     + struct comedi_async *async = s->async;
234     + struct comedi_buf_map *bm = NULL;
235     + unsigned long flags;
236     +
237     + if (!async)
238     + return NULL;
239     +
240     + spin_lock_irqsave(&s->spin_lock, flags);
241     + bm = async->buf_map;
242     + /* only want it if buffer pages allocated */
243     + if (bm && bm->n_pages)
244     + comedi_buf_map_get(bm);
245     + else
246     + bm = NULL;
247     + spin_unlock_irqrestore(&s->spin_lock, flags);
248     +
249     + return bm;
250     +}
251     +
252     bool comedi_buf_is_mmapped(struct comedi_async *async)
253     {
254     struct comedi_buf_map *bm = async->buf_map;
255     diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
256     index c22c617b0da1..eae3ee139330 100644
257     --- a/drivers/staging/comedi/comedi_fops.c
258     +++ b/drivers/staging/comedi/comedi_fops.c
259     @@ -1923,14 +1923,21 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
260     struct comedi_device *dev = file->private_data;
261     struct comedi_subdevice *s;
262     struct comedi_async *async;
263     - struct comedi_buf_map *bm;
264     + struct comedi_buf_map *bm = NULL;
265     unsigned long start = vma->vm_start;
266     unsigned long size;
267     int n_pages;
268     int i;
269     int retval;
270    
271     - mutex_lock(&dev->mutex);
272     + /*
273     + * 'trylock' avoids circular dependency with current->mm->mmap_sem
274     + * and down-reading &dev->attach_lock should normally succeed without
275     + * contention unless the device is in the process of being attached
276     + * or detached.
277     + */
278     + if (!down_read_trylock(&dev->attach_lock))
279     + return -EAGAIN;
280    
281     if (!dev->attached) {
282     dev_dbg(dev->class_dev, "no driver attached\n");
283     @@ -1970,7 +1977,9 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
284     }
285    
286     n_pages = size >> PAGE_SHIFT;
287     - bm = async->buf_map;
288     +
289     + /* get reference to current buf map (if any) */
290     + bm = comedi_buf_map_from_subdev_get(s);
291     if (!bm || n_pages > bm->n_pages) {
292     retval = -EINVAL;
293     goto done;
294     @@ -1994,7 +2003,8 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
295    
296     retval = 0;
297     done:
298     - mutex_unlock(&dev->mutex);
299     + up_read(&dev->attach_lock);
300     + comedi_buf_map_put(bm); /* put reference to buf map - okay if NULL */
301     return retval;
302     }
303    
304     diff --git a/drivers/staging/comedi/comedi_internal.h b/drivers/staging/comedi/comedi_internal.h
305     index 9a746570f161..a492f2d2436e 100644
306     --- a/drivers/staging/comedi/comedi_internal.h
307     +++ b/drivers/staging/comedi/comedi_internal.h
308     @@ -19,6 +19,8 @@ void comedi_buf_reset(struct comedi_async *async);
309     bool comedi_buf_is_mmapped(struct comedi_async *async);
310     void comedi_buf_map_get(struct comedi_buf_map *bm);
311     int comedi_buf_map_put(struct comedi_buf_map *bm);
312     +struct comedi_buf_map *comedi_buf_map_from_subdev_get(
313     + struct comedi_subdevice *s);
314     unsigned int comedi_buf_write_n_allocated(struct comedi_async *async);
315     void comedi_device_cancel_all(struct comedi_device *dev);
316    
317     diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
318     index 8a57c3c1ade0..1097dc6a3086 100644
319     --- a/drivers/staging/comedi/drivers/8255_pci.c
320     +++ b/drivers/staging/comedi/drivers/8255_pci.c
321     @@ -56,6 +56,7 @@ Configuration Options: not applicable, uses PCI auto config
322     #include "../comedidev.h"
323    
324     #include "8255.h"
325     +#include "mite.h"
326    
327     enum pci_8255_boardid {
328     BOARD_ADLINK_PCI7224,
329     @@ -79,6 +80,7 @@ struct pci_8255_boardinfo {
330     const char *name;
331     int dio_badr;
332     int n_8255;
333     + unsigned int has_mite:1;
334     };
335    
336     static const struct pci_8255_boardinfo pci_8255_boards[] = {
337     @@ -126,36 +128,43 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
338     .name = "ni_pci-dio-96",
339     .dio_badr = 1,
340     .n_8255 = 4,
341     + .has_mite = 1,
342     },
343     [BOARD_NI_PCIDIO96B] = {
344     .name = "ni_pci-dio-96b",
345     .dio_badr = 1,
346     .n_8255 = 4,
347     + .has_mite = 1,
348     },
349     [BOARD_NI_PXI6508] = {
350     .name = "ni_pxi-6508",
351     .dio_badr = 1,
352     .n_8255 = 4,
353     + .has_mite = 1,
354     },
355     [BOARD_NI_PCI6503] = {
356     .name = "ni_pci-6503",
357     .dio_badr = 1,
358     .n_8255 = 1,
359     + .has_mite = 1,
360     },
361     [BOARD_NI_PCI6503B] = {
362     .name = "ni_pci-6503b",
363     .dio_badr = 1,
364     .n_8255 = 1,
365     + .has_mite = 1,
366     },
367     [BOARD_NI_PCI6503X] = {
368     .name = "ni_pci-6503x",
369     .dio_badr = 1,
370     .n_8255 = 1,
371     + .has_mite = 1,
372     },
373     [BOARD_NI_PXI_6503] = {
374     .name = "ni_pxi-6503",
375     .dio_badr = 1,
376     .n_8255 = 1,
377     + .has_mite = 1,
378     },
379     };
380    
381     @@ -163,6 +172,25 @@ struct pci_8255_private {
382     void __iomem *mmio_base;
383     };
384    
385     +static int pci_8255_mite_init(struct pci_dev *pcidev)
386     +{
387     + void __iomem *mite_base;
388     + u32 main_phys_addr;
389     +
390     + /* ioremap the MITE registers (BAR 0) temporarily */
391     + mite_base = pci_ioremap_bar(pcidev, 0);
392     + if (!mite_base)
393     + return -ENOMEM;
394     +
395     + /* set data window to main registers (BAR 1) */
396     + main_phys_addr = pci_resource_start(pcidev, 1);
397     + writel(main_phys_addr | WENAB, mite_base + MITE_IODWBSR);
398     +
399     + /* finished with MITE registers */
400     + iounmap(mite_base);
401     + return 0;
402     +}
403     +
404     static int pci_8255_mmio(int dir, int port, int data, unsigned long iobase)
405     {
406     void __iomem *mmio_base = (void __iomem *)iobase;
407     @@ -201,6 +229,12 @@ static int pci_8255_auto_attach(struct comedi_device *dev,
408     if (ret)
409     return ret;
410    
411     + if (board->has_mite) {
412     + ret = pci_8255_mite_init(pcidev);
413     + if (ret)
414     + return ret;
415     + }
416     +
417     is_mmio = (pci_resource_flags(pcidev, board->dio_badr) &
418     IORESOURCE_MEM) != 0;
419     if (is_mmio) {
420     diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
421     index ebd5bff0f5c1..17ee3bf0926b 100644
422     --- a/drivers/tty/ipwireless/tty.c
423     +++ b/drivers/tty/ipwireless/tty.c
424     @@ -176,9 +176,6 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
425     ": %d chars not inserted to flip buffer!\n",
426     length - work);
427    
428     - /*
429     - * This may sleep if ->low_latency is set
430     - */
431     if (work)
432     tty_flip_buffer_push(&tty->port);
433     }
434     diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
435     index 765125dff20e..8ebd9f88a6f6 100644
436     --- a/drivers/tty/tty_buffer.c
437     +++ b/drivers/tty/tty_buffer.c
438     @@ -351,14 +351,11 @@ EXPORT_SYMBOL(tty_insert_flip_string_flags);
439     * Takes any pending buffers and transfers their ownership to the
440     * ldisc side of the queue. It then schedules those characters for
441     * processing by the line discipline.
442     - * Note that this function can only be used when the low_latency flag
443     - * is unset. Otherwise the workqueue won't be flushed.
444     */
445    
446     void tty_schedule_flip(struct tty_port *port)
447     {
448     struct tty_bufhead *buf = &port->buf;
449     - WARN_ON(port->low_latency);
450    
451     buf->tail->commit = buf->tail->used;
452     schedule_work(&buf->work);
453     @@ -482,17 +479,15 @@ static void flush_to_ldisc(struct work_struct *work)
454     */
455     void tty_flush_to_ldisc(struct tty_struct *tty)
456     {
457     - if (!tty->port->low_latency)
458     - flush_work(&tty->port->buf.work);
459     + flush_work(&tty->port->buf.work);
460     }
461    
462     /**
463     * tty_flip_buffer_push - terminal
464     * @port: tty port to push
465     *
466     - * Queue a push of the terminal flip buffers to the line discipline. This
467     - * function must not be called from IRQ context if port->low_latency is
468     - * set.
469     + * Queue a push of the terminal flip buffers to the line discipline.
470     + * Can be called from IRQ/atomic context.
471     *
472     * In the event of the queue being busy for flipping the work will be
473     * held off and retried later.
474     @@ -500,14 +495,7 @@ void tty_flush_to_ldisc(struct tty_struct *tty)
475    
476     void tty_flip_buffer_push(struct tty_port *port)
477     {
478     - struct tty_bufhead *buf = &port->buf;
479     -
480     - buf->tail->commit = buf->tail->used;
481     -
482     - if (port->low_latency)
483     - flush_to_ldisc(&buf->work);
484     - else
485     - schedule_work(&buf->work);
486     + tty_schedule_flip(port);
487     }
488     EXPORT_SYMBOL(tty_flip_buffer_push);
489    
490     diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
491     index c74a00ad7add..d3448a90f0f9 100644
492     --- a/drivers/tty/tty_io.c
493     +++ b/drivers/tty/tty_io.c
494     @@ -1271,12 +1271,13 @@ static void pty_line_name(struct tty_driver *driver, int index, char *p)
495     *
496     * Locking: None
497     */
498     -static void tty_line_name(struct tty_driver *driver, int index, char *p)
499     +static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
500     {
501     if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE)
502     - strcpy(p, driver->name);
503     + return sprintf(p, "%s", driver->name);
504     else
505     - sprintf(p, "%s%d", driver->name, index + driver->name_base);
506     + return sprintf(p, "%s%d", driver->name,
507     + index + driver->name_base);
508     }
509    
510     /**
511     @@ -3545,9 +3546,19 @@ static ssize_t show_cons_active(struct device *dev,
512     if (i >= ARRAY_SIZE(cs))
513     break;
514     }
515     - while (i--)
516     - count += sprintf(buf + count, "%s%d%c",
517     - cs[i]->name, cs[i]->index, i ? ' ':'\n');
518     + while (i--) {
519     + int index = cs[i]->index;
520     + struct tty_driver *drv = cs[i]->device(cs[i], &index);
521     +
522     + /* don't resolve tty0 as some programs depend on it */
523     + if (drv && (cs[i]->index > 0 || drv->major != TTY_MAJOR))
524     + count += tty_line_name(drv, index, buf + count);
525     + else
526     + count += sprintf(buf + count, "%s%d",
527     + cs[i]->name, cs[i]->index);
528     +
529     + count += sprintf(buf + count, "%c", i ? ' ':'\n');
530     + }
531     console_unlock();
532    
533     return count;
534     diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
535     index b369292d4b90..ad0aca812002 100644
536     --- a/drivers/usb/gadget/u_serial.c
537     +++ b/drivers/usb/gadget/u_serial.c
538     @@ -549,8 +549,8 @@ static void gs_rx_push(unsigned long _port)
539     port->read_started--;
540     }
541    
542     - /* Push from tty to ldisc; without low_latency set this is handled by
543     - * a workqueue, so we won't get callbacks and can hold port_lock
544     + /* Push from tty to ldisc; this is handled by a workqueue,
545     + * so we won't get callbacks and can hold port_lock
546     */
547     if (do_push)
548     tty_flip_buffer_push(&port->port);
549     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
550     index 81ea55314b1f..9a527a1826df 100644
551     --- a/fs/btrfs/disk-io.c
552     +++ b/fs/btrfs/disk-io.c
553     @@ -3244,6 +3244,8 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
554     /* send down all the barriers */
555     head = &info->fs_devices->devices;
556     list_for_each_entry_rcu(dev, head, dev_list) {
557     + if (dev->missing)
558     + continue;
559     if (!dev->bdev) {
560     errors_send++;
561     continue;
562     @@ -3258,6 +3260,8 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
563    
564     /* wait for all the barriers */
565     list_for_each_entry_rcu(dev, head, dev_list) {
566     + if (dev->missing)
567     + continue;
568     if (!dev->bdev) {
569     errors_wait++;
570     continue;
571     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
572     index 32312e09f0f5..3c8e68da9ef8 100644
573     --- a/fs/btrfs/extent-tree.c
574     +++ b/fs/btrfs/extent-tree.c
575     @@ -2444,7 +2444,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
576     spin_unlock(&locked_ref->lock);
577     spin_lock(&delayed_refs->lock);
578     spin_lock(&locked_ref->lock);
579     - if (rb_first(&locked_ref->ref_root)) {
580     + if (rb_first(&locked_ref->ref_root) ||
581     + locked_ref->extent_op) {
582     spin_unlock(&locked_ref->lock);
583     spin_unlock(&delayed_refs->lock);
584     continue;
585     diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
586     index 34cd83184c4a..b05bf58b9395 100644
587     --- a/fs/btrfs/transaction.c
588     +++ b/fs/btrfs/transaction.c
589     @@ -683,7 +683,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
590     int lock = (trans->type != TRANS_JOIN_NOLOCK);
591     int err = 0;
592    
593     - if (--trans->use_count) {
594     + if (trans->use_count > 1) {
595     + trans->use_count--;
596     trans->block_rsv = trans->orig_rsv;
597     return 0;
598     }
599     @@ -731,17 +732,10 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
600     }
601    
602     if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
603     - if (throttle) {
604     - /*
605     - * We may race with somebody else here so end up having
606     - * to call end_transaction on ourselves again, so inc
607     - * our use_count.
608     - */
609     - trans->use_count++;
610     + if (throttle)
611     return btrfs_commit_transaction(trans, root);
612     - } else {
613     + else
614     wake_up_process(info->transaction_kthread);
615     - }
616     }
617    
618     if (trans->type & __TRANS_FREEZABLE)
619     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
620     index 74bc2d549c58..47188916dd8d 100644
621     --- a/fs/ext4/extents.c
622     +++ b/fs/ext4/extents.c
623     @@ -2585,6 +2585,27 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
624     ex_ee_block = le32_to_cpu(ex->ee_block);
625     ex_ee_len = ext4_ext_get_actual_len(ex);
626    
627     + /*
628     + * If we're starting with an extent other than the last one in the
629     + * node, we need to see if it shares a cluster with the extent to
630     + * the right (towards the end of the file). If its leftmost cluster
631     + * is this extent's rightmost cluster and it is not cluster aligned,
632     + * we'll mark it as a partial that is not to be deallocated.
633     + */
634     +
635     + if (ex != EXT_LAST_EXTENT(eh)) {
636     + ext4_fsblk_t current_pblk, right_pblk;
637     + long long current_cluster, right_cluster;
638     +
639     + current_pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
640     + current_cluster = (long long)EXT4_B2C(sbi, current_pblk);
641     + right_pblk = ext4_ext_pblock(ex + 1);
642     + right_cluster = (long long)EXT4_B2C(sbi, right_pblk);
643     + if (current_cluster == right_cluster &&
644     + EXT4_PBLK_COFF(sbi, right_pblk))
645     + *partial_cluster = -right_cluster;
646     + }
647     +
648     trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
649    
650     while (ex >= EXT_FIRST_EXTENT(eh) &&
651     @@ -2710,10 +2731,15 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
652     err = ext4_ext_correct_indexes(handle, inode, path);
653    
654     /*
655     - * Free the partial cluster only if the current extent does not
656     - * reference it. Otherwise we might free used cluster.
657     + * If there's a partial cluster and at least one extent remains in
658     + * the leaf, free the partial cluster if it isn't shared with the
659     + * current extent. If there's a partial cluster and no extents
660     + * remain in the leaf, it can't be freed here. It can only be
661     + * freed when it's possible to determine if it's not shared with
662     + * any other extent - when the next leaf is processed or when space
663     + * removal is complete.
664     */
665     - if (*partial_cluster > 0 &&
666     + if (*partial_cluster > 0 && eh->eh_entries &&
667     (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
668     *partial_cluster)) {
669     int flags = get_default_free_blocks_flags(inode);
670     @@ -4128,7 +4154,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
671     struct ext4_extent newex, *ex, *ex2;
672     struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
673     ext4_fsblk_t newblock = 0;
674     - int free_on_err = 0, err = 0, depth;
675     + int free_on_err = 0, err = 0, depth, ret;
676     unsigned int allocated = 0, offset = 0;
677     unsigned int allocated_clusters = 0;
678     struct ext4_allocation_request ar;
679     @@ -4189,9 +4215,13 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
680     if (!ext4_ext_is_uninitialized(ex))
681     goto out;
682    
683     - allocated = ext4_ext_handle_uninitialized_extents(
684     + ret = ext4_ext_handle_uninitialized_extents(
685     handle, inode, map, path, flags,
686     allocated, newblock);
687     + if (ret < 0)
688     + err = ret;
689     + else
690     + allocated = ret;
691     goto out3;
692     }
693     }
694     diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
695     index d754e3cf99a8..a16315957ef3 100644
696     --- a/fs/fs-writeback.c
697     +++ b/fs/fs-writeback.c
698     @@ -89,16 +89,29 @@ static inline struct inode *wb_inode(struct list_head *head)
699     #define CREATE_TRACE_POINTS
700     #include <trace/events/writeback.h>
701    
702     +static void bdi_wakeup_thread(struct backing_dev_info *bdi)
703     +{
704     + spin_lock_bh(&bdi->wb_lock);
705     + if (test_bit(BDI_registered, &bdi->state))
706     + mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
707     + spin_unlock_bh(&bdi->wb_lock);
708     +}
709     +
710     static void bdi_queue_work(struct backing_dev_info *bdi,
711     struct wb_writeback_work *work)
712     {
713     trace_writeback_queue(bdi, work);
714    
715     spin_lock_bh(&bdi->wb_lock);
716     + if (!test_bit(BDI_registered, &bdi->state)) {
717     + if (work->done)
718     + complete(work->done);
719     + goto out_unlock;
720     + }
721     list_add_tail(&work->list, &bdi->work_list);
722     - spin_unlock_bh(&bdi->wb_lock);
723     -
724     mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
725     +out_unlock:
726     + spin_unlock_bh(&bdi->wb_lock);
727     }
728    
729     static void
730     @@ -114,7 +127,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
731     work = kzalloc(sizeof(*work), GFP_ATOMIC);
732     if (!work) {
733     trace_writeback_nowork(bdi);
734     - mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
735     + bdi_wakeup_thread(bdi);
736     return;
737     }
738    
739     @@ -161,7 +174,7 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi)
740     * writeback as soon as there is no other work to do.
741     */
742     trace_writeback_wake_background(bdi);
743     - mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
744     + bdi_wakeup_thread(bdi);
745     }
746    
747     /*
748     @@ -1017,7 +1030,7 @@ void bdi_writeback_workfn(struct work_struct *work)
749     current->flags |= PF_SWAPWRITE;
750    
751     if (likely(!current_is_workqueue_rescuer() ||
752     - list_empty(&bdi->bdi_list))) {
753     + !test_bit(BDI_registered, &bdi->state))) {
754     /*
755     * The normal path. Keep writing back @bdi until its
756     * work_list is empty. Note that this path is also taken
757     @@ -1039,10 +1052,10 @@ void bdi_writeback_workfn(struct work_struct *work)
758     trace_writeback_pages_written(pages_written);
759     }
760    
761     - if (!list_empty(&bdi->work_list) ||
762     - (wb_has_dirty_io(wb) && dirty_writeback_interval))
763     - queue_delayed_work(bdi_wq, &wb->dwork,
764     - msecs_to_jiffies(dirty_writeback_interval * 10));
765     + if (!list_empty(&bdi->work_list))
766     + mod_delayed_work(bdi_wq, &wb->dwork, 0);
767     + else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
768     + bdi_wakeup_thread_delayed(bdi);
769    
770     current->flags &= ~PF_SWAPWRITE;
771     }
772     diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
773     index 16a5047903a6..406d9cc84ba8 100644
774     --- a/fs/jffs2/compr_rtime.c
775     +++ b/fs/jffs2/compr_rtime.c
776     @@ -33,7 +33,7 @@ static int jffs2_rtime_compress(unsigned char *data_in,
777     unsigned char *cpage_out,
778     uint32_t *sourcelen, uint32_t *dstlen)
779     {
780     - short positions[256];
781     + unsigned short positions[256];
782     int outpos = 0;
783     int pos=0;
784    
785     @@ -74,7 +74,7 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
786     unsigned char *cpage_out,
787     uint32_t srclen, uint32_t destlen)
788     {
789     - short positions[256];
790     + unsigned short positions[256];
791     int outpos = 0;
792     int pos=0;
793    
794     diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
795     index e4619b00f7c5..fa35ff79ab35 100644
796     --- a/fs/jffs2/nodelist.h
797     +++ b/fs/jffs2/nodelist.h
798     @@ -231,7 +231,7 @@ struct jffs2_tmp_dnode_info
799     uint32_t version;
800     uint32_t data_crc;
801     uint32_t partial_crc;
802     - uint16_t csize;
803     + uint32_t csize;
804     uint16_t overlapped;
805     };
806    
807     diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
808     index 03310721712f..b6bd4affd9ad 100644
809     --- a/fs/jffs2/nodemgmt.c
810     +++ b/fs/jffs2/nodemgmt.c
811     @@ -179,6 +179,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
812     spin_unlock(&c->erase_completion_lock);
813    
814     schedule();
815     + remove_wait_queue(&c->erase_wait, &wait);
816     } else
817     spin_unlock(&c->erase_completion_lock);
818     } else if (ret)
819     @@ -211,20 +212,25 @@ out:
820     int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
821     uint32_t *len, uint32_t sumsize)
822     {
823     - int ret = -EAGAIN;
824     + int ret;
825     minsize = PAD(minsize);
826    
827     jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
828    
829     - spin_lock(&c->erase_completion_lock);
830     - while(ret == -EAGAIN) {
831     + while (true) {
832     + spin_lock(&c->erase_completion_lock);
833     ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
834     if (ret) {
835     jffs2_dbg(1, "%s(): looping, ret is %d\n",
836     __func__, ret);
837     }
838     + spin_unlock(&c->erase_completion_lock);
839     +
840     + if (ret == -EAGAIN)
841     + cond_resched();
842     + else
843     + break;
844     }
845     - spin_unlock(&c->erase_completion_lock);
846     if (!ret)
847     ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
848    
849     diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
850     index bd6e18be6e1a..39c0143fb3af 100644
851     --- a/fs/kernfs/dir.c
852     +++ b/fs/kernfs/dir.c
853     @@ -37,7 +37,7 @@ static unsigned int kernfs_name_hash(const char *name, const void *ns)
854     hash = (end_name_hash(hash) ^ hash_ptr((void *)ns, 31));
855     hash &= 0x7fffffffU;
856     /* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */
857     - if (hash < 1)
858     + if (hash < 2)
859     hash += 2;
860     if (hash >= INT_MAX)
861     hash = INT_MAX - 1;
862     diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
863     index e55126f85bd2..553946c9d952 100644
864     --- a/fs/kernfs/inode.c
865     +++ b/fs/kernfs/inode.c
866     @@ -48,14 +48,18 @@ void __init kernfs_inode_init(void)
867    
868     static struct kernfs_iattrs *kernfs_iattrs(struct kernfs_node *kn)
869     {
870     + static DEFINE_MUTEX(iattr_mutex);
871     + struct kernfs_iattrs *ret;
872     struct iattr *iattrs;
873    
874     + mutex_lock(&iattr_mutex);
875     +
876     if (kn->iattr)
877     - return kn->iattr;
878     + goto out_unlock;
879    
880     kn->iattr = kzalloc(sizeof(struct kernfs_iattrs), GFP_KERNEL);
881     if (!kn->iattr)
882     - return NULL;
883     + goto out_unlock;
884     iattrs = &kn->iattr->ia_iattr;
885    
886     /* assign default attributes */
887     @@ -65,8 +69,10 @@ static struct kernfs_iattrs *kernfs_iattrs(struct kernfs_node *kn)
888     iattrs->ia_atime = iattrs->ia_mtime = iattrs->ia_ctime = CURRENT_TIME;
889    
890     simple_xattrs_init(&kn->iattr->xattrs);
891     -
892     - return kn->iattr;
893     +out_unlock:
894     + ret = kn->iattr;
895     + mutex_unlock(&iattr_mutex);
896     + return ret;
897     }
898    
899     static int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
900     diff --git a/fs/posix_acl.c b/fs/posix_acl.c
901     index 11c54fd51e16..9e363e41dacc 100644
902     --- a/fs/posix_acl.c
903     +++ b/fs/posix_acl.c
904     @@ -723,7 +723,7 @@ posix_acl_to_xattr(struct user_namespace *user_ns, const struct posix_acl *acl,
905     void *buffer, size_t size)
906     {
907     posix_acl_xattr_header *ext_acl = (posix_acl_xattr_header *)buffer;
908     - posix_acl_xattr_entry *ext_entry = ext_acl->a_entries;
909     + posix_acl_xattr_entry *ext_entry;
910     int real_size, n;
911    
912     real_size = posix_acl_xattr_size(acl->a_count);
913     @@ -731,7 +731,8 @@ posix_acl_to_xattr(struct user_namespace *user_ns, const struct posix_acl *acl,
914     return real_size;
915     if (real_size > size)
916     return -ERANGE;
917     -
918     +
919     + ext_entry = ext_acl->a_entries;
920     ext_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
921    
922     for (n=0; n < acl->a_count; n++, ext_entry++) {
923     diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
924     index 796272a2e129..e69d57be866b 100644
925     --- a/fs/xfs/xfs_da_btree.c
926     +++ b/fs/xfs/xfs_da_btree.c
927     @@ -1295,7 +1295,7 @@ xfs_da3_fixhashpath(
928     node = blk->bp->b_addr;
929     dp->d_ops->node_hdr_from_disk(&nodehdr, node);
930     btree = dp->d_ops->node_tree_p(node);
931     - if (be32_to_cpu(btree->hashval) == lasthash)
932     + if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
933     break;
934     blk->hashval = lasthash;
935     btree[blk->index].hashval = cpu_to_be32(lasthash);
936     diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
937     index 24819001f5c8..e488e9459a93 100644
938     --- a/include/linux/backing-dev.h
939     +++ b/include/linux/backing-dev.h
940     @@ -95,7 +95,7 @@ struct backing_dev_info {
941     unsigned int max_ratio, max_prop_frac;
942    
943     struct bdi_writeback wb; /* default writeback info for this bdi */
944     - spinlock_t wb_lock; /* protects work_list */
945     + spinlock_t wb_lock; /* protects work_list & wb.dwork scheduling */
946    
947     struct list_head work_list;
948    
949     diff --git a/include/linux/tty.h b/include/linux/tty.h
950     index 90b4fdc8a61f..b90b5c221ff0 100644
951     --- a/include/linux/tty.h
952     +++ b/include/linux/tty.h
953     @@ -208,7 +208,7 @@ struct tty_port {
954     wait_queue_head_t delta_msr_wait; /* Modem status change */
955     unsigned long flags; /* TTY flags ASY_*/
956     unsigned char console:1, /* port is a console */
957     - low_latency:1; /* direct buffer flush */
958     + low_latency:1; /* optional: tune for latency */
959     struct mutex mutex; /* Locking */
960     struct mutex buf_mutex; /* Buffer alloc lock */
961     unsigned char *xmit_buf; /* Optional buffer */
962     diff --git a/kernel/exit.c b/kernel/exit.c
963     index 1e77fc645317..81b3d6789ee8 100644
964     --- a/kernel/exit.c
965     +++ b/kernel/exit.c
966     @@ -560,9 +560,6 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
967     struct list_head *dead)
968     {
969     list_move_tail(&p->sibling, &p->real_parent->children);
970     -
971     - if (p->exit_state == EXIT_DEAD)
972     - return;
973     /*
974     * If this is a threaded reparent there is no need to
975     * notify anyone anything has happened.
976     @@ -570,9 +567,19 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
977     if (same_thread_group(p->real_parent, father))
978     return;
979    
980     - /* We don't want people slaying init. */
981     + /*
982     + * We don't want people slaying init.
983     + *
984     + * Note: we do this even if it is EXIT_DEAD, wait_task_zombie()
985     + * can change ->exit_state to EXIT_ZOMBIE. If this is the final
986     + * state, do_notify_parent() was already called and ->exit_signal
987     + * doesn't matter.
988     + */
989     p->exit_signal = SIGCHLD;
990    
991     + if (p->exit_state == EXIT_DEAD)
992     + return;
993     +
994     /* If it has exited notify the new parent about this child's death. */
995     if (!p->ptrace &&
996     p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
997     @@ -784,6 +791,8 @@ void do_exit(long code)
998     exit_shm(tsk);
999     exit_files(tsk);
1000     exit_fs(tsk);
1001     + if (group_dead)
1002     + disassociate_ctty(1);
1003     exit_task_namespaces(tsk);
1004     exit_task_work(tsk);
1005     check_stack_usage();
1006     @@ -799,13 +808,9 @@ void do_exit(long code)
1007    
1008     cgroup_exit(tsk, 1);
1009    
1010     - if (group_dead)
1011     - disassociate_ctty(1);
1012     -
1013     module_put(task_thread_info(tsk)->exec_domain->module);
1014    
1015     proc_exit_connector(tsk);
1016     -
1017     /*
1018     * FIXME: do that only when needed, using sched_exit tracepoint
1019     */
1020     diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
1021     index 06c62de9c711..db95d8eb761b 100644
1022     --- a/kernel/pid_namespace.c
1023     +++ b/kernel/pid_namespace.c
1024     @@ -318,7 +318,9 @@ static void *pidns_get(struct task_struct *task)
1025     struct pid_namespace *ns;
1026    
1027     rcu_read_lock();
1028     - ns = get_pid_ns(task_active_pid_ns(task));
1029     + ns = task_active_pid_ns(task);
1030     + if (ns)
1031     + get_pid_ns(ns);
1032     rcu_read_unlock();
1033    
1034     return ns;
1035     diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
1036     index dd06439b9c84..80a57afd8647 100644
1037     --- a/kernel/user_namespace.c
1038     +++ b/kernel/user_namespace.c
1039     @@ -152,7 +152,7 @@ static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
1040    
1041     /* Find the matching extent */
1042     extents = map->nr_extents;
1043     - smp_read_barrier_depends();
1044     + smp_rmb();
1045     for (idx = 0; idx < extents; idx++) {
1046     first = map->extent[idx].first;
1047     last = first + map->extent[idx].count - 1;
1048     @@ -176,7 +176,7 @@ static u32 map_id_down(struct uid_gid_map *map, u32 id)
1049    
1050     /* Find the matching extent */
1051     extents = map->nr_extents;
1052     - smp_read_barrier_depends();
1053     + smp_rmb();
1054     for (idx = 0; idx < extents; idx++) {
1055     first = map->extent[idx].first;
1056     last = first + map->extent[idx].count - 1;
1057     @@ -199,7 +199,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id)
1058    
1059     /* Find the matching extent */
1060     extents = map->nr_extents;
1061     - smp_read_barrier_depends();
1062     + smp_rmb();
1063     for (idx = 0; idx < extents; idx++) {
1064     first = map->extent[idx].lower_first;
1065     last = first + map->extent[idx].count - 1;
1066     @@ -615,9 +615,8 @@ static ssize_t map_write(struct file *file, const char __user *buf,
1067     * were written before the count of the extents.
1068     *
1069     * To achieve this smp_wmb() is used on guarantee the write
1070     - * order and smp_read_barrier_depends() is guaranteed that we
1071     - * don't have crazy architectures returning stale data.
1072     - *
1073     + * order and smp_rmb() is guaranteed that we don't have crazy
1074     + * architectures returning stale data.
1075     */
1076     mutex_lock(&id_map_mutex);
1077    
1078     diff --git a/mm/backing-dev.c b/mm/backing-dev.c
1079     index ce682f7a4f29..09d9591b7708 100644
1080     --- a/mm/backing-dev.c
1081     +++ b/mm/backing-dev.c
1082     @@ -288,13 +288,19 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
1083     * Note, we wouldn't bother setting up the timer, but this function is on the
1084     * fast-path (used by '__mark_inode_dirty()'), so we save few context switches
1085     * by delaying the wake-up.
1086     + *
1087     + * We have to be careful not to postpone flush work if it is scheduled for
1088     + * earlier. Thus we use queue_delayed_work().
1089     */
1090     void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
1091     {
1092     unsigned long timeout;
1093    
1094     timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
1095     - mod_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
1096     + spin_lock_bh(&bdi->wb_lock);
1097     + if (test_bit(BDI_registered, &bdi->state))
1098     + queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
1099     + spin_unlock_bh(&bdi->wb_lock);
1100     }
1101    
1102     /*
1103     @@ -307,9 +313,6 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi)
1104     spin_unlock_bh(&bdi_lock);
1105    
1106     synchronize_rcu_expedited();
1107     -
1108     - /* bdi_list is now unused, clear it to mark @bdi dying */
1109     - INIT_LIST_HEAD(&bdi->bdi_list);
1110     }
1111    
1112     int bdi_register(struct backing_dev_info *bdi, struct device *parent,
1113     @@ -360,6 +363,11 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
1114     */
1115     bdi_remove_from_list(bdi);
1116    
1117     + /* Make sure nobody queues further work */
1118     + spin_lock_bh(&bdi->wb_lock);
1119     + clear_bit(BDI_registered, &bdi->state);
1120     + spin_unlock_bh(&bdi->wb_lock);
1121     +
1122     /*
1123     * Drain work list and shutdown the delayed_work. At this point,
1124     * @bdi->bdi_list is empty telling bdi_Writeback_workfn() that @bdi
1125     diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
1126     index 5f812455a450..60828cf02eb8 100644
1127     --- a/net/bluetooth/hci_event.c
1128     +++ b/net/bluetooth/hci_event.c
1129     @@ -3593,7 +3593,13 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1130    
1131     hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
1132    
1133     - if (ltk->type & HCI_SMP_STK) {
1134     + /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
1135     + * temporary key used to encrypt a connection following
1136     + * pairing. It is used during the Encrypted Session Setup to
1137     + * distribute the keys. Later, security can be re-established
1138     + * using a distributed LTK.
1139     + */
1140     + if (ltk->type == HCI_SMP_STK_SLAVE) {
1141     list_del(&ltk->list);
1142     kfree(ltk);
1143     }
1144     diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
1145     index 0356e1d437ca..f79fa8be203c 100644
1146     --- a/security/integrity/ima/ima.h
1147     +++ b/security/integrity/ima/ima.h
1148     @@ -27,7 +27,7 @@
1149     #include "../integrity.h"
1150    
1151     enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
1152     - IMA_SHOW_ASCII };
1153     + IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
1154     enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
1155    
1156     /* digest size for IMA, fits SHA1 or MD5 */
1157     diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
1158     index db01125926bd..468a3ba3c539 100644
1159     --- a/security/integrity/ima/ima_fs.c
1160     +++ b/security/integrity/ima/ima_fs.c
1161     @@ -160,6 +160,8 @@ static int ima_measurements_show(struct seq_file *m, void *v)
1162    
1163     if (is_ima_template && strcmp(field->field_id, "d") == 0)
1164     show = IMA_SHOW_BINARY_NO_FIELD_LEN;
1165     + if (is_ima_template && strcmp(field->field_id, "n") == 0)
1166     + show = IMA_SHOW_BINARY_OLD_STRING_FMT;
1167     field->field_show(m, show, &e->template_data[i]);
1168     }
1169     return 0;
1170     diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
1171     index 1683bbf289a4..e8592e7bfc21 100644
1172     --- a/security/integrity/ima/ima_template_lib.c
1173     +++ b/security/integrity/ima/ima_template_lib.c
1174     @@ -109,13 +109,16 @@ static void ima_show_template_data_binary(struct seq_file *m,
1175     enum data_formats datafmt,
1176     struct ima_field_data *field_data)
1177     {
1178     + u32 len = (show == IMA_SHOW_BINARY_OLD_STRING_FMT) ?
1179     + strlen(field_data->data) : field_data->len;
1180     +
1181     if (show != IMA_SHOW_BINARY_NO_FIELD_LEN)
1182     - ima_putc(m, &field_data->len, sizeof(u32));
1183     + ima_putc(m, &len, sizeof(len));
1184    
1185     - if (!field_data->len)
1186     + if (!len)
1187     return;
1188    
1189     - ima_putc(m, field_data->data, field_data->len);
1190     + ima_putc(m, field_data->data, len);
1191     }
1192    
1193     static void ima_show_template_field_data(struct seq_file *m,
1194     @@ -129,6 +132,7 @@ static void ima_show_template_field_data(struct seq_file *m,
1195     break;
1196     case IMA_SHOW_BINARY:
1197     case IMA_SHOW_BINARY_NO_FIELD_LEN:
1198     + case IMA_SHOW_BINARY_OLD_STRING_FMT:
1199     ima_show_template_data_binary(m, show, datafmt, field_data);
1200     break;
1201     default: