Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0114-4.9.15-all-fixes.patch



Revision 2956 - Mon Jul 24 12:03:46 2017 UTC by niro
File size: 96906 bytes
-added patches-4.9
1 niro 2956 diff --git a/Makefile b/Makefile
2     index 5e7706e94622..03df4fcacdf2 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 14
9     +SUBLEVEL = 15
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
14     index 602af692efdc..6bcbbece082b 100644
15     --- a/arch/s390/include/asm/processor.h
16     +++ b/arch/s390/include/asm/processor.h
17     @@ -89,7 +89,8 @@ extern void execve_tail(void);
18     * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
19     */
20    
21     -#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
22     +#define TASK_SIZE_OF(tsk) ((tsk)->mm ? \
23     + (tsk)->mm->context.asce_limit : TASK_MAX_SIZE)
24     #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
25     (1UL << 30) : (1UL << 41))
26     #define TASK_SIZE TASK_SIZE_OF(current)
27     diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
28     index f9293bfefb7f..408b4f4fda0f 100644
29     --- a/arch/s390/kernel/crash_dump.c
30     +++ b/arch/s390/kernel/crash_dump.c
31     @@ -329,7 +329,11 @@ static void *nt_init_name(void *buf, Elf64_Word type, void *desc, int d_len,
32    
33     static inline void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len)
34     {
35     - return nt_init_name(buf, type, desc, d_len, KEXEC_CORE_NOTE_NAME);
36     + const char *note_name = "LINUX";
37     +
38     + if (type == NT_PRPSINFO || type == NT_PRSTATUS || type == NT_PRFPREG)
39     + note_name = KEXEC_CORE_NOTE_NAME;
40     + return nt_init_name(buf, type, desc, d_len, note_name);
41     }
42    
43     /*
44     diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
45     index d027f2eb3559..e974e53ab597 100644
46     --- a/arch/s390/kernel/setup.c
47     +++ b/arch/s390/kernel/setup.c
48     @@ -819,10 +819,10 @@ static void __init setup_randomness(void)
49     {
50     struct sysinfo_3_2_2 *vmms;
51    
52     - vmms = (struct sysinfo_3_2_2 *) alloc_page(GFP_KERNEL);
53     - if (vmms && stsi(vmms, 3, 2, 2) == 0 && vmms->count)
54     - add_device_randomness(&vmms, vmms->count);
55     - free_page((unsigned long) vmms);
56     + vmms = (struct sysinfo_3_2_2 *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
57     + if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
58     + add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
59     + memblock_free((unsigned long) vmms, PAGE_SIZE);
60     }
61    
62     /*
63     diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
64     index 47a1de77b18d..5ba494ed18c1 100644
65     --- a/arch/s390/kvm/kvm-s390.c
66     +++ b/arch/s390/kvm/kvm-s390.c
67     @@ -442,6 +442,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
68     struct kvm_memory_slot *memslot;
69     int is_dirty = 0;
70    
71     + if (kvm_is_ucontrol(kvm))
72     + return -EINVAL;
73     +
74     mutex_lock(&kvm->slots_lock);
75    
76     r = -EINVAL;
77     diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
78     index 6fa85944af83..fc5abff9b7fd 100644
79     --- a/arch/x86/include/asm/tlbflush.h
80     +++ b/arch/x86/include/asm/tlbflush.h
81     @@ -188,7 +188,7 @@ static inline void __native_flush_tlb_single(unsigned long addr)
82    
83     static inline void __flush_tlb_all(void)
84     {
85     - if (static_cpu_has(X86_FEATURE_PGE))
86     + if (boot_cpu_has(X86_FEATURE_PGE))
87     __flush_tlb_global();
88     else
89     __flush_tlb();
90     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
91     index 64774f419c72..69b8f8a5ecb0 100644
92     --- a/arch/x86/kvm/vmx.c
93     +++ b/arch/x86/kvm/vmx.c
94     @@ -3693,7 +3693,7 @@ static void fix_rmode_seg(int seg, struct kvm_segment *save)
95     }
96    
97     vmcs_write16(sf->selector, var.selector);
98     - vmcs_write32(sf->base, var.base);
99     + vmcs_writel(sf->base, var.base);
100     vmcs_write32(sf->limit, var.limit);
101     vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
102     }
103     @@ -8202,7 +8202,7 @@ static void kvm_flush_pml_buffers(struct kvm *kvm)
104     static void vmx_dump_sel(char *name, uint32_t sel)
105     {
106     pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
107     - name, vmcs_read32(sel),
108     + name, vmcs_read16(sel),
109     vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
110     vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
111     vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
112     diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
113     index 0d4fb3ebbbac..1680768d392c 100644
114     --- a/arch/x86/mm/gup.c
115     +++ b/arch/x86/mm/gup.c
116     @@ -120,6 +120,11 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
117     return 0;
118     }
119    
120     + if (!pte_allows_gup(pte_val(pte), write)) {
121     + pte_unmap(ptep);
122     + return 0;
123     + }
124     +
125     if (pte_devmap(pte)) {
126     pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
127     if (unlikely(!pgmap)) {
128     @@ -127,8 +132,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
129     pte_unmap(ptep);
130     return 0;
131     }
132     - } else if (!pte_allows_gup(pte_val(pte), write) ||
133     - pte_special(pte)) {
134     + } else if (pte_special(pte)) {
135     pte_unmap(ptep);
136     return 0;
137     }
138     diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
139     index 32cdc2c52e98..a45d32abea26 100644
140     --- a/arch/xtensa/kernel/setup.c
141     +++ b/arch/xtensa/kernel/setup.c
142     @@ -133,6 +133,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag)
143    
144     __tagtable(BP_TAG_INITRD, parse_tag_initrd);
145    
146     +#endif /* CONFIG_BLK_DEV_INITRD */
147     +
148     #ifdef CONFIG_OF
149    
150     static int __init parse_tag_fdt(const bp_tag_t *tag)
151     @@ -145,8 +147,6 @@ __tagtable(BP_TAG_FDT, parse_tag_fdt);
152    
153     #endif /* CONFIG_OF */
154    
155     -#endif /* CONFIG_BLK_DEV_INITRD */
156     -
157     static int __init parse_tag_cmdline(const bp_tag_t* tag)
158     {
159     strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
160     diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
161     index 6eb6733a7a5c..d1664df001f8 100644
162     --- a/drivers/acpi/nfit/core.c
163     +++ b/drivers/acpi/nfit/core.c
164     @@ -1603,7 +1603,7 @@ static size_t sizeof_nfit_set_info(int num_mappings)
165     + num_mappings * sizeof(struct nfit_set_info_map);
166     }
167    
168     -static int cmp_map(const void *m0, const void *m1)
169     +static int cmp_map_compat(const void *m0, const void *m1)
170     {
171     const struct nfit_set_info_map *map0 = m0;
172     const struct nfit_set_info_map *map1 = m1;
173     @@ -1612,6 +1612,14 @@ static int cmp_map(const void *m0, const void *m1)
174     sizeof(u64));
175     }
176    
177     +static int cmp_map(const void *m0, const void *m1)
178     +{
179     + const struct nfit_set_info_map *map0 = m0;
180     + const struct nfit_set_info_map *map1 = m1;
181     +
182     + return map0->region_offset - map1->region_offset;
183     +}
184     +
185     /* Retrieve the nth entry referencing this spa */
186     static struct acpi_nfit_memory_map *memdev_from_spa(
187     struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
188     @@ -1667,6 +1675,12 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
189     sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
190     cmp_map, NULL);
191     nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
192     +
193     + /* support namespaces created with the wrong sort order */
194     + sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
195     + cmp_map_compat, NULL);
196     + nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
197     +
198     ndr_desc->nd_set = nd_set;
199     devm_kfree(dev, info);
200    
201     diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
202     index fadba88745dc..b793853ff05f 100644
203     --- a/drivers/bluetooth/ath3k.c
204     +++ b/drivers/bluetooth/ath3k.c
205     @@ -94,6 +94,7 @@ static const struct usb_device_id ath3k_table[] = {
206     { USB_DEVICE(0x04CA, 0x300f) },
207     { USB_DEVICE(0x04CA, 0x3010) },
208     { USB_DEVICE(0x04CA, 0x3014) },
209     + { USB_DEVICE(0x04CA, 0x3018) },
210     { USB_DEVICE(0x0930, 0x0219) },
211     { USB_DEVICE(0x0930, 0x021c) },
212     { USB_DEVICE(0x0930, 0x0220) },
213     @@ -162,6 +163,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
214     { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
215     { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
216     { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
217     + { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 },
218     { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
219     { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
220     { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
221     diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
222     index 2f633df9f4e6..dd220fad366c 100644
223     --- a/drivers/bluetooth/btusb.c
224     +++ b/drivers/bluetooth/btusb.c
225     @@ -209,6 +209,7 @@ static const struct usb_device_id blacklist_table[] = {
226     { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
227     { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
228     { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
229     + { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 },
230     { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
231     { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
232     { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
233     diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
234     index b9629b2bfc05..d1651a50c349 100644
235     --- a/drivers/dma/imx-sdma.c
236     +++ b/drivers/dma/imx-sdma.c
237     @@ -298,6 +298,7 @@ struct sdma_engine;
238     * @event_id1 for channels that use 2 events
239     * @word_size peripheral access size
240     * @buf_tail ID of the buffer that was processed
241     + * @buf_ptail ID of the previous buffer that was processed
242     * @num_bd max NUM_BD. number of descriptors currently handling
243     */
244     struct sdma_channel {
245     @@ -309,6 +310,7 @@ struct sdma_channel {
246     unsigned int event_id1;
247     enum dma_slave_buswidth word_size;
248     unsigned int buf_tail;
249     + unsigned int buf_ptail;
250     unsigned int num_bd;
251     unsigned int period_len;
252     struct sdma_buffer_descriptor *bd;
253     @@ -700,6 +702,8 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
254     sdmac->chn_real_count = bd->mode.count;
255     bd->mode.status |= BD_DONE;
256     bd->mode.count = sdmac->period_len;
257     + sdmac->buf_ptail = sdmac->buf_tail;
258     + sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd;
259    
260     /*
261     * The callback is called from the interrupt context in order
262     @@ -710,9 +714,6 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
263    
264     dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
265    
266     - sdmac->buf_tail++;
267     - sdmac->buf_tail %= sdmac->num_bd;
268     -
269     if (error)
270     sdmac->status = old_status;
271     }
272     @@ -1186,6 +1187,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
273     sdmac->flags = 0;
274    
275     sdmac->buf_tail = 0;
276     + sdmac->buf_ptail = 0;
277     + sdmac->chn_real_count = 0;
278    
279     dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
280     sg_len, channel);
281     @@ -1288,6 +1291,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
282     sdmac->status = DMA_IN_PROGRESS;
283    
284     sdmac->buf_tail = 0;
285     + sdmac->buf_ptail = 0;
286     + sdmac->chn_real_count = 0;
287     sdmac->period_len = period_len;
288    
289     sdmac->flags |= IMX_DMA_SG_LOOP;
290     @@ -1385,7 +1390,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
291     u32 residue;
292    
293     if (sdmac->flags & IMX_DMA_SG_LOOP)
294     - residue = (sdmac->num_bd - sdmac->buf_tail) *
295     + residue = (sdmac->num_bd - sdmac->buf_ptail) *
296     sdmac->period_len - sdmac->chn_real_count;
297     else
298     residue = sdmac->chn_count - sdmac->chn_real_count;
299     diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
300     index 7ddc32127d88..64a1df62cc65 100644
301     --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
302     +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
303     @@ -3814,9 +3814,15 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
304     default:
305     encoder->possible_crtcs = 0x3;
306     break;
307     + case 3:
308     + encoder->possible_crtcs = 0x7;
309     + break;
310     case 4:
311     encoder->possible_crtcs = 0xf;
312     break;
313     + case 5:
314     + encoder->possible_crtcs = 0x1f;
315     + break;
316     case 6:
317     encoder->possible_crtcs = 0x3f;
318     break;
319     diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
320     index 810c51d92b99..30672a3df8a9 100644
321     --- a/drivers/gpu/drm/ast/ast_post.c
322     +++ b/drivers/gpu/drm/ast/ast_post.c
323     @@ -58,13 +58,9 @@ bool ast_is_vga_enabled(struct drm_device *dev)
324     /* TODO 1180 */
325     } else {
326     ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
327     - if (ch) {
328     - ast_open_key(ast);
329     - ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff);
330     - return ch & 0x04;
331     - }
332     + return !!(ch & 0x01);
333     }
334     - return 0;
335     + return false;
336     }
337    
338     static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
339     @@ -375,8 +371,8 @@ void ast_post_gpu(struct drm_device *dev)
340     pci_write_config_dword(ast->dev->pdev, 0x04, reg);
341    
342     ast_enable_vga(dev);
343     - ast_enable_mmio(dev);
344     ast_open_key(ast);
345     + ast_enable_mmio(dev);
346     ast_set_def_ext_reg(dev);
347    
348     if (ast->chip == AST2300 || ast->chip == AST2400)
349     @@ -1630,12 +1626,44 @@ static void ast_init_dram_2300(struct drm_device *dev)
350     temp |= 0x73;
351     ast_write32(ast, 0x12008, temp);
352    
353     + param.dram_freq = 396;
354     param.dram_type = AST_DDR3;
355     + temp = ast_mindwm(ast, 0x1e6e2070);
356     if (temp & 0x01000000)
357     param.dram_type = AST_DDR2;
358     - param.dram_chipid = ast->dram_type;
359     - param.dram_freq = ast->mclk;
360     - param.vram_size = ast->vram_size;
361     + switch (temp & 0x18000000) {
362     + case 0:
363     + param.dram_chipid = AST_DRAM_512Mx16;
364     + break;
365     + default:
366     + case 0x08000000:
367     + param.dram_chipid = AST_DRAM_1Gx16;
368     + break;
369     + case 0x10000000:
370     + param.dram_chipid = AST_DRAM_2Gx16;
371     + break;
372     + case 0x18000000:
373     + param.dram_chipid = AST_DRAM_4Gx16;
374     + break;
375     + }
376     + switch (temp & 0x0c) {
377     + default:
378     + case 0x00:
379     + param.vram_size = AST_VIDMEM_SIZE_8M;
380     + break;
381     +
382     + case 0x04:
383     + param.vram_size = AST_VIDMEM_SIZE_16M;
384     + break;
385     +
386     + case 0x08:
387     + param.vram_size = AST_VIDMEM_SIZE_32M;
388     + break;
389     +
390     + case 0x0c:
391     + param.vram_size = AST_VIDMEM_SIZE_64M;
392     + break;
393     + }
394    
395     if (param.dram_type == AST_DDR3) {
396     get_ddr3_info(ast, &param);
397     diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
398     index a05bb3891119..2e42a0584a84 100644
399     --- a/drivers/gpu/drm/drm_atomic_helper.c
400     +++ b/drivers/gpu/drm/drm_atomic_helper.c
401     @@ -362,7 +362,7 @@ mode_fixup(struct drm_atomic_state *state)
402     struct drm_connector *connector;
403     struct drm_connector_state *conn_state;
404     int i;
405     - bool ret;
406     + int ret;
407    
408     for_each_crtc_in_state(state, crtc, crtc_state, i) {
409     if (!crtc_state->mode_changed &&
410     diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
411     index ec77bd3e1f08..7491180698d1 100644
412     --- a/drivers/gpu/drm/drm_edid.c
413     +++ b/drivers/gpu/drm/drm_edid.c
414     @@ -145,6 +145,9 @@ static struct edid_quirk {
415    
416     /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
417     { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
418     +
419     + /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
420     + { "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
421     };
422    
423     /*
424     diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
425     index 6c75e62c0b22..6a48d6637e5c 100644
426     --- a/drivers/gpu/drm/drm_fb_helper.c
427     +++ b/drivers/gpu/drm/drm_fb_helper.c
428     @@ -848,6 +848,9 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
429     if (!drm_fbdev_emulation)
430     return;
431    
432     + cancel_work_sync(&fb_helper->resume_work);
433     + cancel_work_sync(&fb_helper->dirty_work);
434     +
435     if (!list_empty(&fb_helper->kernel_fb_list)) {
436     list_del(&fb_helper->kernel_fb_list);
437     if (list_empty(&kernel_fb_helper_list)) {
438     diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
439     index 9a71ed546b90..f46aac1e85fb 100644
440     --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
441     +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
442     @@ -415,6 +415,11 @@ int i915_gem_init_stolen(struct drm_device *dev)
443    
444     mutex_init(&dev_priv->mm.stolen_lock);
445    
446     + if (intel_vgpu_active(dev_priv)) {
447     + DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
448     + return 0;
449     + }
450     +
451     #ifdef CONFIG_INTEL_IOMMU
452     if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
453     DRM_INFO("DMAR active, disabling use of stolen memory\n");
454     diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
455     index 055525013d2f..7b06280b23aa 100644
456     --- a/drivers/gpu/drm/i915/intel_dp.c
457     +++ b/drivers/gpu/drm/i915/intel_dp.c
458     @@ -2832,6 +2832,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
459     enum pipe pipe = intel_dp->pps_pipe;
460     i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
461    
462     + if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
463     + return;
464     +
465     edp_panel_vdd_off_sync(intel_dp);
466    
467     /*
468     @@ -2859,9 +2862,6 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
469    
470     lockdep_assert_held(&dev_priv->pps_mutex);
471    
472     - if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
473     - return;
474     -
475     for_each_intel_encoder(dev, encoder) {
476     struct intel_dp *intel_dp;
477     enum port port;
478     diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
479     index 7acbbbf97833..4534e4cadccf 100644
480     --- a/drivers/gpu/drm/i915/intel_opregion.c
481     +++ b/drivers/gpu/drm/i915/intel_opregion.c
482     @@ -1031,7 +1031,18 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
483     opregion->vbt_size = vbt_size;
484     } else {
485     vbt = base + OPREGION_VBT_OFFSET;
486     - vbt_size = OPREGION_ASLE_EXT_OFFSET - OPREGION_VBT_OFFSET;
487     + /*
488     + * The VBT specification says that if the ASLE ext
489     + * mailbox is not used its area is reserved, but
490     + * on some CHT boards the VBT extends into the
491     + * ASLE ext area. Allow this even though it is
492     + * against the spec, so we do not end up rejecting
493     + * the VBT on those boards (and end up not finding the
494     + * LCD panel because of this).
495     + */
496     + vbt_size = (mboxes & MBOX_ASLE_EXT) ?
497     + OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
498     + vbt_size -= OPREGION_VBT_OFFSET;
499     if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
500     DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
501     opregion->vbt = vbt;
502     diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
503     index 8fc088843e55..89cf0090feac 100644
504     --- a/drivers/gpu/drm/imx/imx-tve.c
505     +++ b/drivers/gpu/drm/imx/imx-tve.c
506     @@ -98,6 +98,8 @@
507     /* TVE_TST_MODE_REG */
508     #define TVE_TVDAC_TEST_MODE_MASK (0x7 << 0)
509    
510     +#define IMX_TVE_DAC_VOLTAGE 2750000
511     +
512     enum {
513     TVE_MODE_TVOUT,
514     TVE_MODE_VGA,
515     @@ -628,9 +630,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
516    
517     tve->dac_reg = devm_regulator_get(dev, "dac");
518     if (!IS_ERR(tve->dac_reg)) {
519     - ret = regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
520     - if (ret)
521     - return ret;
522     + if (regulator_get_voltage(tve->dac_reg) != IMX_TVE_DAC_VOLTAGE)
523     + dev_warn(dev, "dac voltage is not %d uV\n", IMX_TVE_DAC_VOLTAGE);
524     ret = regulator_enable(tve->dac_reg);
525     if (ret)
526     return ret;
527     diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
528     index fc6217dfe401..35cc16f9fec9 100644
529     --- a/drivers/gpu/drm/ttm/ttm_bo.c
530     +++ b/drivers/gpu/drm/ttm/ttm_bo.c
531     @@ -1654,7 +1654,6 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
532     struct ttm_buffer_object *bo;
533     int ret = -EBUSY;
534     int put_count;
535     - uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
536    
537     spin_lock(&glob->lru_lock);
538     list_for_each_entry(bo, &glob->swap_lru, swap) {
539     @@ -1685,7 +1684,8 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
540     * Move to system cached
541     */
542    
543     - if ((bo->mem.placement & swap_placement) != swap_placement) {
544     + if (bo->mem.mem_type != TTM_PL_SYSTEM ||
545     + bo->ttm->caching_state != tt_cached) {
546     struct ttm_mem_reg evict_mem;
547    
548     evict_mem = bo->mem;
549     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
550     index 18061a4bc2f2..36005bdf3749 100644
551     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
552     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
553     @@ -199,9 +199,14 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
554     VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
555     vmw_present_readback_ioctl,
556     DRM_MASTER | DRM_AUTH),
557     + /*
558     + * The permissions of the below ioctl are overridden in
559     + * vmw_generic_ioctl(). We require either
560     + * DRM_MASTER or capable(CAP_SYS_ADMIN).
561     + */
562     VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
563     vmw_kms_update_layout_ioctl,
564     - DRM_MASTER | DRM_CONTROL_ALLOW),
565     + DRM_RENDER_ALLOW),
566     VMW_IOCTL_DEF(VMW_CREATE_SHADER,
567     vmw_shader_define_ioctl,
568     DRM_AUTH | DRM_RENDER_ALLOW),
569     @@ -1125,6 +1130,10 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
570    
571     return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
572     _IOC_SIZE(cmd));
573     + } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
574     + if (!drm_is_current_master(file_priv) &&
575     + !capable(CAP_SYS_ADMIN))
576     + return -EACCES;
577     }
578    
579     if (unlikely(ioctl->cmd != cmd))
580     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
581     index 1e59a486bba8..59ff4197173a 100644
582     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
583     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
584     @@ -41,9 +41,9 @@
585     #include <drm/ttm/ttm_module.h>
586     #include "vmwgfx_fence.h"
587    
588     -#define VMWGFX_DRIVER_DATE "20160210"
589     +#define VMWGFX_DRIVER_DATE "20170221"
590     #define VMWGFX_DRIVER_MAJOR 2
591     -#define VMWGFX_DRIVER_MINOR 11
592     +#define VMWGFX_DRIVER_MINOR 12
593     #define VMWGFX_DRIVER_PATCHLEVEL 0
594     #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
595     #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
596     diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
597     index 6e49a4dd99c0..e0a8216ecf2b 100644
598     --- a/drivers/hv/hv.c
599     +++ b/drivers/hv/hv.c
600     @@ -220,7 +220,7 @@ int hv_init(void)
601     /* See if the hypercall page is already set */
602     rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
603    
604     - virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
605     + virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
606    
607     if (!virtaddr)
608     goto cleanup;
609     diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
610     index 729b0696626e..d61fd2c727c0 100644
611     --- a/drivers/infiniband/hw/mlx5/srq.c
612     +++ b/drivers/infiniband/hw/mlx5/srq.c
613     @@ -165,8 +165,6 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
614     int err;
615     int i;
616     struct mlx5_wqe_srq_next_seg *next;
617     - int page_shift;
618     - int npages;
619    
620     err = mlx5_db_alloc(dev->mdev, &srq->db);
621     if (err) {
622     @@ -179,7 +177,6 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
623     err = -ENOMEM;
624     goto err_db;
625     }
626     - page_shift = srq->buf.page_shift;
627    
628     srq->head = 0;
629     srq->tail = srq->msrq.max - 1;
630     @@ -191,10 +188,8 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
631     cpu_to_be16((i + 1) & (srq->msrq.max - 1));
632     }
633    
634     - npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
635     - mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
636     - buf_size, page_shift, srq->buf.npages, npages);
637     - in->pas = mlx5_vzalloc(sizeof(*in->pas) * npages);
638     + mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
639     + in->pas = mlx5_vzalloc(sizeof(*in->pas) * srq->buf.npages);
640     if (!in->pas) {
641     err = -ENOMEM;
642     goto err_buf;
643     @@ -210,7 +205,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
644     }
645     srq->wq_sig = !!srq_signature;
646    
647     - in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
648     + in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
649     if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
650     in->type == IB_SRQT_XRC)
651     in->user_index = MLX5_IB_DEFAULT_UIDX;
652     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
653     index 81a8080c18b3..0616a65f0d78 100644
654     --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
655     +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
656     @@ -1511,12 +1511,14 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
657    
658     ret = ipoib_set_mode(dev, buf);
659    
660     - rtnl_unlock();
661     -
662     - if (!ret)
663     - return count;
664     + /* The assumption is that the function ipoib_set_mode returned
665     + * with the rtnl held by it, if not the value -EBUSY returned,
666     + * then no need to rtnl_unlock
667     + */
668     + if (ret != -EBUSY)
669     + rtnl_unlock();
670    
671     - return ret;
672     + return (!ret || ret == -EBUSY) ? count : ret;
673     }
674    
675     static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
676     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
677     index b58d9dca5c93..3ef7b8f049c4 100644
678     --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
679     +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
680     @@ -468,8 +468,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
681     priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
682    
683     ipoib_flush_paths(dev);
684     - rtnl_lock();
685     - return 0;
686     + return (!rtnl_trylock()) ? -EBUSY : 0;
687     }
688    
689     if (!strcmp(buf, "datagram\n")) {
690     @@ -478,8 +477,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
691     dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
692     rtnl_unlock();
693     ipoib_flush_paths(dev);
694     - rtnl_lock();
695     - return 0;
696     + return (!rtnl_trylock()) ? -EBUSY : 0;
697     }
698    
699     return -EINVAL;
700     @@ -703,6 +701,14 @@ int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv)
701     return ret;
702     }
703    
704     +static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
705     +{
706     + struct ipoib_pseudo_header *phdr;
707     +
708     + phdr = (struct ipoib_pseudo_header *)skb_push(skb, sizeof(*phdr));
709     + memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
710     +}
711     +
712     void ipoib_flush_paths(struct net_device *dev)
713     {
714     struct ipoib_dev_priv *priv = netdev_priv(dev);
715     @@ -927,8 +933,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
716     }
717     if (skb_queue_len(&neigh->queue) <
718     IPOIB_MAX_PATH_REC_QUEUE) {
719     - /* put pseudoheader back on for next time */
720     - skb_push(skb, IPOIB_PSEUDO_LEN);
721     + push_pseudo_header(skb, neigh->daddr);
722     __skb_queue_tail(&neigh->queue, skb);
723     } else {
724     ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
725     @@ -946,10 +951,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
726    
727     if (!path->query && path_rec_start(dev, path))
728     goto err_path;
729     - if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
730     + if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
731     + push_pseudo_header(skb, neigh->daddr);
732     __skb_queue_tail(&neigh->queue, skb);
733     - else
734     + } else {
735     goto err_drop;
736     + }
737     }
738    
739     spin_unlock_irqrestore(&priv->lock, flags);
740     @@ -985,8 +992,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
741     }
742     if (path) {
743     if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
744     - /* put pseudoheader back on for next time */
745     - skb_push(skb, IPOIB_PSEUDO_LEN);
746     + push_pseudo_header(skb, phdr->hwaddr);
747     __skb_queue_tail(&path->queue, skb);
748     } else {
749     ++dev->stats.tx_dropped;
750     @@ -1018,8 +1024,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
751     return;
752     } else if ((path->query || !path_rec_start(dev, path)) &&
753     skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
754     - /* put pseudoheader back on for next time */
755     - skb_push(skb, IPOIB_PSEUDO_LEN);
756     + push_pseudo_header(skb, phdr->hwaddr);
757     __skb_queue_tail(&path->queue, skb);
758     } else {
759     ++dev->stats.tx_dropped;
760     @@ -1100,8 +1105,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
761     }
762    
763     if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
764     - /* put pseudoheader back on for next time */
765     - skb_push(skb, sizeof(*phdr));
766     + push_pseudo_header(skb, phdr->hwaddr);
767     spin_lock_irqsave(&priv->lock, flags);
768     __skb_queue_tail(&neigh->queue, skb);
769     spin_unlock_irqrestore(&priv->lock, flags);
770     @@ -1133,7 +1137,6 @@ static int ipoib_hard_header(struct sk_buff *skb,
771     unsigned short type,
772     const void *daddr, const void *saddr, unsigned len)
773     {
774     - struct ipoib_pseudo_header *phdr;
775     struct ipoib_header *header;
776    
777     header = (struct ipoib_header *) skb_push(skb, sizeof *header);
778     @@ -1146,8 +1149,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
779     * destination address into skb hard header so we can figure out where
780     * to send the packet later.
781     */
782     - phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
783     - memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
784     + push_pseudo_header(skb, daddr);
785    
786     return IPOIB_HARD_LEN;
787     }
788     diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
789     index e7dcf14a76e2..1eee8f7e75ca 100644
790     --- a/drivers/infiniband/ulp/srp/ib_srp.c
791     +++ b/drivers/infiniband/ulp/srp/ib_srp.c
792     @@ -366,7 +366,6 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
793     struct srp_fr_desc *d;
794     struct ib_mr *mr;
795     int i, ret = -EINVAL;
796     - enum ib_mr_type mr_type;
797    
798     if (pool_size <= 0)
799     goto err;
800     @@ -380,13 +379,9 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
801     spin_lock_init(&pool->lock);
802     INIT_LIST_HEAD(&pool->free_list);
803    
804     - if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
805     - mr_type = IB_MR_TYPE_SG_GAPS;
806     - else
807     - mr_type = IB_MR_TYPE_MEM_REG;
808     -
809     for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
810     - mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
811     + mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
812     + max_page_list_len);
813     if (IS_ERR(mr)) {
814     ret = PTR_ERR(mr);
815     goto destroy_pool;
816     @@ -1877,17 +1872,24 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
817     if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
818     spin_lock_irqsave(&ch->lock, flags);
819     ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
820     + if (rsp->tag == ch->tsk_mgmt_tag) {
821     + ch->tsk_mgmt_status = -1;
822     + if (be32_to_cpu(rsp->resp_data_len) >= 4)
823     + ch->tsk_mgmt_status = rsp->data[3];
824     + complete(&ch->tsk_mgmt_done);
825     + } else {
826     + shost_printk(KERN_ERR, target->scsi_host,
827     + "Received tsk mgmt response too late for tag %#llx\n",
828     + rsp->tag);
829     + }
830     spin_unlock_irqrestore(&ch->lock, flags);
831     -
832     - ch->tsk_mgmt_status = -1;
833     - if (be32_to_cpu(rsp->resp_data_len) >= 4)
834     - ch->tsk_mgmt_status = rsp->data[3];
835     - complete(&ch->tsk_mgmt_done);
836     } else {
837     scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
838     - if (scmnd) {
839     + if (scmnd && scmnd->host_scribble) {
840     req = (void *)scmnd->host_scribble;
841     scmnd = srp_claim_req(ch, req, NULL, scmnd);
842     + } else {
843     + scmnd = NULL;
844     }
845     if (!scmnd) {
846     shost_printk(KERN_ERR, target->scsi_host,
847     @@ -2519,19 +2521,18 @@ srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
848     }
849    
850     static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
851     - u8 func)
852     + u8 func, u8 *status)
853     {
854     struct srp_target_port *target = ch->target;
855     struct srp_rport *rport = target->rport;
856     struct ib_device *dev = target->srp_host->srp_dev->dev;
857     struct srp_iu *iu;
858     struct srp_tsk_mgmt *tsk_mgmt;
859     + int res;
860    
861     if (!ch->connected || target->qp_in_error)
862     return -1;
863    
864     - init_completion(&ch->tsk_mgmt_done);
865     -
866     /*
867     * Lock the rport mutex to avoid that srp_create_ch_ib() is
868     * invoked while a task management function is being sent.
869     @@ -2554,10 +2555,16 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
870    
871     tsk_mgmt->opcode = SRP_TSK_MGMT;
872     int_to_scsilun(lun, &tsk_mgmt->lun);
873     - tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
874     tsk_mgmt->tsk_mgmt_func = func;
875     tsk_mgmt->task_tag = req_tag;
876    
877     + spin_lock_irq(&ch->lock);
878     + ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
879     + tsk_mgmt->tag = ch->tsk_mgmt_tag;
880     + spin_unlock_irq(&ch->lock);
881     +
882     + init_completion(&ch->tsk_mgmt_done);
883     +
884     ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
885     DMA_TO_DEVICE);
886     if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
887     @@ -2566,13 +2573,15 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
888    
889     return -1;
890     }
891     + res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
892     + msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
893     + if (res > 0 && status)
894     + *status = ch->tsk_mgmt_status;
895     mutex_unlock(&rport->mutex);
896    
897     - if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
898     - msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
899     - return -1;
900     + WARN_ON_ONCE(res < 0);
901    
902     - return 0;
903     + return res > 0 ? 0 : -1;
904     }
905    
906     static int srp_abort(struct scsi_cmnd *scmnd)
907     @@ -2598,7 +2607,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
908     shost_printk(KERN_ERR, target->scsi_host,
909     "Sending SRP abort for tag %#x\n", tag);
910     if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
911     - SRP_TSK_ABORT_TASK) == 0)
912     + SRP_TSK_ABORT_TASK, NULL) == 0)
913     ret = SUCCESS;
914     else if (target->rport->state == SRP_RPORT_LOST)
915     ret = FAST_IO_FAIL;
916     @@ -2616,14 +2625,15 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
917     struct srp_target_port *target = host_to_target(scmnd->device->host);
918     struct srp_rdma_ch *ch;
919     int i;
920     + u8 status;
921    
922     shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
923    
924     ch = &target->ch[0];
925     if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
926     - SRP_TSK_LUN_RESET))
927     + SRP_TSK_LUN_RESET, &status))
928     return FAILED;
929     - if (ch->tsk_mgmt_status)
930     + if (status)
931     return FAILED;
932    
933     for (i = 0; i < target->ch_count; i++) {
934     @@ -2652,9 +2662,8 @@ static int srp_slave_alloc(struct scsi_device *sdev)
935     struct Scsi_Host *shost = sdev->host;
936     struct srp_target_port *target = host_to_target(shost);
937     struct srp_device *srp_dev = target->srp_host->srp_dev;
938     - struct ib_device *ibdev = srp_dev->dev;
939    
940     - if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
941     + if (true)
942     blk_queue_virt_boundary(sdev->request_queue,
943     ~srp_dev->mr_page_mask);
944    
945     diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
946     index 21c69695f9d4..32ed40db3ca2 100644
947     --- a/drivers/infiniband/ulp/srp/ib_srp.h
948     +++ b/drivers/infiniband/ulp/srp/ib_srp.h
949     @@ -163,6 +163,7 @@ struct srp_rdma_ch {
950     int max_ti_iu_len;
951     int comp_vector;
952    
953     + u64 tsk_mgmt_tag;
954     struct completion tsk_mgmt_done;
955     u8 tsk_mgmt_status;
956     bool connected;
957     diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
958     index b5ed3bd082b5..e9ebc4f31d16 100644
959     --- a/drivers/memory/atmel-ebi.c
960     +++ b/drivers/memory/atmel-ebi.c
961     @@ -93,7 +93,7 @@ static void at91sam9_ebi_get_config(struct at91_ebi_dev *ebid,
962     struct at91_ebi_dev_config *conf)
963     {
964     struct at91sam9_smc_generic_fields *fields = &ebid->ebi->sam9;
965     - unsigned int clk_rate = clk_get_rate(ebid->ebi->clk);
966     + unsigned int clk_period = NSEC_PER_SEC / clk_get_rate(ebid->ebi->clk);
967     struct at91sam9_ebi_dev_config *config = &conf->sam9;
968     struct at91sam9_smc_timings *timings = &config->timings;
969     unsigned int val;
970     @@ -102,43 +102,43 @@ static void at91sam9_ebi_get_config(struct at91_ebi_dev *ebid,
971     config->mode = val & ~AT91_SMC_TDF;
972    
973     val = (val & AT91_SMC_TDF) >> 16;
974     - timings->tdf_ns = clk_rate * val;
975     + timings->tdf_ns = clk_period * val;
976    
977     regmap_fields_read(fields->setup, conf->cs, &val);
978     timings->ncs_rd_setup_ns = (val >> 24) & 0x1f;
979     timings->ncs_rd_setup_ns += ((val >> 29) & 0x1) * 128;
980     - timings->ncs_rd_setup_ns *= clk_rate;
981     + timings->ncs_rd_setup_ns *= clk_period;
982     timings->nrd_setup_ns = (val >> 16) & 0x1f;
983     timings->nrd_setup_ns += ((val >> 21) & 0x1) * 128;
984     - timings->nrd_setup_ns *= clk_rate;
985     + timings->nrd_setup_ns *= clk_period;
986     timings->ncs_wr_setup_ns = (val >> 8) & 0x1f;
987     timings->ncs_wr_setup_ns += ((val >> 13) & 0x1) * 128;
988     - timings->ncs_wr_setup_ns *= clk_rate;
989     + timings->ncs_wr_setup_ns *= clk_period;
990     timings->nwe_setup_ns = val & 0x1f;
991     timings->nwe_setup_ns += ((val >> 5) & 0x1) * 128;
992     - timings->nwe_setup_ns *= clk_rate;
993     + timings->nwe_setup_ns *= clk_period;
994    
995     regmap_fields_read(fields->pulse, conf->cs, &val);
996     timings->ncs_rd_pulse_ns = (val >> 24) & 0x3f;
997     timings->ncs_rd_pulse_ns += ((val >> 30) & 0x1) * 256;
998     - timings->ncs_rd_pulse_ns *= clk_rate;
999     + timings->ncs_rd_pulse_ns *= clk_period;
1000     timings->nrd_pulse_ns = (val >> 16) & 0x3f;
1001     timings->nrd_pulse_ns += ((val >> 22) & 0x1) * 256;
1002     - timings->nrd_pulse_ns *= clk_rate;
1003     + timings->nrd_pulse_ns *= clk_period;
1004     timings->ncs_wr_pulse_ns = (val >> 8) & 0x3f;
1005     timings->ncs_wr_pulse_ns += ((val >> 14) & 0x1) * 256;
1006     - timings->ncs_wr_pulse_ns *= clk_rate;
1007     + timings->ncs_wr_pulse_ns *= clk_period;
1008     timings->nwe_pulse_ns = val & 0x3f;
1009     timings->nwe_pulse_ns += ((val >> 6) & 0x1) * 256;
1010     - timings->nwe_pulse_ns *= clk_rate;
1011     + timings->nwe_pulse_ns *= clk_period;
1012    
1013     regmap_fields_read(fields->cycle, conf->cs, &val);
1014     timings->nrd_cycle_ns = (val >> 16) & 0x7f;
1015     timings->nrd_cycle_ns += ((val >> 23) & 0x3) * 256;
1016     - timings->nrd_cycle_ns *= clk_rate;
1017     + timings->nrd_cycle_ns *= clk_period;
1018     timings->nwe_cycle_ns = val & 0x7f;
1019     timings->nwe_cycle_ns += ((val >> 7) & 0x3) * 256;
1020     - timings->nwe_cycle_ns *= clk_rate;
1021     + timings->nwe_cycle_ns *= clk_period;
1022     }
1023    
1024     static int at91_xlate_timing(struct device_node *np, const char *prop,
1025     @@ -334,6 +334,7 @@ static int at91sam9_ebi_apply_config(struct at91_ebi_dev *ebid,
1026     struct at91_ebi_dev_config *conf)
1027     {
1028     unsigned int clk_rate = clk_get_rate(ebid->ebi->clk);
1029     + unsigned int clk_period = NSEC_PER_SEC / clk_rate;
1030     struct at91sam9_ebi_dev_config *config = &conf->sam9;
1031     struct at91sam9_smc_timings *timings = &config->timings;
1032     struct at91sam9_smc_generic_fields *fields = &ebid->ebi->sam9;
1033     @@ -376,7 +377,7 @@ static int at91sam9_ebi_apply_config(struct at91_ebi_dev *ebid,
1034     val |= AT91SAM9_SMC_NWECYCLE(coded_val);
1035     regmap_fields_write(fields->cycle, conf->cs, val);
1036    
1037     - val = DIV_ROUND_UP(timings->tdf_ns, clk_rate);
1038     + val = DIV_ROUND_UP(timings->tdf_ns, clk_period);
1039     if (val > AT91_SMC_TDF_MAX)
1040     val = AT91_SMC_TDF_MAX;
1041     regmap_fields_write(fields->mode, conf->cs,
1042     diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
1043     index a144073593fa..52ee3da85366 100644
1044     --- a/drivers/misc/cxl/cxl.h
1045     +++ b/drivers/misc/cxl/cxl.h
1046     @@ -419,6 +419,9 @@ struct cxl_afu {
1047     struct mutex contexts_lock;
1048     spinlock_t afu_cntl_lock;
1049    
1050     + /* -1: AFU deconfigured/locked, >= 0: number of readers */
1051     + atomic_t configured_state;
1052     +
1053     /* AFU error buffer fields and bin attribute for sysfs */
1054     u64 eb_len, eb_offset;
1055     struct bin_attribute attr_eb;
1056     diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
1057     index 62e0dfb5f15b..cc1706a92ace 100644
1058     --- a/drivers/misc/cxl/main.c
1059     +++ b/drivers/misc/cxl/main.c
1060     @@ -268,7 +268,7 @@ struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
1061     idr_init(&afu->contexts_idr);
1062     mutex_init(&afu->contexts_lock);
1063     spin_lock_init(&afu->afu_cntl_lock);
1064     -
1065     + atomic_set(&afu->configured_state, -1);
1066     afu->prefault_mode = CXL_PREFAULT_NONE;
1067     afu->irqs_max = afu->adapter->user_irqs;
1068    
1069     diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
1070     index e96be9ca4e60..dd99b06e121a 100644
1071     --- a/drivers/misc/cxl/pci.c
1072     +++ b/drivers/misc/cxl/pci.c
1073     @@ -1129,6 +1129,7 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc
1074     if ((rc = cxl_native_register_psl_irq(afu)))
1075     goto err2;
1076    
1077     + atomic_set(&afu->configured_state, 0);
1078     return 0;
1079    
1080     err2:
1081     @@ -1141,6 +1142,14 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc
1082    
1083     static void pci_deconfigure_afu(struct cxl_afu *afu)
1084     {
1085     + /*
1086     + * It's okay to deconfigure when AFU is already locked, otherwise wait
1087     + * until there are no readers
1088     + */
1089     + if (atomic_read(&afu->configured_state) != -1) {
1090     + while (atomic_cmpxchg(&afu->configured_state, 0, -1) != -1)
1091     + schedule();
1092     + }
1093     cxl_native_release_psl_irq(afu);
1094     if (afu->adapter->native->sl_ops->release_serr_irq)
1095     afu->adapter->native->sl_ops->release_serr_irq(afu);
1096     diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
1097     index 3519acebfdab..512a4897dbf6 100644
1098     --- a/drivers/misc/cxl/vphb.c
1099     +++ b/drivers/misc/cxl/vphb.c
1100     @@ -76,23 +76,32 @@ static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
1101     return (bus << 8) + devfn;
1102     }
1103    
1104     -static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
1105     - struct cxl_afu **_afu, int *_record)
1106     +static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus)
1107     {
1108     - struct pci_controller *phb;
1109     - struct cxl_afu *afu;
1110     - int record;
1111     + struct pci_controller *phb = bus ? pci_bus_to_host(bus) : NULL;
1112    
1113     - phb = pci_bus_to_host(bus);
1114     - if (phb == NULL)
1115     - return PCIBIOS_DEVICE_NOT_FOUND;
1116     + return phb ? phb->private_data : NULL;
1117     +}
1118     +
1119     +static void cxl_afu_configured_put(struct cxl_afu *afu)
1120     +{
1121     + atomic_dec_if_positive(&afu->configured_state);
1122     +}
1123     +
1124     +static bool cxl_afu_configured_get(struct cxl_afu *afu)
1125     +{
1126     + return atomic_inc_unless_negative(&afu->configured_state);
1127     +}
1128     +
1129     +static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
1130     + struct cxl_afu *afu, int *_record)
1131     +{
1132     + int record;
1133    
1134     - afu = (struct cxl_afu *)phb->private_data;
1135     record = cxl_pcie_cfg_record(bus->number, devfn);
1136     if (record > afu->crs_num)
1137     return PCIBIOS_DEVICE_NOT_FOUND;
1138    
1139     - *_afu = afu;
1140     *_record = record;
1141     return 0;
1142     }
1143     @@ -106,9 +115,14 @@ static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
1144     u16 val16;
1145     u32 val32;
1146    
1147     - rc = cxl_pcie_config_info(bus, devfn, &afu, &record);
1148     + afu = pci_bus_to_afu(bus);
1149     + /* Grab a reader lock on afu. */
1150     + if (afu == NULL || !cxl_afu_configured_get(afu))
1151     + return PCIBIOS_DEVICE_NOT_FOUND;
1152     +
1153     + rc = cxl_pcie_config_info(bus, devfn, afu, &record);
1154     if (rc)
1155     - return rc;
1156     + goto out;
1157    
1158     switch (len) {
1159     case 1:
1160     @@ -127,10 +141,9 @@ static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
1161     WARN_ON(1);
1162     }
1163    
1164     - if (rc)
1165     - return PCIBIOS_DEVICE_NOT_FOUND;
1166     -
1167     - return PCIBIOS_SUCCESSFUL;
1168     +out:
1169     + cxl_afu_configured_put(afu);
1170     + return rc ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
1171     }
1172    
1173     static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
1174     @@ -139,9 +152,14 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
1175     int rc, record;
1176     struct cxl_afu *afu;
1177    
1178     - rc = cxl_pcie_config_info(bus, devfn, &afu, &record);
1179     + afu = pci_bus_to_afu(bus);
1180     + /* Grab a reader lock on afu. */
1181     + if (afu == NULL || !cxl_afu_configured_get(afu))
1182     + return PCIBIOS_DEVICE_NOT_FOUND;
1183     +
1184     + rc = cxl_pcie_config_info(bus, devfn, afu, &record);
1185     if (rc)
1186     - return rc;
1187     + goto out;
1188    
1189     switch (len) {
1190     case 1:
1191     @@ -157,10 +175,9 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
1192     WARN_ON(1);
1193     }
1194    
1195     - if (rc)
1196     - return PCIBIOS_SET_FAILED;
1197     -
1198     - return PCIBIOS_SUCCESSFUL;
1199     +out:
1200     + cxl_afu_configured_put(afu);
1201     + return rc ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
1202     }
1203    
1204     static struct pci_ops cxl_pcie_pci_ops =
1205     diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
1206     index 930c8165f2a8..0a4e81a253fb 100644
1207     --- a/drivers/net/ethernet/marvell/mvpp2.c
1208     +++ b/drivers/net/ethernet/marvell/mvpp2.c
1209     @@ -991,7 +991,7 @@ static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
1210     txq_pcpu->buffs + txq_pcpu->txq_put_index;
1211     tx_buf->skb = skb;
1212     tx_buf->size = tx_desc->data_size;
1213     - tx_buf->phys = tx_desc->buf_phys_addr;
1214     + tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset;
1215     txq_pcpu->txq_put_index++;
1216     if (txq_pcpu->txq_put_index == txq_pcpu->size)
1217     txq_pcpu->txq_put_index = 0;
1218     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1219     index b892dac70f4b..2458e6e05276 100644
1220     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1221     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1222     @@ -1660,7 +1660,7 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1223     pfirst->len, pfirst->next,
1224     pfirst->prev);
1225     skb_unlink(pfirst, &bus->glom);
1226     - if (brcmf_sdio_fromevntchan(pfirst->data))
1227     + if (brcmf_sdio_fromevntchan(&dptr[SDPCM_HWHDR_LEN]))
1228     brcmf_rx_event(bus->sdiodev->dev, pfirst);
1229     else
1230     brcmf_rx_frame(bus->sdiodev->dev, pfirst,
1231     diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
1232     index aefca644219b..a38ae34b74e4 100644
1233     --- a/drivers/nvdimm/namespace_devs.c
1234     +++ b/drivers/nvdimm/namespace_devs.c
1235     @@ -1700,6 +1700,7 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
1236     struct device *create_namespace_pmem(struct nd_region *nd_region,
1237     struct nd_namespace_label *nd_label)
1238     {
1239     + u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
1240     u64 cookie = nd_region_interleave_set_cookie(nd_region);
1241     struct nd_label_ent *label_ent;
1242     struct nd_namespace_pmem *nspm;
1243     @@ -1718,7 +1719,11 @@ struct device *create_namespace_pmem(struct nd_region *nd_region,
1244     if (__le64_to_cpu(nd_label->isetcookie) != cookie) {
1245     dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
1246     nd_label->uuid);
1247     - return ERR_PTR(-EAGAIN);
1248     + if (__le64_to_cpu(nd_label->isetcookie) != altcookie)
1249     + return ERR_PTR(-EAGAIN);
1250     +
1251     + dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
1252     + nd_label->uuid);
1253     }
1254    
1255     nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1256     @@ -1733,9 +1738,14 @@ struct device *create_namespace_pmem(struct nd_region *nd_region,
1257     res->name = dev_name(&nd_region->dev);
1258     res->flags = IORESOURCE_MEM;
1259    
1260     - for (i = 0; i < nd_region->ndr_mappings; i++)
1261     - if (!has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
1262     - break;
1263     + for (i = 0; i < nd_region->ndr_mappings; i++) {
1264     + if (has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
1265     + continue;
1266     + if (has_uuid_at_pos(nd_region, nd_label->uuid, altcookie, i))
1267     + continue;
1268     + break;
1269     + }
1270     +
1271     if (i < nd_region->ndr_mappings) {
1272     struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]);
1273    
1274     diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
1275     index d3b2fca8deec..d869236b474f 100644
1276     --- a/drivers/nvdimm/nd.h
1277     +++ b/drivers/nvdimm/nd.h
1278     @@ -327,6 +327,7 @@ struct nd_region *to_nd_region(struct device *dev);
1279     int nd_region_to_nstype(struct nd_region *nd_region);
1280     int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
1281     u64 nd_region_interleave_set_cookie(struct nd_region *nd_region);
1282     +u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
1283     void nvdimm_bus_lock(struct device *dev);
1284     void nvdimm_bus_unlock(struct device *dev);
1285     bool is_nvdimm_bus_locked(struct device *dev);
1286     diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
1287     index 6af5e629140c..9cf6f1a88fce 100644
1288     --- a/drivers/nvdimm/region_devs.c
1289     +++ b/drivers/nvdimm/region_devs.c
1290     @@ -505,6 +505,15 @@ u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
1291     return 0;
1292     }
1293    
1294     +u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
1295     +{
1296     + struct nd_interleave_set *nd_set = nd_region->nd_set;
1297     +
1298     + if (nd_set)
1299     + return nd_set->altcookie;
1300     + return 0;
1301     +}
1302     +
1303     void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
1304     {
1305     struct nd_label_ent *label_ent, *e;
1306     diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
1307     index acb2be0c8c2c..e96973b95e7a 100644
1308     --- a/drivers/pci/hotplug/pnv_php.c
1309     +++ b/drivers/pci/hotplug/pnv_php.c
1310     @@ -82,7 +82,7 @@ static void pnv_php_free_slot(struct kref *kref)
1311     static inline void pnv_php_put_slot(struct pnv_php_slot *php_slot)
1312     {
1313    
1314     - if (WARN_ON(!php_slot))
1315     + if (!php_slot)
1316     return;
1317    
1318     kref_put(&php_slot->kref, pnv_php_free_slot);
1319     @@ -436,9 +436,21 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
1320     if (ret)
1321     return ret;
1322    
1323     - /* Proceed if there have nothing behind the slot */
1324     - if (presence == OPAL_PCI_SLOT_EMPTY)
1325     + /*
1326     + * Proceed if there have nothing behind the slot. However,
1327     + * we should leave the slot in registered state at the
1328     + * beginning. Otherwise, the PCI devices inserted afterwards
1329     + * won't be probed and populated.
1330     + */
1331     + if (presence == OPAL_PCI_SLOT_EMPTY) {
1332     + if (!php_slot->power_state_check) {
1333     + php_slot->power_state_check = true;
1334     +
1335     + return 0;
1336     + }
1337     +
1338     goto scan;
1339     + }
1340    
1341     /*
1342     * If the power supply to the slot is off, we can't detect
1343     @@ -713,8 +725,12 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data)
1344     added = !!(lsts & PCI_EXP_LNKSTA_DLLLA);
1345     } else if (sts & PCI_EXP_SLTSTA_PDC) {
1346     ret = pnv_pci_get_presence_state(php_slot->id, &presence);
1347     - if (!ret)
1348     + if (ret) {
1349     + dev_warn(&pdev->dev, "PCI slot [%s] error %d getting presence (0x%04x), to retry the operation.\n",
1350     + php_slot->name, ret, sts);
1351     return IRQ_HANDLED;
1352     + }
1353     +
1354     added = !!(presence == OPAL_PCI_SLOT_PRESENT);
1355     } else {
1356     return IRQ_NONE;
1357     @@ -799,6 +815,14 @@ static void pnv_php_enable_irq(struct pnv_php_slot *php_slot)
1358     struct pci_dev *pdev = php_slot->pdev;
1359     int irq, ret;
1360    
1361     + /*
1362     + * The MSI/MSI-X interrupt might already be enabled by
1363     + * another driver. Don't populate the surprise hotplug capability
1364     + * in that case.
1365     + */
1366     + if (pci_dev_msi_enabled(pdev))
1367     + return;
1368     +
1369     ret = pci_enable_device(pdev);
1370     if (ret) {
1371     dev_warn(&pdev->dev, "Error %d enabling device\n", ret);
1372     diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
1373     index 117fccf7934a..01a6a83f625d 100644
1374     --- a/drivers/pwm/pwm-pca9685.c
1375     +++ b/drivers/pwm/pwm-pca9685.c
1376     @@ -65,7 +65,6 @@
1377     #define PCA9685_MAXCHAN 0x10
1378    
1379     #define LED_FULL (1 << 4)
1380     -#define MODE1_RESTART (1 << 7)
1381     #define MODE1_SLEEP (1 << 4)
1382     #define MODE2_INVRT (1 << 4)
1383     #define MODE2_OUTDRV (1 << 2)
1384     @@ -117,16 +116,6 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
1385     udelay(500);
1386    
1387     pca->period_ns = period_ns;
1388     -
1389     - /*
1390     - * If the duty cycle did not change, restart PWM with
1391     - * the same duty cycle to period ratio and return.
1392     - */
1393     - if (duty_ns == pca->duty_ns) {
1394     - regmap_update_bits(pca->regmap, PCA9685_MODE1,
1395     - MODE1_RESTART, 0x1);
1396     - return 0;
1397     - }
1398     } else {
1399     dev_err(chip->dev,
1400     "prescaler not set: period out of bounds!\n");
1401     diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
1402     index 9d66b4fb174b..415d10a67b7a 100644
1403     --- a/drivers/s390/block/dcssblk.c
1404     +++ b/drivers/s390/block/dcssblk.c
1405     @@ -892,7 +892,7 @@ dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
1406     dev_info = bdev->bd_disk->private_data;
1407     if (!dev_info)
1408     return -ENODEV;
1409     - dev_sz = dev_info->end - dev_info->start;
1410     + dev_sz = dev_info->end - dev_info->start + 1;
1411     offset = secnum * 512;
1412     *kaddr = (void *) dev_info->start + offset;
1413     *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), PFN_DEV);
1414     diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
1415     index 8225da619014..4182f60124da 100644
1416     --- a/drivers/s390/cio/ioasm.c
1417     +++ b/drivers/s390/cio/ioasm.c
1418     @@ -165,13 +165,15 @@ int tpi(struct tpi_info *addr)
1419     int chsc(void *chsc_area)
1420     {
1421     typedef struct { char _[4096]; } addr_type;
1422     - int cc;
1423     + int cc = -EIO;
1424    
1425     asm volatile(
1426     " .insn rre,0xb25f0000,%2,0\n"
1427     - " ipm %0\n"
1428     + "0: ipm %0\n"
1429     " srl %0,28\n"
1430     - : "=d" (cc), "=m" (*(addr_type *) chsc_area)
1431     + "1:\n"
1432     + EX_TABLE(0b, 1b)
1433     + : "+d" (cc), "=m" (*(addr_type *) chsc_area)
1434     : "d" (chsc_area), "m" (*(addr_type *) chsc_area)
1435     : "cc");
1436     trace_s390_cio_chsc(chsc_area, cc);
1437     diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
1438     index 5d06253c2a7a..30e9fbbff051 100644
1439     --- a/drivers/s390/cio/qdio_thinint.c
1440     +++ b/drivers/s390/cio/qdio_thinint.c
1441     @@ -147,11 +147,11 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
1442     struct qdio_q *q;
1443     int i;
1444    
1445     - for_each_input_queue(irq, q, i) {
1446     - if (!references_shared_dsci(irq) &&
1447     - has_multiple_inq_on_dsci(irq))
1448     - xchg(q->irq_ptr->dsci, 0);
1449     + if (!references_shared_dsci(irq) &&
1450     + has_multiple_inq_on_dsci(irq))
1451     + xchg(irq->dsci, 0);
1452    
1453     + for_each_input_queue(irq, q, i) {
1454     if (q->u.in.queue_start_poll) {
1455     /* skip if polling is enabled or already in work */
1456     if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1457     diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
1458     index ea9617c7b403..cc38a3509f78 100644
1459     --- a/drivers/target/target_core_device.c
1460     +++ b/drivers/target/target_core_device.c
1461     @@ -77,12 +77,16 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
1462     &deve->read_bytes);
1463    
1464     se_lun = rcu_dereference(deve->se_lun);
1465     +
1466     + if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
1467     + se_lun = NULL;
1468     + goto out_unlock;
1469     + }
1470     +
1471     se_cmd->se_lun = rcu_dereference(deve->se_lun);
1472     se_cmd->pr_res_key = deve->pr_res_key;
1473     se_cmd->orig_fe_lun = unpacked_lun;
1474     se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
1475     -
1476     - percpu_ref_get(&se_lun->lun_ref);
1477     se_cmd->lun_ref_active = true;
1478    
1479     if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
1480     @@ -96,6 +100,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
1481     goto ref_dev;
1482     }
1483     }
1484     +out_unlock:
1485     rcu_read_unlock();
1486    
1487     if (!se_lun) {
1488     @@ -815,6 +820,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1489     xcopy_lun = &dev->xcopy_lun;
1490     rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
1491     init_completion(&xcopy_lun->lun_ref_comp);
1492     + init_completion(&xcopy_lun->lun_shutdown_comp);
1493     INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
1494     INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
1495     mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
1496     diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
1497     index d99752c6cd60..2744251178ad 100644
1498     --- a/drivers/target/target_core_tpg.c
1499     +++ b/drivers/target/target_core_tpg.c
1500     @@ -445,7 +445,7 @@ static void core_tpg_lun_ref_release(struct percpu_ref *ref)
1501     {
1502     struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
1503    
1504     - complete(&lun->lun_ref_comp);
1505     + complete(&lun->lun_shutdown_comp);
1506     }
1507    
1508     int core_tpg_register(
1509     @@ -571,6 +571,7 @@ struct se_lun *core_tpg_alloc_lun(
1510     lun->lun_link_magic = SE_LUN_LINK_MAGIC;
1511     atomic_set(&lun->lun_acl_count, 0);
1512     init_completion(&lun->lun_ref_comp);
1513     + init_completion(&lun->lun_shutdown_comp);
1514     INIT_LIST_HEAD(&lun->lun_deve_list);
1515     INIT_LIST_HEAD(&lun->lun_dev_link);
1516     atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
1517     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
1518     index 767d1eb6e035..cae4dea6464e 100644
1519     --- a/drivers/target/target_core_transport.c
1520     +++ b/drivers/target/target_core_transport.c
1521     @@ -2702,10 +2702,39 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
1522     }
1523     EXPORT_SYMBOL(target_wait_for_sess_cmds);
1524    
1525     +static void target_lun_confirm(struct percpu_ref *ref)
1526     +{
1527     + struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
1528     +
1529     + complete(&lun->lun_ref_comp);
1530     +}
1531     +
1532     void transport_clear_lun_ref(struct se_lun *lun)
1533     {
1534     - percpu_ref_kill(&lun->lun_ref);
1535     + /*
1536     + * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop
1537     + * the initial reference and schedule confirm kill to be
1538     + * executed after one full RCU grace period has completed.
1539     + */
1540     + percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
1541     + /*
1542     + * The first completion waits for percpu_ref_switch_to_atomic_rcu()
1543     + * to call target_lun_confirm after lun->lun_ref has been marked
1544     + * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t
1545     + * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref
1546     + * fails for all new incoming I/O.
1547     + */
1548     wait_for_completion(&lun->lun_ref_comp);
1549     + /*
1550     + * The second completion waits for percpu_ref_put_many() to
1551     + * invoke ->release() after lun->lun_ref has switched to
1552     + * atomic_t mode, and lun->lun_ref.count has reached zero.
1553     + *
1554     + * At this point all target-core lun->lun_ref references have
1555     + * been dropped via transport_lun_remove_cmd(), and it's safe
1556     + * to proceed with the remaining LUN shutdown.
1557     + */
1558     + wait_for_completion(&lun->lun_shutdown_comp);
1559     }
1560    
1561     static bool
1562     diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
1563     index a7fa016f31eb..6d1e2f746ab4 100644
1564     --- a/drivers/tty/n_hdlc.c
1565     +++ b/drivers/tty/n_hdlc.c
1566     @@ -114,7 +114,7 @@
1567     #define DEFAULT_TX_BUF_COUNT 3
1568    
1569     struct n_hdlc_buf {
1570     - struct n_hdlc_buf *link;
1571     + struct list_head list_item;
1572     int count;
1573     char buf[1];
1574     };
1575     @@ -122,8 +122,7 @@ struct n_hdlc_buf {
1576     #define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe)
1577    
1578     struct n_hdlc_buf_list {
1579     - struct n_hdlc_buf *head;
1580     - struct n_hdlc_buf *tail;
1581     + struct list_head list;
1582     int count;
1583     spinlock_t spinlock;
1584     };
1585     @@ -136,7 +135,6 @@ struct n_hdlc_buf_list {
1586     * @backup_tty - TTY to use if tty gets closed
1587     * @tbusy - reentrancy flag for tx wakeup code
1588     * @woke_up - FIXME: describe this field
1589     - * @tbuf - currently transmitting tx buffer
1590     * @tx_buf_list - list of pending transmit frame buffers
1591     * @rx_buf_list - list of received frame buffers
1592     * @tx_free_buf_list - list unused transmit frame buffers
1593     @@ -149,7 +147,6 @@ struct n_hdlc {
1594     struct tty_struct *backup_tty;
1595     int tbusy;
1596     int woke_up;
1597     - struct n_hdlc_buf *tbuf;
1598     struct n_hdlc_buf_list tx_buf_list;
1599     struct n_hdlc_buf_list rx_buf_list;
1600     struct n_hdlc_buf_list tx_free_buf_list;
1601     @@ -159,6 +156,8 @@ struct n_hdlc {
1602     /*
1603     * HDLC buffer list manipulation functions
1604     */
1605     +static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
1606     + struct n_hdlc_buf *buf);
1607     static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
1608     struct n_hdlc_buf *buf);
1609     static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
1610     @@ -208,16 +207,9 @@ static void flush_tx_queue(struct tty_struct *tty)
1611     {
1612     struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
1613     struct n_hdlc_buf *buf;
1614     - unsigned long flags;
1615    
1616     while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
1617     n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
1618     - spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
1619     - if (n_hdlc->tbuf) {
1620     - n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf);
1621     - n_hdlc->tbuf = NULL;
1622     - }
1623     - spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
1624     }
1625    
1626     static struct tty_ldisc_ops n_hdlc_ldisc = {
1627     @@ -283,7 +275,6 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc)
1628     } else
1629     break;
1630     }
1631     - kfree(n_hdlc->tbuf);
1632     kfree(n_hdlc);
1633    
1634     } /* end of n_hdlc_release() */
1635     @@ -402,13 +393,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
1636     n_hdlc->woke_up = 0;
1637     spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
1638    
1639     - /* get current transmit buffer or get new transmit */
1640     - /* buffer from list of pending transmit buffers */
1641     -
1642     - tbuf = n_hdlc->tbuf;
1643     - if (!tbuf)
1644     - tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
1645     -
1646     + tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
1647     while (tbuf) {
1648     if (debuglevel >= DEBUG_LEVEL_INFO)
1649     printk("%s(%d)sending frame %p, count=%d\n",
1650     @@ -420,7 +405,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
1651    
1652     /* rollback was possible and has been done */
1653     if (actual == -ERESTARTSYS) {
1654     - n_hdlc->tbuf = tbuf;
1655     + n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
1656     break;
1657     }
1658     /* if transmit error, throw frame away by */
1659     @@ -435,10 +420,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
1660    
1661     /* free current transmit buffer */
1662     n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf);
1663     -
1664     - /* this tx buffer is done */
1665     - n_hdlc->tbuf = NULL;
1666     -
1667     +
1668     /* wake up sleeping writers */
1669     wake_up_interruptible(&tty->write_wait);
1670    
1671     @@ -448,10 +430,12 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
1672     if (debuglevel >= DEBUG_LEVEL_INFO)
1673     printk("%s(%d)frame %p pending\n",
1674     __FILE__,__LINE__,tbuf);
1675     -
1676     - /* buffer not accepted by driver */
1677     - /* set this buffer as pending buffer */
1678     - n_hdlc->tbuf = tbuf;
1679     +
1680     + /*
1681     + * the buffer was not accepted by the driver,
1682     + * so return it to the tx queue
1683     + */
1684     + n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
1685     break;
1686     }
1687     }
1688     @@ -749,7 +733,8 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
1689     int error = 0;
1690     int count;
1691     unsigned long flags;
1692     -
1693     + struct n_hdlc_buf *buf = NULL;
1694     +
1695     if (debuglevel >= DEBUG_LEVEL_INFO)
1696     printk("%s(%d)n_hdlc_tty_ioctl() called %d\n",
1697     __FILE__,__LINE__,cmd);
1698     @@ -763,8 +748,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
1699     /* report count of read data available */
1700     /* in next available frame (if any) */
1701     spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags);
1702     - if (n_hdlc->rx_buf_list.head)
1703     - count = n_hdlc->rx_buf_list.head->count;
1704     + buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list,
1705     + struct n_hdlc_buf, list_item);
1706     + if (buf)
1707     + count = buf->count;
1708     else
1709     count = 0;
1710     spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags);
1711     @@ -776,8 +763,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
1712     count = tty_chars_in_buffer(tty);
1713     /* add size of next output frame in queue */
1714     spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags);
1715     - if (n_hdlc->tx_buf_list.head)
1716     - count += n_hdlc->tx_buf_list.head->count;
1717     + buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list,
1718     + struct n_hdlc_buf, list_item);
1719     + if (buf)
1720     + count += buf->count;
1721     spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags);
1722     error = put_user(count, (int __user *)arg);
1723     break;
1724     @@ -825,14 +814,14 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
1725     poll_wait(filp, &tty->write_wait, wait);
1726    
1727     /* set bits for operations that won't block */
1728     - if (n_hdlc->rx_buf_list.head)
1729     + if (!list_empty(&n_hdlc->rx_buf_list.list))
1730     mask |= POLLIN | POLLRDNORM; /* readable */
1731     if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
1732     mask |= POLLHUP;
1733     if (tty_hung_up_p(filp))
1734     mask |= POLLHUP;
1735     if (!tty_is_writelocked(tty) &&
1736     - n_hdlc->tx_free_buf_list.head)
1737     + !list_empty(&n_hdlc->tx_free_buf_list.list))
1738     mask |= POLLOUT | POLLWRNORM; /* writable */
1739     }
1740     return mask;
1741     @@ -856,7 +845,12 @@ static struct n_hdlc *n_hdlc_alloc(void)
1742     spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock);
1743     spin_lock_init(&n_hdlc->rx_buf_list.spinlock);
1744     spin_lock_init(&n_hdlc->tx_buf_list.spinlock);
1745     -
1746     +
1747     + INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list);
1748     + INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list);
1749     + INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list);
1750     + INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list);
1751     +
1752     /* allocate free rx buffer list */
1753     for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
1754     buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
1755     @@ -884,53 +878,65 @@ static struct n_hdlc *n_hdlc_alloc(void)
1756     } /* end of n_hdlc_alloc() */
1757    
1758     /**
1759     + * n_hdlc_buf_return - put the HDLC buffer back at the head of the specified list
1760     + * @buf_list - pointer to the buffer list
1761     + * @buf - pointer to the buffer
1762     + */
1763     +static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
1764     + struct n_hdlc_buf *buf)
1765     +{
1766     + unsigned long flags;
1767     +
1768     + spin_lock_irqsave(&buf_list->spinlock, flags);
1769     +
1770     + list_add(&buf->list_item, &buf_list->list);
1771     + buf_list->count++;
1772     +
1773     + spin_unlock_irqrestore(&buf_list->spinlock, flags);
1774     +}
1775     +
1776     +/**
1777     * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
1778     - * @list - pointer to buffer list
1779     + * @buf_list - pointer to buffer list
1780     * @buf - pointer to buffer
1781     */
1782     -static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
1783     +static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
1784     struct n_hdlc_buf *buf)
1785     {
1786     unsigned long flags;
1787     - spin_lock_irqsave(&list->spinlock,flags);
1788     -
1789     - buf->link=NULL;
1790     - if (list->tail)
1791     - list->tail->link = buf;
1792     - else
1793     - list->head = buf;
1794     - list->tail = buf;
1795     - (list->count)++;
1796     -
1797     - spin_unlock_irqrestore(&list->spinlock,flags);
1798     -
1799     +
1800     + spin_lock_irqsave(&buf_list->spinlock, flags);
1801     +
1802     + list_add_tail(&buf->list_item, &buf_list->list);
1803     + buf_list->count++;
1804     +
1805     + spin_unlock_irqrestore(&buf_list->spinlock, flags);
1806     } /* end of n_hdlc_buf_put() */
1807    
1808     /**
1809     * n_hdlc_buf_get - remove and return an HDLC buffer from list
1810     - * @list - pointer to HDLC buffer list
1811     + * @buf_list - pointer to HDLC buffer list
1812     *
1813     * Remove and return an HDLC buffer from the head of the specified HDLC buffer
1814     * list.
1815     * Returns a pointer to HDLC buffer if available, otherwise %NULL.
1816     */
1817     -static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list)
1818     +static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
1819     {
1820     unsigned long flags;
1821     struct n_hdlc_buf *buf;
1822     - spin_lock_irqsave(&list->spinlock,flags);
1823     -
1824     - buf = list->head;
1825     +
1826     + spin_lock_irqsave(&buf_list->spinlock, flags);
1827     +
1828     + buf = list_first_entry_or_null(&buf_list->list,
1829     + struct n_hdlc_buf, list_item);
1830     if (buf) {
1831     - list->head = buf->link;
1832     - (list->count)--;
1833     + list_del(&buf->list_item);
1834     + buf_list->count--;
1835     }
1836     - if (!list->head)
1837     - list->tail = NULL;
1838     -
1839     - spin_unlock_irqrestore(&list->spinlock,flags);
1840     +
1841     + spin_unlock_irqrestore(&buf_list->spinlock, flags);
1842     return buf;
1843     -
1844     } /* end of n_hdlc_buf_get() */
1845    
1846     static char hdlc_banner[] __initdata =
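
The n_hdlc conversion above replaces an open-coded head/tail singly linked queue with the kernel's circular doubly linked list, which is what makes n_hdlc_buf_return() (re-queueing a rejected frame at the front) a one-liner. A self-contained re-implementation of the pattern, for illustration only; the kernel's real helpers live in <linux/list.h>:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

/* Insert n right after h: adds at the front of the queue. */
static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

/* Insert n right before h: adds at the back of the queue. */
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	list_add(n, h->prev);
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct buf {
	struct list_head list_item;
	int count;
};

int main(void)
{
	struct list_head q;
	struct buf a = { .count = 1 }, b = { .count = 2 };

	INIT_LIST_HEAD(&q);
	list_add_tail(&a.list_item, &q);   /* n_hdlc_buf_put */
	list_add_tail(&b.list_item, &q);

	struct buf *first = container_of(q.next, struct buf, list_item);
	list_del(&first->list_item);       /* n_hdlc_buf_get */
	list_add(&first->list_item, &q);   /* n_hdlc_buf_return */

	printf("head frame count=%d\n",
	       container_of(q.next, struct buf, list_item)->count);
	return 0;
}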
1847     diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
1848     index b98c1578f45a..4d09bd495a88 100644
1849     --- a/drivers/tty/serial/8250/8250_pci.c
1850     +++ b/drivers/tty/serial/8250/8250_pci.c
1851     @@ -2688,6 +2688,8 @@ enum pci_board_num_t {
1852     pbn_b0_4_1152000_200,
1853     pbn_b0_8_1152000_200,
1854    
1855     + pbn_b0_4_1250000,
1856     +
1857     pbn_b0_2_1843200,
1858     pbn_b0_4_1843200,
1859    
1860     @@ -2919,6 +2921,13 @@ static struct pciserial_board pci_boards[] = {
1861     .uart_offset = 0x200,
1862     },
1863    
1864     + [pbn_b0_4_1250000] = {
1865     + .flags = FL_BASE0,
1866     + .num_ports = 4,
1867     + .base_baud = 1250000,
1868     + .uart_offset = 8,
1869     + },
1870     +
1871     [pbn_b0_2_1843200] = {
1872     .flags = FL_BASE0,
1873     .num_ports = 2,
1874     @@ -5549,6 +5558,10 @@ static struct pci_device_id serial_pci_tbl[] = {
1875     { PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 },
1876     { PCI_DEVICE(0x1c29, 0x1112), .driver_data = pbn_fintek_12 },
1877    
1878     + /* MKS Tenta SCOM-080x serial cards */
1879     + { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
1880     + { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
1881     +
1882     /*
1883     * These entries match devices with class COMMUNICATION_SERIAL,
1884     * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
1885     diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
1886     index 81dd075356b9..d4fb0afc0097 100644
1887     --- a/fs/afs/mntpt.c
1888     +++ b/fs/afs/mntpt.c
1889     @@ -202,7 +202,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
1890    
1891     /* try and do the mount */
1892     _debug("--- attempting mount %s -o %s ---", devname, options);
1893     - mnt = vfs_kern_mount(&afs_fs_type, 0, devname, options);
1894     + mnt = vfs_submount(mntpt, &afs_fs_type, devname, options);
1895     _debug("--- mount result %p ---", mnt);
1896    
1897     free_page((unsigned long) devname);
1898     diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
1899     index e44271dfceb6..5db6c8d745ea 100644
1900     --- a/fs/autofs4/waitq.c
1901     +++ b/fs/autofs4/waitq.c
1902     @@ -431,8 +431,8 @@ int autofs4_wait(struct autofs_sb_info *sbi,
1903     memcpy(&wq->name, &qstr, sizeof(struct qstr));
1904     wq->dev = autofs4_get_dev(sbi);
1905     wq->ino = autofs4_get_ino(sbi);
1906     - wq->uid = current_real_cred()->uid;
1907     - wq->gid = current_real_cred()->gid;
1908     + wq->uid = current_cred()->uid;
1909     + wq->gid = current_cred()->gid;
1910     wq->pid = pid;
1911     wq->tgid = tgid;
1912     wq->status = -EINTR; /* Status return if interrupted */
1913     diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
1914     index 6a26c7bd1286..e3e1a80b351e 100644
1915     --- a/fs/ceph/mds_client.c
1916     +++ b/fs/ceph/mds_client.c
1917     @@ -628,6 +628,9 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
1918     {
1919     dout("__unregister_request %p tid %lld\n", req, req->r_tid);
1920    
1921     + /* Never leave an unregistered request on an unsafe list! */
1922     + list_del_init(&req->r_unsafe_item);
1923     +
1924     if (req->r_tid == mdsc->oldest_tid) {
1925     struct rb_node *p = rb_next(&req->r_node);
1926     mdsc->oldest_tid = 0;
1927     @@ -1036,7 +1039,6 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
1928     while (!list_empty(&session->s_unsafe)) {
1929     req = list_first_entry(&session->s_unsafe,
1930     struct ceph_mds_request, r_unsafe_item);
1931     - list_del_init(&req->r_unsafe_item);
1932     pr_warn_ratelimited(" dropping unsafe request %llu\n",
1933     req->r_tid);
1934     __unregister_request(mdsc, req);
1935     @@ -2423,7 +2425,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
1936     * useful we could do with a revised return value.
1937     */
1938     dout("got safe reply %llu, mds%d\n", tid, mds);
1939     - list_del_init(&req->r_unsafe_item);
1940    
1941     /* last unsafe request during umount? */
1942     if (mdsc->stopping && !__get_oldest_req(mdsc))
1943     diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
1944     index ec9dbbcca3b9..9156be545b0f 100644
1945     --- a/fs/cifs/cifs_dfs_ref.c
1946     +++ b/fs/cifs/cifs_dfs_ref.c
1947     @@ -245,7 +245,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
1948     * @fullpath: full path in UNC format
1949     * @ref: server's referral
1950     */
1951     -static struct vfsmount *cifs_dfs_do_refmount(struct cifs_sb_info *cifs_sb,
1952     +static struct vfsmount *cifs_dfs_do_refmount(struct dentry *mntpt,
1953     + struct cifs_sb_info *cifs_sb,
1954     const char *fullpath, const struct dfs_info3_param *ref)
1955     {
1956     struct vfsmount *mnt;
1957     @@ -259,7 +260,7 @@ static struct vfsmount *cifs_dfs_do_refmount(struct cifs_sb_info *cifs_sb,
1958     if (IS_ERR(mountdata))
1959     return (struct vfsmount *)mountdata;
1960    
1961     - mnt = vfs_kern_mount(&cifs_fs_type, 0, devname, mountdata);
1962     + mnt = vfs_submount(mntpt, &cifs_fs_type, devname, mountdata);
1963     kfree(mountdata);
1964     kfree(devname);
1965     return mnt;
1966     @@ -334,7 +335,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
1967     mnt = ERR_PTR(-EINVAL);
1968     break;
1969     }
1970     - mnt = cifs_dfs_do_refmount(cifs_sb,
1971     + mnt = cifs_dfs_do_refmount(mntpt, cifs_sb,
1972     full_path, referrals + i);
1973     cifs_dbg(FYI, "%s: cifs_dfs_do_refmount:%s , mnt:%p\n",
1974     __func__, referrals[i].node_name, mnt);
1975     diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
1976     index f17fcf89e18e..1e30f74a9527 100644
1977     --- a/fs/debugfs/inode.c
1978     +++ b/fs/debugfs/inode.c
1979     @@ -187,9 +187,9 @@ static const struct super_operations debugfs_super_operations = {
1980    
1981     static struct vfsmount *debugfs_automount(struct path *path)
1982     {
1983     - struct vfsmount *(*f)(void *);
1984     - f = (struct vfsmount *(*)(void *))path->dentry->d_fsdata;
1985     - return f(d_inode(path->dentry)->i_private);
1986     + debugfs_automount_t f;
1987     + f = (debugfs_automount_t)path->dentry->d_fsdata;
1988     + return f(path->dentry, d_inode(path->dentry)->i_private);
1989     }
1990    
1991     static const struct dentry_operations debugfs_dops = {
1992     @@ -504,7 +504,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_dir);
1993     */
1994     struct dentry *debugfs_create_automount(const char *name,
1995     struct dentry *parent,
1996     - struct vfsmount *(*f)(void *),
1997     + debugfs_automount_t f,
1998     void *data)
1999     {
2000     struct dentry *dentry = start_creating(name, parent);
2001     diff --git a/fs/fat/inode.c b/fs/fat/inode.c
2002     index 338d2f73eb29..a2c05f2ada6d 100644
2003     --- a/fs/fat/inode.c
2004     +++ b/fs/fat/inode.c
2005     @@ -1359,6 +1359,16 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
2006     return 0;
2007     }
2008    
2009     +static void fat_dummy_inode_init(struct inode *inode)
2010     +{
2011     + /* Initialize this dummy inode to work as a no-op. */
2012     + MSDOS_I(inode)->mmu_private = 0;
2013     + MSDOS_I(inode)->i_start = 0;
2014     + MSDOS_I(inode)->i_logstart = 0;
2015     + MSDOS_I(inode)->i_attrs = 0;
2016     + MSDOS_I(inode)->i_pos = 0;
2017     +}
2018     +
2019     static int fat_read_root(struct inode *inode)
2020     {
2021     struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
2022     @@ -1803,12 +1813,13 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
2023     fat_inode = new_inode(sb);
2024     if (!fat_inode)
2025     goto out_fail;
2026     - MSDOS_I(fat_inode)->i_pos = 0;
2027     + fat_dummy_inode_init(fat_inode);
2028     sbi->fat_inode = fat_inode;
2029    
2030     fsinfo_inode = new_inode(sb);
2031     if (!fsinfo_inode)
2032     goto out_fail;
2033     + fat_dummy_inode_init(fsinfo_inode);
2034     fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
2035     sbi->fsinfo_inode = fsinfo_inode;
2036     insert_inode_hash(fsinfo_inode);
2037     diff --git a/fs/mount.h b/fs/mount.h
2038     index d2e25d7b64b3..d8295f273a2f 100644
2039     --- a/fs/mount.h
2040     +++ b/fs/mount.h
2041     @@ -89,7 +89,6 @@ static inline int is_mounted(struct vfsmount *mnt)
2042     }
2043    
2044     extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *);
2045     -extern struct mount *__lookup_mnt_last(struct vfsmount *, struct dentry *);
2046    
2047     extern int __legitimize_mnt(struct vfsmount *, unsigned);
2048     extern bool legitimize_mnt(struct vfsmount *, unsigned);
2049     diff --git a/fs/namei.c b/fs/namei.c
2050     index 5b4eed221530..d5e5140c1045 100644
2051     --- a/fs/namei.c
2052     +++ b/fs/namei.c
2053     @@ -1100,7 +1100,6 @@ static int follow_automount(struct path *path, struct nameidata *nd,
2054     bool *need_mntput)
2055     {
2056     struct vfsmount *mnt;
2057     - const struct cred *old_cred;
2058     int err;
2059    
2060     if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
2061     @@ -1129,9 +1128,7 @@ static int follow_automount(struct path *path, struct nameidata *nd,
2062     if (nd->total_link_count >= 40)
2063     return -ELOOP;
2064    
2065     - old_cred = override_creds(&init_cred);
2066     mnt = path->dentry->d_op->d_automount(path);
2067     - revert_creds(old_cred);
2068     if (IS_ERR(mnt)) {
2069     /*
2070     * The filesystem is allowed to return -EISDIR here to indicate
2071     diff --git a/fs/namespace.c b/fs/namespace.c
2072     index 7cea503ae06d..5e35057f07ac 100644
2073     --- a/fs/namespace.c
2074     +++ b/fs/namespace.c
2075     @@ -641,28 +641,6 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
2076     }
2077    
2078     /*
2079     - * find the last mount at @dentry on vfsmount @mnt.
2080     - * mount_lock must be held.
2081     - */
2082     -struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
2083     -{
2084     - struct mount *p, *res = NULL;
2085     - p = __lookup_mnt(mnt, dentry);
2086     - if (!p)
2087     - goto out;
2088     - if (!(p->mnt.mnt_flags & MNT_UMOUNT))
2089     - res = p;
2090     - hlist_for_each_entry_continue(p, mnt_hash) {
2091     - if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
2092     - break;
2093     - if (!(p->mnt.mnt_flags & MNT_UMOUNT))
2094     - res = p;
2095     - }
2096     -out:
2097     - return res;
2098     -}
2099     -
2100     -/*
2101     * lookup_mnt - Return the first child mount mounted at path
2102     *
2103     * "First" means first mounted chronologically. If you create the
2104     @@ -882,6 +860,13 @@ void mnt_set_mountpoint(struct mount *mnt,
2105     hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
2106     }
2107    
2108     +static void __attach_mnt(struct mount *mnt, struct mount *parent)
2109     +{
2110     + hlist_add_head_rcu(&mnt->mnt_hash,
2111     + m_hash(&parent->mnt, mnt->mnt_mountpoint));
2112     + list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
2113     +}
2114     +
2115     /*
2116     * vfsmount lock must be held for write
2117     */
2118     @@ -890,28 +875,45 @@ static void attach_mnt(struct mount *mnt,
2119     struct mountpoint *mp)
2120     {
2121     mnt_set_mountpoint(parent, mp, mnt);
2122     - hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
2123     - list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
2124     + __attach_mnt(mnt, parent);
2125     }
2126    
2127     -static void attach_shadowed(struct mount *mnt,
2128     - struct mount *parent,
2129     - struct mount *shadows)
2130     +void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
2131     {
2132     - if (shadows) {
2133     - hlist_add_behind_rcu(&mnt->mnt_hash, &shadows->mnt_hash);
2134     - list_add(&mnt->mnt_child, &shadows->mnt_child);
2135     - } else {
2136     - hlist_add_head_rcu(&mnt->mnt_hash,
2137     - m_hash(&parent->mnt, mnt->mnt_mountpoint));
2138     - list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
2139     - }
2140     + struct mountpoint *old_mp = mnt->mnt_mp;
2141     + struct dentry *old_mountpoint = mnt->mnt_mountpoint;
2142     + struct mount *old_parent = mnt->mnt_parent;
2143     +
2144     + list_del_init(&mnt->mnt_child);
2145     + hlist_del_init(&mnt->mnt_mp_list);
2146     + hlist_del_init_rcu(&mnt->mnt_hash);
2147     +
2148     + attach_mnt(mnt, parent, mp);
2149     +
2150     + put_mountpoint(old_mp);
2151     +
2152     + /*
2153     + * Safely avoid even the suggestion this code might sleep or
2154     + * lock the mount hash by taking advantage of the knowledge that
2155     + * mnt_change_mountpoint will not release the final reference
2156     + * to a mountpoint.
2157     + *
2158     + * During mounting, the mount passed in as the parent mount will
2159     + * continue to use the old mountpoint and during unmounting, the
2160     + * old mountpoint will continue to exist until namespace_unlock,
2161     + * which happens well after mnt_change_mountpoint.
2162     + */
2163     + spin_lock(&old_mountpoint->d_lock);
2164     + old_mountpoint->d_lockref.count--;
2165     + spin_unlock(&old_mountpoint->d_lock);
2166     +
2167     + mnt_add_count(old_parent, -1);
2168     }
2169    
2170     /*
2171     * vfsmount lock must be held for write
2172     */
2173     -static void commit_tree(struct mount *mnt, struct mount *shadows)
2174     +static void commit_tree(struct mount *mnt)
2175     {
2176     struct mount *parent = mnt->mnt_parent;
2177     struct mount *m;
2178     @@ -929,7 +931,7 @@ static void commit_tree(struct mount *mnt, struct mount *shadows)
2179     n->mounts += n->pending_mounts;
2180     n->pending_mounts = 0;
2181    
2182     - attach_shadowed(mnt, parent, shadows);
2183     + __attach_mnt(mnt, parent);
2184     touch_mnt_namespace(n);
2185     }
2186    
2187     @@ -993,6 +995,21 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
2188     }
2189     EXPORT_SYMBOL_GPL(vfs_kern_mount);
2190    
2191     +struct vfsmount *
2192     +vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
2193     + const char *name, void *data)
2194     +{
2195     + /* Until it is worked out how to pass the user namespace
2196     + * through from the parent mount to the submount, don't support
2197     + * unprivileged mounts with submounts.
2198     + */
2199     + if (mountpoint->d_sb->s_user_ns != &init_user_ns)
2200     + return ERR_PTR(-EPERM);
2201     +
2202     + return vfs_kern_mount(type, MS_SUBMOUNT, name, data);
2203     +}
2204     +EXPORT_SYMBOL_GPL(vfs_submount);
2205     +
2206     static struct mount *clone_mnt(struct mount *old, struct dentry *root,
2207     int flag)
2208     {
2209     @@ -1737,7 +1754,6 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
2210     continue;
2211    
2212     for (s = r; s; s = next_mnt(s, r)) {
2213     - struct mount *t = NULL;
2214     if (!(flag & CL_COPY_UNBINDABLE) &&
2215     IS_MNT_UNBINDABLE(s)) {
2216     s = skip_mnt_tree(s);
2217     @@ -1759,14 +1775,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
2218     goto out;
2219     lock_mount_hash();
2220     list_add_tail(&q->mnt_list, &res->mnt_list);
2221     - mnt_set_mountpoint(parent, p->mnt_mp, q);
2222     - if (!list_empty(&parent->mnt_mounts)) {
2223     - t = list_last_entry(&parent->mnt_mounts,
2224     - struct mount, mnt_child);
2225     - if (t->mnt_mp != p->mnt_mp)
2226     - t = NULL;
2227     - }
2228     - attach_shadowed(q, parent, t);
2229     + attach_mnt(q, parent, p->mnt_mp);
2230     unlock_mount_hash();
2231     }
2232     }
2233     @@ -1967,10 +1976,18 @@ static int attach_recursive_mnt(struct mount *source_mnt,
2234     {
2235     HLIST_HEAD(tree_list);
2236     struct mnt_namespace *ns = dest_mnt->mnt_ns;
2237     + struct mountpoint *smp;
2238     struct mount *child, *p;
2239     struct hlist_node *n;
2240     int err;
2241    
2242     + /* Preallocate a mountpoint in case the new mounts need
2243     + * to be tucked under other mounts.
2244     + */
2245     + smp = get_mountpoint(source_mnt->mnt.mnt_root);
2246     + if (IS_ERR(smp))
2247     + return PTR_ERR(smp);
2248     +
2249     /* Is there space to add these mounts to the mount namespace? */
2250     if (!parent_path) {
2251     err = count_mounts(ns, source_mnt);
2252     @@ -1997,16 +2014,19 @@ static int attach_recursive_mnt(struct mount *source_mnt,
2253     touch_mnt_namespace(source_mnt->mnt_ns);
2254     } else {
2255     mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
2256     - commit_tree(source_mnt, NULL);
2257     + commit_tree(source_mnt);
2258     }
2259    
2260     hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
2261     struct mount *q;
2262     hlist_del_init(&child->mnt_hash);
2263     - q = __lookup_mnt_last(&child->mnt_parent->mnt,
2264     - child->mnt_mountpoint);
2265     - commit_tree(child, q);
2266     + q = __lookup_mnt(&child->mnt_parent->mnt,
2267     + child->mnt_mountpoint);
2268     + if (q)
2269     + mnt_change_mountpoint(child, smp, q);
2270     + commit_tree(child);
2271     }
2272     + put_mountpoint(smp);
2273     unlock_mount_hash();
2274    
2275     return 0;
2276     @@ -2021,6 +2041,11 @@ static int attach_recursive_mnt(struct mount *source_mnt,
2277     cleanup_group_ids(source_mnt, NULL);
2278     out:
2279     ns->pending_mounts = 0;
2280     +
2281     + read_seqlock_excl(&mount_lock);
2282     + put_mountpoint(smp);
2283     + read_sequnlock_excl(&mount_lock);
2284     +
2285     return err;
2286     }
2287    
2288     @@ -2769,7 +2794,7 @@ long do_mount(const char *dev_name, const char __user *dir_name,
2289    
2290     flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
2291     MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
2292     - MS_STRICTATIME | MS_NOREMOTELOCK);
2293     + MS_STRICTATIME | MS_NOREMOTELOCK | MS_SUBMOUNT);
2294    
2295     if (flags & MS_REMOUNT)
2296     retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
2297     diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
2298     index 5551e8ef67fd..e49d831c4e85 100644
2299     --- a/fs/nfs/namespace.c
2300     +++ b/fs/nfs/namespace.c
2301     @@ -226,7 +226,7 @@ static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
2302     const char *devname,
2303     struct nfs_clone_mount *mountdata)
2304     {
2305     - return vfs_kern_mount(&nfs_xdev_fs_type, 0, devname, mountdata);
2306     + return vfs_submount(mountdata->dentry, &nfs_xdev_fs_type, devname, mountdata);
2307     }
2308    
2309     /**
2310     diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
2311     index d21104912676..d8b040bd9814 100644
2312     --- a/fs/nfs/nfs4namespace.c
2313     +++ b/fs/nfs/nfs4namespace.c
2314     @@ -279,7 +279,7 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
2315     mountdata->hostname,
2316     mountdata->mnt_path);
2317    
2318     - mnt = vfs_kern_mount(&nfs4_referral_fs_type, 0, page, mountdata);
2319     + mnt = vfs_submount(mountdata->dentry, &nfs4_referral_fs_type, page, mountdata);
2320     if (!IS_ERR(mnt))
2321     break;
2322     }
2323     diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
2324     index c48859f16e7b..67c24351a67f 100644
2325     --- a/fs/orangefs/super.c
2326     +++ b/fs/orangefs/super.c
2327     @@ -115,6 +115,13 @@ static struct inode *orangefs_alloc_inode(struct super_block *sb)
2328     return &orangefs_inode->vfs_inode;
2329     }
2330    
2331     +static void orangefs_i_callback(struct rcu_head *head)
2332     +{
2333     + struct inode *inode = container_of(head, struct inode, i_rcu);
2334     + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
2335     + kmem_cache_free(orangefs_inode_cache, orangefs_inode);
2336     +}
2337     +
2338     static void orangefs_destroy_inode(struct inode *inode)
2339     {
2340     struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
2341     @@ -123,7 +130,7 @@ static void orangefs_destroy_inode(struct inode *inode)
2342     "%s: deallocated %p destroying inode %pU\n",
2343     __func__, orangefs_inode, get_khandle_from_ino(inode));
2344    
2345     - kmem_cache_free(orangefs_inode_cache, orangefs_inode);
2346     + call_rcu(&inode->i_rcu, orangefs_i_callback);
2347     }
2348    
2349     /*
2350     diff --git a/fs/pnode.c b/fs/pnode.c
2351     index 234a9ac49958..b394ca5307ec 100644
2352     --- a/fs/pnode.c
2353     +++ b/fs/pnode.c
2354     @@ -324,6 +324,21 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
2355     return ret;
2356     }
2357    
2358     +static struct mount *find_topper(struct mount *mnt)
2359     +{
2360     + /* If there is exactly one mount covering mnt completely, return it. */
2361     + struct mount *child;
2362     +
2363     + if (!list_is_singular(&mnt->mnt_mounts))
2364     + return NULL;
2365     +
2366     + child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child);
2367     + if (child->mnt_mountpoint != mnt->mnt.mnt_root)
2368     + return NULL;
2369     +
2370     + return child;
2371     +}
2372     +
2373     /*
2374     * return true if the refcount is greater than count
2375     */
2376     @@ -344,9 +359,8 @@ static inline int do_refcount_check(struct mount *mnt, int count)
2377     */
2378     int propagate_mount_busy(struct mount *mnt, int refcnt)
2379     {
2380     - struct mount *m, *child;
2381     + struct mount *m, *child, *topper;
2382     struct mount *parent = mnt->mnt_parent;
2383     - int ret = 0;
2384    
2385     if (mnt == parent)
2386     return do_refcount_check(mnt, refcnt);
2387     @@ -361,12 +375,24 @@ int propagate_mount_busy(struct mount *mnt, int refcnt)
2388    
2389     for (m = propagation_next(parent, parent); m;
2390     m = propagation_next(m, parent)) {
2391     - child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
2392     - if (child && list_empty(&child->mnt_mounts) &&
2393     - (ret = do_refcount_check(child, 1)))
2394     - break;
2395     + int count = 1;
2396     + child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
2397     + if (!child)
2398     + continue;
2399     +
2400     + /* Is there exactly one mount on the child that covers
2401     + * it completely, whose reference should be ignored?
2402     + */
2403     + topper = find_topper(child);
2404     + if (topper)
2405     + count += 1;
2406     + else if (!list_empty(&child->mnt_mounts))
2407     + continue;
2408     +
2409     + if (do_refcount_check(child, count))
2410     + return 1;
2411     }
2412     - return ret;
2413     + return 0;
2414     }
2415    
2416     /*
2417     @@ -383,7 +409,7 @@ void propagate_mount_unlock(struct mount *mnt)
2418    
2419     for (m = propagation_next(parent, parent); m;
2420     m = propagation_next(m, parent)) {
2421     - child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
2422     + child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
2423     if (child)
2424     child->mnt.mnt_flags &= ~MNT_LOCKED;
2425     }
2426     @@ -401,9 +427,11 @@ static void mark_umount_candidates(struct mount *mnt)
2427    
2428     for (m = propagation_next(parent, parent); m;
2429     m = propagation_next(m, parent)) {
2430     - struct mount *child = __lookup_mnt_last(&m->mnt,
2431     + struct mount *child = __lookup_mnt(&m->mnt,
2432     mnt->mnt_mountpoint);
2433     - if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) {
2434     + if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
2435     + continue;
2436     + if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
2437     SET_MNT_MARK(child);
2438     }
2439     }
2440     @@ -422,8 +450,8 @@ static void __propagate_umount(struct mount *mnt)
2441    
2442     for (m = propagation_next(parent, parent); m;
2443     m = propagation_next(m, parent)) {
2444     -
2445     - struct mount *child = __lookup_mnt_last(&m->mnt,
2446     + struct mount *topper;
2447     + struct mount *child = __lookup_mnt(&m->mnt,
2448     mnt->mnt_mountpoint);
2449     /*
2450     * umount the child only if the child has no children
2451     @@ -432,6 +460,15 @@ static void __propagate_umount(struct mount *mnt)
2452     if (!child || !IS_MNT_MARKED(child))
2453     continue;
2454     CLEAR_MNT_MARK(child);
2455     +
2456     + /* If there is exactly one mount covering all of child,
2457     + * replace child with that mount.
2458     + */
2459     + topper = find_topper(child);
2460     + if (topper)
2461     + mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
2462     + topper);
2463     +
2464     if (list_empty(&child->mnt_mounts)) {
2465     list_del_init(&child->mnt_child);
2466     child->mnt.mnt_flags |= MNT_UMOUNT;
2467     diff --git a/fs/pnode.h b/fs/pnode.h
2468     index 550f5a8b4fcf..dc87e65becd2 100644
2469     --- a/fs/pnode.h
2470     +++ b/fs/pnode.h
2471     @@ -49,6 +49,8 @@ int get_dominating_id(struct mount *mnt, const struct path *root);
2472     unsigned int mnt_get_count(struct mount *mnt);
2473     void mnt_set_mountpoint(struct mount *, struct mountpoint *,
2474     struct mount *);
2475     +void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp,
2476     + struct mount *mnt);
2477     struct mount *copy_tree(struct mount *, struct dentry *, int);
2478     bool is_path_reachable(struct mount *, struct dentry *,
2479     const struct path *root);
2480     diff --git a/fs/super.c b/fs/super.c
2481     index c183835566c1..1058bf3e8724 100644
2482     --- a/fs/super.c
2483     +++ b/fs/super.c
2484     @@ -470,7 +470,7 @@ struct super_block *sget_userns(struct file_system_type *type,
2485     struct super_block *old;
2486     int err;
2487    
2488     - if (!(flags & MS_KERNMOUNT) &&
2489     + if (!(flags & (MS_KERNMOUNT|MS_SUBMOUNT)) &&
2490     !(type->fs_flags & FS_USERNS_MOUNT) &&
2491     !capable(CAP_SYS_ADMIN))
2492     return ERR_PTR(-EPERM);
2493     @@ -500,7 +500,7 @@ struct super_block *sget_userns(struct file_system_type *type,
2494     }
2495     if (!s) {
2496     spin_unlock(&sb_lock);
2497     - s = alloc_super(type, flags, user_ns);
2498     + s = alloc_super(type, (flags & ~MS_SUBMOUNT), user_ns);
2499     if (!s)
2500     return ERR_PTR(-ENOMEM);
2501     goto retry;
2502     @@ -541,8 +541,15 @@ struct super_block *sget(struct file_system_type *type,
2503     {
2504     struct user_namespace *user_ns = current_user_ns();
2505    
2506     + /* We don't yet pass the user namespace of the parent
2507     + * mount through to here, so always use &init_user_ns
2508     + * until that changes.
2509     + */
2510     + if (flags & MS_SUBMOUNT)
2511     + user_ns = &init_user_ns;
2512     +
2513     /* Ensure the requestor has permissions over the target filesystem */
2514     - if (!(flags & MS_KERNMOUNT) && !ns_capable(user_ns, CAP_SYS_ADMIN))
2515     + if (!(flags & (MS_KERNMOUNT|MS_SUBMOUNT)) && !ns_capable(user_ns, CAP_SYS_ADMIN))
2516     return ERR_PTR(-EPERM);
2517    
2518     return sget_userns(type, test, set, flags, user_ns, data);
2519     diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
2520     index 9a9041784dcf..412906609954 100644
2521     --- a/include/linux/ceph/osdmap.h
2522     +++ b/include/linux/ceph/osdmap.h
2523     @@ -57,7 +57,7 @@ static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
2524     case CEPH_POOL_TYPE_EC:
2525     return false;
2526     default:
2527     - BUG_ON(1);
2528     + BUG();
2529     }
2530     }
2531    
2532     diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
2533     index 1b413a9aab81..b20a0945b550 100644
2534     --- a/include/linux/debugfs.h
2535     +++ b/include/linux/debugfs.h
2536     @@ -96,9 +96,10 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
2537     struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
2538     const char *dest);
2539    
2540     +typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
2541     struct dentry *debugfs_create_automount(const char *name,
2542     struct dentry *parent,
2543     - struct vfsmount *(*f)(void *),
2544     + debugfs_automount_t f,
2545     void *data);
2546    
2547     void debugfs_remove(struct dentry *dentry);
2548     diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
2549     index 8458c5351e56..77e7af32543f 100644
2550     --- a/include/linux/libnvdimm.h
2551     +++ b/include/linux/libnvdimm.h
2552     @@ -70,6 +70,8 @@ struct nd_cmd_desc {
2553    
2554     struct nd_interleave_set {
2555     u64 cookie;
2556     + /* compatibility with initial buggy Linux implementation */
2557     + u64 altcookie;
2558     };
2559    
2560     struct nd_mapping_desc {
2561     diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
2562     index c15373894a42..b37dee3acaba 100644
2563     --- a/include/linux/lockd/lockd.h
2564     +++ b/include/linux/lockd/lockd.h
2565     @@ -355,7 +355,8 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp)
2566     static inline int nlm_compare_locks(const struct file_lock *fl1,
2567     const struct file_lock *fl2)
2568     {
2569     - return fl1->fl_pid == fl2->fl_pid
2570     + return file_inode(fl1->fl_file) == file_inode(fl2->fl_file)
2571     + && fl1->fl_pid == fl2->fl_pid
2572     && fl1->fl_owner == fl2->fl_owner
2573     && fl1->fl_start == fl2->fl_start
2574     && fl1->fl_end == fl2->fl_end
2575     diff --git a/include/linux/mount.h b/include/linux/mount.h
2576     index 1172cce949a4..e0f3a82eee6d 100644
2577     --- a/include/linux/mount.h
2578     +++ b/include/linux/mount.h
2579     @@ -90,6 +90,9 @@ struct file_system_type;
2580     extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
2581     int flags, const char *name,
2582     void *data);
2583     +extern struct vfsmount *vfs_submount(const struct dentry *mountpoint,
2584     + struct file_system_type *type,
2585     + const char *name, void *data);
2586    
2587     extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list);
2588     extern void mark_mounts_for_expiry(struct list_head *mounts);
2589     diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
2590     index 48bc1ac1da43..6233e8fd95b5 100644
2591     --- a/include/target/target_core_base.h
2592     +++ b/include/target/target_core_base.h
2593     @@ -732,6 +732,7 @@ struct se_lun {
2594     struct config_group lun_group;
2595     struct se_port_stat_grps port_stat_grps;
2596     struct completion lun_ref_comp;
2597     + struct completion lun_shutdown_comp;
2598     struct percpu_ref lun_ref;
2599     struct list_head lun_dev_link;
2600     struct hlist_node link;
2601     diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
2602     index acb2b6152ba0..474995568f35 100644
2603     --- a/include/uapi/linux/fs.h
2604     +++ b/include/uapi/linux/fs.h
2605     @@ -132,6 +132,7 @@ struct inodes_stat_t {
2606     #define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
2607    
2608     /* These sb flags are internal to the kernel */
2609     +#define MS_SUBMOUNT (1<<26)
2610     #define MS_NOREMOTELOCK (1<<27)
2611     #define MS_NOSEC (1<<28)
2612     #define MS_BORN (1<<29)
2613     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2614     index 8696ce6bf2f6..90b66ed6f0e2 100644
2615     --- a/kernel/trace/trace.c
2616     +++ b/kernel/trace/trace.c
2617     @@ -7241,7 +7241,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
2618     ftrace_init_tracefs(tr, d_tracer);
2619     }
2620    
2621     -static struct vfsmount *trace_automount(void *ingore)
2622     +static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
2623     {
2624     struct vfsmount *mnt;
2625     struct file_system_type *type;
2626     @@ -7254,7 +7254,7 @@ static struct vfsmount *trace_automount(void *ingore)
2627     type = get_fs_type("tracefs");
2628     if (!type)
2629     return NULL;
2630     - mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
2631     + mnt = vfs_submount(mntpt, type, "tracefs", NULL);
2632     put_filesystem(type);
2633     if (IS_ERR(mnt))
2634     return NULL;
2635     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2636     index 4c6ade54d833..0de26691f0f5 100644
2637     --- a/mm/memcontrol.c
2638     +++ b/mm/memcontrol.c
2639     @@ -4139,17 +4139,22 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
2640     kfree(memcg->nodeinfo[node]);
2641     }
2642    
2643     -static void mem_cgroup_free(struct mem_cgroup *memcg)
2644     +static void __mem_cgroup_free(struct mem_cgroup *memcg)
2645     {
2646     int node;
2647    
2648     - memcg_wb_domain_exit(memcg);
2649     for_each_node(node)
2650     free_mem_cgroup_per_node_info(memcg, node);
2651     free_percpu(memcg->stat);
2652     kfree(memcg);
2653     }
2654    
2655     +static void mem_cgroup_free(struct mem_cgroup *memcg)
2656     +{
2657     + memcg_wb_domain_exit(memcg);
2658     + __mem_cgroup_free(memcg);
2659     +}
2660     +
2661     static struct mem_cgroup *mem_cgroup_alloc(void)
2662     {
2663     struct mem_cgroup *memcg;
2664     @@ -4200,7 +4205,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
2665     fail:
2666     if (memcg->id.id > 0)
2667     idr_remove(&mem_cgroup_idr, memcg->id.id);
2668     - mem_cgroup_free(memcg);
2669     + __mem_cgroup_free(memcg);
2670     return NULL;
2671     }
2672    
2673     diff --git a/mm/mlock.c b/mm/mlock.c
2674     index cdbed8aaa426..665ab75b5533 100644
2675     --- a/mm/mlock.c
2676     +++ b/mm/mlock.c
2677     @@ -441,7 +441,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
2678    
2679     while (start < end) {
2680     struct page *page;
2681     - unsigned int page_mask;
2682     + unsigned int page_mask = 0;
2683     unsigned long page_increm;
2684     struct pagevec pvec;
2685     struct zone *zone;
2686     @@ -455,8 +455,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
2687     * suits munlock very well (and if somehow an abnormal page
2688     * has sneaked into the range, we won't oops here: great).
2689     */
2690     - page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
2691     - &page_mask);
2692     + page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);
2693    
2694     if (page && !IS_ERR(page)) {
2695     if (PageTransTail(page)) {
2696     @@ -467,8 +466,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
2697     /*
2698     * Any THP page found by follow_page_mask() may
2699     * have gotten split before reaching
2700     - * munlock_vma_page(), so we need to recompute
2701     - * the page_mask here.
2702     + * munlock_vma_page(), so we need to compute
2703     + * the page_mask here instead.
2704     */
2705     page_mask = munlock_vma_page(page);
2706     unlock_page(page);
2707     diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
2708     index 3b5fd4188f2a..58ad23a44109 100644
2709     --- a/net/mac80211/agg-rx.c
2710     +++ b/net/mac80211/agg-rx.c
2711     @@ -398,6 +398,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
2712     tid_agg_rx->timeout = timeout;
2713     tid_agg_rx->stored_mpdu_num = 0;
2714     tid_agg_rx->auto_seq = auto_seq;
2715     + tid_agg_rx->started = false;
2716     tid_agg_rx->reorder_buf_filtered = 0;
2717     status = WLAN_STATUS_SUCCESS;
2718    
2719     diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
2720     index 28a3a0957c9e..76a8bcd8ef11 100644
2721     --- a/net/mac80211/pm.c
2722     +++ b/net/mac80211/pm.c
2723     @@ -168,6 +168,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
2724     break;
2725     }
2726    
2727     + flush_delayed_work(&sdata->dec_tailroom_needed_wk);
2728     drv_remove_interface(local, sdata);
2729     }
2730    
2731     diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
2732     index 2384b4aae064..a697ddf56334 100644
2733     --- a/net/mac80211/rx.c
2734     +++ b/net/mac80211/rx.c
2735     @@ -4,7 +4,7 @@
2736     * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
2737     * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
2738     * Copyright 2013-2014 Intel Mobile Communications GmbH
2739     - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
2740     + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
2741     *
2742     * This program is free software; you can redistribute it and/or modify
2743     * it under the terms of the GNU General Public License version 2 as
2744     @@ -1034,6 +1034,18 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
2745     buf_size = tid_agg_rx->buf_size;
2746     head_seq_num = tid_agg_rx->head_seq_num;
2747    
2748     + /*
2749     + * If the current MPDU's SN is smaller than the SSN, it shouldn't
2750     + * be reordered.
2751     + */
2752     + if (unlikely(!tid_agg_rx->started)) {
2753     + if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
2754     + ret = false;
2755     + goto out;
2756     + }
2757     + tid_agg_rx->started = true;
2758     + }
2759     +
2760     /* frame with out of date sequence number */
2761     if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
2762     dev_kfree_skb(skb);
2763     @@ -4080,15 +4092,17 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2764     ieee80211_is_beacon(hdr->frame_control)))
2765     ieee80211_scan_rx(local, skb);
2766    
2767     - if (pubsta) {
2768     - rx.sta = container_of(pubsta, struct sta_info, sta);
2769     - rx.sdata = rx.sta->sdata;
2770     - if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2771     - return;
2772     - goto out;
2773     - } else if (ieee80211_is_data(fc)) {
2774     + if (ieee80211_is_data(fc)) {
2775     struct sta_info *sta, *prev_sta;
2776    
2777     + if (pubsta) {
2778     + rx.sta = container_of(pubsta, struct sta_info, sta);
2779     + rx.sdata = rx.sta->sdata;
2780     + if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2781     + return;
2782     + goto out;
2783     + }
2784     +
2785     prev_sta = NULL;
2786    
2787     for_each_sta_info(local, hdr->addr2, sta, tmp) {
2788     diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
2789     index dd06ef0b8861..15599c70a38f 100644
2790     --- a/net/mac80211/sta_info.h
2791     +++ b/net/mac80211/sta_info.h
2792     @@ -189,6 +189,7 @@ struct tid_ampdu_tx {
2793     * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_num
2794     * and ssn.
2795     * @removed: this session is removed (but might have been found due to RCU)
2796     + * @started: this session has started (head ssn or higher was received)
2797     *
2798     * This structure's lifetime is managed by RCU, assignments to
2799     * the array holding it must hold the aggregation mutex.
2800     @@ -212,8 +213,9 @@ struct tid_ampdu_rx {
2801     u16 ssn;
2802     u16 buf_size;
2803     u16 timeout;
2804     - bool auto_seq;
2805     - bool removed;
2806     + u8 auto_seq:1,
2807     + removed:1,
2808     + started:1;
2809     };
2810    
2811     /**
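
The sta_info.h hunk above packs auto_seq, removed, and the new started flag into single-bit fields instead of adding a third bool, keeping struct tid_ampdu_rx from growing. A standalone size comparison, assuming a typical ABI (the u16 stands in for the surrounding fields):

#include <stdio.h>

/* Three separate bools next to a u16 neighbor. */
struct with_bools { unsigned short timeout; _Bool auto_seq, removed, started; };
/* The patch's layout: one byte of single-bit flags. */
struct with_bits  { unsigned short timeout; unsigned char auto_seq:1, removed:1, started:1; };

int main(void)
{
	printf("bools: %zu bytes, bitfields: %zu bytes\n",
	       sizeof(struct with_bools), sizeof(struct with_bits));
	return 0;
}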
2812     diff --git a/net/mac80211/status.c b/net/mac80211/status.c
2813     index ddf71c648cab..ad37b4e58c2f 100644
2814     --- a/net/mac80211/status.c
2815     +++ b/net/mac80211/status.c
2816     @@ -51,7 +51,8 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
2817     struct ieee80211_hdr *hdr = (void *)skb->data;
2818     int ac;
2819    
2820     - if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
2821     + if (info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
2822     + IEEE80211_TX_CTL_AMPDU)) {
2823     ieee80211_free_txskb(&local->hw, skb);
2824     return;
2825     }
2826     diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
2827     index d08e214ec6e7..223d88e25e05 100755
2828     --- a/tools/testing/ktest/ktest.pl
2829     +++ b/tools/testing/ktest/ktest.pl
2830     @@ -2629,7 +2629,7 @@ sub do_run_test {
2831     }
2832    
2833     waitpid $child_pid, 0;
2834     - $child_exit = $?;
2835     + $child_exit = $? >> 8;
2836    
2837     my $end_time = time;
2838     $test_time = $end_time - $start_time;
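
The ktest.pl change shifts $? right by 8 because the raw wait status packs the child's exit code into bits 8-15, with signal and core-dump information in the low byte. The same layout seen from C, where POSIX supplies WEXITSTATUS() for the shift; a small runnable illustration:

#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid < 0) {
		perror("fork");
		return 1;
	}
	if (pid == 0)
		_exit(3);               /* child: exit code 3 */

	int status;
	if (waitpid(pid, &status, 0) < 0) {
		perror("waitpid");
		return 1;
	}
	if (WIFEXITED(status))
		printf("raw=0x%04x  shifted=%d  WEXITSTATUS=%d\n",
		       status, (status >> 8) & 0xff, WEXITSTATUS(status));
	return 0;
}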