Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.4/0153-4.4.54-all-fixes.patch

Revision 2889
Mon Mar 27 13:49:26 2017 UTC by niro
File size: 53714 bytes
linux-4.4.54
1 niro 2889 diff --git a/Makefile b/Makefile
2     index 10aec937e9e4..7f54ac081cf3 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 4
8     -SUBLEVEL = 53
9     +SUBLEVEL = 54
10     EXTRAVERSION =
11     NAME = Blurry Fish Butt
12    
13     diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
14     index c1ea67db8404..c61ed7890cef 100644
15     --- a/arch/s390/include/asm/processor.h
16     +++ b/arch/s390/include/asm/processor.h
17     @@ -74,7 +74,8 @@ extern void execve_tail(void);
18     * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
19     */
20    
21     -#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
22     +#define TASK_SIZE_OF(tsk) ((tsk)->mm ? \
23     + (tsk)->mm->context.asce_limit : TASK_MAX_SIZE)
24     #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
25     (1UL << 30) : (1UL << 41))
26     #define TASK_SIZE TASK_SIZE_OF(current)
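
The guard above matters because kernel threads have no user address space: for them task_struct::mm is NULL, so the old macro dereferenced a NULL pointer whenever TASK_SIZE_OF() was evaluated for such a task. A minimal sketch of the failure and the fix, with TASK_MAX_SIZE as the architecture's upper bound:

	/* Old: oopses for kernel threads, whose tsk->mm is NULL. */
	size = tsk->mm->context.asce_limit;

	/* New: fall back to the architecture-wide maximum instead. */
	size = tsk->mm ? tsk->mm->context.asce_limit : TASK_MAX_SIZE;
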
27     diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
28     index 1f581eb61bc2..d097d71685df 100644
29     --- a/arch/s390/kernel/setup.c
30     +++ b/arch/s390/kernel/setup.c
31     @@ -805,10 +805,10 @@ static void __init setup_randomness(void)
32     {
33     struct sysinfo_3_2_2 *vmms;
34    
35     - vmms = (struct sysinfo_3_2_2 *) alloc_page(GFP_KERNEL);
36     - if (vmms && stsi(vmms, 3, 2, 2) == 0 && vmms->count)
37     - add_device_randomness(&vmms, vmms->count);
38     - free_page((unsigned long) vmms);
39     + vmms = (struct sysinfo_3_2_2 *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
40     + if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
41     + add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
42     + memblock_free((unsigned long) vmms, PAGE_SIZE);
43     }
44    
45     /*
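
The previous code had two problems that the hunk above addresses: add_device_randomness() was fed &vmms, the stack address of the local pointer rather than the sysinfo data itself, and alloc_page(GFP_KERNEL) was presumably being called before the page allocator is ready this early in setup, hence the switch to memblock. A sketch of the corrected call (buffer first, then length in bytes):

	/* Feed the VM name records themselves, sized by entry count. */
	add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
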
46     diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
47     index 575dc123bda2..23e3f5d77a24 100644
48     --- a/arch/s390/kvm/kvm-s390.c
49     +++ b/arch/s390/kvm/kvm-s390.c
50     @@ -295,6 +295,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
51     struct kvm_memory_slot *memslot;
52     int is_dirty = 0;
53    
54     + if (kvm_is_ucontrol(kvm))
55     + return -EINVAL;
56     +
57     mutex_lock(&kvm->slots_lock);
58    
59     r = -EINVAL;
60     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
61     index 64f60a48def1..3a7ae80dc49d 100644
62     --- a/arch/x86/kvm/vmx.c
63     +++ b/arch/x86/kvm/vmx.c
64     @@ -3499,7 +3499,7 @@ static void fix_rmode_seg(int seg, struct kvm_segment *save)
65     }
66    
67     vmcs_write16(sf->selector, var.selector);
68     - vmcs_write32(sf->base, var.base);
69     + vmcs_writel(sf->base, var.base);
70     vmcs_write32(sf->limit, var.limit);
71     vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
72     }
73     @@ -7905,7 +7905,7 @@ static void kvm_flush_pml_buffers(struct kvm *kvm)
74     static void vmx_dump_sel(char *name, uint32_t sel)
75     {
76     pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
77     - name, vmcs_read32(sel),
78     + name, vmcs_read16(sel),
79     vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
80     vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
81     vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
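
Both hunks above fix accessor widths. VMCS fields have a fixed size determined by their encoding: segment selectors are 16-bit, limits and access rights 32-bit, and segment bases natural-width. Writing a base with vmcs_write32() silently truncates its upper half on 64-bit hosts; this file's helpers come in one variant per width, roughly:

	vmcs_write16(sf->selector, var.selector); /* 16-bit field        */
	vmcs_write32(sf->limit, var.limit);       /* 32-bit field        */
	vmcs_writel(sf->base, var.base);          /* natural-width field */
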
82     diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
83     index 9735691f37f1..49ccbd9022f6 100644
84     --- a/arch/xtensa/kernel/setup.c
85     +++ b/arch/xtensa/kernel/setup.c
86     @@ -133,6 +133,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag)
87    
88     __tagtable(BP_TAG_INITRD, parse_tag_initrd);
89    
90     +#endif /* CONFIG_BLK_DEV_INITRD */
91     +
92     #ifdef CONFIG_OF
93    
94     static int __init parse_tag_fdt(const bp_tag_t *tag)
95     @@ -145,8 +147,6 @@ __tagtable(BP_TAG_FDT, parse_tag_fdt);
96    
97     #endif /* CONFIG_OF */
98    
99     -#endif /* CONFIG_BLK_DEV_INITRD */
100     -
101     static int __init parse_tag_cmdline(const bp_tag_t* tag)
102     {
103     strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
104     diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
105     index 0beaa52df66b..5df8e1234505 100644
106     --- a/drivers/bluetooth/ath3k.c
107     +++ b/drivers/bluetooth/ath3k.c
108     @@ -94,6 +94,7 @@ static const struct usb_device_id ath3k_table[] = {
109     { USB_DEVICE(0x04CA, 0x300f) },
110     { USB_DEVICE(0x04CA, 0x3010) },
111     { USB_DEVICE(0x04CA, 0x3014) },
112     + { USB_DEVICE(0x04CA, 0x3018) },
113     { USB_DEVICE(0x0930, 0x0219) },
114     { USB_DEVICE(0x0930, 0x021c) },
115     { USB_DEVICE(0x0930, 0x0220) },
116     @@ -160,6 +161,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
117     { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
118     { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
119     { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
120     + { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 },
121     { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
122     { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
123     { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
124     diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
125     index c306b483de60..cd6b141b9825 100644
126     --- a/drivers/bluetooth/btusb.c
127     +++ b/drivers/bluetooth/btusb.c
128     @@ -208,6 +208,7 @@ static const struct usb_device_id blacklist_table[] = {
129     { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
130     { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
131     { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
132     + { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 },
133     { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
134     { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
135     { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
136     diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
137     index c161eeda417b..267749a94c5a 100644
138     --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
139     +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
140     @@ -3704,9 +3704,15 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
141     default:
142     encoder->possible_crtcs = 0x3;
143     break;
144     + case 3:
145     + encoder->possible_crtcs = 0x7;
146     + break;
147     case 4:
148     encoder->possible_crtcs = 0xf;
149     break;
150     + case 5:
151     + encoder->possible_crtcs = 0x1f;
152     + break;
153     case 6:
154     encoder->possible_crtcs = 0x3f;
155     break;
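
Each case simply sets the low num_crtc bits of the mask. A closed-form equivalent, shown only to make the encoding explicit (the driver keeps the spelled-out cases):

	/* One bit per CRTC this encoder may drive: n CRTCs -> low n bits. */
	encoder->possible_crtcs = (1u << adev->mode_info.num_crtc) - 1;
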
156     diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
157     index 810c51d92b99..30672a3df8a9 100644
158     --- a/drivers/gpu/drm/ast/ast_post.c
159     +++ b/drivers/gpu/drm/ast/ast_post.c
160     @@ -58,13 +58,9 @@ bool ast_is_vga_enabled(struct drm_device *dev)
161     /* TODO 1180 */
162     } else {
163     ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
164     - if (ch) {
165     - ast_open_key(ast);
166     - ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff);
167     - return ch & 0x04;
168     - }
169     + return !!(ch & 0x01);
170     }
171     - return 0;
172     + return false;
173     }
174    
175     static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
176     @@ -375,8 +371,8 @@ void ast_post_gpu(struct drm_device *dev)
177     pci_write_config_dword(ast->dev->pdev, 0x04, reg);
178    
179     ast_enable_vga(dev);
180     - ast_enable_mmio(dev);
181     ast_open_key(ast);
182     + ast_enable_mmio(dev);
183     ast_set_def_ext_reg(dev);
184    
185     if (ast->chip == AST2300 || ast->chip == AST2400)
186     @@ -1630,12 +1626,44 @@ static void ast_init_dram_2300(struct drm_device *dev)
187     temp |= 0x73;
188     ast_write32(ast, 0x12008, temp);
189    
190     + param.dram_freq = 396;
191     param.dram_type = AST_DDR3;
192     + temp = ast_mindwm(ast, 0x1e6e2070);
193     if (temp & 0x01000000)
194     param.dram_type = AST_DDR2;
195     - param.dram_chipid = ast->dram_type;
196     - param.dram_freq = ast->mclk;
197     - param.vram_size = ast->vram_size;
198     + switch (temp & 0x18000000) {
199     + case 0:
200     + param.dram_chipid = AST_DRAM_512Mx16;
201     + break;
202     + default:
203     + case 0x08000000:
204     + param.dram_chipid = AST_DRAM_1Gx16;
205     + break;
206     + case 0x10000000:
207     + param.dram_chipid = AST_DRAM_2Gx16;
208     + break;
209     + case 0x18000000:
210     + param.dram_chipid = AST_DRAM_4Gx16;
211     + break;
212     + }
213     + switch (temp & 0x0c) {
214     + default:
215     + case 0x00:
216     + param.vram_size = AST_VIDMEM_SIZE_8M;
217     + break;
218     +
219     + case 0x04:
220     + param.vram_size = AST_VIDMEM_SIZE_16M;
221     + break;
222     +
223     + case 0x08:
224     + param.vram_size = AST_VIDMEM_SIZE_32M;
225     + break;
226     +
227     + case 0x0c:
228     + param.vram_size = AST_VIDMEM_SIZE_64M;
229     + break;
230     + }
231    
232     if (param.dram_type == AST_DDR3) {
233     get_ddr3_info(ast, &param);
234     diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
235     index 1ac29d703c12..ea443fafb934 100644
236     --- a/drivers/gpu/drm/drm_atomic_helper.c
237     +++ b/drivers/gpu/drm/drm_atomic_helper.c
238     @@ -265,7 +265,7 @@ mode_fixup(struct drm_atomic_state *state)
239     struct drm_connector *connector;
240     struct drm_connector_state *conn_state;
241     int i;
242     - bool ret;
243     + int ret;
244    
245     for_each_crtc_in_state(state, crtc, crtc_state, i) {
246     if (!crtc_state->mode_changed &&
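
The type change above is the whole fix: ->atomic_check() returns 0 or a negative errno, and storing that in a bool collapses -EINVAL to true, which mode_fixup() then handed back to its caller as 1 instead of the error code. In outline:

	bool ret = funcs->atomic_check(encoder, crtc_state, conn_state);
	if (ret)
		return ret; /* -EINVAL became 'true', returned as 1 */
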
247     diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
248     index 8c9ac021608f..cc1e16fd7e76 100644
249     --- a/drivers/gpu/drm/drm_edid.c
250     +++ b/drivers/gpu/drm/drm_edid.c
251     @@ -144,6 +144,9 @@ static struct edid_quirk {
252    
253     /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
254     { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
255     +
256     + /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
257     + { "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
258     };
259    
260     /*
261     diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
262     index 3f802163f7d4..e7c18519274a 100644
263     --- a/drivers/gpu/drm/i915/intel_pm.c
264     +++ b/drivers/gpu/drm/i915/intel_pm.c
265     @@ -6803,7 +6803,18 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
266    
267     static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
268     {
269     - I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
270     + u32 val;
271     +
272     + /*
273     + * On driver load, a pipe may be active and driving a DSI display.
274     + * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
275     + * (and never recovering) in this case. intel_dsi_post_disable() will
276     + * clear it when we turn off the display.
277     + */
278     + val = I915_READ(DSPCLK_GATE_D);
279     + val &= DPOUNIT_CLOCK_GATE_DISABLE;
280     + val |= VRHUNIT_CLOCK_GATE_DISABLE;
281     + I915_WRITE(DSPCLK_GATE_D, val);
282    
283     /*
284     * Disable trickle feed and enable pnd deadline calculation
285     diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
286     index 4ae8b56b1847..037c38bb5333 100644
287     --- a/drivers/gpu/drm/ttm/ttm_bo.c
288     +++ b/drivers/gpu/drm/ttm/ttm_bo.c
289     @@ -1621,7 +1621,6 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
290     struct ttm_buffer_object *bo;
291     int ret = -EBUSY;
292     int put_count;
293     - uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
294    
295     spin_lock(&glob->lru_lock);
296     list_for_each_entry(bo, &glob->swap_lru, swap) {
297     @@ -1657,7 +1656,8 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
298     if (unlikely(ret != 0))
299     goto out;
300    
301     - if ((bo->mem.placement & swap_placement) != swap_placement) {
302     + if (bo->mem.mem_type != TTM_PL_SYSTEM ||
303     + bo->ttm->caching_state != tt_cached) {
304     struct ttm_mem_reg evict_mem;
305    
306     evict_mem = bo->mem;
307     diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
308     index 89fd0113aa5c..57c191798699 100644
309     --- a/drivers/hv/hv.c
310     +++ b/drivers/hv/hv.c
311     @@ -219,7 +219,7 @@ int hv_init(void)
312     /* See if the hypercall page is already set */
313     rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
314    
315     - virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
316     + virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
317    
318     if (!virtaddr)
319     goto cleanup;
320     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
321     index 3ba7de5f9379..2018d24344de 100644
322     --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
323     +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
324     @@ -1488,12 +1488,14 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
325    
326     ret = ipoib_set_mode(dev, buf);
327    
328     - rtnl_unlock();
329     -
330     - if (!ret)
331     - return count;
332     + /* The assumption is that the function ipoib_set_mode returned
333     + * with the rtnl held by it, if not the value -EBUSY returned,
334     + * then no need to rtnl_unlock
335     + */
336     + if (ret != -EBUSY)
337     + rtnl_unlock();
338    
339     - return ret;
340     + return (!ret || ret == -EBUSY) ? count : ret;
341     }
342    
343     static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
344     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
345     index 8a4d10452d61..8efcff1beb8f 100644
346     --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
347     +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
348     @@ -464,8 +464,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
349     priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
350    
351     ipoib_flush_paths(dev);
352     - rtnl_lock();
353     - return 0;
354     + return (!rtnl_trylock()) ? -EBUSY : 0;
355     }
356    
357     if (!strcmp(buf, "datagram\n")) {
358     @@ -474,8 +473,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
359     dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
360     rtnl_unlock();
361     ipoib_flush_paths(dev);
362     - rtnl_lock();
363     - return 0;
364     + return (!rtnl_trylock()) ? -EBUSY : 0;
365     }
366    
367     return -EINVAL;
368     @@ -628,6 +626,14 @@ void ipoib_mark_paths_invalid(struct net_device *dev)
369     spin_unlock_irq(&priv->lock);
370     }
371    
372     +static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
373     +{
374     + struct ipoib_pseudo_header *phdr;
375     +
376     + phdr = (struct ipoib_pseudo_header *)skb_push(skb, sizeof(*phdr));
377     + memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
378     +}
379     +
380     void ipoib_flush_paths(struct net_device *dev)
381     {
382     struct ipoib_dev_priv *priv = netdev_priv(dev);
383     @@ -852,8 +858,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
384     }
385     if (skb_queue_len(&neigh->queue) <
386     IPOIB_MAX_PATH_REC_QUEUE) {
387     - /* put pseudoheader back on for next time */
388     - skb_push(skb, IPOIB_PSEUDO_LEN);
389     + push_pseudo_header(skb, neigh->daddr);
390     __skb_queue_tail(&neigh->queue, skb);
391     } else {
392     ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
393     @@ -871,10 +876,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
394    
395     if (!path->query && path_rec_start(dev, path))
396     goto err_path;
397     - if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
398     + if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
399     + push_pseudo_header(skb, neigh->daddr);
400     __skb_queue_tail(&neigh->queue, skb);
401     - else
402     + } else {
403     goto err_drop;
404     + }
405     }
406    
407     spin_unlock_irqrestore(&priv->lock, flags);
408     @@ -910,8 +917,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
409     }
410     if (path) {
411     if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
412     - /* put pseudoheader back on for next time */
413     - skb_push(skb, IPOIB_PSEUDO_LEN);
414     + push_pseudo_header(skb, phdr->hwaddr);
415     __skb_queue_tail(&path->queue, skb);
416     } else {
417     ++dev->stats.tx_dropped;
418     @@ -943,8 +949,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
419     return;
420     } else if ((path->query || !path_rec_start(dev, path)) &&
421     skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
422     - /* put pseudoheader back on for next time */
423     - skb_push(skb, IPOIB_PSEUDO_LEN);
424     + push_pseudo_header(skb, phdr->hwaddr);
425     __skb_queue_tail(&path->queue, skb);
426     } else {
427     ++dev->stats.tx_dropped;
428     @@ -1025,8 +1030,7 @@ send_using_neigh:
429     }
430    
431     if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
432     - /* put pseudoheader back on for next time */
433     - skb_push(skb, sizeof(*phdr));
434     + push_pseudo_header(skb, phdr->hwaddr);
435     spin_lock_irqsave(&priv->lock, flags);
436     __skb_queue_tail(&neigh->queue, skb);
437     spin_unlock_irqrestore(&priv->lock, flags);
438     @@ -1058,7 +1062,6 @@ static int ipoib_hard_header(struct sk_buff *skb,
439     unsigned short type,
440     const void *daddr, const void *saddr, unsigned len)
441     {
442     - struct ipoib_pseudo_header *phdr;
443     struct ipoib_header *header;
444    
445     header = (struct ipoib_header *) skb_push(skb, sizeof *header);
446     @@ -1071,8 +1074,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
447     * destination address into skb hard header so we can figure out where
448     * to send the packet later.
449     */
450     - phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
451     - memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
452     + push_pseudo_header(skb, daddr);
453    
454     return IPOIB_HARD_LEN;
455     }
456     diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
457     index 5f0f4fc58f43..e397f1b0af09 100644
458     --- a/drivers/infiniband/ulp/srp/ib_srp.c
459     +++ b/drivers/infiniband/ulp/srp/ib_srp.c
460     @@ -1787,17 +1787,24 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
461     if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
462     spin_lock_irqsave(&ch->lock, flags);
463     ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
464     + if (rsp->tag == ch->tsk_mgmt_tag) {
465     + ch->tsk_mgmt_status = -1;
466     + if (be32_to_cpu(rsp->resp_data_len) >= 4)
467     + ch->tsk_mgmt_status = rsp->data[3];
468     + complete(&ch->tsk_mgmt_done);
469     + } else {
470     + shost_printk(KERN_ERR, target->scsi_host,
471     + "Received tsk mgmt response too late for tag %#llx\n",
472     + rsp->tag);
473     + }
474     spin_unlock_irqrestore(&ch->lock, flags);
475     -
476     - ch->tsk_mgmt_status = -1;
477     - if (be32_to_cpu(rsp->resp_data_len) >= 4)
478     - ch->tsk_mgmt_status = rsp->data[3];
479     - complete(&ch->tsk_mgmt_done);
480     } else {
481     scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
482     - if (scmnd) {
483     + if (scmnd && scmnd->host_scribble) {
484     req = (void *)scmnd->host_scribble;
485     scmnd = srp_claim_req(ch, req, NULL, scmnd);
486     + } else {
487     + scmnd = NULL;
488     }
489     if (!scmnd) {
490     shost_printk(KERN_ERR, target->scsi_host,
491     @@ -2469,19 +2476,18 @@ srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
492     }
493    
494     static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
495     - u8 func)
496     + u8 func, u8 *status)
497     {
498     struct srp_target_port *target = ch->target;
499     struct srp_rport *rport = target->rport;
500     struct ib_device *dev = target->srp_host->srp_dev->dev;
501     struct srp_iu *iu;
502     struct srp_tsk_mgmt *tsk_mgmt;
503     + int res;
504    
505     if (!ch->connected || target->qp_in_error)
506     return -1;
507    
508     - init_completion(&ch->tsk_mgmt_done);
509     -
510     /*
511     * Lock the rport mutex to avoid that srp_create_ch_ib() is
512     * invoked while a task management function is being sent.
513     @@ -2504,10 +2510,16 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
514    
515     tsk_mgmt->opcode = SRP_TSK_MGMT;
516     int_to_scsilun(lun, &tsk_mgmt->lun);
517     - tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
518     tsk_mgmt->tsk_mgmt_func = func;
519     tsk_mgmt->task_tag = req_tag;
520    
521     + spin_lock_irq(&ch->lock);
522     + ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
523     + tsk_mgmt->tag = ch->tsk_mgmt_tag;
524     + spin_unlock_irq(&ch->lock);
525     +
526     + init_completion(&ch->tsk_mgmt_done);
527     +
528     ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
529     DMA_TO_DEVICE);
530     if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
531     @@ -2516,13 +2528,15 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
532    
533     return -1;
534     }
535     + res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
536     + msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
537     + if (res > 0 && status)
538     + *status = ch->tsk_mgmt_status;
539     mutex_unlock(&rport->mutex);
540    
541     - if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
542     - msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
543     - return -1;
544     + WARN_ON_ONCE(res < 0);
545    
546     - return 0;
547     + return res > 0 ? 0 : -1;
548     }
549    
550     static int srp_abort(struct scsi_cmnd *scmnd)
551     @@ -2548,7 +2562,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
552     shost_printk(KERN_ERR, target->scsi_host,
553     "Sending SRP abort for tag %#x\n", tag);
554     if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
555     - SRP_TSK_ABORT_TASK) == 0)
556     + SRP_TSK_ABORT_TASK, NULL) == 0)
557     ret = SUCCESS;
558     else if (target->rport->state == SRP_RPORT_LOST)
559     ret = FAST_IO_FAIL;
560     @@ -2566,14 +2580,15 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
561     struct srp_target_port *target = host_to_target(scmnd->device->host);
562     struct srp_rdma_ch *ch;
563     int i;
564     + u8 status;
565    
566     shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
567    
568     ch = &target->ch[0];
569     if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
570     - SRP_TSK_LUN_RESET))
571     + SRP_TSK_LUN_RESET, &status))
572     return FAILED;
573     - if (ch->tsk_mgmt_status)
574     + if (status)
575     return FAILED;
576    
577     for (i = 0; i < target->ch_count; i++) {
578     diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
579     index f6af531f9f32..109eea94d0f9 100644
580     --- a/drivers/infiniband/ulp/srp/ib_srp.h
581     +++ b/drivers/infiniband/ulp/srp/ib_srp.h
582     @@ -168,6 +168,7 @@ struct srp_rdma_ch {
583     int max_ti_iu_len;
584     int comp_vector;
585    
586     + u64 tsk_mgmt_tag;
587     struct completion tsk_mgmt_done;
588     u8 tsk_mgmt_status;
589     bool connected;
590     diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
591     index 25aba9886990..0e67145bc418 100644
592     --- a/drivers/net/ethernet/marvell/mvpp2.c
593     +++ b/drivers/net/ethernet/marvell/mvpp2.c
594     @@ -993,7 +993,7 @@ static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
595     txq_pcpu->buffs + txq_pcpu->txq_put_index;
596     tx_buf->skb = skb;
597     tx_buf->size = tx_desc->data_size;
598     - tx_buf->phys = tx_desc->buf_phys_addr;
599     + tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset;
600     txq_pcpu->txq_put_index++;
601     if (txq_pcpu->txq_put_index == txq_pcpu->size)
602     txq_pcpu->txq_put_index = 0;
603     diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
604     index 860d4aed8274..43617ded3773 100644
605     --- a/drivers/net/ieee802154/fakelb.c
606     +++ b/drivers/net/ieee802154/fakelb.c
607     @@ -30,7 +30,7 @@
608     static int numlbs = 2;
609    
610     static LIST_HEAD(fakelb_phys);
611     -static DEFINE_SPINLOCK(fakelb_phys_lock);
612     +static DEFINE_MUTEX(fakelb_phys_lock);
613    
614     static LIST_HEAD(fakelb_ifup_phys);
615     static DEFINE_RWLOCK(fakelb_ifup_phys_lock);
616     @@ -180,9 +180,9 @@ static int fakelb_add_one(struct device *dev)
617     if (err)
618     goto err_reg;
619    
620     - spin_lock(&fakelb_phys_lock);
621     + mutex_lock(&fakelb_phys_lock);
622     list_add_tail(&phy->list, &fakelb_phys);
623     - spin_unlock(&fakelb_phys_lock);
624     + mutex_unlock(&fakelb_phys_lock);
625    
626     return 0;
627    
628     @@ -214,10 +214,10 @@ static int fakelb_probe(struct platform_device *pdev)
629     return 0;
630    
631     err_slave:
632     - spin_lock(&fakelb_phys_lock);
633     + mutex_lock(&fakelb_phys_lock);
634     list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
635     fakelb_del(phy);
636     - spin_unlock(&fakelb_phys_lock);
637     + mutex_unlock(&fakelb_phys_lock);
638     return err;
639     }
640    
641     @@ -225,10 +225,10 @@ static int fakelb_remove(struct platform_device *pdev)
642     {
643     struct fakelb_phy *phy, *tmp;
644    
645     - spin_lock(&fakelb_phys_lock);
646     + mutex_lock(&fakelb_phys_lock);
647     list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
648     fakelb_del(phy);
649     - spin_unlock(&fakelb_phys_lock);
650     + mutex_unlock(&fakelb_phys_lock);
651     return 0;
652     }
653    
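
A spinlock forbids sleeping while held, but the teardown done under this lock unregisters the ieee802154 hardware, which can sleep; a mutex is the sleeping lock this path needs. The resulting pattern (fakelb_del() is this driver's own helper):

	mutex_lock(&fakelb_phys_lock);
	list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
		fakelb_del(phy);        /* may sleep: unregisters the hw */
	mutex_unlock(&fakelb_phys_lock);
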
654     diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
655     index 117fccf7934a..01a6a83f625d 100644
656     --- a/drivers/pwm/pwm-pca9685.c
657     +++ b/drivers/pwm/pwm-pca9685.c
658     @@ -65,7 +65,6 @@
659     #define PCA9685_MAXCHAN 0x10
660    
661     #define LED_FULL (1 << 4)
662     -#define MODE1_RESTART (1 << 7)
663     #define MODE1_SLEEP (1 << 4)
664     #define MODE2_INVRT (1 << 4)
665     #define MODE2_OUTDRV (1 << 2)
666     @@ -117,16 +116,6 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
667     udelay(500);
668    
669     pca->period_ns = period_ns;
670     -
671     - /*
672     - * If the duty cycle did not change, restart PWM with
673     - * the same duty cycle to period ratio and return.
674     - */
675     - if (duty_ns == pca->duty_ns) {
676     - regmap_update_bits(pca->regmap, PCA9685_MODE1,
677     - MODE1_RESTART, 0x1);
678     - return 0;
679     - }
680     } else {
681     dev_err(chip->dev,
682     "prescaler not set: period out of bounds!\n");
683     diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
684     index 94a8f4ab57bc..ae1dc37e4068 100644
685     --- a/drivers/s390/block/dcssblk.c
686     +++ b/drivers/s390/block/dcssblk.c
687     @@ -892,7 +892,7 @@ dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
688     dev_info = bdev->bd_disk->private_data;
689     if (!dev_info)
690     return -ENODEV;
691     - dev_sz = dev_info->end - dev_info->start;
692     + dev_sz = dev_info->end - dev_info->start + 1;
693     offset = secnum * 512;
694     addr = (void *) (dev_info->start + offset);
695     *pfn = virt_to_phys(addr) >> PAGE_SHIFT;
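
The one-byte correction above is the classic inclusive-bounds error: end evidently denotes the last valid byte of the segment (hence the +1), so the size of the range is end - start + 1, not end - start. For example:

	/* start = 0x1000, end = 0x1fff -> 0x1000 bytes, not 0xfff */
	dev_sz = dev_info->end - dev_info->start + 1;
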
696     diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
697     index 5d06253c2a7a..30e9fbbff051 100644
698     --- a/drivers/s390/cio/qdio_thinint.c
699     +++ b/drivers/s390/cio/qdio_thinint.c
700     @@ -147,11 +147,11 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
701     struct qdio_q *q;
702     int i;
703    
704     - for_each_input_queue(irq, q, i) {
705     - if (!references_shared_dsci(irq) &&
706     - has_multiple_inq_on_dsci(irq))
707     - xchg(q->irq_ptr->dsci, 0);
708     + if (!references_shared_dsci(irq) &&
709     + has_multiple_inq_on_dsci(irq))
710     + xchg(irq->dsci, 0);
711    
712     + for_each_input_queue(irq, q, i) {
713     if (q->u.in.queue_start_poll) {
714     /* skip if polling is enabled or already in work */
715     if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
716     diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
717     index 356c80fbb304..bb6a6c35324a 100644
718     --- a/drivers/target/target_core_device.c
719     +++ b/drivers/target/target_core_device.c
720     @@ -77,12 +77,16 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
721     &deve->read_bytes);
722    
723     se_lun = rcu_dereference(deve->se_lun);
724     +
725     + if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
726     + se_lun = NULL;
727     + goto out_unlock;
728     + }
729     +
730     se_cmd->se_lun = rcu_dereference(deve->se_lun);
731     se_cmd->pr_res_key = deve->pr_res_key;
732     se_cmd->orig_fe_lun = unpacked_lun;
733     se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
734     -
735     - percpu_ref_get(&se_lun->lun_ref);
736     se_cmd->lun_ref_active = true;
737    
738     if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
739     @@ -96,6 +100,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
740     goto ref_dev;
741     }
742     }
743     +out_unlock:
744     rcu_read_unlock();
745    
746     if (!se_lun) {
747     @@ -826,6 +831,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
748     xcopy_lun = &dev->xcopy_lun;
749     rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
750     init_completion(&xcopy_lun->lun_ref_comp);
751     + init_completion(&xcopy_lun->lun_shutdown_comp);
752     INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
753     INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
754     mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
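
The lookup above switches from percpu_ref_get(), which unconditionally bumps the counter even on a reference that is already being killed, to percpu_ref_tryget_live(), which fails once shutdown has begun. The general shape of the pattern, with obj and ref as illustrative names:

	if (!percpu_ref_tryget_live(&obj->ref))
		return NULL;            /* dying object: treat as not found */
	/* ... use obj ... */
	percpu_ref_put(&obj->ref);
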
755     diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
756     index 028854cda97b..2794c6ec5c3c 100644
757     --- a/drivers/target/target_core_tpg.c
758     +++ b/drivers/target/target_core_tpg.c
759     @@ -539,7 +539,7 @@ static void core_tpg_lun_ref_release(struct percpu_ref *ref)
760     {
761     struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
762    
763     - complete(&lun->lun_ref_comp);
764     + complete(&lun->lun_shutdown_comp);
765     }
766    
767     int core_tpg_register(
768     @@ -666,6 +666,7 @@ struct se_lun *core_tpg_alloc_lun(
769     lun->lun_link_magic = SE_LUN_LINK_MAGIC;
770     atomic_set(&lun->lun_acl_count, 0);
771     init_completion(&lun->lun_ref_comp);
772     + init_completion(&lun->lun_shutdown_comp);
773     INIT_LIST_HEAD(&lun->lun_deve_list);
774     INIT_LIST_HEAD(&lun->lun_dev_link);
775     atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
776     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
777     index befe22744802..df2059984e14 100644
778     --- a/drivers/target/target_core_transport.c
779     +++ b/drivers/target/target_core_transport.c
780     @@ -2680,10 +2680,39 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
781     }
782     EXPORT_SYMBOL(target_wait_for_sess_cmds);
783    
784     +static void target_lun_confirm(struct percpu_ref *ref)
785     +{
786     + struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
787     +
788     + complete(&lun->lun_ref_comp);
789     +}
790     +
791     void transport_clear_lun_ref(struct se_lun *lun)
792     {
793     - percpu_ref_kill(&lun->lun_ref);
794     + /*
795     + * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop
796     + * the initial reference and schedule confirm kill to be
797     + * executed after one full RCU grace period has completed.
798     + */
799     + percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
800     + /*
801     + * The first completion waits for percpu_ref_switch_to_atomic_rcu()
802     + * to call target_lun_confirm after lun->lun_ref has been marked
803     + * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t
804     + * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref
805     + * fails for all new incoming I/O.
806     + */
807     wait_for_completion(&lun->lun_ref_comp);
808     + /*
809     + * The second completion waits for percpu_ref_put_many() to
810     + * invoke ->release() after lun->lun_ref has switched to
811     + * atomic_t mode, and lun->lun_ref.count has reached zero.
812     + *
813     + * At this point all target-core lun->lun_ref references have
814     + * been dropped via transport_lun_remove_cmd(), and it's safe
815     + * to proceed with the remaining LUN shutdown.
816     + */
817     + wait_for_completion(&lun->lun_shutdown_comp);
818     }
819    
820     static bool
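
In outline, the rewritten shutdown is a two-step handshake on the same percpu_ref: the confirm callback signals that the reference has switched to atomic mode (so percpu_ref_tryget_live() now fails), and the release callback signals that the last reference is gone:

	percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
	wait_for_completion(&lun->lun_ref_comp);      /* now in atomic mode  */
	wait_for_completion(&lun->lun_shutdown_comp); /* last reference gone */
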
821     diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
822     index 644ddb841d9f..6d1e2f746ab4 100644
823     --- a/drivers/tty/n_hdlc.c
824     +++ b/drivers/tty/n_hdlc.c
825     @@ -114,7 +114,7 @@
826     #define DEFAULT_TX_BUF_COUNT 3
827    
828     struct n_hdlc_buf {
829     - struct n_hdlc_buf *link;
830     + struct list_head list_item;
831     int count;
832     char buf[1];
833     };
834     @@ -122,8 +122,7 @@ struct n_hdlc_buf {
835     #define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe)
836    
837     struct n_hdlc_buf_list {
838     - struct n_hdlc_buf *head;
839     - struct n_hdlc_buf *tail;
840     + struct list_head list;
841     int count;
842     spinlock_t spinlock;
843     };
844     @@ -136,7 +135,6 @@ struct n_hdlc_buf_list {
845     * @backup_tty - TTY to use if tty gets closed
846     * @tbusy - reentrancy flag for tx wakeup code
847     * @woke_up - FIXME: describe this field
848     - * @tbuf - currently transmitting tx buffer
849     * @tx_buf_list - list of pending transmit frame buffers
850     * @rx_buf_list - list of received frame buffers
851     * @tx_free_buf_list - list unused transmit frame buffers
852     @@ -149,7 +147,6 @@ struct n_hdlc {
853     struct tty_struct *backup_tty;
854     int tbusy;
855     int woke_up;
856     - struct n_hdlc_buf *tbuf;
857     struct n_hdlc_buf_list tx_buf_list;
858     struct n_hdlc_buf_list rx_buf_list;
859     struct n_hdlc_buf_list tx_free_buf_list;
860     @@ -159,7 +156,8 @@ struct n_hdlc {
861     /*
862     * HDLC buffer list manipulation functions
863     */
864     -static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list);
865     +static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
866     + struct n_hdlc_buf *buf);
867     static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
868     struct n_hdlc_buf *buf);
869     static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
870     @@ -209,16 +207,9 @@ static void flush_tx_queue(struct tty_struct *tty)
871     {
872     struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
873     struct n_hdlc_buf *buf;
874     - unsigned long flags;
875    
876     while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
877     n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
878     - spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
879     - if (n_hdlc->tbuf) {
880     - n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf);
881     - n_hdlc->tbuf = NULL;
882     - }
883     - spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
884     }
885    
886     static struct tty_ldisc_ops n_hdlc_ldisc = {
887     @@ -284,7 +275,6 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc)
888     } else
889     break;
890     }
891     - kfree(n_hdlc->tbuf);
892     kfree(n_hdlc);
893    
894     } /* end of n_hdlc_release() */
895     @@ -403,13 +393,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
896     n_hdlc->woke_up = 0;
897     spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
898    
899     - /* get current transmit buffer or get new transmit */
900     - /* buffer from list of pending transmit buffers */
901     -
902     - tbuf = n_hdlc->tbuf;
903     - if (!tbuf)
904     - tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
905     -
906     + tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
907     while (tbuf) {
908     if (debuglevel >= DEBUG_LEVEL_INFO)
909     printk("%s(%d)sending frame %p, count=%d\n",
910     @@ -421,7 +405,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
911    
912     /* rollback was possible and has been done */
913     if (actual == -ERESTARTSYS) {
914     - n_hdlc->tbuf = tbuf;
915     + n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
916     break;
917     }
918     /* if transmit error, throw frame away by */
919     @@ -436,10 +420,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
920    
921     /* free current transmit buffer */
922     n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf);
923     -
924     - /* this tx buffer is done */
925     - n_hdlc->tbuf = NULL;
926     -
927     +
928     /* wait up sleeping writers */
929     wake_up_interruptible(&tty->write_wait);
930    
931     @@ -449,10 +430,12 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
932     if (debuglevel >= DEBUG_LEVEL_INFO)
933     printk("%s(%d)frame %p pending\n",
934     __FILE__,__LINE__,tbuf);
935     -
936     - /* buffer not accepted by driver */
937     - /* set this buffer as pending buffer */
938     - n_hdlc->tbuf = tbuf;
939     +
940     + /*
941     + * the buffer was not accepted by driver,
942     + * return it back into tx queue
943     + */
944     + n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
945     break;
946     }
947     }
948     @@ -750,7 +733,8 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
949     int error = 0;
950     int count;
951     unsigned long flags;
952     -
953     + struct n_hdlc_buf *buf = NULL;
954     +
955     if (debuglevel >= DEBUG_LEVEL_INFO)
956     printk("%s(%d)n_hdlc_tty_ioctl() called %d\n",
957     __FILE__,__LINE__,cmd);
958     @@ -764,8 +748,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
959     /* report count of read data available */
960     /* in next available frame (if any) */
961     spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags);
962     - if (n_hdlc->rx_buf_list.head)
963     - count = n_hdlc->rx_buf_list.head->count;
964     + buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list,
965     + struct n_hdlc_buf, list_item);
966     + if (buf)
967     + count = buf->count;
968     else
969     count = 0;
970     spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags);
971     @@ -777,8 +763,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
972     count = tty_chars_in_buffer(tty);
973     /* add size of next output frame in queue */
974     spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags);
975     - if (n_hdlc->tx_buf_list.head)
976     - count += n_hdlc->tx_buf_list.head->count;
977     + buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list,
978     + struct n_hdlc_buf, list_item);
979     + if (buf)
980     + count += buf->count;
981     spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags);
982     error = put_user(count, (int __user *)arg);
983     break;
984     @@ -826,14 +814,14 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
985     poll_wait(filp, &tty->write_wait, wait);
986    
987     /* set bits for operations that won't block */
988     - if (n_hdlc->rx_buf_list.head)
989     + if (!list_empty(&n_hdlc->rx_buf_list.list))
990     mask |= POLLIN | POLLRDNORM; /* readable */
991     if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
992     mask |= POLLHUP;
993     if (tty_hung_up_p(filp))
994     mask |= POLLHUP;
995     if (!tty_is_writelocked(tty) &&
996     - n_hdlc->tx_free_buf_list.head)
997     + !list_empty(&n_hdlc->tx_free_buf_list.list))
998     mask |= POLLOUT | POLLWRNORM; /* writable */
999     }
1000     return mask;
1001     @@ -853,11 +841,16 @@ static struct n_hdlc *n_hdlc_alloc(void)
1002     if (!n_hdlc)
1003     return NULL;
1004    
1005     - n_hdlc_buf_list_init(&n_hdlc->rx_free_buf_list);
1006     - n_hdlc_buf_list_init(&n_hdlc->tx_free_buf_list);
1007     - n_hdlc_buf_list_init(&n_hdlc->rx_buf_list);
1008     - n_hdlc_buf_list_init(&n_hdlc->tx_buf_list);
1009     -
1010     + spin_lock_init(&n_hdlc->rx_free_buf_list.spinlock);
1011     + spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock);
1012     + spin_lock_init(&n_hdlc->rx_buf_list.spinlock);
1013     + spin_lock_init(&n_hdlc->tx_buf_list.spinlock);
1014     +
1015     + INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list);
1016     + INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list);
1017     + INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list);
1018     + INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list);
1019     +
1020     /* allocate free rx buffer list */
1021     for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
1022     buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
1023     @@ -885,63 +878,65 @@ static struct n_hdlc *n_hdlc_alloc(void)
1024     } /* end of n_hdlc_alloc() */
1025    
1026     /**
1027     - * n_hdlc_buf_list_init - initialize specified HDLC buffer list
1028     - * @list - pointer to buffer list
1029     + * n_hdlc_buf_return - put the HDLC buffer after the head of the specified list
1030     + * @buf_list - pointer to the buffer list
1031     + * @buf - pointer to the buffer
1032     */
1033     -static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list)
1034     +static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
1035     + struct n_hdlc_buf *buf)
1036     {
1037     - memset(list, 0, sizeof(*list));
1038     - spin_lock_init(&list->spinlock);
1039     -} /* end of n_hdlc_buf_list_init() */
1040     + unsigned long flags;
1041     +
1042     + spin_lock_irqsave(&buf_list->spinlock, flags);
1043     +
1044     + list_add(&buf->list_item, &buf_list->list);
1045     + buf_list->count++;
1046     +
1047     + spin_unlock_irqrestore(&buf_list->spinlock, flags);
1048     +}
1049    
1050     /**
1051     * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
1052     - * @list - pointer to buffer list
1053     + * @buf_list - pointer to buffer list
1054     * @buf - pointer to buffer
1055     */
1056     -static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
1057     +static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
1058     struct n_hdlc_buf *buf)
1059     {
1060     unsigned long flags;
1061     - spin_lock_irqsave(&list->spinlock,flags);
1062     -
1063     - buf->link=NULL;
1064     - if (list->tail)
1065     - list->tail->link = buf;
1066     - else
1067     - list->head = buf;
1068     - list->tail = buf;
1069     - (list->count)++;
1070     -
1071     - spin_unlock_irqrestore(&list->spinlock,flags);
1072     -
1073     +
1074     + spin_lock_irqsave(&buf_list->spinlock, flags);
1075     +
1076     + list_add_tail(&buf->list_item, &buf_list->list);
1077     + buf_list->count++;
1078     +
1079     + spin_unlock_irqrestore(&buf_list->spinlock, flags);
1080     } /* end of n_hdlc_buf_put() */
1081    
1082     /**
1083     * n_hdlc_buf_get - remove and return an HDLC buffer from list
1084     - * @list - pointer to HDLC buffer list
1085     + * @buf_list - pointer to HDLC buffer list
1086     *
1087     * Remove and return an HDLC buffer from the head of the specified HDLC buffer
1088     * list.
1089     * Returns a pointer to HDLC buffer if available, otherwise %NULL.
1090     */
1091     -static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list)
1092     +static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
1093     {
1094     unsigned long flags;
1095     struct n_hdlc_buf *buf;
1096     - spin_lock_irqsave(&list->spinlock,flags);
1097     -
1098     - buf = list->head;
1099     +
1100     + spin_lock_irqsave(&buf_list->spinlock, flags);
1101     +
1102     + buf = list_first_entry_or_null(&buf_list->list,
1103     + struct n_hdlc_buf, list_item);
1104     if (buf) {
1105     - list->head = buf->link;
1106     - (list->count)--;
1107     + list_del(&buf->list_item);
1108     + buf_list->count--;
1109     }
1110     - if (!list->head)
1111     - list->tail = NULL;
1112     -
1113     - spin_unlock_irqrestore(&list->spinlock,flags);
1114     +
1115     + spin_unlock_irqrestore(&buf_list->spinlock, flags);
1116     return buf;
1117     -
1118     } /* end of n_hdlc_buf_get() */
1119    
1120     static char hdlc_banner[] __initdata =
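
With the dedicated tbuf slot gone, a frame the driver rejects or restarts is pushed back onto the front of the transmit queue, preserving frame order; the two list helpers differ only in which end of the list they touch:

	list_add(&buf->list_item, &buf_list->list);      /* n_hdlc_buf_return: head */
	list_add_tail(&buf->list_item, &buf_list->list); /* n_hdlc_buf_put: tail    */
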
1121     diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
1122     index 029de3f99752..5b24ffd93649 100644
1123     --- a/drivers/tty/serial/8250/8250_pci.c
1124     +++ b/drivers/tty/serial/8250/8250_pci.c
1125     @@ -2880,6 +2880,8 @@ enum pci_board_num_t {
1126     pbn_b0_4_1152000_200,
1127     pbn_b0_8_1152000_200,
1128    
1129     + pbn_b0_4_1250000,
1130     +
1131     pbn_b0_2_1843200,
1132     pbn_b0_4_1843200,
1133    
1134     @@ -3113,6 +3115,13 @@ static struct pciserial_board pci_boards[] = {
1135     .uart_offset = 0x200,
1136     },
1137    
1138     + [pbn_b0_4_1250000] = {
1139     + .flags = FL_BASE0,
1140     + .num_ports = 4,
1141     + .base_baud = 1250000,
1142     + .uart_offset = 8,
1143     + },
1144     +
1145     [pbn_b0_2_1843200] = {
1146     .flags = FL_BASE0,
1147     .num_ports = 2,
1148     @@ -5778,6 +5787,10 @@ static struct pci_device_id serial_pci_tbl[] = {
1149     { PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 },
1150     { PCI_DEVICE(0x1c29, 0x1112), .driver_data = pbn_fintek_12 },
1151    
1152     + /* MKS Tenta SCOM-080x serial cards */
1153     + { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
1154     + { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
1155     +
1156     /*
1157     * These entries match devices with class COMMUNICATION_SERIAL,
1158     * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
1159     diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
1160     index 239bc9cba28c..f54f77037d22 100644
1161     --- a/fs/ceph/mds_client.c
1162     +++ b/fs/ceph/mds_client.c
1163     @@ -644,6 +644,9 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
1164     {
1165     dout("__unregister_request %p tid %lld\n", req, req->r_tid);
1166    
1167     + /* Never leave an unregistered request on an unsafe list! */
1168     + list_del_init(&req->r_unsafe_item);
1169     +
1170     if (req->r_tid == mdsc->oldest_tid) {
1171     struct rb_node *p = rb_next(&req->r_node);
1172     mdsc->oldest_tid = 0;
1173     @@ -1051,7 +1054,6 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
1174     while (!list_empty(&session->s_unsafe)) {
1175     req = list_first_entry(&session->s_unsafe,
1176     struct ceph_mds_request, r_unsafe_item);
1177     - list_del_init(&req->r_unsafe_item);
1178     pr_warn_ratelimited(" dropping unsafe request %llu\n",
1179     req->r_tid);
1180     __unregister_request(mdsc, req);
1181     @@ -2477,7 +2479,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
1182     * useful we could do with a revised return value.
1183     */
1184     dout("got safe reply %llu, mds%d\n", tid, mds);
1185     - list_del_init(&req->r_unsafe_item);
1186    
1187     /* last unsafe request during umount? */
1188     if (mdsc->stopping && !__get_oldest_req(mdsc))
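
Hoisting the deletion into __unregister_request() is safe to do unconditionally because list_del_init() re-points the node at itself, so applying it to an entry that is not on any list is a harmless no-op rather than a corruption:

	INIT_LIST_HEAD(&req->r_unsafe_item); /* empty node points at itself */
	list_del_init(&req->r_unsafe_item);  /* idempotent: safe to repeat  */
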
1189     diff --git a/fs/fat/inode.c b/fs/fat/inode.c
1190     index 509411dd3698..cf644d52c0cf 100644
1191     --- a/fs/fat/inode.c
1192     +++ b/fs/fat/inode.c
1193     @@ -1269,6 +1269,16 @@ out:
1194     return 0;
1195     }
1196    
1197     +static void fat_dummy_inode_init(struct inode *inode)
1198     +{
1199     + /* Initialize this dummy inode to work as no-op. */
1200     + MSDOS_I(inode)->mmu_private = 0;
1201     + MSDOS_I(inode)->i_start = 0;
1202     + MSDOS_I(inode)->i_logstart = 0;
1203     + MSDOS_I(inode)->i_attrs = 0;
1204     + MSDOS_I(inode)->i_pos = 0;
1205     +}
1206     +
1207     static int fat_read_root(struct inode *inode)
1208     {
1209     struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
1210     @@ -1713,12 +1723,13 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
1211     fat_inode = new_inode(sb);
1212     if (!fat_inode)
1213     goto out_fail;
1214     - MSDOS_I(fat_inode)->i_pos = 0;
1215     + fat_dummy_inode_init(fat_inode);
1216     sbi->fat_inode = fat_inode;
1217    
1218     fsinfo_inode = new_inode(sb);
1219     if (!fsinfo_inode)
1220     goto out_fail;
1221     + fat_dummy_inode_init(fsinfo_inode);
1222     fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
1223     sbi->fsinfo_inode = fsinfo_inode;
1224     insert_inode_hash(fsinfo_inode);
1225     diff --git a/fs/mount.h b/fs/mount.h
1226     index 14db05d424f7..3dc7dea5a357 100644
1227     --- a/fs/mount.h
1228     +++ b/fs/mount.h
1229     @@ -86,7 +86,6 @@ static inline int is_mounted(struct vfsmount *mnt)
1230     }
1231    
1232     extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *);
1233     -extern struct mount *__lookup_mnt_last(struct vfsmount *, struct dentry *);
1234    
1235     extern int __legitimize_mnt(struct vfsmount *, unsigned);
1236     extern bool legitimize_mnt(struct vfsmount *, unsigned);
1237     diff --git a/fs/namespace.c b/fs/namespace.c
1238     index da98a1bbd8b5..7df3d406d3e0 100644
1239     --- a/fs/namespace.c
1240     +++ b/fs/namespace.c
1241     @@ -638,28 +638,6 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
1242     }
1243    
1244     /*
1245     - * find the last mount at @dentry on vfsmount @mnt.
1246     - * mount_lock must be held.
1247     - */
1248     -struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
1249     -{
1250     - struct mount *p, *res = NULL;
1251     - p = __lookup_mnt(mnt, dentry);
1252     - if (!p)
1253     - goto out;
1254     - if (!(p->mnt.mnt_flags & MNT_UMOUNT))
1255     - res = p;
1256     - hlist_for_each_entry_continue(p, mnt_hash) {
1257     - if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
1258     - break;
1259     - if (!(p->mnt.mnt_flags & MNT_UMOUNT))
1260     - res = p;
1261     - }
1262     -out:
1263     - return res;
1264     -}
1265     -
1266     -/*
1267     * lookup_mnt - Return the first child mount mounted at path
1268     *
1269     * "First" means first mounted chronologically. If you create the
1270     @@ -879,6 +857,13 @@ void mnt_set_mountpoint(struct mount *mnt,
1271     hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
1272     }
1273    
1274     +static void __attach_mnt(struct mount *mnt, struct mount *parent)
1275     +{
1276     + hlist_add_head_rcu(&mnt->mnt_hash,
1277     + m_hash(&parent->mnt, mnt->mnt_mountpoint));
1278     + list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
1279     +}
1280     +
1281     /*
1282     * vfsmount lock must be held for write
1283     */
1284     @@ -887,28 +872,45 @@ static void attach_mnt(struct mount *mnt,
1285     struct mountpoint *mp)
1286     {
1287     mnt_set_mountpoint(parent, mp, mnt);
1288     - hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
1289     - list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
1290     + __attach_mnt(mnt, parent);
1291     }
1292    
1293     -static void attach_shadowed(struct mount *mnt,
1294     - struct mount *parent,
1295     - struct mount *shadows)
1296     +void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
1297     {
1298     - if (shadows) {
1299     - hlist_add_behind_rcu(&mnt->mnt_hash, &shadows->mnt_hash);
1300     - list_add(&mnt->mnt_child, &shadows->mnt_child);
1301     - } else {
1302     - hlist_add_head_rcu(&mnt->mnt_hash,
1303     - m_hash(&parent->mnt, mnt->mnt_mountpoint));
1304     - list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
1305     - }
1306     + struct mountpoint *old_mp = mnt->mnt_mp;
1307     + struct dentry *old_mountpoint = mnt->mnt_mountpoint;
1308     + struct mount *old_parent = mnt->mnt_parent;
1309     +
1310     + list_del_init(&mnt->mnt_child);
1311     + hlist_del_init(&mnt->mnt_mp_list);
1312     + hlist_del_init_rcu(&mnt->mnt_hash);
1313     +
1314     + attach_mnt(mnt, parent, mp);
1315     +
1316     + put_mountpoint(old_mp);
1317     +
1318     + /*
1319     + * Safely avoid even the suggestion this code might sleep or
1320     + * lock the mount hash by taking advantage of the knowledge that
1321     + * mnt_change_mountpoint will not release the final reference
1322     + * to a mountpoint.
1323     + *
1324     + * During mounting, the mount passed in as the parent mount will
1325     + * continue to use the old mountpoint and during unmounting, the
1326     + * old mountpoint will continue to exist until namespace_unlock,
1327     + * which happens well after mnt_change_mountpoint.
1328     + */
1329     + spin_lock(&old_mountpoint->d_lock);
1330     + old_mountpoint->d_lockref.count--;
1331     + spin_unlock(&old_mountpoint->d_lock);
1332     +
1333     + mnt_add_count(old_parent, -1);
1334     }
1335    
1336     /*
1337     * vfsmount lock must be held for write
1338     */
1339     -static void commit_tree(struct mount *mnt, struct mount *shadows)
1340     +static void commit_tree(struct mount *mnt)
1341     {
1342     struct mount *parent = mnt->mnt_parent;
1343     struct mount *m;
1344     @@ -923,7 +925,7 @@ static void commit_tree(struct mount *mnt, struct mount *shadows)
1345    
1346     list_splice(&head, n->list.prev);
1347    
1348     - attach_shadowed(mnt, parent, shadows);
1349     + __attach_mnt(mnt, parent);
1350     touch_mnt_namespace(n);
1351     }
1352    
1353     @@ -1718,7 +1720,6 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
1354     continue;
1355    
1356     for (s = r; s; s = next_mnt(s, r)) {
1357     - struct mount *t = NULL;
1358     if (!(flag & CL_COPY_UNBINDABLE) &&
1359     IS_MNT_UNBINDABLE(s)) {
1360     s = skip_mnt_tree(s);
1361     @@ -1740,14 +1741,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
1362     goto out;
1363     lock_mount_hash();
1364     list_add_tail(&q->mnt_list, &res->mnt_list);
1365     - mnt_set_mountpoint(parent, p->mnt_mp, q);
1366     - if (!list_empty(&parent->mnt_mounts)) {
1367     - t = list_last_entry(&parent->mnt_mounts,
1368     - struct mount, mnt_child);
1369     - if (t->mnt_mp != p->mnt_mp)
1370     - t = NULL;
1371     - }
1372     - attach_shadowed(q, parent, t);
1373     + attach_mnt(q, parent, p->mnt_mp);
1374     unlock_mount_hash();
1375     }
1376     }
1377     @@ -1925,10 +1919,18 @@ static int attach_recursive_mnt(struct mount *source_mnt,
1378     struct path *parent_path)
1379     {
1380     HLIST_HEAD(tree_list);
1381     + struct mountpoint *smp;
1382     struct mount *child, *p;
1383     struct hlist_node *n;
1384     int err;
1385    
1386     + /* Preallocate a mountpoint in case the new mounts need
1387     + * to be tucked under other mounts.
1388     + */
1389     + smp = get_mountpoint(source_mnt->mnt.mnt_root);
1390     + if (IS_ERR(smp))
1391     + return PTR_ERR(smp);
1392     +
1393     if (IS_MNT_SHARED(dest_mnt)) {
1394     err = invent_group_ids(source_mnt, true);
1395     if (err)
1396     @@ -1948,16 +1950,19 @@ static int attach_recursive_mnt(struct mount *source_mnt,
1397     touch_mnt_namespace(source_mnt->mnt_ns);
1398     } else {
1399     mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
1400     - commit_tree(source_mnt, NULL);
1401     + commit_tree(source_mnt);
1402     }
1403    
1404     hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
1405     struct mount *q;
1406     hlist_del_init(&child->mnt_hash);
1407     - q = __lookup_mnt_last(&child->mnt_parent->mnt,
1408     - child->mnt_mountpoint);
1409     - commit_tree(child, q);
1410     + q = __lookup_mnt(&child->mnt_parent->mnt,
1411     + child->mnt_mountpoint);
1412     + if (q)
1413     + mnt_change_mountpoint(child, smp, q);
1414     + commit_tree(child);
1415     }
1416     + put_mountpoint(smp);
1417     unlock_mount_hash();
1418    
1419     return 0;
1420     @@ -1970,6 +1975,10 @@ static int attach_recursive_mnt(struct mount *source_mnt,
1421     unlock_mount_hash();
1422     cleanup_group_ids(source_mnt, NULL);
1423     out:
1424     + read_seqlock_excl(&mount_lock);
1425     + put_mountpoint(smp);
1426     + read_sequnlock_excl(&mount_lock);
1427     +
1428     return err;
1429     }
1430    
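[Editor's note on the fs/namespace.c hunks above: attach_recursive_mnt() now preallocates a mountpoint ("smp") at the source mount's root, and anything already occupying a propagated mountpoint is re-parented onto the new mount via mnt_change_mountpoint() instead of becoming a hash-order "shadow"; the d_lockref comment explains why the old mountpoint's dentry count can be dropped without dput(). A minimal standalone model of the re-parenting step follows; all names are hypothetical and this is not kernel code, just a sketch of the pointer manipulation.]

#include <stdio.h>

struct model_mount {
        const char *name;
        struct model_mount *parent;     /* which mount we sit on      */
        const char *mountpoint;         /* where on that mount        */
};

/* Stand-in for mnt_change_mountpoint(): hang "mnt" off "parent"
 * at mountpoint "mp" (here, the root of the new mount). */
static void change_mountpoint(struct model_mount *parent, const char *mp,
                              struct model_mount *mnt)
{
        mnt->parent = parent;
        mnt->mountpoint = mp;
}

int main(void)
{
        struct model_mount dest  = { "dest",  NULL,  "/"    };
        struct model_mount old   = { "old",   &dest, "/mnt" };  /* already there */
        struct model_mount fresh = { "fresh", NULL,  NULL   };

        /* The new mount is attached at the same mountpoint... */
        fresh.parent = &dest;
        fresh.mountpoint = "/mnt";

        /* ...and the pre-existing mount is tucked under it, hanging
         * off the new mount's root (the preallocated "smp" above). */
        change_mountpoint(&fresh, "/", &old);

        printf("%s now sits on %s at %s\n",
               old.name, old.parent->name, old.mountpoint);
        return 0;
}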
1431     diff --git a/fs/pnode.c b/fs/pnode.c
1432     index 99899705b105..b9f2af59b9a6 100644
1433     --- a/fs/pnode.c
1434     +++ b/fs/pnode.c
1435     @@ -324,6 +324,21 @@ out:
1436     return ret;
1437     }
1438    
1439     +static struct mount *find_topper(struct mount *mnt)
1440     +{
1441     + /* If there is exactly one mount covering mnt completely return it. */
1442     + struct mount *child;
1443     +
1444     + if (!list_is_singular(&mnt->mnt_mounts))
1445     + return NULL;
1446     +
1447     + child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child);
1448     + if (child->mnt_mountpoint != mnt->mnt.mnt_root)
1449     + return NULL;
1450     +
1451     + return child;
1452     +}
1453     +
1454     /*
1455     * return true if the refcount is greater than count
1456     */
1457     @@ -344,9 +359,8 @@ static inline int do_refcount_check(struct mount *mnt, int count)
1458     */
1459     int propagate_mount_busy(struct mount *mnt, int refcnt)
1460     {
1461     - struct mount *m, *child;
1462     + struct mount *m, *child, *topper;
1463     struct mount *parent = mnt->mnt_parent;
1464     - int ret = 0;
1465    
1466     if (mnt == parent)
1467     return do_refcount_check(mnt, refcnt);
1468     @@ -361,12 +375,24 @@ int propagate_mount_busy(struct mount *mnt, int refcnt)
1469    
1470     for (m = propagation_next(parent, parent); m;
1471     m = propagation_next(m, parent)) {
1472     - child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
1473     - if (child && list_empty(&child->mnt_mounts) &&
1474     - (ret = do_refcount_check(child, 1)))
1475     - break;
1476     + int count = 1;
1477     + child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
1478     + if (!child)
1479     + continue;
1480     +
1481     + /* Is there exactly one mount on the child that covers
1482     + * it completely whose reference should be ignored?
1483     + */
1484     + topper = find_topper(child);
1485     + if (topper)
1486     + count += 1;
1487     + else if (!list_empty(&child->mnt_mounts))
1488     + continue;
1489     +
1490     + if (do_refcount_check(child, count))
1491     + return 1;
1492     }
1493     - return ret;
1494     + return 0;
1495     }
1496    
1497     /*
1498     @@ -383,7 +409,7 @@ void propagate_mount_unlock(struct mount *mnt)
1499    
1500     for (m = propagation_next(parent, parent); m;
1501     m = propagation_next(m, parent)) {
1502     - child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
1503     + child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
1504     if (child)
1505     child->mnt.mnt_flags &= ~MNT_LOCKED;
1506     }
1507     @@ -401,9 +427,11 @@ static void mark_umount_candidates(struct mount *mnt)
1508    
1509     for (m = propagation_next(parent, parent); m;
1510     m = propagation_next(m, parent)) {
1511     - struct mount *child = __lookup_mnt_last(&m->mnt,
1512     + struct mount *child = __lookup_mnt(&m->mnt,
1513     mnt->mnt_mountpoint);
1514     - if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) {
1515     + if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
1516     + continue;
1517     + if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
1518     SET_MNT_MARK(child);
1519     }
1520     }
1521     @@ -422,8 +450,8 @@ static void __propagate_umount(struct mount *mnt)
1522    
1523     for (m = propagation_next(parent, parent); m;
1524     m = propagation_next(m, parent)) {
1525     -
1526     - struct mount *child = __lookup_mnt_last(&m->mnt,
1527     + struct mount *topper;
1528     + struct mount *child = __lookup_mnt(&m->mnt,
1529     mnt->mnt_mountpoint);
1530     /*
1531     * umount the child only if the child has no children
1532     @@ -432,6 +460,15 @@ static void __propagate_umount(struct mount *mnt)
1533     if (!child || !IS_MNT_MARKED(child))
1534     continue;
1535     CLEAR_MNT_MARK(child);
1536     +
1537     + /* If there is exactly one mount covering all of child
1538     + * replace child with that mount.
1539     + */
1540     + topper = find_topper(child);
1541     + if (topper)
1542     + mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
1543     + topper);
1544     +
1545     if (list_empty(&child->mnt_mounts)) {
1546     list_del_init(&child->mnt_child);
1547     child->mnt.mnt_flags |= MNT_UMOUNT;
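[Editor's note on find_topper() above: it encodes "exactly one child mount, and that child completely covers this mount", i.e. the child sits on the mount's own root. A standalone sketch of the same test, with a simple array standing in for the mnt_mounts list and a pointer comparison mirroring the dentry check; names are illustrative, not kernel code.]

#include <stdio.h>
#include <stddef.h>

struct m {
        const char *root;          /* mnt->mnt.mnt_root            */
        const char *mountpoint;    /* mnt->mnt_mountpoint          */
        struct m *children[4];     /* stand-in for mnt_mounts      */
        int nr_children;
};

static struct m *find_topper(struct m *mnt)
{
        struct m *child;

        if (mnt->nr_children != 1)            /* list_is_singular()  */
                return NULL;
        child = mnt->children[0];             /* list_first_entry()  */
        if (child->mountpoint != mnt->root)   /* must cover the root */
                return NULL;
        return child;
}

int main(void)
{
        struct m mnt = { .root = "/" };
        struct m cover = { 0 };

        cover.mountpoint = mnt.root;   /* mounted on mnt's own root */
        mnt.children[0] = &cover;
        mnt.nr_children = 1;

        /* propagate_mount_busy() above then treats the topper's one
         * reference as expected (count = 2) rather than as "busy". */
        printf("topper %s\n", find_topper(&mnt) ? "found" : "none");
        return 0;
}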
1548     diff --git a/fs/pnode.h b/fs/pnode.h
1549     index 0fcdbe7ca648..623f01772bec 100644
1550     --- a/fs/pnode.h
1551     +++ b/fs/pnode.h
1552     @@ -49,6 +49,8 @@ int get_dominating_id(struct mount *mnt, const struct path *root);
1553     unsigned int mnt_get_count(struct mount *mnt);
1554     void mnt_set_mountpoint(struct mount *, struct mountpoint *,
1555     struct mount *);
1556     +void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp,
1557     + struct mount *mnt);
1558     struct mount *copy_tree(struct mount *, struct dentry *, int);
1559     bool is_path_reachable(struct mount *, struct dentry *,
1560     const struct path *root);
1561     diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
1562     index e55c08bc3a96..0abc56140c83 100644
1563     --- a/include/linux/ceph/osdmap.h
1564     +++ b/include/linux/ceph/osdmap.h
1565     @@ -49,7 +49,7 @@ static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
1566     case CEPH_POOL_TYPE_EC:
1567     return false;
1568     default:
1569     - BUG_ON(1);
1570     + BUG();
1571     }
1572     }
1573    
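[Editor's note on the osdmap change above: BUG() ends in an unreachable annotation, so a non-void function may legitimately end with it, whereas BUG_ON(1) is a conditional the compiler cannot always see through, which can produce "control reaches end of non-void function" warnings in some configurations. A hedged userspace model using abort(), which is likewise declared noreturn:]

#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>

#define MODEL_BUG()      abort()                    /* never returns */
#define MODEL_BUG_ON(x)  do { if (x) abort(); } while (0)

static bool can_shift(int pool_type)
{
        switch (pool_type) {
        case 0:
                return true;       /* replicated-style pool     */
        case 1:
                return false;      /* erasure-coded-style pool  */
        default:
                MODEL_BUG();       /* abort() is noreturn, so no
                                    * missing-return warning here */
        }
}

int main(void)
{
        printf("can_shift(0) = %d\n", can_shift(0));
        return 0;
}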
1574     diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
1575     index c15373894a42..b37dee3acaba 100644
1576     --- a/include/linux/lockd/lockd.h
1577     +++ b/include/linux/lockd/lockd.h
1578     @@ -355,7 +355,8 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp)
1579     static inline int nlm_compare_locks(const struct file_lock *fl1,
1580     const struct file_lock *fl2)
1581     {
1582     - return fl1->fl_pid == fl2->fl_pid
1583     + return file_inode(fl1->fl_file) == file_inode(fl2->fl_file)
1584     + && fl1->fl_pid == fl2->fl_pid
1585     && fl1->fl_owner == fl2->fl_owner
1586     && fl1->fl_start == fl2->fl_start
1587     && fl1->fl_end == fl2->fl_end
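[Editor's note on the lockd change above: two distinct struct file instances can refer to the same underlying inode, so lock identity must compare file_inode(fl_file), not rely on the other fields alone. The same identity test in userspace terms; the path is illustrative only:]

#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        /* Two opens give two open file descriptions ("struct file"
         * analogues) that still name one and the same inode. */
        int a = open("/etc/hostname", O_RDONLY);
        int b = open("/etc/hostname", O_RDONLY);
        struct stat sa, sb;

        if (a < 0 || b < 0 || fstat(a, &sa) || fstat(b, &sb))
                return 1;
        printf("fd %d vs fd %d: same inode? %s\n", a, b,
               (sa.st_ino == sb.st_ino && sa.st_dev == sb.st_dev)
               ? "yes" : "no");
        close(a);
        close(b);
        return 0;
}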
1588     diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
1589     index 800fe16cc36f..ed66414b91f0 100644
1590     --- a/include/target/target_core_base.h
1591     +++ b/include/target/target_core_base.h
1592     @@ -740,6 +740,7 @@ struct se_lun {
1593     struct config_group lun_group;
1594     struct se_port_stat_grps port_stat_grps;
1595     struct completion lun_ref_comp;
1596     + struct completion lun_shutdown_comp;
1597     struct percpu_ref lun_ref;
1598     struct list_head lun_dev_link;
1599     struct hlist_node link;
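[Editor's note: the new lun_shutdown_comp field is initialized and signalled elsewhere in this patch; the hunks are not shown here, so the following is only a guess at the usual complete/wait pattern such a field supports, modeled in userspace with pthreads standing in for the kernel completion API. All names are hypothetical.]

#include <pthread.h>
#include <stdio.h>

/* Userspace stand-in for struct completion. */
struct completion {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             done;
};

static void complete(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        while (!c->done)
                pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
}

static struct completion lun_shutdown_comp = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

static void *last_ref_dropper(void *arg)
{
        (void)arg;
        complete(&lun_shutdown_comp);    /* e.g. a confirm-kill callback */
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, last_ref_dropper, NULL);
        wait_for_completion(&lun_shutdown_comp);   /* shutdown may proceed */
        pthread_join(t, NULL);
        puts("shutdown complete");
        return 0;
}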
1600     diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
1601     index 00a43a70e1fc..0402fa45b343 100644
1602     --- a/net/mac80211/pm.c
1603     +++ b/net/mac80211/pm.c
1604     @@ -168,6 +168,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
1605     break;
1606     }
1607    
1608     + flush_delayed_work(&sdata->dec_tailroom_needed_wk);
1609     drv_remove_interface(local, sdata);
1610     }
1611    
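[Editor's note on the mac80211 change above: flushing the delayed work before drv_remove_interface() ensures queued work cannot run against an interface that has already been removed. A minimal userspace sketch of that ordering, with pthread_join() standing in for flush_delayed_work(); names are illustrative.]

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

struct sdata {                   /* stand-in for the interface */
        char name[16];
};

static void *tailroom_work(void *arg)        /* the delayed work */
{
        struct sdata *s = arg;

        usleep(100 * 1000);                  /* its "delay" */
        printf("work ran against %s\n", s->name);
        return NULL;
}

int main(void)
{
        struct sdata *s = calloc(1, sizeof(*s));
        pthread_t work;

        strcpy(s->name, "wlan0");
        pthread_create(&work, NULL, tailroom_work, s);

        /* flush_delayed_work() equivalent: let the queued work finish
         * before the interface is removed and freed. */
        pthread_join(work, NULL);
        free(s);                 /* now safe: no use-after-free */
        return 0;
}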
1612     diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
1613     index d08e214ec6e7..223d88e25e05 100755
1614     --- a/tools/testing/ktest/ktest.pl
1615     +++ b/tools/testing/ktest/ktest.pl
1616     @@ -2629,7 +2629,7 @@ sub do_run_test {
1617     }
1618    
1619     waitpid $child_pid, 0;
1620     - $child_exit = $?;
1621     + $child_exit = $? >> 8;
1622    
1623     my $end_time = time;
1624     $test_time = $end_time - $start_time;
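[Editor's note on the ktest.pl fix above: Perl's $? holds the raw wait status, in which the child's exit code occupies bits 8-15, so ">> 8" extracts the actual exit code. The C equivalent, where WEXITSTATUS() performs the same extraction:]

#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = fork();
        int status = 0;

        if (pid == 0)
                _exit(3);                /* child's exit code */
        waitpid(pid, &status, 0);
        /* Prints raw=768, >>8=3, WEXITSTATUS=3: the raw status packs
         * the exit code in the high byte, exactly like Perl's $?. */
        printf("raw=%d  >>8=%d  WEXITSTATUS=%d\n",
               status, status >> 8, WEXITSTATUS(status));
        return 0;
}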