Magellan Linux

Annotation of /trunk/kernel-alx-legacy/patches-4.9/0130-4.9.31-all-fixes.patch



Revision 3608
Fri Aug 14 07:34:29 2020 UTC by niro
File size: 131446 bytes
-added kernel-alx-legacy pkg
1 niro 3608 diff --git a/Makefile b/Makefile
2     index b78a45bcf9b1..3601995f63f9 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 30
9     +SUBLEVEL = 31
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
14     index 9c4b57a7b265..d8199e12fb6e 100644
15     --- a/arch/arm64/net/bpf_jit_comp.c
16     +++ b/arch/arm64/net/bpf_jit_comp.c
17     @@ -252,8 +252,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
18     */
19     off = offsetof(struct bpf_array, ptrs);
20     emit_a64_mov_i64(tmp, off, ctx);
21     - emit(A64_LDR64(tmp, r2, tmp), ctx);
22     - emit(A64_LDR64(prg, tmp, r3), ctx);
23     + emit(A64_ADD(1, tmp, r2, tmp), ctx);
24     + emit(A64_LSL(1, prg, r3, 3), ctx);
25     + emit(A64_LDR64(prg, tmp, prg), ctx);
26     emit(A64_CBZ(1, prg, jmp_offset), ctx);
27    
28     /* goto *(prog->bpf_func + prologue_size); */
29     diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
30     index e84d8fbc2e21..378c37aa6914 100644
31     --- a/arch/powerpc/platforms/cell/spu_base.c
32     +++ b/arch/powerpc/platforms/cell/spu_base.c
33     @@ -197,7 +197,9 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
34     (REGION_ID(ea) != USER_REGION_ID)) {
35    
36     spin_unlock(&spu->register_lock);
37     - ret = hash_page(ea, _PAGE_PRESENT | _PAGE_READ, 0x300, dsisr);
38     + ret = hash_page(ea,
39     + _PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED,
40     + 0x300, dsisr);
41     spin_lock(&spu->register_lock);
42    
43     if (!ret) {
44     diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
45     index ce6f56980aef..cf190728360b 100644
46     --- a/arch/sparc/include/asm/pgtable_32.h
47     +++ b/arch/sparc/include/asm/pgtable_32.h
48     @@ -91,9 +91,9 @@ extern unsigned long pfn_base;
49     * ZERO_PAGE is a global shared page that is always zero: used
50     * for zero-mapped memory areas etc..
51     */
52     -extern unsigned long empty_zero_page;
53     +extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
54    
55     -#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
56     +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
57    
58     /*
59     * In general all page table modifications should use the V8 atomic
60     diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
61     index 29d64b1758ed..be0cc1beed41 100644
62     --- a/arch/sparc/include/asm/setup.h
63     +++ b/arch/sparc/include/asm/setup.h
64     @@ -16,7 +16,7 @@ extern char reboot_command[];
65     */
66     extern unsigned char boot_cpu_id;
67    
68     -extern unsigned long empty_zero_page;
69     +extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
70    
71     extern int serial_console;
72     static inline int con_is_present(void)
73     diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
74     index 6bcff698069b..cec54dc4ab81 100644
75     --- a/arch/sparc/kernel/ftrace.c
76     +++ b/arch/sparc/kernel/ftrace.c
77     @@ -130,17 +130,16 @@ unsigned long prepare_ftrace_return(unsigned long parent,
78     if (unlikely(atomic_read(&current->tracing_graph_pause)))
79     return parent + 8UL;
80    
81     - if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
82     - frame_pointer, NULL) == -EBUSY)
83     - return parent + 8UL;
84     -
85     trace.func = self_addr;
86     + trace.depth = current->curr_ret_stack + 1;
87    
88     /* Only trace if the calling function expects to */
89     - if (!ftrace_graph_entry(&trace)) {
90     - current->curr_ret_stack--;
91     + if (!ftrace_graph_entry(&trace))
92     + return parent + 8UL;
93     +
94     + if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
95     + frame_pointer, NULL) == -EBUSY)
96     return parent + 8UL;
97     - }
98    
99     return return_hooker;
100     }
101     diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
102     index eb8287155279..3b7092d9ea8f 100644
103     --- a/arch/sparc/mm/init_32.c
104     +++ b/arch/sparc/mm/init_32.c
105     @@ -301,7 +301,7 @@ void __init mem_init(void)
106    
107    
108     /* Saves us work later. */
109     - memset((void *)&empty_zero_page, 0, PAGE_SIZE);
110     + memset((void *)empty_zero_page, 0, PAGE_SIZE);
111    
112     i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
113     i += 1;
114     diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
115     index 34d9e15857c3..4669b3a931ed 100644
116     --- a/arch/x86/boot/compressed/Makefile
117     +++ b/arch/x86/boot/compressed/Makefile
118     @@ -94,7 +94,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
119     quiet_cmd_check_data_rel = DATAREL $@
120     define cmd_check_data_rel
121     for obj in $(filter %.o,$^); do \
122     - readelf -S $$obj | grep -qF .rel.local && { \
123     + ${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \
124     echo "error: $$obj has data relocations!" >&2; \
125     exit 1; \
126     } || true; \
127     diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
128     index 9bd7ff5ffbcc..70c9cc3f098c 100644
129     --- a/arch/x86/include/asm/mce.h
130     +++ b/arch/x86/include/asm/mce.h
131     @@ -257,6 +257,7 @@ static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
132     #endif
133    
134     int mce_available(struct cpuinfo_x86 *c);
135     +bool mce_is_memory_error(struct mce *m);
136    
137     DECLARE_PER_CPU(unsigned, mce_exception_count);
138     DECLARE_PER_CPU(unsigned, mce_poll_count);
139     diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
140     index 22cda29d654e..8ca5f8ad008e 100644
141     --- a/arch/x86/kernel/cpu/mcheck/mce.c
142     +++ b/arch/x86/kernel/cpu/mcheck/mce.c
143     @@ -598,16 +598,14 @@ static void mce_read_aux(struct mce *m, int i)
144     }
145     }
146    
147     -static bool memory_error(struct mce *m)
148     +bool mce_is_memory_error(struct mce *m)
149     {
150     - struct cpuinfo_x86 *c = &boot_cpu_data;
151     -
152     - if (c->x86_vendor == X86_VENDOR_AMD) {
153     + if (m->cpuvendor == X86_VENDOR_AMD) {
154     /* ErrCodeExt[20:16] */
155     u8 xec = (m->status >> 16) & 0x1f;
156    
157     return (xec == 0x0 || xec == 0x8);
158     - } else if (c->x86_vendor == X86_VENDOR_INTEL) {
159     + } else if (m->cpuvendor == X86_VENDOR_INTEL) {
160     /*
161     * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
162     *
163     @@ -628,6 +626,7 @@ static bool memory_error(struct mce *m)
164    
165     return false;
166     }
167     +EXPORT_SYMBOL_GPL(mce_is_memory_error);
168    
169     DEFINE_PER_CPU(unsigned, mce_poll_count);
170    
171     @@ -691,7 +690,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
172    
173     severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
174    
175     - if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m))
176     + if (severity == MCE_DEFERRED_SEVERITY && mce_is_memory_error(&m))
177     if (m.status & MCI_STATUS_ADDRV)
178     m.severity = severity;
179    
180     diff --git a/crypto/skcipher.c b/crypto/skcipher.c
181     index f7d0018dcaee..93110d70c1d3 100644
182     --- a/crypto/skcipher.c
183     +++ b/crypto/skcipher.c
184     @@ -221,6 +221,44 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
185     return 0;
186     }
187    
188     +static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
189     + const u8 *key, unsigned int keylen)
190     +{
191     + unsigned long alignmask = crypto_skcipher_alignmask(tfm);
192     + struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
193     + u8 *buffer, *alignbuffer;
194     + unsigned long absize;
195     + int ret;
196     +
197     + absize = keylen + alignmask;
198     + buffer = kmalloc(absize, GFP_ATOMIC);
199     + if (!buffer)
200     + return -ENOMEM;
201     +
202     + alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
203     + memcpy(alignbuffer, key, keylen);
204     + ret = cipher->setkey(tfm, alignbuffer, keylen);
205     + kzfree(buffer);
206     + return ret;
207     +}
208     +
209     +static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
210     + unsigned int keylen)
211     +{
212     + struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
213     + unsigned long alignmask = crypto_skcipher_alignmask(tfm);
214     +
215     + if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
216     + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
217     + return -EINVAL;
218     + }
219     +
220     + if ((unsigned long)key & alignmask)
221     + return skcipher_setkey_unaligned(tfm, key, keylen);
222     +
223     + return cipher->setkey(tfm, key, keylen);
224     +}
225     +
226     static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
227     {
228     struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
229     @@ -241,7 +279,7 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
230     tfm->__crt_alg->cra_type == &crypto_givcipher_type)
231     return crypto_init_skcipher_ops_ablkcipher(tfm);
232    
233     - skcipher->setkey = alg->setkey;
234     + skcipher->setkey = skcipher_setkey;
235     skcipher->encrypt = alg->encrypt;
236     skcipher->decrypt = alg->decrypt;
237     skcipher->ivsize = alg->ivsize;
238     diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
239     index 6d5a8c1d3132..e19f530f1083 100644
240     --- a/drivers/acpi/button.c
241     +++ b/drivers/acpi/button.c
242     @@ -113,7 +113,7 @@ struct acpi_button {
243    
244     static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
245     static struct acpi_device *lid_device;
246     -static u8 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
247     +static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
248    
249     static unsigned long lid_report_interval __read_mostly = 500;
250     module_param(lid_report_interval, ulong, 0644);
251     diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
252     index e5ce81c38eed..e25787afb212 100644
253     --- a/drivers/acpi/nfit/mce.c
254     +++ b/drivers/acpi/nfit/mce.c
255     @@ -26,7 +26,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
256     struct nfit_spa *nfit_spa;
257    
258     /* We only care about memory errors */
259     - if (!(mce->status & MCACOD))
260     + if (!mce_is_memory_error(mce))
261     return NOTIFY_DONE;
262    
263     /*
264     diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
265     index fc061f7c2bd1..a7de8ae185a5 100644
266     --- a/drivers/char/pcmcia/cm4040_cs.c
267     +++ b/drivers/char/pcmcia/cm4040_cs.c
268     @@ -374,7 +374,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
269    
270     rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
271     if (rc <= 0) {
272     - DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
273     + DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
274     DEBUGP(2, dev, "<- cm4040_write (failed)\n");
275     if (rc == -ERESTARTSYS)
276     return rc;
277     @@ -387,7 +387,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
278     for (i = 0; i < bytes_to_write; i++) {
279     rc = wait_for_bulk_out_ready(dev);
280     if (rc <= 0) {
281     - DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n",
282     + DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n",
283     rc);
284     DEBUGP(2, dev, "<- cm4040_write (failed)\n");
285     if (rc == -ERESTARTSYS)
286     @@ -403,7 +403,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
287     rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);
288    
289     if (rc <= 0) {
290     - DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
291     + DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
292     DEBUGP(2, dev, "<- cm4040_write (failed)\n");
293     if (rc == -ERESTARTSYS)
294     return rc;
295     diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
296     index fd7c91254841..79e9d3690667 100644
297     --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
298     +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
299     @@ -774,20 +774,23 @@ void psb_intel_lvds_init(struct drm_device *dev,
300     if (scan->type & DRM_MODE_TYPE_PREFERRED) {
301     mode_dev->panel_fixed_mode =
302     drm_mode_duplicate(dev, scan);
303     + DRM_DEBUG_KMS("Using mode from DDC\n");
304     goto out; /* FIXME: check for quirks */
305     }
306     }
307    
308     /* Failed to get EDID, what about VBT? do we need this? */
309     - if (mode_dev->vbt_mode)
310     + if (dev_priv->lfp_lvds_vbt_mode) {
311     mode_dev->panel_fixed_mode =
312     - drm_mode_duplicate(dev, mode_dev->vbt_mode);
313     + drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
314    
315     - if (!mode_dev->panel_fixed_mode)
316     - if (dev_priv->lfp_lvds_vbt_mode)
317     - mode_dev->panel_fixed_mode =
318     - drm_mode_duplicate(dev,
319     - dev_priv->lfp_lvds_vbt_mode);
320     + if (mode_dev->panel_fixed_mode) {
321     + mode_dev->panel_fixed_mode->type |=
322     + DRM_MODE_TYPE_PREFERRED;
323     + DRM_DEBUG_KMS("Using mode from VBT\n");
324     + goto out;
325     + }
326     + }
327    
328     /*
329     * If we didn't get EDID, try checking if the panel is already turned
330     @@ -804,6 +807,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
331     if (mode_dev->panel_fixed_mode) {
332     mode_dev->panel_fixed_mode->type |=
333     DRM_MODE_TYPE_PREFERRED;
334     + DRM_DEBUG_KMS("Using pre-programmed mode\n");
335     goto out; /* FIXME: check for quirks */
336     }
337     }
338     diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
339     index 7ba450832e6b..ea36dc4dd5d2 100644
340     --- a/drivers/gpu/drm/radeon/ci_dpm.c
341     +++ b/drivers/gpu/drm/radeon/ci_dpm.c
342     @@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
343     u32 vblank_time = r600_dpm_get_vblank_time(rdev);
344     u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
345    
346     + /* disable mclk switching if the refresh is >120Hz, even if the
347     + * blanking period would allow it
348     + */
349     + if (r600_dpm_get_vrefresh(rdev) > 120)
350     + return true;
351     +
352     if (vblank_time < switch_limit)
353     return true;
354     else
355     diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
356     index f6ff41a0eed6..edee6a5f4da9 100644
357     --- a/drivers/gpu/drm/radeon/cik.c
358     +++ b/drivers/gpu/drm/radeon/cik.c
359     @@ -7416,7 +7416,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
360     WREG32(DC_HPD5_INT_CONTROL, tmp);
361     }
362     if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
363     - tmp = RREG32(DC_HPD5_INT_CONTROL);
364     + tmp = RREG32(DC_HPD6_INT_CONTROL);
365     tmp |= DC_HPDx_INT_ACK;
366     WREG32(DC_HPD6_INT_CONTROL, tmp);
367     }
368     @@ -7446,7 +7446,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
369     WREG32(DC_HPD5_INT_CONTROL, tmp);
370     }
371     if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
372     - tmp = RREG32(DC_HPD5_INT_CONTROL);
373     + tmp = RREG32(DC_HPD6_INT_CONTROL);
374     tmp |= DC_HPDx_RX_INT_ACK;
375     WREG32(DC_HPD6_INT_CONTROL, tmp);
376     }
377     diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
378     index 0b6b5766216f..6068b8a01016 100644
379     --- a/drivers/gpu/drm/radeon/evergreen.c
380     +++ b/drivers/gpu/drm/radeon/evergreen.c
381     @@ -4933,7 +4933,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
382     WREG32(DC_HPD5_INT_CONTROL, tmp);
383     }
384     if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
385     - tmp = RREG32(DC_HPD5_INT_CONTROL);
386     + tmp = RREG32(DC_HPD6_INT_CONTROL);
387     tmp |= DC_HPDx_INT_ACK;
388     WREG32(DC_HPD6_INT_CONTROL, tmp);
389     }
390     @@ -4964,7 +4964,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
391     WREG32(DC_HPD5_INT_CONTROL, tmp);
392     }
393     if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
394     - tmp = RREG32(DC_HPD5_INT_CONTROL);
395     + tmp = RREG32(DC_HPD6_INT_CONTROL);
396     tmp |= DC_HPDx_RX_INT_ACK;
397     WREG32(DC_HPD6_INT_CONTROL, tmp);
398     }
399     diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
400     index a951881c2a50..f2eac6b6c46a 100644
401     --- a/drivers/gpu/drm/radeon/r600.c
402     +++ b/drivers/gpu/drm/radeon/r600.c
403     @@ -3995,7 +3995,7 @@ static void r600_irq_ack(struct radeon_device *rdev)
404     WREG32(DC_HPD5_INT_CONTROL, tmp);
405     }
406     if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
407     - tmp = RREG32(DC_HPD5_INT_CONTROL);
408     + tmp = RREG32(DC_HPD6_INT_CONTROL);
409     tmp |= DC_HPDx_INT_ACK;
410     WREG32(DC_HPD6_INT_CONTROL, tmp);
411     }
412     diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
413     index e0c143b865f3..30bd4a6a9d46 100644
414     --- a/drivers/gpu/drm/radeon/radeon_drv.c
415     +++ b/drivers/gpu/drm/radeon/radeon_drv.c
416     @@ -97,9 +97,10 @@
417     * 2.46.0 - Add PFP_SYNC_ME support on evergreen
418     * 2.47.0 - Add UVD_NO_OP register support
419     * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
420     + * 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
421     */
422     #define KMS_DRIVER_MAJOR 2
423     -#define KMS_DRIVER_MINOR 48
424     +#define KMS_DRIVER_MINOR 49
425     #define KMS_DRIVER_PATCHLEVEL 0
426     int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
427     int radeon_driver_unload_kms(struct drm_device *dev);
428     diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
429     index deb9511725c9..316856715878 100644
430     --- a/drivers/gpu/drm/radeon/radeon_gem.c
431     +++ b/drivers/gpu/drm/radeon/radeon_gem.c
432     @@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
433    
434     man = &rdev->mman.bdev.man[TTM_PL_VRAM];
435    
436     - args->vram_size = rdev->mc.real_vram_size;
437     - args->vram_visible = (u64)man->size << PAGE_SHIFT;
438     + args->vram_size = (u64)man->size << PAGE_SHIFT;
439     + args->vram_visible = rdev->mc.visible_vram_size;
440     args->vram_visible -= rdev->vram_pin_size;
441     args->gart_size = rdev->mc.gtt_size;
442     args->gart_size -= rdev->gart_pin_size;
443     diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
444     index 877af4a5ef68..3333e8a45933 100644
445     --- a/drivers/gpu/drm/radeon/si.c
446     +++ b/drivers/gpu/drm/radeon/si.c
447     @@ -6330,7 +6330,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
448     WREG32(DC_HPD5_INT_CONTROL, tmp);
449     }
450     if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
451     - tmp = RREG32(DC_HPD5_INT_CONTROL);
452     + tmp = RREG32(DC_HPD6_INT_CONTROL);
453     tmp |= DC_HPDx_INT_ACK;
454     WREG32(DC_HPD6_INT_CONTROL, tmp);
455     }
456     @@ -6361,7 +6361,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
457     WREG32(DC_HPD5_INT_CONTROL, tmp);
458     }
459     if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
460     - tmp = RREG32(DC_HPD5_INT_CONTROL);
461     + tmp = RREG32(DC_HPD6_INT_CONTROL);
462     tmp |= DC_HPDx_RX_INT_ACK;
463     WREG32(DC_HPD6_INT_CONTROL, tmp);
464     }
465     diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
466     index 0e07a769df7c..c6a922ee5d3b 100644
467     --- a/drivers/hid/wacom_wac.c
468     +++ b/drivers/hid/wacom_wac.c
469     @@ -1400,37 +1400,38 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
470     {
471     unsigned char *data = wacom->data;
472    
473     - if (wacom->pen_input)
474     + if (wacom->pen_input) {
475     dev_dbg(wacom->pen_input->dev.parent,
476     "%s: received report #%d\n", __func__, data[0]);
477     - else if (wacom->touch_input)
478     +
479     + if (len == WACOM_PKGLEN_PENABLED ||
480     + data[0] == WACOM_REPORT_PENABLED)
481     + return wacom_tpc_pen(wacom);
482     + }
483     + else if (wacom->touch_input) {
484     dev_dbg(wacom->touch_input->dev.parent,
485     "%s: received report #%d\n", __func__, data[0]);
486    
487     - switch (len) {
488     - case WACOM_PKGLEN_TPC1FG:
489     - return wacom_tpc_single_touch(wacom, len);
490     + switch (len) {
491     + case WACOM_PKGLEN_TPC1FG:
492     + return wacom_tpc_single_touch(wacom, len);
493    
494     - case WACOM_PKGLEN_TPC2FG:
495     - return wacom_tpc_mt_touch(wacom);
496     + case WACOM_PKGLEN_TPC2FG:
497     + return wacom_tpc_mt_touch(wacom);
498    
499     - case WACOM_PKGLEN_PENABLED:
500     - return wacom_tpc_pen(wacom);
501     + default:
502     + switch (data[0]) {
503     + case WACOM_REPORT_TPC1FG:
504     + case WACOM_REPORT_TPCHID:
505     + case WACOM_REPORT_TPCST:
506     + case WACOM_REPORT_TPC1FGE:
507     + return wacom_tpc_single_touch(wacom, len);
508    
509     - default:
510     - switch (data[0]) {
511     - case WACOM_REPORT_TPC1FG:
512     - case WACOM_REPORT_TPCHID:
513     - case WACOM_REPORT_TPCST:
514     - case WACOM_REPORT_TPC1FGE:
515     - return wacom_tpc_single_touch(wacom, len);
516     -
517     - case WACOM_REPORT_TPCMT:
518     - case WACOM_REPORT_TPCMT2:
519     - return wacom_mt_touch(wacom);
520     + case WACOM_REPORT_TPCMT:
521     + case WACOM_REPORT_TPCMT2:
522     + return wacom_mt_touch(wacom);
523    
524     - case WACOM_REPORT_PENABLED:
525     - return wacom_tpc_pen(wacom);
526     + }
527     }
528     }
529    
530     diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
531     index 0ed77eeff31e..a2e3dd715380 100644
532     --- a/drivers/i2c/busses/i2c-tiny-usb.c
533     +++ b/drivers/i2c/busses/i2c-tiny-usb.c
534     @@ -178,22 +178,39 @@ static int usb_read(struct i2c_adapter *adapter, int cmd,
535     int value, int index, void *data, int len)
536     {
537     struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
538     + void *dmadata = kmalloc(len, GFP_KERNEL);
539     + int ret;
540     +
541     + if (!dmadata)
542     + return -ENOMEM;
543    
544     /* do control transfer */
545     - return usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
546     + ret = usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
547     cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE |
548     - USB_DIR_IN, value, index, data, len, 2000);
549     + USB_DIR_IN, value, index, dmadata, len, 2000);
550     +
551     + memcpy(data, dmadata, len);
552     + kfree(dmadata);
553     + return ret;
554     }
555    
556     static int usb_write(struct i2c_adapter *adapter, int cmd,
557     int value, int index, void *data, int len)
558     {
559     struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
560     + void *dmadata = kmemdup(data, len, GFP_KERNEL);
561     + int ret;
562     +
563     + if (!dmadata)
564     + return -ENOMEM;
565    
566     /* do control transfer */
567     - return usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
568     + ret = usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
569     cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
570     - value, index, data, len, 2000);
571     + value, index, dmadata, len, 2000);
572     +
573     + kfree(dmadata);
574     + return ret;
575     }
576    
577     static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev)
578     diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
579     index 83198a8a8797..4bd5b5caa243 100644
580     --- a/drivers/infiniband/hw/hfi1/rc.c
581     +++ b/drivers/infiniband/hw/hfi1/rc.c
582     @@ -2366,8 +2366,11 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
583     ret = hfi1_rvt_get_rwqe(qp, 1);
584     if (ret < 0)
585     goto nack_op_err;
586     - if (!ret)
587     + if (!ret) {
588     + /* peer will send again */
589     + rvt_put_ss(&qp->r_sge);
590     goto rnr_nak;
591     + }
592     wc.ex.imm_data = ohdr->u.rc.imm_data;
593     wc.wc_flags = IB_WC_WITH_IMM;
594     goto send_last;
595     diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
596     index 2097512e75aa..f3fe787c9426 100644
597     --- a/drivers/infiniband/hw/qib/qib_rc.c
598     +++ b/drivers/infiniband/hw/qib/qib_rc.c
599     @@ -2067,8 +2067,10 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
600     ret = qib_get_rwqe(qp, 1);
601     if (ret < 0)
602     goto nack_op_err;
603     - if (!ret)
604     + if (!ret) {
605     + rvt_put_ss(&qp->r_sge);
606     goto rnr_nak;
607     + }
608     wc.ex.imm_data = ohdr->u.rc.imm_data;
609     hdrsize += 4;
610     wc.wc_flags = IB_WC_WITH_IMM;
611     diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
612     index 726246665850..50dd6bd02951 100644
613     --- a/drivers/mmc/host/sdhci-iproc.c
614     +++ b/drivers/mmc/host/sdhci-iproc.c
615     @@ -157,7 +157,8 @@ static const struct sdhci_ops sdhci_iproc_ops = {
616     };
617    
618     static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
619     - .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
620     + .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
621     + SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
622     .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
623     .ops = &sdhci_iproc_ops,
624     };
625     diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
626     index edc70ffad660..6dcc42d79cab 100644
627     --- a/drivers/net/bonding/bond_3ad.c
628     +++ b/drivers/net/bonding/bond_3ad.c
629     @@ -2573,7 +2573,7 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
630     return -1;
631    
632     ad_info->aggregator_id = aggregator->aggregator_identifier;
633     - ad_info->ports = aggregator->num_of_ports;
634     + ad_info->ports = __agg_active_ports(aggregator);
635     ad_info->actor_key = aggregator->actor_oper_aggregator_key;
636     ad_info->partner_key = aggregator->partner_oper_aggregator_key;
637     ether_addr_copy(ad_info->partner_system,
638     diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
639     index 93aa2939142a..9711ca4510fa 100644
640     --- a/drivers/net/ethernet/emulex/benet/be_main.c
641     +++ b/drivers/net/ethernet/emulex/benet/be_main.c
642     @@ -5144,9 +5144,11 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
643     struct be_adapter *adapter = netdev_priv(dev);
644     u8 l4_hdr = 0;
645    
646     - /* The code below restricts offload features for some tunneled packets.
647     + /* The code below restricts offload features for some tunneled and
648     + * Q-in-Q packets.
649     * Offload features for normal (non tunnel) packets are unchanged.
650     */
651     + features = vlan_features_check(skb, features);
652     if (!skb->encapsulation ||
653     !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
654     return features;
655     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
656     index 3f51a44bde6b..cb45390c7623 100644
657     --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
658     +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
659     @@ -767,7 +767,7 @@ static void cb_timeout_handler(struct work_struct *work)
660     mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
661     mlx5_command_str(msg_to_opcode(ent->in)),
662     msg_to_opcode(ent->in));
663     - mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
664     + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
665     }
666    
667     static void cmd_work_handler(struct work_struct *work)
668     @@ -797,6 +797,7 @@ static void cmd_work_handler(struct work_struct *work)
669     }
670    
671     cmd->ent_arr[ent->idx] = ent;
672     + set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
673     lay = get_inst(cmd, ent->idx);
674     ent->lay = lay;
675     memset(lay, 0, sizeof(*lay));
676     @@ -818,6 +819,20 @@ static void cmd_work_handler(struct work_struct *work)
677     if (ent->callback)
678     schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
679    
680     + /* Skip sending command to fw if internal error */
681     + if (pci_channel_offline(dev->pdev) ||
682     + dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
683     + u8 status = 0;
684     + u32 drv_synd;
685     +
686     + ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
687     + MLX5_SET(mbox_out, ent->out, status, status);
688     + MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
689     +
690     + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
691     + return;
692     + }
693     +
694     /* ring doorbell after the descriptor is valid */
695     mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
696     wmb();
697     @@ -828,7 +843,7 @@ static void cmd_work_handler(struct work_struct *work)
698     poll_timeout(ent);
699     /* make sure we read the descriptor after ownership is SW */
700     rmb();
701     - mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
702     + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
703     }
704     }
705    
706     @@ -872,7 +887,7 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
707     wait_for_completion(&ent->done);
708     } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
709     ent->ret = -ETIMEDOUT;
710     - mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
711     + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
712     }
713    
714     err = ent->ret;
715     @@ -1369,7 +1384,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
716     }
717     }
718    
719     -void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
720     +void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
721     {
722     struct mlx5_cmd *cmd = &dev->cmd;
723     struct mlx5_cmd_work_ent *ent;
724     @@ -1389,6 +1404,19 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
725     struct semaphore *sem;
726    
727     ent = cmd->ent_arr[i];
728     +
729     + /* if we already completed the command, ignore it */
730     + if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
731     + &ent->state)) {
732     + /* only real completion can free the cmd slot */
733     + if (!forced) {
734     + mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
735     + ent->idx);
736     + free_ent(cmd, ent->idx);
737     + }
738     + continue;
739     + }
740     +
741     if (ent->callback)
742     cancel_delayed_work(&ent->cb_timeout_work);
743     if (ent->page_queue)
744     @@ -1411,7 +1439,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
745     mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
746     ent->ret, deliv_status_to_str(ent->status), ent->status);
747     }
748     - free_ent(cmd, ent->idx);
749     +
750     + /* only real completion will free the entry slot */
751     + if (!forced)
752     + free_ent(cmd, ent->idx);
753    
754     if (ent->callback) {
755     ds = ent->ts2 - ent->ts1;
756     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
757     index 126cfeb7e0ec..3744e2f79ecf 100644
758     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
759     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
760     @@ -751,7 +751,6 @@ static void get_supported(u32 eth_proto_cap,
761     ptys2ethtool_supported_port(link_ksettings, eth_proto_cap);
762     ptys2ethtool_supported_link(supported, eth_proto_cap);
763     ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
764     - ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause);
765     }
766    
767     static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
768     @@ -761,7 +760,7 @@ static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
769     unsigned long *advertising = link_ksettings->link_modes.advertising;
770    
771     ptys2ethtool_adver_link(advertising, eth_proto_cap);
772     - if (tx_pause)
773     + if (rx_pause)
774     ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
775     if (tx_pause ^ rx_pause)
776     ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause);
777     @@ -806,6 +805,8 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
778     struct mlx5e_priv *priv = netdev_priv(netdev);
779     struct mlx5_core_dev *mdev = priv->mdev;
780     u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
781     + u32 rx_pause = 0;
782     + u32 tx_pause = 0;
783     u32 eth_proto_cap;
784     u32 eth_proto_admin;
785     u32 eth_proto_lp;
786     @@ -828,11 +829,13 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
787     an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
788     an_status = MLX5_GET(ptys_reg, out, an_status);
789    
790     + mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
791     +
792     ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
793     ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
794    
795     get_supported(eth_proto_cap, link_ksettings);
796     - get_advertising(eth_proto_admin, 0, 0, link_ksettings);
797     + get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings);
798     get_speed_duplex(netdev, eth_proto_oper, link_ksettings);
799    
800     eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
801     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
802     index aaca09002ca6..f86e9ff995be 100644
803     --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
804     +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
805     @@ -234,7 +234,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
806     break;
807    
808     case MLX5_EVENT_TYPE_CMD:
809     - mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
810     + mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
811     break;
812    
813     case MLX5_EVENT_TYPE_PORT_CHANGE:
814     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
815     index 5bcf93422ee0..2115c8aacc5b 100644
816     --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
817     +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
818     @@ -90,7 +90,7 @@ static void trigger_cmd_completions(struct mlx5_core_dev *dev)
819     spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
820    
821     mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
822     - mlx5_cmd_comp_handler(dev, vector);
823     + mlx5_cmd_comp_handler(dev, vector, true);
824     return;
825    
826     no_trig:
827     diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
828     index c2dcf02df202..d6a541bde331 100644
829     --- a/drivers/net/phy/marvell.c
830     +++ b/drivers/net/phy/marvell.c
831     @@ -240,34 +240,6 @@ static int marvell_config_aneg(struct phy_device *phydev)
832     {
833     int err;
834    
835     - /* The Marvell PHY has an errata which requires
836     - * that certain registers get written in order
837     - * to restart autonegotiation */
838     - err = phy_write(phydev, MII_BMCR, BMCR_RESET);
839     -
840     - if (err < 0)
841     - return err;
842     -
843     - err = phy_write(phydev, 0x1d, 0x1f);
844     - if (err < 0)
845     - return err;
846     -
847     - err = phy_write(phydev, 0x1e, 0x200c);
848     - if (err < 0)
849     - return err;
850     -
851     - err = phy_write(phydev, 0x1d, 0x5);
852     - if (err < 0)
853     - return err;
854     -
855     - err = phy_write(phydev, 0x1e, 0);
856     - if (err < 0)
857     - return err;
858     -
859     - err = phy_write(phydev, 0x1e, 0x100);
860     - if (err < 0)
861     - return err;
862     -
863     err = marvell_set_polarity(phydev, phydev->mdix);
864     if (err < 0)
865     return err;
866     @@ -301,6 +273,42 @@ static int marvell_config_aneg(struct phy_device *phydev)
867     return 0;
868     }
869    
870     +static int m88e1101_config_aneg(struct phy_device *phydev)
871     +{
872     + int err;
873     +
874     + /* This Marvell PHY has an errata which requires
875     + * that certain registers get written in order
876     + * to restart autonegotiation
877     + */
878     + err = phy_write(phydev, MII_BMCR, BMCR_RESET);
879     +
880     + if (err < 0)
881     + return err;
882     +
883     + err = phy_write(phydev, 0x1d, 0x1f);
884     + if (err < 0)
885     + return err;
886     +
887     + err = phy_write(phydev, 0x1e, 0x200c);
888     + if (err < 0)
889     + return err;
890     +
891     + err = phy_write(phydev, 0x1d, 0x5);
892     + if (err < 0)
893     + return err;
894     +
895     + err = phy_write(phydev, 0x1e, 0);
896     + if (err < 0)
897     + return err;
898     +
899     + err = phy_write(phydev, 0x1e, 0x100);
900     + if (err < 0)
901     + return err;
902     +
903     + return marvell_config_aneg(phydev);
904     +}
905     +
906     static int m88e1111_config_aneg(struct phy_device *phydev)
907     {
908     int err;
909     @@ -1491,7 +1499,7 @@ static struct phy_driver marvell_drivers[] = {
910     .probe = marvell_probe,
911     .flags = PHY_HAS_INTERRUPT,
912     .config_init = &marvell_config_init,
913     - .config_aneg = &marvell_config_aneg,
914     + .config_aneg = &m88e1101_config_aneg,
915     .read_status = &genphy_read_status,
916     .ack_interrupt = &marvell_ack_interrupt,
917     .config_intr = &marvell_config_intr,
918     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
919     index 34d997ca1b27..2f260c63c383 100644
920     --- a/drivers/net/usb/qmi_wwan.c
921     +++ b/drivers/net/usb/qmi_wwan.c
922     @@ -897,6 +897,8 @@ static const struct usb_device_id products[] = {
923     {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
924     {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
925     {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
926     + {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
927     + {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
928     {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
929     {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
930     {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
931     diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
932     index 51fc0c33a62f..7ca99899972e 100644
933     --- a/drivers/net/virtio_net.c
934     +++ b/drivers/net/virtio_net.c
935     @@ -1456,6 +1456,7 @@ static const struct net_device_ops virtnet_netdev = {
936     #ifdef CONFIG_NET_RX_BUSY_POLL
937     .ndo_busy_poll = virtnet_busy_poll,
938     #endif
939     + .ndo_features_check = passthru_features_check,
940     };
941    
942     static void virtnet_config_changed_work(struct work_struct *work)
943     diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
944     index 80ef4865cc8b..ee02605a0f89 100644
945     --- a/drivers/net/vrf.c
946     +++ b/drivers/net/vrf.c
947     @@ -850,6 +850,7 @@ static u32 vrf_fib_table(const struct net_device *dev)
948    
949     static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
950     {
951     + kfree_skb(skb);
952     return 0;
953     }
954    
955     @@ -859,7 +860,7 @@ static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
956     {
957     struct net *net = dev_net(dev);
958    
959     - if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0)
960     + if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
961     skb = NULL; /* kfree_skb(skb) handled by nf code */
962    
963     return skb;
964     diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
965     index 5f2feeef8905..fbeca065f18c 100644
966     --- a/drivers/nvme/host/core.c
967     +++ b/drivers/nvme/host/core.c
968     @@ -1725,7 +1725,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
969     sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
970     &nvme_ns_attr_group);
971     del_gendisk(ns->disk);
972     - blk_mq_abort_requeue_list(ns->queue);
973     blk_cleanup_queue(ns->queue);
974     }
975    
976     @@ -2048,8 +2047,16 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
977     continue;
978     revalidate_disk(ns->disk);
979     blk_set_queue_dying(ns->queue);
980     - blk_mq_abort_requeue_list(ns->queue);
981     - blk_mq_start_stopped_hw_queues(ns->queue, true);
982     +
983     + /*
984     + * Forcibly start all queues to avoid having stuck requests.
985     + * Note that we must ensure the queues are not stopped
986     + * when the final removal happens.
987     + */
988     + blk_mq_start_hw_queues(ns->queue);
989     +
990     + /* draining requests in requeue list */
991     + blk_mq_kick_requeue_list(ns->queue);
992     }
993     mutex_unlock(&ctrl->namespaces_mutex);
994     }
995     diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
996     index 3d25add36d91..3222f3e987eb 100644
997     --- a/drivers/nvme/host/rdma.c
998     +++ b/drivers/nvme/host/rdma.c
999     @@ -1011,6 +1011,19 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
1000     nvme_rdma_wr_error(cq, wc, "SEND");
1001     }
1002    
1003     +static inline int nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
1004     +{
1005     + int sig_limit;
1006     +
1007     + /*
1008     + * We signal completion every queue depth/2 and also handle the
1009     + * degenerated case of a device with queue_depth=1, where we
1010     + * would need to signal every message.
1011     + */
1012     + sig_limit = max(queue->queue_size / 2, 1);
1013     + return (++queue->sig_count % sig_limit) == 0;
1014     +}
1015     +
1016     static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
1017     struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
1018     struct ib_send_wr *first, bool flush)
1019     @@ -1038,9 +1051,6 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
1020     * Would have been way to obvious to handle this in hardware or
1021     * at least the RDMA stack..
1022     *
1023     - * This messy and racy code sniplet is copy and pasted from the iSER
1024     - * initiator, and the magic '32' comes from there as well.
1025     - *
1026     * Always signal the flushes. The magic request used for the flush
1027     * sequencer is not allocated in our driver's tagset and it's
1028     * triggered to be freed by blk_cleanup_queue(). So we need to
1029     @@ -1048,7 +1058,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
1030     * embeded in request's payload, is not freed when __ib_process_cq()
1031     * calls wr_cqe->done().
1032     */
1033     - if ((++queue->sig_count % 32) == 0 || flush)
1034     + if (nvme_rdma_queue_sig_limit(queue) || flush)
1035     wr.send_flags |= IB_SEND_SIGNALED;
1036    
1037     if (first)
1038     diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
1039     index 6d4b68c483f3..f3756ca6f349 100644
1040     --- a/drivers/s390/net/qeth_core.h
1041     +++ b/drivers/s390/net/qeth_core.h
1042     @@ -718,6 +718,7 @@ enum qeth_discipline_id {
1043     };
1044    
1045     struct qeth_discipline {
1046     + const struct device_type *devtype;
1047     void (*start_poll)(struct ccw_device *, int, unsigned long);
1048     qdio_handler_t *input_handler;
1049     qdio_handler_t *output_handler;
1050     @@ -893,6 +894,9 @@ extern struct qeth_discipline qeth_l2_discipline;
1051     extern struct qeth_discipline qeth_l3_discipline;
1052     extern const struct attribute_group *qeth_generic_attr_groups[];
1053     extern const struct attribute_group *qeth_osn_attr_groups[];
1054     +extern const struct attribute_group qeth_device_attr_group;
1055     +extern const struct attribute_group qeth_device_blkt_group;
1056     +extern const struct device_type qeth_generic_devtype;
1057     extern struct workqueue_struct *qeth_wq;
1058    
1059     int qeth_card_hw_is_reachable(struct qeth_card *);
1060     diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
1061     index 20cf29613043..e8c48309ebe9 100644
1062     --- a/drivers/s390/net/qeth_core_main.c
1063     +++ b/drivers/s390/net/qeth_core_main.c
1064     @@ -5462,10 +5462,12 @@ void qeth_core_free_discipline(struct qeth_card *card)
1065     card->discipline = NULL;
1066     }
1067    
1068     -static const struct device_type qeth_generic_devtype = {
1069     +const struct device_type qeth_generic_devtype = {
1070     .name = "qeth_generic",
1071     .groups = qeth_generic_attr_groups,
1072     };
1073     +EXPORT_SYMBOL_GPL(qeth_generic_devtype);
1074     +
1075     static const struct device_type qeth_osn_devtype = {
1076     .name = "qeth_osn",
1077     .groups = qeth_osn_attr_groups,
1078     @@ -5591,23 +5593,22 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
1079     goto err_card;
1080     }
1081    
1082     - if (card->info.type == QETH_CARD_TYPE_OSN)
1083     - gdev->dev.type = &qeth_osn_devtype;
1084     - else
1085     - gdev->dev.type = &qeth_generic_devtype;
1086     -
1087     switch (card->info.type) {
1088     case QETH_CARD_TYPE_OSN:
1089     case QETH_CARD_TYPE_OSM:
1090     rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
1091     if (rc)
1092     goto err_card;
1093     +
1094     + gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
1095     + ? card->discipline->devtype
1096     + : &qeth_osn_devtype;
1097     rc = card->discipline->setup(card->gdev);
1098     if (rc)
1099     goto err_disc;
1100     - case QETH_CARD_TYPE_OSD:
1101     - case QETH_CARD_TYPE_OSX:
1102     + break;
1103     default:
1104     + gdev->dev.type = &qeth_generic_devtype;
1105     break;
1106     }
1107    
1108     @@ -5663,8 +5664,10 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
1109     if (rc)
1110     goto err;
1111     rc = card->discipline->setup(card->gdev);
1112     - if (rc)
1113     + if (rc) {
1114     + qeth_core_free_discipline(card);
1115     goto err;
1116     + }
1117     }
1118     rc = card->discipline->set_online(gdev);
1119     err:
1120     diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
1121     index 75b29fd2fcf4..db6a285d41e0 100644
1122     --- a/drivers/s390/net/qeth_core_sys.c
1123     +++ b/drivers/s390/net/qeth_core_sys.c
1124     @@ -413,12 +413,16 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
1125    
1126     if (card->options.layer2 == newdis)
1127     goto out;
1128     - else {
1129     - card->info.mac_bits = 0;
1130     - if (card->discipline) {
1131     - card->discipline->remove(card->gdev);
1132     - qeth_core_free_discipline(card);
1133     - }
1134     + if (card->info.type == QETH_CARD_TYPE_OSM) {
1135     + /* fixed layer, can't switch */
1136     + rc = -EOPNOTSUPP;
1137     + goto out;
1138     + }
1139     +
1140     + card->info.mac_bits = 0;
1141     + if (card->discipline) {
1142     + card->discipline->remove(card->gdev);
1143     + qeth_core_free_discipline(card);
1144     }
1145    
1146     rc = qeth_core_load_discipline(card, newdis);
1147     @@ -426,6 +430,8 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
1148     goto out;
1149    
1150     rc = card->discipline->setup(card->gdev);
1151     + if (rc)
1152     + qeth_core_free_discipline(card);
1153     out:
1154     mutex_unlock(&card->discipline_mutex);
1155     return rc ? rc : count;
1156     @@ -703,10 +709,11 @@ static struct attribute *qeth_blkt_device_attrs[] = {
1157     &dev_attr_inter_jumbo.attr,
1158     NULL,
1159     };
1160     -static struct attribute_group qeth_device_blkt_group = {
1161     +const struct attribute_group qeth_device_blkt_group = {
1162     .name = "blkt",
1163     .attrs = qeth_blkt_device_attrs,
1164     };
1165     +EXPORT_SYMBOL_GPL(qeth_device_blkt_group);
1166    
1167     static struct attribute *qeth_device_attrs[] = {
1168     &dev_attr_state.attr,
1169     @@ -726,9 +733,10 @@ static struct attribute *qeth_device_attrs[] = {
1170     &dev_attr_switch_attrs.attr,
1171     NULL,
1172     };
1173     -static struct attribute_group qeth_device_attr_group = {
1174     +const struct attribute_group qeth_device_attr_group = {
1175     .attrs = qeth_device_attrs,
1176     };
1177     +EXPORT_SYMBOL_GPL(qeth_device_attr_group);
1178    
1179     const struct attribute_group *qeth_generic_attr_groups[] = {
1180     &qeth_device_attr_group,
1181     diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
1182     index 29d9fb3890ad..0d59f9a45ea9 100644
1183     --- a/drivers/s390/net/qeth_l2.h
1184     +++ b/drivers/s390/net/qeth_l2.h
1185     @@ -8,6 +8,8 @@
1186    
1187     #include "qeth_core.h"
1188    
1189     +extern const struct attribute_group *qeth_l2_attr_groups[];
1190     +
1191     int qeth_l2_create_device_attributes(struct device *);
1192     void qeth_l2_remove_device_attributes(struct device *);
1193     void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
1194     diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
1195     index bb27058fa9f0..5d010aa89852 100644
1196     --- a/drivers/s390/net/qeth_l2_main.c
1197     +++ b/drivers/s390/net/qeth_l2_main.c
1198     @@ -1021,11 +1021,21 @@ static int qeth_l2_stop(struct net_device *dev)
1199     return 0;
1200     }
1201    
1202     +static const struct device_type qeth_l2_devtype = {
1203     + .name = "qeth_layer2",
1204     + .groups = qeth_l2_attr_groups,
1205     +};
1206     +
1207     static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
1208     {
1209     struct qeth_card *card = dev_get_drvdata(&gdev->dev);
1210     + int rc;
1211    
1212     - qeth_l2_create_device_attributes(&gdev->dev);
1213     + if (gdev->dev.type == &qeth_generic_devtype) {
1214     + rc = qeth_l2_create_device_attributes(&gdev->dev);
1215     + if (rc)
1216     + return rc;
1217     + }
1218     INIT_LIST_HEAD(&card->vid_list);
1219     hash_init(card->mac_htable);
1220     card->options.layer2 = 1;
1221     @@ -1037,7 +1047,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
1222     {
1223     struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
1224    
1225     - qeth_l2_remove_device_attributes(&cgdev->dev);
1226     + if (cgdev->dev.type == &qeth_generic_devtype)
1227     + qeth_l2_remove_device_attributes(&cgdev->dev);
1228     qeth_set_allowed_threads(card, 0, 1);
1229     wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
1230    
1231     @@ -1095,7 +1106,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
1232     case QETH_CARD_TYPE_OSN:
1233     card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN,
1234     ether_setup);
1235     - card->dev->flags |= IFF_NOARP;
1236     break;
1237     default:
1238     card->dev = alloc_etherdev(0);
1239     @@ -1108,9 +1118,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
1240     card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
1241     card->dev->mtu = card->info.initial_mtu;
1242     card->dev->netdev_ops = &qeth_l2_netdev_ops;
1243     - card->dev->ethtool_ops =
1244     - (card->info.type != QETH_CARD_TYPE_OSN) ?
1245     - &qeth_l2_ethtool_ops : &qeth_l2_osn_ops;
1246     + if (card->info.type == QETH_CARD_TYPE_OSN) {
1247     + card->dev->ethtool_ops = &qeth_l2_osn_ops;
1248     + card->dev->flags |= IFF_NOARP;
1249     + } else {
1250     + card->dev->ethtool_ops = &qeth_l2_ethtool_ops;
1251     + }
1252     card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1253     if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
1254     card->dev->hw_features = NETIF_F_SG;
1255     @@ -1434,6 +1447,7 @@ static int qeth_l2_control_event(struct qeth_card *card,
1256     }
1257    
1258     struct qeth_discipline qeth_l2_discipline = {
1259     + .devtype = &qeth_l2_devtype,
1260     .start_poll = qeth_qdio_start_poll,
1261     .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
1262     .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
1263     diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
1264     index 692db49e3d2a..a48ed9e7e168 100644
1265     --- a/drivers/s390/net/qeth_l2_sys.c
1266     +++ b/drivers/s390/net/qeth_l2_sys.c
1267     @@ -272,3 +272,11 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
1268     } else
1269     qeth_bridgeport_an_set(card, 0);
1270     }
1271     +
1272     +const struct attribute_group *qeth_l2_attr_groups[] = {
1273     + &qeth_device_attr_group,
1274     + &qeth_device_blkt_group,
1275     + /* l2 specific, see l2_{create,remove}_device_attributes(): */
1276     + &qeth_l2_bridgeport_attr_group,
1277     + NULL,
1278     +};
1279     diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
1280     index 272d9e7419be..171be5ec2ece 100644
1281     --- a/drivers/s390/net/qeth_l3_main.c
1282     +++ b/drivers/s390/net/qeth_l3_main.c
1283     @@ -3157,8 +3157,13 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
1284     static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
1285     {
1286     struct qeth_card *card = dev_get_drvdata(&gdev->dev);
1287     + int rc;
1288    
1289     - qeth_l3_create_device_attributes(&gdev->dev);
1290     + rc = qeth_l3_create_device_attributes(&gdev->dev);
1291     + if (rc)
1292     + return rc;
1293     + hash_init(card->ip_htable);
1294     + hash_init(card->ip_mc_htable);
1295     card->options.layer2 = 0;
1296     card->info.hwtrap = 0;
1297     return 0;
1298     @@ -3450,6 +3455,7 @@ static int qeth_l3_control_event(struct qeth_card *card,
1299     }
1300    
1301     struct qeth_discipline qeth_l3_discipline = {
1302     + .devtype = &qeth_generic_devtype,
1303     .start_poll = qeth_qdio_start_poll,
1304     .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
1305     .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
1306     diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
1307     index 904422f5b62f..04148438d7ec 100644
1308     --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
1309     +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
1310     @@ -1169,6 +1169,8 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
1311     cmd = list_first_entry_or_null(&vscsi->free_cmd,
1312     struct ibmvscsis_cmd, list);
1313     if (cmd) {
1314     + if (cmd->abort_cmd)
1315     + cmd->abort_cmd = NULL;
1316     cmd->flags &= ~(DELAY_SEND);
1317     list_del(&cmd->list);
1318     cmd->iue = iue;
1319     @@ -1773,6 +1775,7 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
1320     if (cmd->abort_cmd) {
1321     retry = true;
1322     cmd->abort_cmd->flags &= ~(DELAY_SEND);
1323     + cmd->abort_cmd = NULL;
1324     }
1325    
1326     /*
1327     @@ -1787,6 +1790,25 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
1328     list_del(&cmd->list);
1329     ibmvscsis_free_cmd_resources(vscsi,
1330     cmd);
1331     + /*
1332     + * With a successfully aborted op
1333     + * through LIO we want to increment the
1334     + * the vscsi credit so that when we dont
1335     + * send a rsp to the original scsi abort
1336     + * op (h_send_crq), but the tm rsp to
1337     + * the abort is sent, the credit is
1338     + * correctly sent with the abort tm rsp.
1339     + * We would need 1 for the abort tm rsp
1340     + * and 1 credit for the aborted scsi op.
1341     + * Thus we need to increment here.
1342     + * Also we want to increment the credit
1343     + * here because we want to make sure
1344     + * cmd is actually released first,
1345     + * otherwise the client will think
1346     + * it can send a new cmd, and we could
1347     + * find ourselves short of cmd elements.
1348     + */
1349     + vscsi->credit += 1;
1350     } else {
1351     iue = cmd->iue;
1352    
1353     @@ -2961,10 +2983,7 @@ static long srp_build_response(struct scsi_info *vscsi,
1354    
1355     rsp->opcode = SRP_RSP;
1356    
1357     - if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
1358     - rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
1359     - else
1360     - rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
1361     + rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
1362     rsp->tag = cmd->rsp.tag;
1363     rsp->flags = 0;
1364    
1365     diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1366     index 8a7941b8189f..289374cbcb47 100644
1367     --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1368     +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1369     @@ -4634,6 +4634,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
1370     struct MPT3SAS_DEVICE *sas_device_priv_data;
1371     u32 response_code = 0;
1372     unsigned long flags;
1373     + unsigned int sector_sz;
1374    
1375     mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1376     scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
1377     @@ -4692,6 +4693,20 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
1378     }
1379    
1380     xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
1381     +
1382     + /* In case of bogus fw or device, we could end up having
1383     + * unaligned partial completion. We can force alignment here,
1384     + * then scsi-ml does not need to handle this misbehavior.
1385     + */
1386     + sector_sz = scmd->device->sector_size;
1387     + if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz &&
1388     + xfer_cnt % sector_sz)) {
1389     + sdev_printk(KERN_INFO, scmd->device,
1390     + "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
1391     + xfer_cnt, sector_sz);
1392     + xfer_cnt = round_down(xfer_cnt, sector_sz);
1393     + }
1394     +
1395     scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
1396     if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
1397     log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
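
The round_down() above defends against firmware that reports a transfer count that is not a whole number of sectors. A minimal standalone sketch of the same arithmetic (plain C; the names and sample values here are illustrative, not mpt3sas's):

#include <stdio.h>

/* Truncate n down to a whole multiple of sz. The kernel's
 * round_down() does this with a mask and requires sz to be a power
 * of two, which real sector sizes always are. */
static unsigned int align_down(unsigned int n, unsigned int sz)
{
	return n - (n % sz);
}

int main(void)
{
	unsigned int xfer_cnt = 1234;	/* bogus partial completion */
	unsigned int sector_sz = 512;

	if (sector_sz && xfer_cnt % sector_sz)
		xfer_cnt = align_down(xfer_cnt, sector_sz);
	printf("%u\n", xfer_cnt);	/* prints 1024 */
	return 0;
}
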
1398     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
1399     index 40e50f2d209d..01ea228358ea 100644
1400     --- a/drivers/target/iscsi/iscsi_target.c
1401     +++ b/drivers/target/iscsi/iscsi_target.c
1402     @@ -3798,6 +3798,8 @@ int iscsi_target_tx_thread(void *arg)
1403     {
1404     int ret = 0;
1405     struct iscsi_conn *conn = arg;
1406     + bool conn_freed = false;
1407     +
1408     /*
1409     * Allow ourselves to be interrupted by SIGINT so that a
1410     * connection recovery / failure event can be triggered externally.
1411     @@ -3823,12 +3825,14 @@ int iscsi_target_tx_thread(void *arg)
1412     goto transport_err;
1413    
1414     ret = iscsit_handle_response_queue(conn);
1415     - if (ret == 1)
1416     + if (ret == 1) {
1417     goto get_immediate;
1418     - else if (ret == -ECONNRESET)
1419     + } else if (ret == -ECONNRESET) {
1420     + conn_freed = true;
1421     goto out;
1422     - else if (ret < 0)
1423     + } else if (ret < 0) {
1424     goto transport_err;
1425     + }
1426     }
1427    
1428     transport_err:
1429     @@ -3838,8 +3842,13 @@ int iscsi_target_tx_thread(void *arg)
1430     * responsible for cleaning up the early connection failure.
1431     */
1432     if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
1433     - iscsit_take_action_for_connection_exit(conn);
1434     + iscsit_take_action_for_connection_exit(conn, &conn_freed);
1435     out:
1436     + if (!conn_freed) {
1437     + while (!kthread_should_stop()) {
1438     + msleep(100);
1439     + }
1440     + }
1441     return 0;
1442     }
1443    
1444     @@ -4012,6 +4021,7 @@ int iscsi_target_rx_thread(void *arg)
1445     {
1446     int rc;
1447     struct iscsi_conn *conn = arg;
1448     + bool conn_freed = false;
1449    
1450     /*
1451     * Allow ourselves to be interrupted by SIGINT so that a
1452     @@ -4024,7 +4034,7 @@ int iscsi_target_rx_thread(void *arg)
1453     */
1454     rc = wait_for_completion_interruptible(&conn->rx_login_comp);
1455     if (rc < 0 || iscsi_target_check_conn_state(conn))
1456     - return 0;
1457     + goto out;
1458    
1459     if (!conn->conn_transport->iscsit_get_rx_pdu)
1460     return 0;
1461     @@ -4033,7 +4043,15 @@ int iscsi_target_rx_thread(void *arg)
1462    
1463     if (!signal_pending(current))
1464     atomic_set(&conn->transport_failed, 1);
1465     - iscsit_take_action_for_connection_exit(conn);
1466     + iscsit_take_action_for_connection_exit(conn, &conn_freed);
1467     +
1468     +out:
1469     + if (!conn_freed) {
1470     + while (!kthread_should_stop()) {
1471     + msleep(100);
1472     + }
1473     + }
1474     +
1475     return 0;
1476     }
1477    
1478     diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
1479     index b54e72c7ab0f..efc453ef6831 100644
1480     --- a/drivers/target/iscsi/iscsi_target_erl0.c
1481     +++ b/drivers/target/iscsi/iscsi_target_erl0.c
1482     @@ -930,8 +930,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
1483     }
1484     }
1485    
1486     -void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
1487     +void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
1488     {
1489     + *conn_freed = false;
1490     +
1491     spin_lock_bh(&conn->state_lock);
1492     if (atomic_read(&conn->connection_exit)) {
1493     spin_unlock_bh(&conn->state_lock);
1494     @@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
1495     if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
1496     spin_unlock_bh(&conn->state_lock);
1497     iscsit_close_connection(conn);
1498     + *conn_freed = true;
1499     return;
1500     }
1501    
1502     @@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
1503     spin_unlock_bh(&conn->state_lock);
1504    
1505     iscsit_handle_connection_cleanup(conn);
1506     + *conn_freed = true;
1507     }
1508     diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
1509     index a9e2f9497fb2..fbc1d84a63c3 100644
1510     --- a/drivers/target/iscsi/iscsi_target_erl0.h
1511     +++ b/drivers/target/iscsi/iscsi_target_erl0.h
1512     @@ -9,6 +9,6 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
1513     extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
1514     extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
1515     extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
1516     -extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
1517     +extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
1518    
1519     #endif /*** ISCSI_TARGET_ERL0_H ***/
1520     diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
1521     index 96c55bc10ac9..6128e8e80170 100644
1522     --- a/drivers/target/iscsi/iscsi_target_login.c
1523     +++ b/drivers/target/iscsi/iscsi_target_login.c
1524     @@ -1460,5 +1460,9 @@ int iscsi_target_login_thread(void *arg)
1525     break;
1526     }
1527    
1528     + while (!kthread_should_stop()) {
1529     + msleep(100);
1530     + }
1531     +
1532     return 0;
1533     }
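
The msleep() loops added to the tx, rx, and login threads above all apply the same kthread rule: a thread whose connection may already have been freed parks until kthread_stop() is called, so the stopping side never races with a thread that has exited on its own. A minimal sketch of the pattern, with a hypothetical thread function:

#include <linux/kthread.h>
#include <linux/delay.h>

static int example_conn_thread(void *arg)
{
	/* ... normal work; per-connection state may be freed along
	 * the way, as in the iscsi threads above ... */

	/* Park until the owner calls kthread_stop(); returning
	 * earlier would let the task exit while the owner still
	 * expects to be able to stop it. */
	while (!kthread_should_stop())
		msleep(100);
	return 0;
}
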
1534     diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
1535     index 080d5a59d0a7..f24d3030b98c 100644
1536     --- a/drivers/tty/serial/8250/8250_port.c
1537     +++ b/drivers/tty/serial/8250/8250_port.c
1538     @@ -1320,7 +1320,7 @@ static void autoconfig(struct uart_8250_port *up)
1539     /*
1540     * Check if the device is a Fintek F81216A
1541     */
1542     - if (port->type == PORT_16550A)
1543     + if (port->type == PORT_16550A && port->iotype == UPIO_PORT)
1544     fintek_8250_probe(up);
1545    
1546     if (up->capabilities != old_capabilities) {
1547     diff --git a/fs/ufs/super.c b/fs/ufs/super.c
1548     index f04ab232d08d..f3469ad0fef2 100644
1549     --- a/fs/ufs/super.c
1550     +++ b/fs/ufs/super.c
1551     @@ -812,9 +812,8 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
1552     uspi->s_dirblksize = UFS_SECTOR_SIZE;
1553     super_block_offset=UFS_SBLOCK;
1554    
1555     - /* Keep 2Gig file limit. Some UFS variants need to override
1556     - this but as I don't know which I'll let those in the know loosen
1557     - the rules */
1558     + sb->s_maxbytes = MAX_LFS_FILESIZE;
1559     +
1560     switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) {
1561     case UFS_MOUNT_UFSTYPE_44BSD:
1562     UFSD("ufstype=44bsd\n");
1563     diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
1564     index 5a508b011e27..2a8cbd15d5d1 100644
1565     --- a/fs/xfs/libxfs/xfs_bmap.c
1566     +++ b/fs/xfs/libxfs/xfs_bmap.c
1567     @@ -2208,8 +2208,10 @@ xfs_bmap_add_extent_delay_real(
1568     }
1569     temp = xfs_bmap_worst_indlen(bma->ip, temp);
1570     temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
1571     - diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
1572     - (bma->cur ? bma->cur->bc_private.b.allocated : 0));
1573     + diff = (int)(temp + temp2 -
1574     + (startblockval(PREV.br_startblock) -
1575     + (bma->cur ?
1576     + bma->cur->bc_private.b.allocated : 0)));
1577     if (diff > 0) {
1578     error = xfs_mod_fdblocks(bma->ip->i_mount,
1579     -((int64_t)diff), false);
1580     @@ -2266,7 +2268,6 @@ xfs_bmap_add_extent_delay_real(
1581     temp = da_new;
1582     if (bma->cur)
1583     temp += bma->cur->bc_private.b.allocated;
1584     - ASSERT(temp <= da_old);
1585     if (temp < da_old)
1586     xfs_mod_fdblocks(bma->ip->i_mount,
1587     (int64_t)(da_old - temp), false);
1588     @@ -3964,7 +3965,7 @@ xfs_bmap_remap_alloc(
1589     {
1590     struct xfs_trans *tp = ap->tp;
1591     struct xfs_mount *mp = tp->t_mountp;
1592     - xfs_agblock_t bno;
1593     + xfs_fsblock_t bno;
1594     struct xfs_alloc_arg args;
1595     int error;
1596    
1597     diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
1598     index 2849d3fa3d0b..91c68913d495 100644
1599     --- a/fs/xfs/libxfs/xfs_btree.c
1600     +++ b/fs/xfs/libxfs/xfs_btree.c
1601     @@ -4376,7 +4376,7 @@ xfs_btree_visit_blocks(
1602     xfs_btree_readahead_ptr(cur, ptr, 1);
1603    
1604     /* save for the next iteration of the loop */
1605     - lptr = *ptr;
1606     + xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);
1607     }
1608    
1609     /* for each buffer in the level */
1610     diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
1611     index ef9f6ead96a4..699a51bb2cbe 100644
1612     --- a/fs/xfs/libxfs/xfs_dir2_priv.h
1613     +++ b/fs/xfs/libxfs/xfs_dir2_priv.h
1614     @@ -126,6 +126,7 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
1615     extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
1616     extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
1617     extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
1618     +extern int xfs_dir2_sf_verify(struct xfs_inode *ip);
1619    
1620     /* xfs_dir2_readdir.c */
1621     extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
1622     diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
1623     index c6809ff41197..e84af093b2ab 100644
1624     --- a/fs/xfs/libxfs/xfs_dir2_sf.c
1625     +++ b/fs/xfs/libxfs/xfs_dir2_sf.c
1626     @@ -629,6 +629,112 @@ xfs_dir2_sf_check(
1627     }
1628     #endif /* DEBUG */
1629    
1630     +/* Verify the consistency of an inline directory. */
1631     +int
1632     +xfs_dir2_sf_verify(
1633     + struct xfs_inode *ip)
1634     +{
1635     + struct xfs_mount *mp = ip->i_mount;
1636     + struct xfs_dir2_sf_hdr *sfp;
1637     + struct xfs_dir2_sf_entry *sfep;
1638     + struct xfs_dir2_sf_entry *next_sfep;
1639     + char *endp;
1640     + const struct xfs_dir_ops *dops;
1641     + struct xfs_ifork *ifp;
1642     + xfs_ino_t ino;
1643     + int i;
1644     + int i8count;
1645     + int offset;
1646     + int size;
1647     + int error;
1648     + __uint8_t filetype;
1649     +
1650     + ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL);
1651     + /*
1652     + * xfs_iread calls us before xfs_setup_inode sets up ip->d_ops,
1653     + * so we can only trust the mountpoint to have the right pointer.
1654     + */
1655     + dops = xfs_dir_get_ops(mp, NULL);
1656     +
1657     + ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1658     + sfp = (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data;
1659     + size = ifp->if_bytes;
1660     +
1661     + /*
1662     + * Give up if the directory is way too short.
1663     + */
1664     + if (size <= offsetof(struct xfs_dir2_sf_hdr, parent) ||
1665     + size < xfs_dir2_sf_hdr_size(sfp->i8count))
1666     + return -EFSCORRUPTED;
1667     +
1668     + endp = (char *)sfp + size;
1669     +
1670     + /* Check .. entry */
1671     + ino = dops->sf_get_parent_ino(sfp);
1672     + i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
1673     + error = xfs_dir_ino_validate(mp, ino);
1674     + if (error)
1675     + return error;
1676     + offset = dops->data_first_offset;
1677     +
1678     + /* Check all reported entries */
1679     + sfep = xfs_dir2_sf_firstentry(sfp);
1680     + for (i = 0; i < sfp->count; i++) {
1681     + /*
1682     + * struct xfs_dir2_sf_entry has a variable length.
1683     + * Check the fixed-offset parts of the structure are
1684     + * within the data buffer.
1685     + */
1686     + if (((char *)sfep + sizeof(*sfep)) >= endp)
1687     + return -EFSCORRUPTED;
1688     +
1689     + /* Don't allow names with known bad length. */
1690     + if (sfep->namelen == 0)
1691     + return -EFSCORRUPTED;
1692     +
1693     + /*
1694     + * Check that the variable-length part of the structure is
1695     + * within the data buffer. The next entry starts after the
1696     + * name component, so nextentry is an acceptable test.
1697     + */
1698     + next_sfep = dops->sf_nextentry(sfp, sfep);
1699     + if (endp < (char *)next_sfep)
1700     + return -EFSCORRUPTED;
1701     +
1702     + /* Check that the offsets always increase. */
1703     + if (xfs_dir2_sf_get_offset(sfep) < offset)
1704     + return -EFSCORRUPTED;
1705     +
1706     + /* Check the inode number. */
1707     + ino = dops->sf_get_ino(sfp, sfep);
1708     + i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
1709     + error = xfs_dir_ino_validate(mp, ino);
1710     + if (error)
1711     + return error;
1712     +
1713     + /* Check the file type. */
1714     + filetype = dops->sf_get_ftype(sfep);
1715     + if (filetype >= XFS_DIR3_FT_MAX)
1716     + return -EFSCORRUPTED;
1717     +
1718     + offset = xfs_dir2_sf_get_offset(sfep) +
1719     + dops->data_entsize(sfep->namelen);
1720     +
1721     + sfep = next_sfep;
1722     + }
1723     + if (i8count != sfp->i8count)
1724     + return -EFSCORRUPTED;
1725     + if ((void *)sfep != (void *)endp)
1726     + return -EFSCORRUPTED;
1727     +
1728     + /* Make sure this whole thing ought to be in local format. */
1729     + if (offset + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
1730     + (uint)sizeof(xfs_dir2_block_tail_t) > mp->m_dir_geo->blksize)
1731     + return -EFSCORRUPTED;
1732     +
1733     + return 0;
1734     +}
1735     +
1736     /*
1737     * Create a new (shortform) directory.
1738     */
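
The heart of the verifier above is bounds-checking both the fixed head and the variable-length tail of every entry against the end of the inline buffer before trusting any field. A reduced sketch of that idiom (the struct and names here are hypothetical, not the xfs_dir2 on-disk layout):

#include <stddef.h>

struct ex_entry {
	unsigned char	namelen;
	char		name[];		/* variable-length tail */
};

/* Nonzero if the entry at e lies entirely inside the buffer ending
 * at endp; the fixed part is checked before namelen is read. */
static int ex_entry_in_bounds(const struct ex_entry *e, const char *endp)
{
	if ((const char *)e + sizeof(*e) >= endp)
		return 0;
	if ((const char *)e->name + e->namelen > endp)
		return 0;
	return 1;
}
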
1739     diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
1740     index 25c1e078aef6..8a37efe04de3 100644
1741     --- a/fs/xfs/libxfs/xfs_inode_fork.c
1742     +++ b/fs/xfs/libxfs/xfs_inode_fork.c
1743     @@ -33,6 +33,8 @@
1744     #include "xfs_trace.h"
1745     #include "xfs_attr_sf.h"
1746     #include "xfs_da_format.h"
1747     +#include "xfs_da_btree.h"
1748     +#include "xfs_dir2_priv.h"
1749    
1750     kmem_zone_t *xfs_ifork_zone;
1751    
1752     @@ -210,6 +212,16 @@ xfs_iformat_fork(
1753     if (error)
1754     return error;
1755    
1756     + /* Check inline dir contents. */
1757     + if (S_ISDIR(VFS_I(ip)->i_mode) &&
1758     + dip->di_format == XFS_DINODE_FMT_LOCAL) {
1759     + error = xfs_dir2_sf_verify(ip);
1760     + if (error) {
1761     + xfs_idestroy_fork(ip, XFS_DATA_FORK);
1762     + return error;
1763     + }
1764     + }
1765     +
1766     if (xfs_is_reflink_inode(ip)) {
1767     ASSERT(ip->i_cowfp == NULL);
1768     xfs_ifork_init_cow(ip);
1769     @@ -320,7 +332,6 @@ xfs_iformat_local(
1770     int whichfork,
1771     int size)
1772     {
1773     -
1774     /*
1775     * If the size is unreasonable, then something
1776     * is wrong and we just bail out rather than crash in
1777     diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
1778     index b177ef33cd4c..82a38d86ebad 100644
1779     --- a/fs/xfs/libxfs/xfs_refcount.c
1780     +++ b/fs/xfs/libxfs/xfs_refcount.c
1781     @@ -1629,13 +1629,28 @@ xfs_refcount_recover_cow_leftovers(
1782     if (mp->m_sb.sb_agblocks >= XFS_REFC_COW_START)
1783     return -EOPNOTSUPP;
1784    
1785     - error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
1786     + INIT_LIST_HEAD(&debris);
1787     +
1788     + /*
1789     + * In this first part, we use an empty transaction to gather up
1790     + * all the leftover CoW extents so that we can subsequently
1791     + * delete them. The empty transaction is used to avoid
1792     + * a buffer lock deadlock if there happens to be a loop in the
1793     + * refcountbt because we're allowed to re-grab a buffer that is
1794     + * already attached to our transaction. When we're done
1795     + * recording the CoW debris we cancel the (empty) transaction
1796     + * and everything goes away cleanly.
1797     + */
1798     + error = xfs_trans_alloc_empty(mp, &tp);
1799     if (error)
1800     return error;
1801     - cur = xfs_refcountbt_init_cursor(mp, NULL, agbp, agno, NULL);
1802     +
1803     + error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
1804     + if (error)
1805     + goto out_trans;
1806     + cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
1807    
1808     /* Find all the leftover CoW staging extents. */
1809     - INIT_LIST_HEAD(&debris);
1810     memset(&low, 0, sizeof(low));
1811     memset(&high, 0, sizeof(high));
1812     low.rc.rc_startblock = XFS_REFC_COW_START;
1813     @@ -1645,10 +1660,11 @@ xfs_refcount_recover_cow_leftovers(
1814     if (error)
1815     goto out_cursor;
1816     xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1817     - xfs_buf_relse(agbp);
1818     + xfs_trans_brelse(tp, agbp);
1819     + xfs_trans_cancel(tp);
1820    
1821     /* Now iterate the list to free the leftovers */
1822     - list_for_each_entry(rr, &debris, rr_list) {
1823     + list_for_each_entry_safe(rr, n, &debris, rr_list) {
1824     /* Set up transaction. */
1825     error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
1826     if (error)
1827     @@ -1676,8 +1692,16 @@ xfs_refcount_recover_cow_leftovers(
1828     error = xfs_trans_commit(tp);
1829     if (error)
1830     goto out_free;
1831     +
1832     + list_del(&rr->rr_list);
1833     + kmem_free(rr);
1834     }
1835    
1836     + return error;
1837     +out_defer:
1838     + xfs_defer_cancel(&dfops);
1839     +out_trans:
1840     + xfs_trans_cancel(tp);
1841     out_free:
1842     /* Free the leftover list */
1843     list_for_each_entry_safe(rr, n, &debris, rr_list) {
1844     @@ -1688,11 +1712,6 @@ xfs_refcount_recover_cow_leftovers(
1845    
1846     out_cursor:
1847     xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
1848     - xfs_buf_relse(agbp);
1849     - goto out_free;
1850     -
1851     -out_defer:
1852     - xfs_defer_cancel(&dfops);
1853     - xfs_trans_cancel(tp);
1854     - goto out_free;
1855     + xfs_trans_brelse(tp, agbp);
1856     + goto out_trans;
1857     }
1858     diff --git a/fs/xfs/libxfs/xfs_trans_space.h b/fs/xfs/libxfs/xfs_trans_space.h
1859     index 7917f6e44286..d787c677d2a3 100644
1860     --- a/fs/xfs/libxfs/xfs_trans_space.h
1861     +++ b/fs/xfs/libxfs/xfs_trans_space.h
1862     @@ -21,8 +21,20 @@
1863     /*
1864     * Components of space reservations.
1865     */
1866     +
1867     +/* Worst case number of rmaps that can be held in a block. */
1868     #define XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) \
1869     (((mp)->m_rmap_mxr[0]) - ((mp)->m_rmap_mnr[0]))
1870     +
1871     +/* Adding one rmap could split every level up to the top of the tree. */
1872     +#define XFS_RMAPADD_SPACE_RES(mp) ((mp)->m_rmap_maxlevels)
1873     +
1874     +/* Blocks we might need to add "b" rmaps to a tree. */
1875     +#define XFS_NRMAPADD_SPACE_RES(mp, b)\
1876     + (((b + XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) - 1) / \
1877     + XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp)) * \
1878     + XFS_RMAPADD_SPACE_RES(mp))
1879     +
1880     #define XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) \
1881     (((mp)->m_alloc_mxr[0]) - ((mp)->m_alloc_mnr[0]))
1882     #define XFS_EXTENTADD_SPACE_RES(mp,w) (XFS_BM_MAXLEVELS(mp,w) - 1)
1883     @@ -30,13 +42,12 @@
1884     (((b + XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) - 1) / \
1885     XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)) * \
1886     XFS_EXTENTADD_SPACE_RES(mp,w))
1887     +
1888     +/* Blocks we might need to add "b" mappings & rmappings to a file. */
1889     #define XFS_SWAP_RMAP_SPACE_RES(mp,b,w)\
1890     - (((b + XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) - 1) / \
1891     - XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)) * \
1892     - XFS_EXTENTADD_SPACE_RES(mp,w) + \
1893     - ((b + XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) - 1) / \
1894     - XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp)) * \
1895     - (mp)->m_rmap_maxlevels)
1896     + (XFS_NEXTENTADD_SPACE_RES((mp), (b), (w)) + \
1897     + XFS_NRMAPADD_SPACE_RES((mp), (b)))
1898     +
1899     #define XFS_DAENTER_1B(mp,w) \
1900     ((w) == XFS_DATA_FORK ? (mp)->m_dir_geo->fsbcount : 1)
1901     #define XFS_DAENTER_DBS(mp,w) \
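
XFS_NRMAPADD_SPACE_RES() above is a ceiling division (how many worst-case insertions the b rmaps amount to) multiplied by the per-insertion split cost. Worked through with made-up geometry:

#include <stdio.h>

/* Ceiling division, the same shape as the kernel's DIV_ROUND_UP(). */
#define CEIL_DIV(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int b = 100;		/* rmaps to add (hypothetical) */
	unsigned int per_blk = 30;	/* max contiguous rmaps per block */
	unsigned int maxlevels = 5;	/* rmap btree height */

	/* ceil(100 / 30) = 4, and each insertion may split every
	 * level, so 4 * 5 = 20 blocks are reserved. */
	printf("%u\n", CEIL_DIV(b, per_blk) * maxlevels);
	return 0;
}
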
1902     diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
1903     index 0457abe4118a..6df0a7ce3e8a 100644
1904     --- a/fs/xfs/xfs_aops.c
1905     +++ b/fs/xfs/xfs_aops.c
1906     @@ -116,11 +116,11 @@ xfs_finish_page_writeback(
1907    
1908     bsize = bh->b_size;
1909     do {
1910     + if (off > end)
1911     + break;
1912     next = bh->b_this_page;
1913     if (off < bvec->bv_offset)
1914     goto next_bh;
1915     - if (off > end)
1916     - break;
1917     bh->b_end_io(bh, !error);
1918     next_bh:
1919     off += bsize;
1920     diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
1921     index 9bf57c76623b..c4b90e794e41 100644
1922     --- a/fs/xfs/xfs_bmap_item.c
1923     +++ b/fs/xfs/xfs_bmap_item.c
1924     @@ -34,6 +34,8 @@
1925     #include "xfs_bmap.h"
1926     #include "xfs_icache.h"
1927     #include "xfs_trace.h"
1928     +#include "xfs_bmap_btree.h"
1929     +#include "xfs_trans_space.h"
1930    
1931    
1932     kmem_zone_t *xfs_bui_zone;
1933     @@ -446,7 +448,8 @@ xfs_bui_recover(
1934     return -EIO;
1935     }
1936    
1937     - error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1938     + error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
1939     + XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
1940     if (error)
1941     return error;
1942     budp = xfs_trans_get_bud(tp, buip);
1943     diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
1944     index 5328ecdd03d4..87b495e2f15a 100644
1945     --- a/fs/xfs/xfs_bmap_util.c
1946     +++ b/fs/xfs/xfs_bmap_util.c
1947     @@ -588,9 +588,13 @@ xfs_getbmap(
1948     }
1949     break;
1950     default:
1951     + /* Local format data forks report no extents. */
1952     + if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
1953     + bmv->bmv_entries = 0;
1954     + return 0;
1955     + }
1956     if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
1957     - ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
1958     - ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
1959     + ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
1960     return -EINVAL;
1961    
1962     if (xfs_get_extsz_hint(ip) ||
1963     @@ -718,7 +722,7 @@ xfs_getbmap(
1964     * extents.
1965     */
1966     if (map[i].br_startblock == DELAYSTARTBLOCK &&
1967     - map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
1968     + map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
1969     ASSERT((iflags & BMV_IF_DELALLOC) != 0);
1970    
1971     if (map[i].br_startblock == HOLESTARTBLOCK &&
1972     @@ -911,9 +915,9 @@ xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
1973     }
1974    
1975     /*
1976     - * This is called by xfs_inactive to free any blocks beyond eof
1977     - * when the link count isn't zero and by xfs_dm_punch_hole() when
1978     - * punching a hole to EOF.
1979     + * This is called to free any blocks beyond eof. The caller must hold
1980     + * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
1981     + * reference to the inode.
1982     */
1983     int
1984     xfs_free_eofblocks(
1985     @@ -928,8 +932,6 @@ xfs_free_eofblocks(
1986     struct xfs_bmbt_irec imap;
1987     struct xfs_mount *mp = ip->i_mount;
1988    
1989     - ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1990     -
1991     /*
1992     * Figure out if there are any blocks beyond the end
1993     * of the file. If not, then there is nothing to do.
1994     diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
1995     index d7a67d7fbc7f..16269271ebd6 100644
1996     --- a/fs/xfs/xfs_buf.c
1997     +++ b/fs/xfs/xfs_buf.c
1998     @@ -96,12 +96,16 @@ static inline void
1999     xfs_buf_ioacct_inc(
2000     struct xfs_buf *bp)
2001     {
2002     - if (bp->b_flags & (XBF_NO_IOACCT|_XBF_IN_FLIGHT))
2003     + if (bp->b_flags & XBF_NO_IOACCT)
2004     return;
2005    
2006     ASSERT(bp->b_flags & XBF_ASYNC);
2007     - bp->b_flags |= _XBF_IN_FLIGHT;
2008     - percpu_counter_inc(&bp->b_target->bt_io_count);
2009     + spin_lock(&bp->b_lock);
2010     + if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
2011     + bp->b_state |= XFS_BSTATE_IN_FLIGHT;
2012     + percpu_counter_inc(&bp->b_target->bt_io_count);
2013     + }
2014     + spin_unlock(&bp->b_lock);
2015     }
2016    
2017     /*
2018     @@ -109,14 +113,24 @@ xfs_buf_ioacct_inc(
2019     * freed and unaccount from the buftarg.
2020     */
2021     static inline void
2022     -xfs_buf_ioacct_dec(
2023     +__xfs_buf_ioacct_dec(
2024     struct xfs_buf *bp)
2025     {
2026     - if (!(bp->b_flags & _XBF_IN_FLIGHT))
2027     - return;
2028     + ASSERT(spin_is_locked(&bp->b_lock));
2029    
2030     - bp->b_flags &= ~_XBF_IN_FLIGHT;
2031     - percpu_counter_dec(&bp->b_target->bt_io_count);
2032     + if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
2033     + bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
2034     + percpu_counter_dec(&bp->b_target->bt_io_count);
2035     + }
2036     +}
2037     +
2038     +static inline void
2039     +xfs_buf_ioacct_dec(
2040     + struct xfs_buf *bp)
2041     +{
2042     + spin_lock(&bp->b_lock);
2043     + __xfs_buf_ioacct_dec(bp);
2044     + spin_unlock(&bp->b_lock);
2045     }
2046    
2047     /*
2048     @@ -148,9 +162,9 @@ xfs_buf_stale(
2049     * unaccounted (released to LRU) before that occurs. Drop in-flight
2050     * status now to preserve accounting consistency.
2051     */
2052     - xfs_buf_ioacct_dec(bp);
2053     -
2054     spin_lock(&bp->b_lock);
2055     + __xfs_buf_ioacct_dec(bp);
2056     +
2057     atomic_set(&bp->b_lru_ref, 0);
2058     if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
2059     (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
2060     @@ -953,12 +967,12 @@ xfs_buf_rele(
2061     * ensures the decrement occurs only once per-buf.
2062     */
2063     if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
2064     - xfs_buf_ioacct_dec(bp);
2065     + __xfs_buf_ioacct_dec(bp);
2066     goto out_unlock;
2067     }
2068    
2069     /* the last reference has been dropped ... */
2070     - xfs_buf_ioacct_dec(bp);
2071     + __xfs_buf_ioacct_dec(bp);
2072     if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
2073     /*
2074     * If the buffer is added to the LRU take a new reference to the
2075     @@ -1052,6 +1066,8 @@ void
2076     xfs_buf_unlock(
2077     struct xfs_buf *bp)
2078     {
2079     + ASSERT(xfs_buf_islocked(bp));
2080     +
2081     XB_CLEAR_OWNER(bp);
2082     up(&bp->b_sema);
2083    
2084     @@ -1790,6 +1806,28 @@ xfs_alloc_buftarg(
2085     }
2086    
2087     /*
2088     + * Cancel a delayed write list.
2089     + *
2090     + * Remove each buffer from the list, clear the delwri queue flag and drop the
2091     + * associated buffer reference.
2092     + */
2093     +void
2094     +xfs_buf_delwri_cancel(
2095     + struct list_head *list)
2096     +{
2097     + struct xfs_buf *bp;
2098     +
2099     + while (!list_empty(list)) {
2100     + bp = list_first_entry(list, struct xfs_buf, b_list);
2101     +
2102     + xfs_buf_lock(bp);
2103     + bp->b_flags &= ~_XBF_DELWRI_Q;
2104     + list_del_init(&bp->b_list);
2105     + xfs_buf_relse(bp);
2106     + }
2107     +}
2108     +
2109     +/*
2110     * Add a buffer to the delayed write list.
2111     *
2112     * This queues a buffer for writeout if it hasn't already been. Note that
2113     diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
2114     index 1c2e52b2d926..ad514a8025dd 100644
2115     --- a/fs/xfs/xfs_buf.h
2116     +++ b/fs/xfs/xfs_buf.h
2117     @@ -63,7 +63,6 @@ typedef enum {
2118     #define _XBF_KMEM (1 << 21)/* backed by heap memory */
2119     #define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */
2120     #define _XBF_COMPOUND (1 << 23)/* compound buffer */
2121     -#define _XBF_IN_FLIGHT (1 << 25) /* I/O in flight, for accounting purposes */
2122    
2123     typedef unsigned int xfs_buf_flags_t;
2124    
2125     @@ -83,14 +82,14 @@ typedef unsigned int xfs_buf_flags_t;
2126     { _XBF_PAGES, "PAGES" }, \
2127     { _XBF_KMEM, "KMEM" }, \
2128     { _XBF_DELWRI_Q, "DELWRI_Q" }, \
2129     - { _XBF_COMPOUND, "COMPOUND" }, \
2130     - { _XBF_IN_FLIGHT, "IN_FLIGHT" }
2131     + { _XBF_COMPOUND, "COMPOUND" }
2132    
2133    
2134     /*
2135     * Internal state flags.
2136     */
2137     #define XFS_BSTATE_DISPOSE (1 << 0) /* buffer being discarded */
2138     +#define XFS_BSTATE_IN_FLIGHT (1 << 1) /* I/O in flight */
2139    
2140     /*
2141     * The xfs_buftarg contains 2 notions of "sector size" -
2142     @@ -330,6 +329,7 @@ extern void *xfs_buf_offset(struct xfs_buf *, size_t);
2143     extern void xfs_buf_stale(struct xfs_buf *bp);
2144    
2145     /* Delayed Write Buffer Routines */
2146     +extern void xfs_buf_delwri_cancel(struct list_head *);
2147     extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
2148     extern int xfs_buf_delwri_submit(struct list_head *);
2149     extern int xfs_buf_delwri_submit_nowait(struct list_head *);
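
The accounting rework above replaces a b_flags bit, which was flipped without any lock, with a b_state bit that is only set or cleared under b_lock together with the counter update, so flag and counter can never disagree. A minimal sketch of that lock-protected flag-plus-counter pattern, with hypothetical names:

#include <linux/spinlock.h>
#include <linux/percpu_counter.h>

#define EX_STATE_IN_FLIGHT	(1 << 1)

struct ex_buf {
	spinlock_t		b_lock;
	unsigned int		b_state;
	struct percpu_counter	*io_count;
};

static void ex_ioacct_inc(struct ex_buf *bp)
{
	spin_lock(&bp->b_lock);
	/* Test, flag update and counter update form one unit, so the
	 * counter cannot be incremented twice for the same buffer. */
	if (!(bp->b_state & EX_STATE_IN_FLIGHT)) {
		bp->b_state |= EX_STATE_IN_FLIGHT;
		percpu_counter_inc(bp->io_count);
	}
	spin_unlock(&bp->b_lock);
}
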
2150     diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
2151     index 29816981b50a..eba63160be6c 100644
2152     --- a/fs/xfs/xfs_dir2_readdir.c
2153     +++ b/fs/xfs/xfs_dir2_readdir.c
2154     @@ -71,22 +71,11 @@ xfs_dir2_sf_getdents(
2155     struct xfs_da_geometry *geo = args->geo;
2156    
2157     ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
2158     - /*
2159     - * Give up if the directory is way too short.
2160     - */
2161     - if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
2162     - ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
2163     - return -EIO;
2164     - }
2165     -
2166     ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
2167     ASSERT(dp->i_df.if_u1.if_data != NULL);
2168    
2169     sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
2170    
2171     - if (dp->i_d.di_size < xfs_dir2_sf_hdr_size(sfp->i8count))
2172     - return -EFSCORRUPTED;
2173     -
2174     /*
2175     * If the block number in the offset is out of range, we're done.
2176     */
2177     @@ -405,6 +394,7 @@ xfs_dir2_leaf_readbuf(
2178    
2179     /*
2180     * Do we need more readahead?
2181     + * Each loop tries to process 1 full dir blk; last may be partial.
2182     */
2183     blk_start_plug(&plug);
2184     for (mip->ra_index = mip->ra_offset = i = 0;
2185     @@ -415,7 +405,8 @@ xfs_dir2_leaf_readbuf(
2186     * Read-ahead a contiguous directory block.
2187     */
2188     if (i > mip->ra_current &&
2189     - map[mip->ra_index].br_blockcount >= geo->fsbcount) {
2190     + (map[mip->ra_index].br_blockcount - mip->ra_offset) >=
2191     + geo->fsbcount) {
2192     xfs_dir3_data_readahead(dp,
2193     map[mip->ra_index].br_startoff + mip->ra_offset,
2194     XFS_FSB_TO_DADDR(dp->i_mount,
2195     @@ -436,14 +427,19 @@ xfs_dir2_leaf_readbuf(
2196     }
2197    
2198     /*
2199     - * Advance offset through the mapping table.
2200     + * Advance offset through the mapping table, processing a full
2201     + * dir block even if it is fragmented into several extents.
2202     + * But stop if we have consumed all valid mappings, even if
2203     + * it's not yet a full directory block.
2204     */
2205     - for (j = 0; j < geo->fsbcount; j += length ) {
2206     + for (j = 0;
2207     + j < geo->fsbcount && mip->ra_index < mip->map_valid;
2208     + j += length ) {
2209     /*
2210     * The rest of this extent but not more than a dir
2211     * block.
2212     */
2213     - length = min_t(int, geo->fsbcount,
2214     + length = min_t(int, geo->fsbcount - j,
2215     map[mip->ra_index].br_blockcount -
2216     mip->ra_offset);
2217     mip->ra_offset += length;
2218     diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
2219     index 1209ad29e902..a90ec3fad69f 100644
2220     --- a/fs/xfs/xfs_file.c
2221     +++ b/fs/xfs/xfs_file.c
2222     @@ -1130,13 +1130,13 @@ xfs_find_get_desired_pgoff(
2223    
2224     index = startoff >> PAGE_SHIFT;
2225     endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
2226     - end = endoff >> PAGE_SHIFT;
2227     + end = (endoff - 1) >> PAGE_SHIFT;
2228     do {
2229     int want;
2230     unsigned nr_pages;
2231     unsigned int i;
2232    
2233     - want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
2234     + want = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
2235     nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
2236     want);
2237     /*
2238     @@ -1163,17 +1163,6 @@ xfs_find_get_desired_pgoff(
2239     break;
2240     }
2241    
2242     - /*
2243     - * At lease we found one page. If this is the first time we
2244     - * step into the loop, and if the first page index offset is
2245     - * greater than the given search offset, a hole was found.
2246     - */
2247     - if (type == HOLE_OFF && lastoff == startoff &&
2248     - lastoff < page_offset(pvec.pages[0])) {
2249     - found = true;
2250     - break;
2251     - }
2252     -
2253     for (i = 0; i < nr_pages; i++) {
2254     struct page *page = pvec.pages[i];
2255     loff_t b_offset;
2256     @@ -1185,18 +1174,18 @@ xfs_find_get_desired_pgoff(
2257     * file mapping. However, page->index will not change
2258     * because we have a reference on the page.
2259     *
2260     - * Searching done if the page index is out of range.
2261     - * If the current offset is not reaches the end of
2262     - * the specified search range, there should be a hole
2263     - * between them.
2264     + * If current page offset is beyond where we've ended,
2265     + * we've found a hole.
2266     */
2267     - if (page->index > end) {
2268     - if (type == HOLE_OFF && lastoff < endoff) {
2269     - *offset = lastoff;
2270     - found = true;
2271     - }
2272     + if (type == HOLE_OFF && lastoff < endoff &&
2273     + lastoff < page_offset(pvec.pages[i])) {
2274     + found = true;
2275     + *offset = lastoff;
2276     goto out;
2277     }
2278     + /* Searching done if the page index is out of range. */
2279     + if (page->index > end)
2280     + goto out;
2281    
2282     lock_page(page);
2283     /*
2284     diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
2285     index 3fb1f3fb8efe..74304b6ce84b 100644
2286     --- a/fs/xfs/xfs_icache.c
2287     +++ b/fs/xfs/xfs_icache.c
2288     @@ -264,6 +264,22 @@ xfs_inode_clear_reclaim_tag(
2289     xfs_perag_clear_reclaim_tag(pag);
2290     }
2291    
2292     +static void
2293     +xfs_inew_wait(
2294     + struct xfs_inode *ip)
2295     +{
2296     + wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
2297     + DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
2298     +
2299     + do {
2300     + prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
2301     + if (!xfs_iflags_test(ip, XFS_INEW))
2302     + break;
2303     + schedule();
2304     + } while (true);
2305     + finish_wait(wq, &wait.wait);
2306     +}
2307     +
2308     /*
2309     * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
2310     * part of the structure. This is made more complex by the fact we store
2311     @@ -368,14 +384,17 @@ xfs_iget_cache_hit(
2312    
2313     error = xfs_reinit_inode(mp, inode);
2314     if (error) {
2315     + bool wake;
2316     /*
2317     * Re-initializing the inode failed, and we are in deep
2318     * trouble. Try to re-add it to the reclaim list.
2319     */
2320     rcu_read_lock();
2321     spin_lock(&ip->i_flags_lock);
2322     -
2323     + wake = !!__xfs_iflags_test(ip, XFS_INEW);
2324     ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
2325     + if (wake)
2326     + wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
2327     ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
2328     trace_xfs_iget_reclaim_fail(ip);
2329     goto out_error;
2330     @@ -625,9 +644,11 @@ xfs_iget(
2331    
2332     STATIC int
2333     xfs_inode_ag_walk_grab(
2334     - struct xfs_inode *ip)
2335     + struct xfs_inode *ip,
2336     + int flags)
2337     {
2338     struct inode *inode = VFS_I(ip);
2339     + bool newinos = !!(flags & XFS_AGITER_INEW_WAIT);
2340    
2341     ASSERT(rcu_read_lock_held());
2342    
2343     @@ -645,7 +666,8 @@ xfs_inode_ag_walk_grab(
2344     goto out_unlock_noent;
2345    
2346     /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
2347     - if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
2348     + if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
2349     + __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
2350     goto out_unlock_noent;
2351     spin_unlock(&ip->i_flags_lock);
2352    
2353     @@ -673,7 +695,8 @@ xfs_inode_ag_walk(
2354     void *args),
2355     int flags,
2356     void *args,
2357     - int tag)
2358     + int tag,
2359     + int iter_flags)
2360     {
2361     uint32_t first_index;
2362     int last_error = 0;
2363     @@ -715,7 +738,7 @@ xfs_inode_ag_walk(
2364     for (i = 0; i < nr_found; i++) {
2365     struct xfs_inode *ip = batch[i];
2366    
2367     - if (done || xfs_inode_ag_walk_grab(ip))
2368     + if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
2369     batch[i] = NULL;
2370    
2371     /*
2372     @@ -743,6 +766,9 @@ xfs_inode_ag_walk(
2373     for (i = 0; i < nr_found; i++) {
2374     if (!batch[i])
2375     continue;
2376     + if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
2377     + xfs_iflags_test(batch[i], XFS_INEW))
2378     + xfs_inew_wait(batch[i]);
2379     error = execute(batch[i], flags, args);
2380     IRELE(batch[i]);
2381     if (error == -EAGAIN) {
2382     @@ -822,12 +848,13 @@ xfs_cowblocks_worker(
2383     }
2384    
2385     int
2386     -xfs_inode_ag_iterator(
2387     +xfs_inode_ag_iterator_flags(
2388     struct xfs_mount *mp,
2389     int (*execute)(struct xfs_inode *ip, int flags,
2390     void *args),
2391     int flags,
2392     - void *args)
2393     + void *args,
2394     + int iter_flags)
2395     {
2396     struct xfs_perag *pag;
2397     int error = 0;
2398     @@ -837,7 +864,8 @@ xfs_inode_ag_iterator(
2399     ag = 0;
2400     while ((pag = xfs_perag_get(mp, ag))) {
2401     ag = pag->pag_agno + 1;
2402     - error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
2403     + error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
2404     + iter_flags);
2405     xfs_perag_put(pag);
2406     if (error) {
2407     last_error = error;
2408     @@ -849,6 +877,17 @@ xfs_inode_ag_iterator(
2409     }
2410    
2411     int
2412     +xfs_inode_ag_iterator(
2413     + struct xfs_mount *mp,
2414     + int (*execute)(struct xfs_inode *ip, int flags,
2415     + void *args),
2416     + int flags,
2417     + void *args)
2418     +{
2419     + return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
2420     +}
2421     +
2422     +int
2423     xfs_inode_ag_iterator_tag(
2424     struct xfs_mount *mp,
2425     int (*execute)(struct xfs_inode *ip, int flags,
2426     @@ -865,7 +904,8 @@ xfs_inode_ag_iterator_tag(
2427     ag = 0;
2428     while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
2429     ag = pag->pag_agno + 1;
2430     - error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
2431     + error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
2432     + 0);
2433     xfs_perag_put(pag);
2434     if (error) {
2435     last_error = error;
2436     diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
2437     index 8a7c849b4dea..9183f77958ef 100644
2438     --- a/fs/xfs/xfs_icache.h
2439     +++ b/fs/xfs/xfs_icache.h
2440     @@ -48,6 +48,11 @@ struct xfs_eofblocks {
2441     #define XFS_IGET_UNTRUSTED 0x2
2442     #define XFS_IGET_DONTCACHE 0x4
2443    
2444     +/*
2445     + * flags for AG inode iterator
2446     + */
2447     +#define XFS_AGITER_INEW_WAIT 0x1 /* wait on new inodes */
2448     +
2449     int xfs_iget(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino,
2450     uint flags, uint lock_flags, xfs_inode_t **ipp);
2451    
2452     @@ -79,6 +84,9 @@ void xfs_cowblocks_worker(struct work_struct *);
2453     int xfs_inode_ag_iterator(struct xfs_mount *mp,
2454     int (*execute)(struct xfs_inode *ip, int flags, void *args),
2455     int flags, void *args);
2456     +int xfs_inode_ag_iterator_flags(struct xfs_mount *mp,
2457     + int (*execute)(struct xfs_inode *ip, int flags, void *args),
2458     + int flags, void *args, int iter_flags);
2459     int xfs_inode_ag_iterator_tag(struct xfs_mount *mp,
2460     int (*execute)(struct xfs_inode *ip, int flags, void *args),
2461     int flags, void *args, int tag);
2462     diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
2463     index e50636c9a89c..7a0b4eeb99e4 100644
2464     --- a/fs/xfs/xfs_inode.c
2465     +++ b/fs/xfs/xfs_inode.c
2466     @@ -50,6 +50,7 @@
2467     #include "xfs_log.h"
2468     #include "xfs_bmap_btree.h"
2469     #include "xfs_reflink.h"
2470     +#include "xfs_dir2_priv.h"
2471    
2472     kmem_zone_t *xfs_inode_zone;
2473    
2474     @@ -1914,12 +1915,13 @@ xfs_inactive(
2475     * force is true because we are evicting an inode from the
2476     * cache. Post-eof blocks must be freed, lest we end up with
2477     * broken free space accounting.
2478     + *
2479     + * Note: don't bother with iolock here since lockdep complains
2480     + * about acquiring it in reclaim context. We have the only
2481     + * reference to the inode at this point anyway.
2482     */
2483     - if (xfs_can_free_eofblocks(ip, true)) {
2484     - xfs_ilock(ip, XFS_IOLOCK_EXCL);
2485     + if (xfs_can_free_eofblocks(ip, true))
2486     xfs_free_eofblocks(ip);
2487     - xfs_iunlock(ip, XFS_IOLOCK_EXCL);
2488     - }
2489    
2490     return;
2491     }
2492     @@ -3562,6 +3564,12 @@ xfs_iflush_int(
2493     if (ip->i_d.di_version < 3)
2494     ip->i_d.di_flushiter++;
2495    
2496     + /* Check the inline directory data. */
2497     + if (S_ISDIR(VFS_I(ip)->i_mode) &&
2498     + ip->i_d.di_format == XFS_DINODE_FMT_LOCAL &&
2499     + xfs_dir2_sf_verify(ip))
2500     + goto corrupt_out;
2501     +
2502     /*
2503     * Copy the dirty parts of the inode into the on-disk inode. We always
2504     * copy out the core of the inode, because if the inode is dirty at all
2505     diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
2506     index 71e8a81c91a3..c038f6eecc28 100644
2507     --- a/fs/xfs/xfs_inode.h
2508     +++ b/fs/xfs/xfs_inode.h
2509     @@ -217,7 +217,8 @@ static inline bool xfs_is_reflink_inode(struct xfs_inode *ip)
2510     #define XFS_IRECLAIM (1 << 0) /* started reclaiming this inode */
2511     #define XFS_ISTALE (1 << 1) /* inode has been staled */
2512     #define XFS_IRECLAIMABLE (1 << 2) /* inode can be reclaimed */
2513     -#define XFS_INEW (1 << 3) /* inode has just been allocated */
2514     +#define __XFS_INEW_BIT 3 /* inode has just been allocated */
2515     +#define XFS_INEW (1 << __XFS_INEW_BIT)
2516     #define XFS_ITRUNCATED (1 << 5) /* truncated down so flush-on-close */
2517     #define XFS_IDIRTY_RELEASE (1 << 6) /* dirty release already seen */
2518     #define __XFS_IFLOCK_BIT 7 /* inode is being flushed right now */
2519     @@ -467,6 +468,7 @@ static inline void xfs_finish_inode_setup(struct xfs_inode *ip)
2520     xfs_iflags_clear(ip, XFS_INEW);
2521     barrier();
2522     unlock_new_inode(VFS_I(ip));
2523     + wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
2524     }
2525    
2526     static inline void xfs_setup_existing_inode(struct xfs_inode *ip)
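
Defining XFS_INEW via an explicit __XFS_INEW_BIT is what lets one side sleep on the bit (the xfs_inew_wait() added in xfs_icache.c above) while xfs_finish_inode_setup() wakes sleepers with wake_up_bit(). A reduced sketch of the waiter half, mirroring the shape of xfs_inew_wait():

#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/sched.h>

/* Sleep until bit `bit` of *flags is cleared; the clearing side must
 * call wake_up_bit(flags, bit) afterwards or this sleeps forever. */
static void ex_wait_on_bit_clear(unsigned long *flags, int bit)
{
	wait_queue_head_t *wq = bit_waitqueue(flags, bit);
	DEFINE_WAIT_BIT(wait, flags, bit);

	do {
		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (!test_bit(bit, flags))
			break;
		schedule();
	} while (true);
	finish_wait(wq, &wait.wait);
}
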
2527     diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
2528     index a39197501a7c..73cfc7179124 100644
2529     --- a/fs/xfs/xfs_ioctl.c
2530     +++ b/fs/xfs/xfs_ioctl.c
2531     @@ -1542,10 +1542,11 @@ xfs_ioc_getbmap(
2532     unsigned int cmd,
2533     void __user *arg)
2534     {
2535     - struct getbmapx bmx;
2536     + struct getbmapx bmx = { 0 };
2537     int error;
2538    
2539     - if (copy_from_user(&bmx, arg, sizeof(struct getbmapx)))
2540     + /* struct getbmap is a strict subset of struct getbmapx. */
2541     + if (copy_from_user(&bmx, arg, offsetof(struct getbmapx, bmv_iflags)))
2542     return -EFAULT;
2543    
2544     if (bmx.bmv_count < 2)
2545     diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
2546     index 360562484e7b..65740d1cbd92 100644
2547     --- a/fs/xfs/xfs_iomap.c
2548     +++ b/fs/xfs/xfs_iomap.c
2549     @@ -1151,10 +1151,10 @@ xfs_xattr_iomap_begin(
2550     if (XFS_FORCED_SHUTDOWN(mp))
2551     return -EIO;
2552    
2553     - lockmode = xfs_ilock_data_map_shared(ip);
2554     + lockmode = xfs_ilock_attr_map_shared(ip);
2555    
2556     /* if there are no attribute fork or extents, return ENOENT */
2557     - if (XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
2558     + if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
2559     error = -ENOENT;
2560     goto out_unlock;
2561     }
2562     diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
2563     index 66e881790c17..d8a77dbf4e3a 100644
2564     --- a/fs/xfs/xfs_itable.c
2565     +++ b/fs/xfs/xfs_itable.c
2566     @@ -585,7 +585,7 @@ xfs_inumbers(
2567     return error;
2568    
2569     bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
2570     - buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
2571     + buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP);
2572     do {
2573     struct xfs_inobt_rec_incore r;
2574     int stat;
2575     diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
2576     index 4017aa967331..b57ab34fbf3c 100644
2577     --- a/fs/xfs/xfs_log.c
2578     +++ b/fs/xfs/xfs_log.c
2579     @@ -1293,7 +1293,7 @@ void
2580     xfs_log_work_queue(
2581     struct xfs_mount *mp)
2582     {
2583     - queue_delayed_work(mp->m_log_workqueue, &mp->m_log->l_work,
2584     + queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
2585     msecs_to_jiffies(xfs_syncd_centisecs * 10));
2586     }
2587    
2588     diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
2589     index 1bf878b0492c..5415f9031ef8 100644
2590     --- a/fs/xfs/xfs_mount.h
2591     +++ b/fs/xfs/xfs_mount.h
2592     @@ -183,6 +183,7 @@ typedef struct xfs_mount {
2593     struct workqueue_struct *m_reclaim_workqueue;
2594     struct workqueue_struct *m_log_workqueue;
2595     struct workqueue_struct *m_eofblocks_workqueue;
2596     + struct workqueue_struct *m_sync_workqueue;
2597    
2598     /*
2599     * Generation of the filesystem layout. This is incremented by each
2600     diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
2601     index b669b123287b..8b9a9f15f022 100644
2602     --- a/fs/xfs/xfs_qm.c
2603     +++ b/fs/xfs/xfs_qm.c
2604     @@ -1384,12 +1384,7 @@ xfs_qm_quotacheck(
2605     mp->m_qflags |= flags;
2606    
2607     error_return:
2608     - while (!list_empty(&buffer_list)) {
2609     - struct xfs_buf *bp =
2610     - list_first_entry(&buffer_list, struct xfs_buf, b_list);
2611     - list_del_init(&bp->b_list);
2612     - xfs_buf_relse(bp);
2613     - }
2614     + xfs_buf_delwri_cancel(&buffer_list);
2615    
2616     if (error) {
2617     xfs_warn(mp,
2618     diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
2619     index 475a3882a81f..9cb5c381b01c 100644
2620     --- a/fs/xfs/xfs_qm_syscalls.c
2621     +++ b/fs/xfs/xfs_qm_syscalls.c
2622     @@ -759,5 +759,6 @@ xfs_qm_dqrele_all_inodes(
2623     uint flags)
2624     {
2625     ASSERT(mp->m_quotainfo);
2626     - xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, NULL);
2627     + xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL,
2628     + XFS_AGITER_INEW_WAIT);
2629     }
2630     diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
2631     index 2252f163c38f..29a75ecb2425 100644
2632     --- a/fs/xfs/xfs_reflink.c
2633     +++ b/fs/xfs/xfs_reflink.c
2634     @@ -736,8 +736,22 @@ xfs_reflink_end_cow(
2635     offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
2636     end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);
2637    
2638     - /* Start a rolling transaction to switch the mappings */
2639     - resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
2640     + /*
2641     + * Start a rolling transaction to switch the mappings. We're
2642     + * unlikely ever to have to remap 16T worth of single-block
2643     + * extents, so just cap the worst case extent count to 2^32-1.
2644     + * Stick a warning in just in case, and avoid 64-bit division.
2645     + */
2646     + BUILD_BUG_ON(MAX_RW_COUNT > UINT_MAX);
2647     + if (end_fsb - offset_fsb > UINT_MAX) {
2648     + error = -EFSCORRUPTED;
2649     + xfs_force_shutdown(ip->i_mount, SHUTDOWN_CORRUPT_INCORE);
2650     + ASSERT(0);
2651     + goto out;
2652     + }
2653     + resblks = XFS_NEXTENTADD_SPACE_RES(ip->i_mount,
2654     + (unsigned int)(end_fsb - offset_fsb),
2655     + XFS_DATA_FORK);
2656     error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
2657     resblks, 0, 0, &tp);
2658     if (error)
2659     diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
2660     index dbbd3f1fd2b7..882fb8524fcb 100644
2661     --- a/fs/xfs/xfs_super.c
2662     +++ b/fs/xfs/xfs_super.c
2663     @@ -872,8 +872,15 @@ xfs_init_mount_workqueues(
2664     if (!mp->m_eofblocks_workqueue)
2665     goto out_destroy_log;
2666    
2667     + mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
2668     + mp->m_fsname);
2669     + if (!mp->m_sync_workqueue)
2670     + goto out_destroy_eofb;
2671     +
2672     return 0;
2673    
2674     +out_destroy_eofb:
2675     + destroy_workqueue(mp->m_eofblocks_workqueue);
2676     out_destroy_log:
2677     destroy_workqueue(mp->m_log_workqueue);
2678     out_destroy_reclaim:
2679     @@ -894,6 +901,7 @@ STATIC void
2680     xfs_destroy_mount_workqueues(
2681     struct xfs_mount *mp)
2682     {
2683     + destroy_workqueue(mp->m_sync_workqueue);
2684     destroy_workqueue(mp->m_eofblocks_workqueue);
2685     destroy_workqueue(mp->m_log_workqueue);
2686     destroy_workqueue(mp->m_reclaim_workqueue);
2687     diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
2688     index 70f42ea86dfb..a280e126491f 100644
2689     --- a/fs/xfs/xfs_trans.c
2690     +++ b/fs/xfs/xfs_trans.c
2691     @@ -263,6 +263,28 @@ xfs_trans_alloc(
2692     }
2693    
2694     /*
2695     + * Create an empty transaction with no reservation. This is a defensive
2696     + * mechanism for routines that query metadata without actually modifying
2697     + * them -- if the metadata being queried is somehow cross-linked (think a
2698     + * btree block pointer that points higher in the tree), we risk deadlock.
2699     + * However, blocks grabbed as part of a transaction can be re-grabbed.
2700     + * The verifiers will notice the corrupt block and the operation will fail
2701     + * back to userspace without deadlocking.
2702     + *
2703     + * Note the zero-length reservation; this transaction MUST be cancelled
2704     + * without any dirty data.
2705     + */
2706     +int
2707     +xfs_trans_alloc_empty(
2708     + struct xfs_mount *mp,
2709     + struct xfs_trans **tpp)
2710     +{
2711     + struct xfs_trans_res resv = {0};
2712     +
2713     + return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
2714     +}
2715     +
2716     +/*
2717     * Record the indicated change to the given field for application
2718     * to the file system's superblock when the transaction commits.
2719     * For now, just store the change in the transaction structure.
2720     diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
2721     index 61b7fbdd3ebd..98024cb933ef 100644
2722     --- a/fs/xfs/xfs_trans.h
2723     +++ b/fs/xfs/xfs_trans.h
2724     @@ -159,6 +159,8 @@ typedef struct xfs_trans {
2725     int xfs_trans_alloc(struct xfs_mount *mp, struct xfs_trans_res *resp,
2726     uint blocks, uint rtextents, uint flags,
2727     struct xfs_trans **tpp);
2728     +int xfs_trans_alloc_empty(struct xfs_mount *mp,
2729     + struct xfs_trans **tpp);
2730     void xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t);
2731    
2732     struct xfs_buf *xfs_trans_get_buf_map(struct xfs_trans *tp,
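
For a usage picture of xfs_trans_alloc_empty(), the refcount recovery hunk earlier in this patch is the template: take the empty transaction, perform read-only metadata lookups (re-grabbing a buffer already attached to the transaction is then legal rather than a deadlock), and cancel without dirtying anything. Sketched with the error paths trimmed:

	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	/* ... read-only queries against metadata buffers ... */

	xfs_trans_cancel(tp);	/* nothing dirtied, so cancel is safe */
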
2733     diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
2734     index 3319d97d789d..8feecd5345e7 100644
2735     --- a/include/linux/if_vlan.h
2736     +++ b/include/linux/if_vlan.h
2737     @@ -630,14 +630,16 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
2738     static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
2739     netdev_features_t features)
2740     {
2741     - if (skb_vlan_tagged_multi(skb))
2742     - features = netdev_intersect_features(features,
2743     - NETIF_F_SG |
2744     - NETIF_F_HIGHDMA |
2745     - NETIF_F_FRAGLIST |
2746     - NETIF_F_HW_CSUM |
2747     - NETIF_F_HW_VLAN_CTAG_TX |
2748     - NETIF_F_HW_VLAN_STAG_TX);
2749     + if (skb_vlan_tagged_multi(skb)) {
2750     + /* In the case of multi-tagged packets, use a direct mask
2751     + * instead of using netdev_intersect_features(), to make
2752     + * sure that only devices supporting NETIF_F_HW_CSUM will
2753     + * have checksum offloading support.
2754     + */
2755     + features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
2756     + NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
2757     + NETIF_F_HW_VLAN_STAG_TX;
2758     + }
2759    
2760     return features;
2761     }
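
The practical effect of masking directly, rather than calling netdev_intersect_features(), is that a device advertising only the protocol-specific checksum bits and not NETIF_F_HW_CSUM keeps no checksum feature at all for multi-tagged packets. A toy demonstration with made-up feature bits:

#include <stdio.h>

/* Made-up bit layout, for illustration only. */
#define F_SG		(1u << 0)
#define F_HW_CSUM	(1u << 1)
#define F_IP_CSUM	(1u << 2)	/* deliberately not in the mask */

int main(void)
{
	unsigned int features = F_SG | F_IP_CSUM;

	/* The direct mask admits F_HW_CSUM only; F_IP_CSUM is
	 * dropped, so this device loses checksum offload here. */
	features &= F_SG | F_HW_CSUM;
	printf("%#x\n", features);	/* prints 0x1 (F_SG) */
	return 0;
}
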
2762     diff --git a/include/linux/memblock.h b/include/linux/memblock.h
2763     index 5b759c9acf97..e8fba68e5d03 100644
2764     --- a/include/linux/memblock.h
2765     +++ b/include/linux/memblock.h
2766     @@ -421,12 +421,20 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
2767     }
2768     #endif
2769    
2770     +extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
2771     + phys_addr_t end_addr);
2772     #else
2773     static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
2774     {
2775     return 0;
2776     }
2777    
2778     +static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
2779     + phys_addr_t end_addr)
2780     +{
2781     + return 0;
2782     +}
2783     +
2784     #endif /* CONFIG_HAVE_MEMBLOCK */
2785    
2786     #endif /* __KERNEL__ */
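
The new !CONFIG_HAVE_MEMBLOCK stub follows the standard kernel idiom of giving every config-gated prototype a static inline no-op twin, so callers need no #ifdef. The same idiom in miniature, with a hypothetical option name:

#ifdef CONFIG_EXAMPLE_FEATURE
extern unsigned long example_reserved_within(unsigned long start,
					     unsigned long end);
#else
static inline unsigned long example_reserved_within(unsigned long start,
						    unsigned long end)
{
	return 0;	/* feature compiled out: nothing to report */
}
#endif
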
2787     diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
2788     index ecc451d89ccd..e1a903a5bb3e 100644
2789     --- a/include/linux/mlx5/driver.h
2790     +++ b/include/linux/mlx5/driver.h
2791     @@ -640,7 +640,12 @@ enum {
2792    
2793     typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
2794    
2795     +enum {
2796     + MLX5_CMD_ENT_STATE_PENDING_COMP,
2797     +};
2798     +
2799     struct mlx5_cmd_work_ent {
2800     + unsigned long state;
2801     struct mlx5_cmd_msg *in;
2802     struct mlx5_cmd_msg *out;
2803     void *uout;
2804     @@ -838,7 +843,7 @@ void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
2805     #endif
2806     void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
2807     struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
2808     -void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
2809     +void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
2810     void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
2811     int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
2812     int nent, u64 mask, const char *name, struct mlx5_uar *uar);
2813     diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
2814     index 7e273e243a13..6744eb40c4ea 100644
2815     --- a/include/linux/mmzone.h
2816     +++ b/include/linux/mmzone.h
2817     @@ -672,6 +672,7 @@ typedef struct pglist_data {
2818     * is the first PFN that needs to be initialised.
2819     */
2820     unsigned long first_deferred_pfn;
2821     + unsigned long static_init_size;
2822     #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2823    
2824     #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2825     diff --git a/include/net/dst.h b/include/net/dst.h
2826     index 6835d224d47b..ddcff17615da 100644
2827     --- a/include/net/dst.h
2828     +++ b/include/net/dst.h
2829     @@ -107,10 +107,16 @@ struct dst_entry {
2830     };
2831     };
2832    
2833     +struct dst_metrics {
2834     + u32 metrics[RTAX_MAX];
2835     + atomic_t refcnt;
2836     +};
2837     +extern const struct dst_metrics dst_default_metrics;
2838     +
2839     u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
2840     -extern const u32 dst_default_metrics[];
2841    
2842     #define DST_METRICS_READ_ONLY 0x1UL
2843     +#define DST_METRICS_REFCOUNTED 0x2UL
2844     #define DST_METRICS_FLAGS 0x3UL
2845     #define __DST_METRICS_PTR(Y) \
2846     ((u32 *)((Y) & ~DST_METRICS_FLAGS))
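
Editorial sketch: dst->_metrics packs a pointer and two flag bits into one word. The struct is word-aligned, so the low address bits are free to carry DST_METRICS_READ_ONLY and the new DST_METRICS_REFCOUNTED, and __DST_METRICS_PTR() masks them off to recover the address. A standalone sketch of that pointer-tagging scheme (names shortened, not the kernel's):

    #include <stdio.h>
    #include <stdlib.h>

    #define MET_READ_ONLY  0x1UL
    #define MET_REFCOUNTED 0x2UL
    #define MET_FLAGS      0x3UL

    struct metrics { unsigned int vals[16]; };

    /* malloc() returns at least word-aligned storage, so the two low
     * address bits are always zero and can carry the flags instead */
    static struct metrics *met_ptr(unsigned long word)
    {
            return (struct metrics *)(word & ~MET_FLAGS);
    }

    int main(void)
    {
            struct metrics *m = calloc(1, sizeof(*m));
            unsigned long word = (unsigned long)m | MET_REFCOUNTED;

            printf("flags=%#lx ptr=%p\n", word & MET_FLAGS, (void *)met_ptr(word));
            free(met_ptr(word));
            return 0;
    }
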
2847     diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
2848     index f390c3bb05c5..aa758280d8a8 100644
2849     --- a/include/net/ip_fib.h
2850     +++ b/include/net/ip_fib.h
2851     @@ -114,11 +114,11 @@ struct fib_info {
2852     __be32 fib_prefsrc;
2853     u32 fib_tb_id;
2854     u32 fib_priority;
2855     - u32 *fib_metrics;
2856     -#define fib_mtu fib_metrics[RTAX_MTU-1]
2857     -#define fib_window fib_metrics[RTAX_WINDOW-1]
2858     -#define fib_rtt fib_metrics[RTAX_RTT-1]
2859     -#define fib_advmss fib_metrics[RTAX_ADVMSS-1]
2860     + struct dst_metrics *fib_metrics;
2861     +#define fib_mtu fib_metrics->metrics[RTAX_MTU-1]
2862     +#define fib_window fib_metrics->metrics[RTAX_WINDOW-1]
2863     +#define fib_rtt fib_metrics->metrics[RTAX_RTT-1]
2864     +#define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1]
2865     int fib_nhs;
2866     #ifdef CONFIG_IP_ROUTE_MULTIPATH
2867     int fib_weight;
2868     diff --git a/mm/ksm.c b/mm/ksm.c
2869     index 9ae6011a41f8..caa54a55a357 100644
2870     --- a/mm/ksm.c
2871     +++ b/mm/ksm.c
2872     @@ -1002,8 +1002,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
2873     goto out;
2874    
2875     if (PageTransCompound(page)) {
2876     - err = split_huge_page(page);
2877     - if (err)
2878     + if (split_huge_page(page))
2879     goto out_unlock;
2880     }
2881    
2882     diff --git a/mm/memblock.c b/mm/memblock.c
2883     index 7608bc305936..68849d0ead09 100644
2884     --- a/mm/memblock.c
2885     +++ b/mm/memblock.c
2886     @@ -1696,6 +1696,29 @@ static void __init_memblock memblock_dump(struct memblock_type *type, char *name
2887     }
2888     }
2889    
2890     +extern unsigned long __init_memblock
2891     +memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr)
2892     +{
2893     + struct memblock_region *rgn;
2894     + unsigned long size = 0;
2895     + int idx;
2896     +
2897     + for_each_memblock_type((&memblock.reserved), rgn) {
2898     + phys_addr_t start, end;
2899     +
2900     + if (rgn->base + rgn->size < start_addr)
2901     + continue;
2902     + if (rgn->base > end_addr)
2903     + continue;
2904     +
2905     + start = rgn->base;
2906     + end = start + rgn->size;
2907     + size += end - start;
2908     + }
2909     +
2910     + return size;
2911     +}
2912     +
2913     void __init_memblock __memblock_dump_all(void)
2914     {
2915     pr_info("MEMBLOCK configuration:\n");
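
Editorial sketch: memblock_reserved_memory_within() deliberately over-counts; any reserved region that merely touches the [start_addr, end_addr] window contributes its full size, not just the overlapping part. That is safe for the caller in page_alloc.c below, where the result only enlarges the amount of memory initialised early. The same interval walk, as a hedged userspace sketch:

    #include <stdio.h>

    struct region { unsigned long base, size; };

    /* Same overlap test as the helper above: skip regions entirely
     * below or above the window, then add the whole region size (a
     * deliberate overestimate for partially overlapping regions). */
    static unsigned long reserved_within(const struct region *rgn, int n,
                                         unsigned long start, unsigned long end)
    {
            unsigned long sum = 0;
            int i;

            for (i = 0; i < n; i++) {
                    if (rgn[i].base + rgn[i].size < start || rgn[i].base > end)
                            continue;
                    sum += rgn[i].size;
            }
            return sum;
    }

    int main(void)
    {
            struct region r[] = { { 0x1000, 0x1000 }, { 0x7000, 0x4000 } };

            /* the second region only half-overlaps the window
             * but still counts in full */
            printf("%#lx\n", reserved_within(r, 2, 0, 0x8000));
            return 0;
    }
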
2916     diff --git a/mm/memory-failure.c b/mm/memory-failure.c
2917     index 19e796d36a62..4bd44803e366 100644
2918     --- a/mm/memory-failure.c
2919     +++ b/mm/memory-failure.c
2920     @@ -1587,12 +1587,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
2921     if (ret) {
2922     pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
2923     pfn, ret, page->flags);
2924     - /*
2925     - * We know that soft_offline_huge_page() tries to migrate
2926     - * only one hugepage pointed to by hpage, so we need not
2927     - * run through the pagelist here.
2928     - */
2929     - putback_active_hugepage(hpage);
2930     + if (!list_empty(&pagelist))
2931     + putback_movable_pages(&pagelist);
2932     if (ret > 0)
2933     ret = -EIO;
2934     } else {
2935     diff --git a/mm/mlock.c b/mm/mlock.c
2936     index 665ab75b5533..f0505692a5f4 100644
2937     --- a/mm/mlock.c
2938     +++ b/mm/mlock.c
2939     @@ -285,7 +285,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
2940     {
2941     int i;
2942     int nr = pagevec_count(pvec);
2943     - int delta_munlocked;
2944     + int delta_munlocked = -nr;
2945     struct pagevec pvec_putback;
2946     int pgrescued = 0;
2947    
2948     @@ -305,6 +305,8 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
2949     continue;
2950     else
2951     __munlock_isolation_failed(page);
2952     + } else {
2953     + delta_munlocked++;
2954     }
2955    
2956     /*
2957     @@ -316,7 +318,6 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
2958     pagevec_add(&pvec_putback, pvec->pages[i]);
2959     pvec->pages[i] = NULL;
2960     }
2961     - delta_munlocked = -nr + pagevec_count(&pvec_putback);
2962     __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
2963     spin_unlock_irq(zone_lru_lock(zone));
2964    
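
Editorial sketch: the accounting fix here is that NR_MLOCK must drop for every page whose Mlocked flag was actually cleared, even when LRU isolation then fails and the page is put back. The old code derived the delta from the putback pagevec, so clear-but-isolation-failed pages were never subtracted. Starting at -nr and adding one back only for pages that were not mlocked at all gives the right count. The counting pattern in isolation (isolation failure itself is not modelled):

    #include <stdio.h>
    #include <stdbool.h>

    struct page_s { bool mlocked; };

    /* NR_MLOCK delta for a batch: one decrement per page whose
     * mlocked flag is actually cleared, independent of whether the
     * later LRU isolation succeeds. */
    static int munlock_batch(struct page_s *pages, int nr)
    {
            int delta = -nr;
            int i;

            for (i = 0; i < nr; i++) {
                    if (pages[i].mlocked)
                            pages[i].mlocked = false;  /* stays in -nr */
                    else
                            delta++;  /* never mlocked: give one back */
            }
            return delta;
    }

    int main(void)
    {
            struct page_s batch[] = { { true }, { true }, { false } };

            printf("delta = %d\n", munlock_batch(batch, 3));  /* -2 */
            return 0;
    }
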
2965     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2966     index 5b06fb385dd7..56df8c24689d 100644
2967     --- a/mm/page_alloc.c
2968     +++ b/mm/page_alloc.c
2969     @@ -286,6 +286,26 @@ int page_group_by_mobility_disabled __read_mostly;
2970     #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2971     static inline void reset_deferred_meminit(pg_data_t *pgdat)
2972     {
2973     + unsigned long max_initialise;
2974     + unsigned long reserved_lowmem;
2975     +
2976     + /*
2977     + * Initialise at least 2G of a node but also take into account
2978     + * two large system hashes that can take up 1GB for 0.25TB/node.
2979     + */
2980     + max_initialise = max(2UL << (30 - PAGE_SHIFT),
2981     + (pgdat->node_spanned_pages >> 8));
2982     +
2983     + /*
2984     + * Compensate for all the memblock reservations (e.g. crash kernel)
2985     + * from the initial estimation to make sure we will initialize enough
2986     + * memory to boot.
2987     + */
2988     + reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
2989     + pgdat->node_start_pfn + max_initialise);
2990     + max_initialise += reserved_lowmem;
2991     +
2992     + pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
2993     pgdat->first_deferred_pfn = ULONG_MAX;
2994     }
2995    
2996     @@ -308,20 +328,11 @@ static inline bool update_defer_init(pg_data_t *pgdat,
2997     unsigned long pfn, unsigned long zone_end,
2998     unsigned long *nr_initialised)
2999     {
3000     - unsigned long max_initialise;
3001     -
3002     /* Always populate low zones for address-contrained allocations */
3003     if (zone_end < pgdat_end_pfn(pgdat))
3004     return true;
3005     - /*
3006     - * Initialise at least 2G of a node but also take into account that
3007     - * two large system hashes that can take up 1GB for 0.25TB/node.
3008     - */
3009     - max_initialise = max(2UL << (30 - PAGE_SHIFT),
3010     - (pgdat->node_spanned_pages >> 8));
3011     -
3012     (*nr_initialised)++;
3013     - if ((*nr_initialised > max_initialise) &&
3014     + if ((*nr_initialised > pgdat->static_init_size) &&
3015     (pfn & (PAGES_PER_SECTION - 1)) == 0) {
3016     pgdat->first_deferred_pfn = pfn;
3017     return false;
3018     @@ -5911,7 +5922,6 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
3019     /* pg_data_t should be reset to zero when it's allocated */
3020     WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
3021    
3022     - reset_deferred_meminit(pgdat);
3023     pgdat->node_id = nid;
3024     pgdat->node_start_pfn = node_start_pfn;
3025     pgdat->per_cpu_nodestats = NULL;
3026     @@ -5933,6 +5943,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
3027     (unsigned long)pgdat->node_mem_map);
3028     #endif
3029    
3030     + reset_deferred_meminit(pgdat);
3031     free_area_init_core(pgdat);
3032     }
3033    
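
Editorial sketch: the sizing rule added above is easy to check by hand. For a node spanning 1 TB with 4 KB pages, node_spanned_pages >> 8 corresponds to 4 GB and wins over the 2 GB floor; reservations inside that window (a crash kernel, say) are added on top, and the result is clamped to the node size. A small worked computation under those assumptions:

    #include <stdio.h>

    #define PAGE_SHIFT 12  /* assume 4 KB pages */

    static unsigned long max_ul(unsigned long a, unsigned long b)
    {
            return a > b ? a : b;
    }

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            unsigned long spanned  = 1UL << (40 - PAGE_SHIFT);  /* 1 TB node */
            unsigned long reserved = 1UL << (28 - PAGE_SHIFT);  /* 256 MB crashkernel */
            unsigned long init;

            /* at least 2 GB, or 1/256th of the node, whichever is larger */
            init = max_ul(2UL << (30 - PAGE_SHIFT), spanned >> 8);
            init += reserved;              /* compensate reservations */
            init = min_ul(init, spanned);  /* never beyond the node */

            printf("static_init_size = %lu pages (~%lu MB)\n",
                   init, init >> (20 - PAGE_SHIFT));
            return 0;
    }
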
3034     diff --git a/mm/slub.c b/mm/slub.c
3035     index 58c7526f8de2..edc79ca3c6d5 100644
3036     --- a/mm/slub.c
3037     +++ b/mm/slub.c
3038     @@ -496,10 +496,11 @@ static inline int check_valid_pointer(struct kmem_cache *s,
3039     return 1;
3040     }
3041    
3042     -static void print_section(char *text, u8 *addr, unsigned int length)
3043     +static void print_section(char *level, char *text, u8 *addr,
3044     + unsigned int length)
3045     {
3046     metadata_access_enable();
3047     - print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
3048     + print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
3049     length, 1);
3050     metadata_access_disable();
3051     }
3052     @@ -636,14 +637,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
3053     p, p - addr, get_freepointer(s, p));
3054    
3055     if (s->flags & SLAB_RED_ZONE)
3056     - print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
3057     + print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
3058     + s->red_left_pad);
3059     else if (p > addr + 16)
3060     - print_section("Bytes b4 ", p - 16, 16);
3061     + print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
3062    
3063     - print_section("Object ", p, min_t(unsigned long, s->object_size,
3064     - PAGE_SIZE));
3065     + print_section(KERN_ERR, "Object ", p,
3066     + min_t(unsigned long, s->object_size, PAGE_SIZE));
3067     if (s->flags & SLAB_RED_ZONE)
3068     - print_section("Redzone ", p + s->object_size,
3069     + print_section(KERN_ERR, "Redzone ", p + s->object_size,
3070     s->inuse - s->object_size);
3071    
3072     if (s->offset)
3073     @@ -658,7 +660,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
3074    
3075     if (off != size_from_object(s))
3076     /* Beginning of the filler is the free pointer */
3077     - print_section("Padding ", p + off, size_from_object(s) - off);
3078     + print_section(KERN_ERR, "Padding ", p + off,
3079     + size_from_object(s) - off);
3080    
3081     dump_stack();
3082     }
3083     @@ -820,7 +823,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
3084     end--;
3085    
3086     slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
3087     - print_section("Padding ", end - remainder, remainder);
3088     + print_section(KERN_ERR, "Padding ", end - remainder, remainder);
3089    
3090     restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
3091     return 0;
3092     @@ -973,7 +976,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
3093     page->freelist);
3094    
3095     if (!alloc)
3096     - print_section("Object ", (void *)object,
3097     + print_section(KERN_INFO, "Object ", (void *)object,
3098     s->object_size);
3099    
3100     dump_stack();
3101     @@ -5452,6 +5455,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
3102     char mbuf[64];
3103     char *buf;
3104     struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
3105     + ssize_t len;
3106    
3107     if (!attr || !attr->store || !attr->show)
3108     continue;
3109     @@ -5476,8 +5480,9 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
3110     buf = buffer;
3111     }
3112    
3113     - attr->show(root_cache, buf);
3114     - attr->store(s, buf, strlen(buf));
3115     + len = attr->show(root_cache, buf);
3116     + if (len > 0)
3117     + attr->store(s, buf, len);
3118     }
3119    
3120     if (buffer)
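
Editorial sketch: the attribute-propagation fix relies on the sysfs contract that a show() callback returns the number of bytes written, or a negative errno, with the buffer contents undefined on failure. Feeding strlen() of such a buffer to store() could replay garbage; using the returned length, guarded by len > 0, only forwards what show() actually produced. The contract in miniature (a hypothetical attribute, not one of the real slab attributes):

    #include <stdio.h>
    #include <sys/types.h>

    /* show() in the sysfs style: write into buf, return the byte
     * count or a negative errno */
    static ssize_t ratio_show(char *buf)
    {
            return sprintf(buf, "75\n");
    }

    static ssize_t ratio_store(const char *buf, size_t len)
    {
            printf("storing %zu bytes: %.*s", len, (int)len, buf);
            return (ssize_t)len;
    }

    int main(void)
    {
            char buf[64];
            ssize_t len = ratio_show(buf);

            /* forward only what show() actually produced; on error
             * the buffer is undefined and strlen() would be bogus */
            if (len > 0)
                    ratio_store(buf, (size_t)len);
            return 0;
    }
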
3121     diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
3122     index 04741064a173..7625ec8458de 100644
3123     --- a/net/bridge/br_netlink.c
3124     +++ b/net/bridge/br_netlink.c
3125     @@ -776,6 +776,13 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
3126     return -EPROTONOSUPPORT;
3127     }
3128     }
3129     +
3130     + if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
3131     + __u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
3132     +
3133     + if (defpvid >= VLAN_VID_MASK)
3134     + return -EINVAL;
3135     + }
3136     #endif
3137    
3138     return 0;
3139     diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
3140     index d8ad73b38de2..5a782f543aff 100644
3141     --- a/net/bridge/br_stp_if.c
3142     +++ b/net/bridge/br_stp_if.c
3143     @@ -185,6 +185,7 @@ static void br_stp_start(struct net_bridge *br)
3144     br_debug(br, "using kernel STP\n");
3145    
3146     /* To start timers on any ports left in blocking */
3147     + mod_timer(&br->hello_timer, jiffies + br->hello_time);
3148     br_port_state_selection(br);
3149     }
3150    
3151     diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
3152     index da058b85aa22..15826fd52af5 100644
3153     --- a/net/bridge/br_stp_timer.c
3154     +++ b/net/bridge/br_stp_timer.c
3155     @@ -40,7 +40,7 @@ static void br_hello_timer_expired(unsigned long arg)
3156     if (br->dev->flags & IFF_UP) {
3157     br_config_bpdu_generation(br);
3158    
3159     - if (br->stp_enabled != BR_USER_STP)
3160     + if (br->stp_enabled == BR_KERNEL_STP)
3161     mod_timer(&br->hello_timer,
3162     round_jiffies(jiffies + br->hello_time));
3163     }
3164     diff --git a/net/core/dst.c b/net/core/dst.c
3165     index b5cbbe07f786..656b70d39690 100644
3166     --- a/net/core/dst.c
3167     +++ b/net/core/dst.c
3168     @@ -151,13 +151,13 @@ int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
3169     }
3170     EXPORT_SYMBOL(dst_discard_out);
3171    
3172     -const u32 dst_default_metrics[RTAX_MAX + 1] = {
3173     +const struct dst_metrics dst_default_metrics = {
3174     /* This initializer is needed to force linker to place this variable
3175     * into const section. Otherwise it might end into bss section.
3176     * We really want to avoid false sharing on this variable, and catch
3177     * any writes on it.
3178     */
3179     - [RTAX_MAX] = 0xdeadbeef,
3180     + .refcnt = ATOMIC_INIT(1),
3181     };
3182    
3183     void dst_init(struct dst_entry *dst, struct dst_ops *ops,
3184     @@ -169,7 +169,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
3185     if (dev)
3186     dev_hold(dev);
3187     dst->ops = ops;
3188     - dst_init_metrics(dst, dst_default_metrics, true);
3189     + dst_init_metrics(dst, dst_default_metrics.metrics, true);
3190     dst->expires = 0UL;
3191     dst->path = dst;
3192     dst->from = NULL;
3193     @@ -315,25 +315,30 @@ EXPORT_SYMBOL(dst_release);
3194    
3195     u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
3196     {
3197     - u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
3198     + struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
3199    
3200     if (p) {
3201     - u32 *old_p = __DST_METRICS_PTR(old);
3202     + struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
3203     unsigned long prev, new;
3204    
3205     - memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
3206     + atomic_set(&p->refcnt, 1);
3207     + memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));
3208    
3209     new = (unsigned long) p;
3210     prev = cmpxchg(&dst->_metrics, old, new);
3211    
3212     if (prev != old) {
3213     kfree(p);
3214     - p = __DST_METRICS_PTR(prev);
3215     + p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
3216     if (prev & DST_METRICS_READ_ONLY)
3217     p = NULL;
3218     + } else if (prev & DST_METRICS_REFCOUNTED) {
3219     + if (atomic_dec_and_test(&old_p->refcnt))
3220     + kfree(old_p);
3221     }
3222     }
3223     - return p;
3224     + BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
3225     + return (u32 *)p;
3226     }
3227     EXPORT_SYMBOL(dst_cow_metrics_generic);
3228    
3229     @@ -342,7 +347,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
3230     {
3231     unsigned long prev, new;
3232    
3233     - new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
3234     + new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
3235     prev = cmpxchg(&dst->_metrics, old, new);
3236     if (prev == old)
3237     kfree(__DST_METRICS_PTR(old));
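
Editorial sketch: the copy-on-write above follows a lock-free pattern worth spelling out: build a private copy, try to install it with cmpxchg, and on losing the race free your copy and use the winner's; on winning, drop the reference the dst held on the old refcounted block. A compressed userspace sketch with C11 atomics (allocation checks and the READ_ONLY pointer bit elided):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <stdatomic.h>

    struct metrics {
            unsigned int vals[4];
            atomic_int refcnt;
    };

    static _Atomic(struct metrics *) cur;

    static struct metrics *cow_metrics(void)
    {
            struct metrics *old = atomic_load(&cur);
            struct metrics *p = malloc(sizeof(*p));

            memcpy(p->vals, old->vals, sizeof(p->vals));
            atomic_init(&p->refcnt, 1);

            if (!atomic_compare_exchange_strong(&cur, &old, p)) {
                    free(p);     /* lost the race: discard our copy and
                                  * use the winner's ('old' was updated
                                  * to the current pointer) */
                    return old;
            }
            if (atomic_fetch_sub(&old->refcnt, 1) == 1)
                    free(old);   /* won: drop the ref on the old block */
            return p;
    }

    int main(void)
    {
            struct metrics *first = calloc(1, sizeof(*first));

            atomic_init(&first->refcnt, 1);
            atomic_store(&cur, first);
            printf("installed %p\n", (void *)cow_metrics());
            return 0;
    }
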
3238     diff --git a/net/core/filter.c b/net/core/filter.c
3239     index b391209838ef..4eb4ce0aeef4 100644
3240     --- a/net/core/filter.c
3241     +++ b/net/core/filter.c
3242     @@ -2198,6 +2198,7 @@ bool bpf_helper_changes_skb_data(void *func)
3243     func == bpf_skb_change_proto ||
3244     func == bpf_skb_change_tail ||
3245     func == bpf_skb_pull_data ||
3246     + func == bpf_clone_redirect ||
3247     func == bpf_l3_csum_replace ||
3248     func == bpf_l4_csum_replace)
3249     return true;
3250     diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
3251     index b490af67c6fa..1d9160794e55 100644
3252     --- a/net/core/rtnetlink.c
3253     +++ b/net/core/rtnetlink.c
3254     @@ -1617,13 +1617,13 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
3255     cb->nlh->nlmsg_seq, 0,
3256     flags,
3257     ext_filter_mask);
3258     - /* If we ran out of room on the first message,
3259     - * we're in trouble
3260     - */
3261     - WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
3262    
3263     - if (err < 0)
3264     - goto out;
3265     + if (err < 0) {
3266     + if (likely(skb->len))
3267     + goto out;
3268     +
3269     + goto out_err;
3270     + }
3271    
3272     nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3273     cont:
3274     @@ -1631,10 +1631,12 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
3275     }
3276     }
3277     out:
3278     + err = skb->len;
3279     +out_err:
3280     cb->args[1] = idx;
3281     cb->args[0] = h;
3282    
3283     - return skb->len;
3284     + return err;
3285     }
3286    
3287     int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len)
3288     @@ -3413,8 +3415,12 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
3289     err = br_dev->netdev_ops->ndo_bridge_getlink(
3290     skb, portid, seq, dev,
3291     filter_mask, NLM_F_MULTI);
3292     - if (err < 0 && err != -EOPNOTSUPP)
3293     - break;
3294     + if (err < 0 && err != -EOPNOTSUPP) {
3295     + if (likely(skb->len))
3296     + break;
3297     +
3298     + goto out_err;
3299     + }
3300     }
3301     idx++;
3302     }
3303     @@ -3425,16 +3431,22 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
3304     seq, dev,
3305     filter_mask,
3306     NLM_F_MULTI);
3307     - if (err < 0 && err != -EOPNOTSUPP)
3308     - break;
3309     + if (err < 0 && err != -EOPNOTSUPP) {
3310     + if (likely(skb->len))
3311     + break;
3312     +
3313     + goto out_err;
3314     + }
3315     }
3316     idx++;
3317     }
3318     }
3319     + err = skb->len;
3320     +out_err:
3321     rcu_read_unlock();
3322     cb->args[0] = idx;
3323    
3324     - return skb->len;
3325     + return err;
3326     }
3327    
3328     static inline size_t bridge_nlmsg_size(void)
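
Editorial sketch: both dump-path changes in this file, and the fib dump changes further down, implement the same netlink contract: a dump callback returns the number of bytes placed in this skb, and it returns that even after a mid-dump failure as long as something was written, because the core treats a non-zero return as "call me again with a fresh skb". Only when nothing fit at all is the real error surfaced; otherwise it would be silently lost. The control flow, abstracted:

    #include <stdio.h>

    /* One netlink dump pass, abstracted: fill() returns <0 on failure
     * and 'len' tracks how many bytes this pass placed in the skb */
    static int dump_pass(int (*fill)(int idx, int *len))
    {
            int len = 0, idx, err;

            for (idx = 0; idx < 8; idx++) {
                    err = fill(idx, &len);
                    if (err < 0) {
                            if (len)        /* partial skb: report progress, */
                                    break;  /* the core will call us again   */
                            return err;     /* empty skb: surface the error  */
                    }
            }
            return len;
    }

    static int fill_until_full(int idx, int *len)
    {
            if (*len >= 3000)
                    return -90;  /* -EMSGSIZE stand-in: skb is full */
            *len += 1000;
            return 0;
    }

    int main(void)
    {
            printf("pass returned %d\n", dump_pass(fill_until_full));
            return 0;
    }
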
3329     diff --git a/net/core/sock.c b/net/core/sock.c
3330     index 470a2043b846..1989b3dd6d17 100644
3331     --- a/net/core/sock.c
3332     +++ b/net/core/sock.c
3333     @@ -138,10 +138,7 @@
3334    
3335     #include <trace/events/sock.h>
3336    
3337     -#ifdef CONFIG_INET
3338     #include <net/tcp.h>
3339     -#endif
3340     -
3341     #include <net/busy_poll.h>
3342    
3343     static DEFINE_MUTEX(proto_list_mutex);
3344     @@ -1687,28 +1684,24 @@ EXPORT_SYMBOL(skb_set_owner_w);
3345     * delay queue. We want to allow the owner socket to send more
3346     * packets, as if they were already TX completed by a typical driver.
3347     * But we also want to keep skb->sk set because some packet schedulers
3348     - * rely on it (sch_fq for example). So we set skb->truesize to a small
3349     - * amount (1) and decrease sk_wmem_alloc accordingly.
3350     + * rely on it (sch_fq for example).
3351     */
3352     void skb_orphan_partial(struct sk_buff *skb)
3353     {
3354     - /* If this skb is a TCP pure ACK or already went here,
3355     - * we have nothing to do. 2 is already a very small truesize.
3356     - */
3357     - if (skb->truesize <= 2)
3358     + if (skb_is_tcp_pure_ack(skb))
3359     return;
3360    
3361     - /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
3362     - * so we do not completely orphan skb, but transfert all
3363     - * accounted bytes but one, to avoid unexpected reorders.
3364     - */
3365     if (skb->destructor == sock_wfree
3366     #ifdef CONFIG_INET
3367     || skb->destructor == tcp_wfree
3368     #endif
3369     ) {
3370     - atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
3371     - skb->truesize = 1;
3372     + struct sock *sk = skb->sk;
3373     +
3374     + if (atomic_inc_not_zero(&sk->sk_refcnt)) {
3375     + atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
3376     + skb->destructor = sock_efree;
3377     + }
3378     } else {
3379     skb_orphan(skb);
3380     }
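
Editorial sketch: the rewrite changes strategy. Instead of shrinking truesize to 1 and leaving a dangling byte of accounting, it removes the skb's full charge from sk_wmem_alloc and keeps skb->sk alive by taking a socket reference, with sock_efree as the destructor that later drops it. The essential "release the accounting, keep the owner" move, sketched with C11 atomics (all names are stand-ins):

    #include <stdio.h>
    #include <stdatomic.h>

    struct sock_s {
            atomic_int refcnt;
            atomic_int wmem_alloc;
    };

    struct skb_s {
            struct sock_s *sk;
            int truesize;
            void (*destructor)(struct skb_s *);
    };

    static void sock_efree_sketch(struct skb_s *skb)
    {
            /* only drops the socket reference, no wmem accounting */
            atomic_fetch_sub(&skb->sk->refcnt, 1);
    }

    static void orphan_partial_sketch(struct skb_s *skb)
    {
            struct sock_s *sk = skb->sk;
            int old = atomic_load(&sk->refcnt);

            /* atomic_inc_not_zero(): take a reference unless gone */
            while (old != 0 &&
                   !atomic_compare_exchange_weak(&sk->refcnt, &old, old + 1))
                    ;
            if (old == 0)
                    return;

            /* give back the whole charge; skb->sk stays valid for
             * schedulers like sch_fq that key on it */
            atomic_fetch_sub(&sk->wmem_alloc, skb->truesize);
            skb->destructor = sock_efree_sketch;
    }

    int main(void)
    {
            struct sock_s sk = { 1, 4096 };
            struct skb_s skb = { &sk, 1024, NULL };

            orphan_partial_sketch(&skb);
            printf("wmem=%d refs=%d\n",
                   atomic_load(&sk.wmem_alloc), atomic_load(&sk.refcnt));
            if (skb.destructor)
                    skb.destructor(&skb);
            return 0;
    }
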
3381     diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
3382     index 237d62c493e3..2ac9d2a1aaab 100644
3383     --- a/net/dccp/ipv6.c
3384     +++ b/net/dccp/ipv6.c
3385     @@ -426,6 +426,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
3386     newsk->sk_backlog_rcv = dccp_v4_do_rcv;
3387     newnp->pktoptions = NULL;
3388     newnp->opt = NULL;
3389     + newnp->ipv6_mc_list = NULL;
3390     + newnp->ipv6_ac_list = NULL;
3391     + newnp->ipv6_fl_list = NULL;
3392     newnp->mcast_oif = inet6_iif(skb);
3393     newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
3394    
3395     @@ -490,6 +493,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
3396     /* Clone RX bits */
3397     newnp->rxopt.all = np->rxopt.all;
3398    
3399     + newnp->ipv6_mc_list = NULL;
3400     + newnp->ipv6_ac_list = NULL;
3401     + newnp->ipv6_fl_list = NULL;
3402     newnp->pktoptions = NULL;
3403     newnp->opt = NULL;
3404     newnp->mcast_oif = inet6_iif(skb);
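
Editorial sketch: this group of fixes (repeated below for inet_connection_sock.c, tcp_ipv6.c and sctp/ipv6.c) shares one root cause: the child socket's ipv6_pinfo is populated by a bulk copy from the listener, so list heads like ipv6_mc_list come across as pointers into the parent and must be re-set to NULL before the child can tear down safely. The hazard in miniature:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct pinfo {
            int hops;
            struct node { struct node *next; } *mc_list;  /* per-socket list */
    };

    int main(void)
    {
            struct pinfo parent = { 64, malloc(sizeof(struct node)) };
            struct pinfo child;

            /* bulk copy, as the accept path does for ipv6_pinfo */
            memcpy(&child, &parent, sizeof(child));

            /* without this, parent and child would both "own" the same
             * list and the child's teardown could free the parent's nodes */
            child.mc_list = NULL;

            printf("parent list %p, child list %p\n",
                   (void *)parent.mc_list, (void *)child.mc_list);
            free(parent.mc_list);
            return 0;
    }
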
3405     diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
3406     index 6789e48b7085..3d92534c4450 100644
3407     --- a/net/ipv4/fib_frontend.c
3408     +++ b/net/ipv4/fib_frontend.c
3409     @@ -758,7 +758,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
3410     unsigned int e = 0, s_e;
3411     struct fib_table *tb;
3412     struct hlist_head *head;
3413     - int dumped = 0;
3414     + int dumped = 0, err;
3415    
3416     if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
3417     ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
3418     @@ -778,20 +778,27 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
3419     if (dumped)
3420     memset(&cb->args[2], 0, sizeof(cb->args) -
3421     2 * sizeof(cb->args[0]));
3422     - if (fib_table_dump(tb, skb, cb) < 0)
3423     - goto out;
3424     + err = fib_table_dump(tb, skb, cb);
3425     + if (err < 0) {
3426     + if (likely(skb->len))
3427     + goto out;
3428     +
3429     + goto out_err;
3430     + }
3431     dumped = 1;
3432     next:
3433     e++;
3434     }
3435     }
3436     out:
3437     + err = skb->len;
3438     +out_err:
3439     rcu_read_unlock();
3440    
3441     cb->args[1] = e;
3442     cb->args[0] = h;
3443    
3444     - return skb->len;
3445     + return err;
3446     }
3447    
3448     /* Prepare and feed intra-kernel routing request.
3449     diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
3450     index 6a4068031aaa..7563831fa432 100644
3451     --- a/net/ipv4/fib_semantics.c
3452     +++ b/net/ipv4/fib_semantics.c
3453     @@ -204,6 +204,7 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
3454     static void free_fib_info_rcu(struct rcu_head *head)
3455     {
3456     struct fib_info *fi = container_of(head, struct fib_info, rcu);
3457     + struct dst_metrics *m;
3458    
3459     change_nexthops(fi) {
3460     if (nexthop_nh->nh_dev)
3461     @@ -214,8 +215,9 @@ static void free_fib_info_rcu(struct rcu_head *head)
3462     rt_fibinfo_free(&nexthop_nh->nh_rth_input);
3463     } endfor_nexthops(fi);
3464    
3465     - if (fi->fib_metrics != (u32 *) dst_default_metrics)
3466     - kfree(fi->fib_metrics);
3467     + m = fi->fib_metrics;
3468     + if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt))
3469     + kfree(m);
3470     kfree(fi);
3471     }
3472    
3473     @@ -982,11 +984,11 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
3474     val = 255;
3475     if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
3476     return -EINVAL;
3477     - fi->fib_metrics[type - 1] = val;
3478     + fi->fib_metrics->metrics[type - 1] = val;
3479     }
3480    
3481     if (ecn_ca)
3482     - fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
3483     + fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
3484    
3485     return 0;
3486     }
3487     @@ -1044,11 +1046,12 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
3488     goto failure;
3489     fib_info_cnt++;
3490     if (cfg->fc_mx) {
3491     - fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
3492     + fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
3493     if (!fi->fib_metrics)
3494     goto failure;
3495     + atomic_set(&fi->fib_metrics->refcnt, 1);
3496     } else
3497     - fi->fib_metrics = (u32 *) dst_default_metrics;
3498     + fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
3499    
3500     fi->fib_net = net;
3501     fi->fib_protocol = cfg->fc_protocol;
3502     @@ -1252,7 +1255,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
3503     if (fi->fib_priority &&
3504     nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
3505     goto nla_put_failure;
3506     - if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
3507     + if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
3508     goto nla_put_failure;
3509    
3510     if (fi->fib_prefsrc &&
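
Editorial sketch: the lifetime rule after this change is that fib_metrics either points at the shared, static dst_default_metrics (refcount pinned, never freed) or at a private refcounted block shared between the fib_info and any routes that took a reference. Release therefore skips the static default and frees only when the last reference drops. The idiom in isolation:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdatomic.h>

    struct met { unsigned int vals[4]; atomic_int refcnt; };

    /* static shared default: its refcount starts at 1 and is never
     * dropped, so it is never freed */
    static struct met default_met = { { 0 }, 1 };

    static void put_met(struct met *m)
    {
            if (m != &default_met && atomic_fetch_sub(&m->refcnt, 1) == 1)
                    free(m);
    }

    int main(void)
    {
            struct met *priv = calloc(1, sizeof(*priv));

            atomic_init(&priv->refcnt, 1);
            atomic_fetch_add(&priv->refcnt, 1);  /* a route takes a ref */

            put_met(priv);          /* fib_info released: block survives */
            put_met(priv);          /* route released: now freed */
            put_met(&default_met);  /* always a no-op free-wise */

            printf("done\n");
            return 0;
    }
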
3511     diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
3512     index e3665bf7a7f3..ef40bb659a7a 100644
3513     --- a/net/ipv4/fib_trie.c
3514     +++ b/net/ipv4/fib_trie.c
3515     @@ -1932,6 +1932,8 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
3516    
3517     /* rcu_read_lock is hold by caller */
3518     hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
3519     + int err;
3520     +
3521     if (i < s_i) {
3522     i++;
3523     continue;
3524     @@ -1942,17 +1944,14 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
3525     continue;
3526     }
3527    
3528     - if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
3529     - cb->nlh->nlmsg_seq,
3530     - RTM_NEWROUTE,
3531     - tb->tb_id,
3532     - fa->fa_type,
3533     - xkey,
3534     - KEYLENGTH - fa->fa_slen,
3535     - fa->fa_tos,
3536     - fa->fa_info, NLM_F_MULTI) < 0) {
3537     + err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
3538     + cb->nlh->nlmsg_seq, RTM_NEWROUTE,
3539     + tb->tb_id, fa->fa_type,
3540     + xkey, KEYLENGTH - fa->fa_slen,
3541     + fa->fa_tos, fa->fa_info, NLM_F_MULTI);
3542     + if (err < 0) {
3543     cb->args[4] = i;
3544     - return -1;
3545     + return err;
3546     }
3547     i++;
3548     }
3549     @@ -1974,10 +1973,13 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
3550     t_key key = cb->args[3];
3551    
3552     while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
3553     - if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
3554     + int err;
3555     +
3556     + err = fn_trie_dump_leaf(l, tb, skb, cb);
3557     + if (err < 0) {
3558     cb->args[3] = key;
3559     cb->args[2] = count;
3560     - return -1;
3561     + return err;
3562     }
3563    
3564     ++count;
3565     diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
3566     index 61a9deec2993..cf3d5674846a 100644
3567     --- a/net/ipv4/inet_connection_sock.c
3568     +++ b/net/ipv4/inet_connection_sock.c
3569     @@ -665,6 +665,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
3570     /* listeners have SOCK_RCU_FREE, not the children */
3571     sock_reset_flag(newsk, SOCK_RCU_FREE);
3572    
3573     + inet_sk(newsk)->mc_list = NULL;
3574     +
3575     newsk->sk_mark = inet_rsk(req)->ir_mark;
3576     atomic64_set(&newsk->sk_cookie,
3577     atomic64_read(&inet_rsk(req)->ir_cookie));
3578     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3579     index 6dbcb37753d7..6cd49fd17ac0 100644
3580     --- a/net/ipv4/route.c
3581     +++ b/net/ipv4/route.c
3582     @@ -1364,8 +1364,12 @@ static void rt_add_uncached_list(struct rtable *rt)
3583    
3584     static void ipv4_dst_destroy(struct dst_entry *dst)
3585     {
3586     + struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
3587     struct rtable *rt = (struct rtable *) dst;
3588    
3589     + if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt))
3590     + kfree(p);
3591     +
3592     if (!list_empty(&rt->rt_uncached)) {
3593     struct uncached_list *ul = rt->rt_uncached_list;
3594    
3595     @@ -1417,7 +1421,11 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
3596     rt->rt_gateway = nh->nh_gw;
3597     rt->rt_uses_gateway = 1;
3598     }
3599     - dst_init_metrics(&rt->dst, fi->fib_metrics, true);
3600     + dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
3601     + if (fi->fib_metrics != &dst_default_metrics) {
3602     + rt->dst._metrics |= DST_METRICS_REFCOUNTED;
3603     + atomic_inc(&fi->fib_metrics->refcnt);
3604     + }
3605     #ifdef CONFIG_IP_ROUTE_CLASSID
3606     rt->dst.tclassid = nh->nh_tclassid;
3607     #endif
3608     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
3609     index eb142ca71fc5..86fbf0f3235e 100644
3610     --- a/net/ipv4/tcp.c
3611     +++ b/net/ipv4/tcp.c
3612     @@ -1078,9 +1078,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
3613     int *copied, size_t size)
3614     {
3615     struct tcp_sock *tp = tcp_sk(sk);
3616     + struct sockaddr *uaddr = msg->msg_name;
3617     int err, flags;
3618    
3619     - if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
3620     + if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
3621     + (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
3622     + uaddr->sa_family == AF_UNSPEC))
3623     return -EOPNOTSUPP;
3624     if (tp->fastopen_req)
3625     return -EALREADY; /* Another Fast Open is in progress */
3626     @@ -1093,7 +1096,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
3627     tp->fastopen_req->size = size;
3628    
3629     flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
3630     - err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
3631     + err = __inet_stream_connect(sk->sk_socket, uaddr,
3632     msg->msg_namelen, flags);
3633     *copied = tp->fastopen_req->copied;
3634     tcp_free_fastopen_req(tp);
3635     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
3636     index 7727ffeaaf9d..01336aa5f973 100644
3637     --- a/net/ipv4/tcp_input.c
3638     +++ b/net/ipv4/tcp_input.c
3639     @@ -1177,13 +1177,14 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
3640     */
3641     if (pkt_len > mss) {
3642     unsigned int new_len = (pkt_len / mss) * mss;
3643     - if (!in_sack && new_len < pkt_len) {
3644     + if (!in_sack && new_len < pkt_len)
3645     new_len += mss;
3646     - if (new_len >= skb->len)
3647     - return 0;
3648     - }
3649     pkt_len = new_len;
3650     }
3651     +
3652     + if (pkt_len >= skb->len && !in_sack)
3653     + return 0;
3654     +
3655     err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC);
3656     if (err < 0)
3657     return err;
3658     @@ -3232,7 +3233,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3659     int delta;
3660    
3661     /* Non-retransmitted hole got filled? That's reordering */
3662     - if (reord < prior_fackets)
3663     + if (reord < prior_fackets && reord <= tp->fackets_out)
3664     tcp_update_reordering(sk, tp->fackets_out - reord, 0);
3665    
3666     delta = tcp_is_fack(tp) ? pkts_acked :
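
Editorial sketch: the tcp_match_skb_to_sack hunk widens a guard rather than changing the rounding. pkt_len is still rounded to an mss multiple (upward in the !in_sack case), but the "nothing to split" test now runs on every path, including pkt_len <= mss and exact multiples, so tcp_fragment() is never asked to split at or past skb->len. A worked check of the rounding and guard:

    #include <stdio.h>
    #include <stdbool.h>

    /* returns the split point, or 0 when fragmenting should be skipped */
    static unsigned int sack_split_len(unsigned int pkt_len, unsigned int mss,
                                       unsigned int skb_len, bool in_sack)
    {
            if (pkt_len > mss) {
                    unsigned int new_len = (pkt_len / mss) * mss;

                    if (!in_sack && new_len < pkt_len)
                            new_len += mss;  /* round up to the next mss edge */
                    pkt_len = new_len;
            }

            /* the guard now covers every path, not just the rounded-up one */
            if (pkt_len >= skb_len && !in_sack)
                    return 0;

            return pkt_len;
    }

    int main(void)
    {
            /* exact mss multiple equal to skb->len: the old code would
             * have called tcp_fragment() here, the new guard skips it */
            printf("%u\n", sack_split_len(2920, 1460, 2920, false));  /* 0 */
            printf("%u\n", sack_split_len(2000, 1460, 4380, false));  /* 2920 */
            return 0;
    }
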
3667     diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
3668     index 33b04ec2744a..013086b248e2 100644
3669     --- a/net/ipv6/ip6_offload.c
3670     +++ b/net/ipv6/ip6_offload.c
3671     @@ -63,7 +63,6 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
3672     const struct net_offload *ops;
3673     int proto;
3674     struct frag_hdr *fptr;
3675     - unsigned int unfrag_ip6hlen;
3676     unsigned int payload_len;
3677     u8 *prevhdr;
3678     int offset = 0;
3679     @@ -116,8 +115,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
3680     skb->network_header = (u8 *)ipv6h - skb->head;
3681    
3682     if (udpfrag) {
3683     - unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
3684     - fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
3685     + int err = ip6_find_1stfragopt(skb, &prevhdr);
3686     + if (err < 0)
3687     + return ERR_PTR(err);
3688     + fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
3689     fptr->frag_off = htons(offset);
3690     if (skb->next)
3691     fptr->frag_off |= htons(IP6_MF);
3692     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
3693     index e27b8fdba5d2..1ac3cea49171 100644
3694     --- a/net/ipv6/ip6_output.c
3695     +++ b/net/ipv6/ip6_output.c
3696     @@ -586,7 +586,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
3697     int ptr, offset = 0, err = 0;
3698     u8 *prevhdr, nexthdr = 0;
3699    
3700     - hlen = ip6_find_1stfragopt(skb, &prevhdr);
3701     + err = ip6_find_1stfragopt(skb, &prevhdr);
3702     + if (err < 0)
3703     + goto fail;
3704     + hlen = err;
3705     nexthdr = *prevhdr;
3706    
3707     mtu = ip6_skb_dst_mtu(skb);
3708     @@ -1444,6 +1447,11 @@ static int __ip6_append_data(struct sock *sk,
3709     */
3710     alloclen += sizeof(struct frag_hdr);
3711    
3712     + copy = datalen - transhdrlen - fraggap;
3713     + if (copy < 0) {
3714     + err = -EINVAL;
3715     + goto error;
3716     + }
3717     if (transhdrlen) {
3718     skb = sock_alloc_send_skb(sk,
3719     alloclen + hh_len,
3720     @@ -1493,13 +1501,9 @@ static int __ip6_append_data(struct sock *sk,
3721     data += fraggap;
3722     pskb_trim_unique(skb_prev, maxfraglen);
3723     }
3724     - copy = datalen - transhdrlen - fraggap;
3725     -
3726     - if (copy < 0) {
3727     - err = -EINVAL;
3728     - kfree_skb(skb);
3729     - goto error;
3730     - } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
3731     + if (copy > 0 &&
3732     + getfrag(from, data + transhdrlen, offset,
3733     + copy, fraggap, skb) < 0) {
3734     err = -EFAULT;
3735     kfree_skb(skb);
3736     goto error;
3737     diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
3738     index cd4252346a32..e9065b8d3af8 100644
3739     --- a/net/ipv6/output_core.c
3740     +++ b/net/ipv6/output_core.c
3741     @@ -79,14 +79,13 @@ EXPORT_SYMBOL(ipv6_select_ident);
3742     int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
3743     {
3744     u16 offset = sizeof(struct ipv6hdr);
3745     - struct ipv6_opt_hdr *exthdr =
3746     - (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
3747     unsigned int packet_len = skb_tail_pointer(skb) -
3748     skb_network_header(skb);
3749     int found_rhdr = 0;
3750     *nexthdr = &ipv6_hdr(skb)->nexthdr;
3751    
3752     - while (offset + 1 <= packet_len) {
3753     + while (offset <= packet_len) {
3754     + struct ipv6_opt_hdr *exthdr;
3755    
3756     switch (**nexthdr) {
3757    
3758     @@ -107,13 +106,16 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
3759     return offset;
3760     }
3761    
3762     - offset += ipv6_optlen(exthdr);
3763     - *nexthdr = &exthdr->nexthdr;
3764     + if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
3765     + return -EINVAL;
3766     +
3767     exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
3768     offset);
3769     + offset += ipv6_optlen(exthdr);
3770     + *nexthdr = &exthdr->nexthdr;
3771     }
3772    
3773     - return offset;
3774     + return -EINVAL;
3775     }
3776     EXPORT_SYMBOL(ip6_find_1stfragopt);
3777    
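
Editorial sketch: the reworked loop validates before it dereferences. Each iteration first proves that a full ipv6_opt_hdr fits inside the packet, then reads it to advance, and a walk that runs off the end now returns -EINVAL instead of an offset pointing past the data. The same bounds-checked header walk, standalone, over a toy two-byte header (type, then length in 8-byte units):

    #include <stdio.h>

    struct opt_hdr { unsigned char nexthdr, hdrlen; };

    #define OPTLEN(h) (((h)->hdrlen + 1) * 8)

    /* walk extension headers; return the offset of 'want' or -1;
     * every read is preceded by a bounds check, as in the patched loop */
    static int find_hdr(const unsigned char *pkt, unsigned int len,
                        unsigned char first, unsigned char want)
    {
            unsigned int off = 0;
            unsigned char next = first;

            while (off <= len) {
                    const struct opt_hdr *h;

                    if (next == want)
                            return (int)off;
                    if (off + sizeof(*h) > len)
                            return -1;  /* truncated chain: refuse to read */

                    h = (const struct opt_hdr *)(pkt + off);
                    off += OPTLEN(h);
                    next = h->nexthdr;
            }
            return -1;
    }

    int main(void)
    {
            /* one 8-byte extension header chaining to type 44 (fragment) */
            unsigned char pkt[8] = { 44, 0, 0, 0, 0, 0, 0, 0 };

            printf("offset %d\n", find_hdr(pkt, sizeof(pkt), 0, 44));
            return 0;
    }
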
3778     diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
3779     index b2e61a0e8d0a..aef9b28067f4 100644
3780     --- a/net/ipv6/tcp_ipv6.c
3781     +++ b/net/ipv6/tcp_ipv6.c
3782     @@ -1046,6 +1046,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
3783     newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
3784     #endif
3785    
3786     + newnp->ipv6_mc_list = NULL;
3787     newnp->ipv6_ac_list = NULL;
3788     newnp->ipv6_fl_list = NULL;
3789     newnp->pktoptions = NULL;
3790     @@ -1115,6 +1116,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
3791     First: no IPv4 options.
3792     */
3793     newinet->inet_opt = NULL;
3794     + newnp->ipv6_mc_list = NULL;
3795     newnp->ipv6_ac_list = NULL;
3796     newnp->ipv6_fl_list = NULL;
3797    
3798     diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
3799     index ac858c480f2f..a2267f80febb 100644
3800     --- a/net/ipv6/udp_offload.c
3801     +++ b/net/ipv6/udp_offload.c
3802     @@ -29,6 +29,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
3803     u8 frag_hdr_sz = sizeof(struct frag_hdr);
3804     __wsum csum;
3805     int tnl_hlen;
3806     + int err;
3807    
3808     mss = skb_shinfo(skb)->gso_size;
3809     if (unlikely(skb->len <= mss))
3810     @@ -90,7 +91,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
3811     /* Find the unfragmentable header and shift it left by frag_hdr_sz
3812     * bytes to insert fragment header.
3813     */
3814     - unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
3815     + err = ip6_find_1stfragopt(skb, &prevhdr);
3816     + if (err < 0)
3817     + return ERR_PTR(err);
3818     + unfrag_ip6hlen = err;
3819     nexthdr = *prevhdr;
3820     *prevhdr = NEXTHDR_FRAGMENT;
3821     unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
3822     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3823     index cb76ff3088e9..6a563e6e24de 100644
3824     --- a/net/packet/af_packet.c
3825     +++ b/net/packet/af_packet.c
3826     @@ -2652,13 +2652,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
3827     dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
3828     }
3829    
3830     - sockc.tsflags = po->sk.sk_tsflags;
3831     - if (msg->msg_controllen) {
3832     - err = sock_cmsg_send(&po->sk, msg, &sockc);
3833     - if (unlikely(err))
3834     - goto out;
3835     - }
3836     -
3837     err = -ENXIO;
3838     if (unlikely(dev == NULL))
3839     goto out;
3840     @@ -2666,6 +2659,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
3841     if (unlikely(!(dev->flags & IFF_UP)))
3842     goto out_put;
3843    
3844     + sockc.tsflags = po->sk.sk_tsflags;
3845     + if (msg->msg_controllen) {
3846     + err = sock_cmsg_send(&po->sk, msg, &sockc);
3847     + if (unlikely(err))
3848     + goto out_put;
3849     + }
3850     +
3851     if (po->sk.sk_socket->type == SOCK_RAW)
3852     reserve = dev->hard_header_len;
3853     size_max = po->tx_ring.frame_size
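
Editorial sketch: the tpacket_snd reorder is an error-path fix. sock_cmsg_send() parses user control messages and can fail; in the old order a failure jumped to "out" while the function already held the device reference taken by dev_get_by_index(), leaking it. Parsing after the device checks lets failure take "out_put", which drops the reference. The acquire-then-cleanup shape, in general form:

    #include <stdio.h>

    struct dev { int refs; };

    static struct dev *dev_get(struct dev *d) { d->refs++; return d; }
    static void dev_put(struct dev *d)        { d->refs--; }

    static int parse_cmsg(int bad) { return bad ? -22 : 0; /* -EINVAL */ }

    static int send_sketch(struct dev *d, int bad_cmsg)
    {
            int err;

            dev_get(d);

            /* any failure from here on must go through out_put,
             * otherwise the device reference leaks */
            err = parse_cmsg(bad_cmsg);
            if (err)
                    goto out_put;

            err = 0;
    out_put:
            dev_put(d);
            return err;
    }

    int main(void)
    {
            struct dev d = { 0 };

            send_sketch(&d, 1);
            printf("refs after failed send: %d\n", d.refs);
            return 0;
    }
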
3854     diff --git a/net/sctp/input.c b/net/sctp/input.c
3855     index a01a56ec8b8c..6c79915c7dbc 100644
3856     --- a/net/sctp/input.c
3857     +++ b/net/sctp/input.c
3858     @@ -473,15 +473,14 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
3859     struct sctp_association **app,
3860     struct sctp_transport **tpp)
3861     {
3862     + struct sctp_init_chunk *chunkhdr, _chunkhdr;
3863     union sctp_addr saddr;
3864     union sctp_addr daddr;
3865     struct sctp_af *af;
3866     struct sock *sk = NULL;
3867     struct sctp_association *asoc;
3868     struct sctp_transport *transport = NULL;
3869     - struct sctp_init_chunk *chunkhdr;
3870     __u32 vtag = ntohl(sctphdr->vtag);
3871     - int len = skb->len - ((void *)sctphdr - (void *)skb->data);
3872    
3873     *app = NULL; *tpp = NULL;
3874    
3875     @@ -516,13 +515,16 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
3876     * discard the packet.
3877     */
3878     if (vtag == 0) {
3879     - chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
3880     - if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
3881     - + sizeof(__be32) ||
3882     + /* chunk header + first 4 octets of init header */
3883     + chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
3884     + sizeof(struct sctphdr),
3885     + sizeof(struct sctp_chunkhdr) +
3886     + sizeof(__be32), &_chunkhdr);
3887     + if (!chunkhdr ||
3888     chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
3889     - ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
3890     + ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag)
3891     goto out;
3892     - }
3893     +
3894     } else if (vtag != asoc->c.peer_vtag) {
3895     goto out;
3896     }
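
Editorial sketch: the sctp_err_lookup change swaps a raw pointer into the skb for skb_header_pointer(), whose contract is: if the requested bytes are contiguous in the linear area, return a pointer into the packet; otherwise copy them into caller-provided storage (the on-stack _chunkhdr here) and return that, or NULL if the packet is too short. That makes the INIT-chunk peek safe on non-linear skbs. A userspace sketch of the same contract over a flat buffer (which is always contiguous, so only the copy branch is modelled):

    #include <stdio.h>
    #include <string.h>

    /* skb_header_pointer()-style helper: NULL when the packet is too
     * short, otherwise the requested bytes copied into 'buf' */
    static const void *header_pointer(const unsigned char *pkt, size_t pkt_len,
                                      size_t offset, size_t len, void *buf)
    {
            if (offset + len > pkt_len)
                    return NULL;  /* too short: caller bails out */
            memcpy(buf, pkt + offset, len);
            return buf;
    }

    struct chunk_hdr { unsigned char type, flags; unsigned short length; };

    int main(void)
    {
            unsigned char pkt[16] = { [12] = 1 /* INIT-like type */ };
            struct chunk_hdr hdr;
            const struct chunk_hdr *ch;

            ch = header_pointer(pkt, sizeof(pkt), 12, sizeof(hdr), &hdr);
            printf("%s, type=%d\n", ch ? "ok" : "too short",
                   ch ? (int)ch->type : 0);
            return 0;
    }
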
3897     diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
3898     index 6a2532dd31c4..0c090600f377 100644
3899     --- a/net/sctp/ipv6.c
3900     +++ b/net/sctp/ipv6.c
3901     @@ -240,12 +240,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
3902     struct sctp_bind_addr *bp;
3903     struct ipv6_pinfo *np = inet6_sk(sk);
3904     struct sctp_sockaddr_entry *laddr;
3905     - union sctp_addr *baddr = NULL;
3906     union sctp_addr *daddr = &t->ipaddr;
3907     union sctp_addr dst_saddr;
3908     struct in6_addr *final_p, final;
3909     __u8 matchlen = 0;
3910     - __u8 bmatchlen;
3911     sctp_scope_t scope;
3912    
3913     memset(fl6, 0, sizeof(struct flowi6));
3914     @@ -312,23 +310,37 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
3915     */
3916     rcu_read_lock();
3917     list_for_each_entry_rcu(laddr, &bp->address_list, list) {
3918     - if (!laddr->valid)
3919     + struct dst_entry *bdst;
3920     + __u8 bmatchlen;
3921     +
3922     + if (!laddr->valid ||
3923     + laddr->state != SCTP_ADDR_SRC ||
3924     + laddr->a.sa.sa_family != AF_INET6 ||
3925     + scope > sctp_scope(&laddr->a))
3926     continue;
3927     - if ((laddr->state == SCTP_ADDR_SRC) &&
3928     - (laddr->a.sa.sa_family == AF_INET6) &&
3929     - (scope <= sctp_scope(&laddr->a))) {
3930     - bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
3931     - if (!baddr || (matchlen < bmatchlen)) {
3932     - baddr = &laddr->a;
3933     - matchlen = bmatchlen;
3934     - }
3935     - }
3936     - }
3937     - if (baddr) {
3938     - fl6->saddr = baddr->v6.sin6_addr;
3939     - fl6->fl6_sport = baddr->v6.sin6_port;
3940     +
3941     + fl6->saddr = laddr->a.v6.sin6_addr;
3942     + fl6->fl6_sport = laddr->a.v6.sin6_port;
3943     final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
3944     - dst = ip6_dst_lookup_flow(sk, fl6, final_p);
3945     + bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
3946     +
3947     + if (!IS_ERR(bdst) &&
3948     + ipv6_chk_addr(dev_net(bdst->dev),
3949     + &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
3950     + if (!IS_ERR_OR_NULL(dst))
3951     + dst_release(dst);
3952     + dst = bdst;
3953     + break;
3954     + }
3955     +
3956     + bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
3957     + if (matchlen > bmatchlen)
3958     + continue;
3959     +
3960     + if (!IS_ERR_OR_NULL(dst))
3961     + dst_release(dst);
3962     + dst = bdst;
3963     + matchlen = bmatchlen;
3964     }
3965     rcu_read_unlock();
3966    
3967     @@ -666,6 +678,9 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
3968     newnp = inet6_sk(newsk);
3969    
3970     memcpy(newnp, np, sizeof(struct ipv6_pinfo));
3971     + newnp->ipv6_mc_list = NULL;
3972     + newnp->ipv6_ac_list = NULL;
3973     + newnp->ipv6_fl_list = NULL;
3974    
3975     rcu_read_lock();
3976     opt = rcu_dereference(np->opt);
3977     diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
3978     index 37b70f8e878f..0abab7926dca 100644
3979     --- a/sound/pci/hda/patch_sigmatel.c
3980     +++ b/sound/pci/hda/patch_sigmatel.c
3981     @@ -1537,6 +1537,8 @@ static const struct snd_pci_quirk stac9200_fixup_tbl[] = {
3982     "Dell Inspiron 1501", STAC_9200_DELL_M26),
3983     SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f6,
3984     "unknown Dell", STAC_9200_DELL_M26),
3985     + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0201,
3986     + "Dell Latitude D430", STAC_9200_DELL_M22),
3987     /* Panasonic */
3988     SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-74", STAC_9200_PANASONIC),
3989     /* Gateway machines needs EAPD to be set on resume */