Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.14/0170-4.14.71-all-fixes.patch



Revision 3238 - Fri Nov 9 12:14:58 2018 UTC by niro
File size: 195836 bytes
-added up to patches-4.14.79
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index d499676890d8..a054b5ad410a 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -133,14 +133,11 @@ min_adv_mss - INTEGER

IP Fragmentation:

-ipfrag_high_thresh - INTEGER
- Maximum memory used to reassemble IP fragments. When
- ipfrag_high_thresh bytes of memory is allocated for this purpose,
- the fragment handler will toss packets until ipfrag_low_thresh
- is reached. This also serves as a maximum limit to namespaces
- different from the initial one.
-
-ipfrag_low_thresh - INTEGER
+ipfrag_high_thresh - LONG INTEGER
+ Maximum memory used to reassemble IP fragments.
+
+ipfrag_low_thresh - LONG INTEGER
+ (Obsolete since linux-4.17)
Maximum memory used to reassemble IP fragments before the kernel
begins to remove incomplete fragment queues to free up resources.
The kernel still accepts new fragments for defragmentation.
diff --git a/Makefile b/Makefile
index aa458afa7fa2..dd4eaeeb2050 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
-SUBLEVEL = 70
+SUBLEVEL = 71
EXTRAVERSION =
NAME = Petit Gorille

diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index a8242362e551..ece78630d711 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -1,5 +1,4 @@
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index ef3c31cd7737..240c9251a7d4 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -1,5 +1,4 @@
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 1757ac9cecbc..af54b96abee0 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -1,5 +1,4 @@
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 8505db478904..1d92efb82c37 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -322,6 +322,7 @@ static int __init octeon_ehci_device_init(void)
return 0;

pd = of_find_device_by_node(ehci_node);
+ of_node_put(ehci_node);
if (!pd)
return 0;

@@ -384,6 +385,7 @@ static int __init octeon_ohci_device_init(void)
return 0;

pd = of_find_device_by_node(ohci_node);
+ of_node_put(ohci_node);
if (!pd)
return 0;

diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index 5ba6fcc26fa7..94a78dbbc91f 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -204,6 +204,7 @@ void __init arch_init_irq(void)
"mti,cpu-interrupt-controller");
if (!cpu_has_veic && !intc_node)
mips_cpu_irq_init();
+ of_node_put(intc_node);

irqchip_init();
}
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index cea8ad864b3f..57b34257be2b 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -141,14 +141,14 @@ static inline void * phys_to_virt(unsigned long address)
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
-static inline unsigned long isa_virt_to_bus(volatile void * address)
+static inline unsigned long isa_virt_to_bus(volatile void *address)
{
- return (unsigned long)address - PAGE_OFFSET;
+ return virt_to_phys(address);
}

-static inline void * isa_bus_to_virt(unsigned long address)
+static inline void *isa_bus_to_virt(unsigned long address)
{
- return (void *)(address + PAGE_OFFSET);
+ return phys_to_virt(address);
}

#define isa_page_to_bus page_to_phys
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 019035d7225c..8f845f6e5f42 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -20,6 +21,7 @@

#include <asm/abi.h>
#include <asm/mips-cps.h>
+#include <asm/page.h>
#include <asm/vdso.h>

/* Kernel-provided data used by the VDSO. */
@@ -128,12 +130,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vvar_size = gic_size + PAGE_SIZE;
size = vvar_size + image->size;

+ /*
+ * Find a region that's large enough for us to perform the
+ * colour-matching alignment below.
+ */
+ if (cpu_has_dc_aliases)
+ size += shm_align_mask + 1;
+
base = get_unmapped_area(NULL, 0, size, 0, 0);
if (IS_ERR_VALUE(base)) {
ret = base;
goto out;
}

+ /*
+ * If we suffer from dcache aliasing, ensure that the VDSO data page
+ * mapping is coloured the same as the kernel's mapping of that memory.
+ * This ensures that when the kernel updates the VDSO data userland
+ * will observe it without requiring cache invalidations.
+ */
+ if (cpu_has_dc_aliases) {
+ base = __ALIGN_MASK(base, shm_align_mask);
+ base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
+ }
+
data_addr = base + gic_size;
vdso_addr = data_addr + PAGE_SIZE;

diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index e12dfa48b478..a5893b2cdc0e 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -835,7 +835,8 @@ static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
- BUG_ON(size == 0);
+ if (WARN_ON(size == 0))
+ return;

preempt_disable();
if (cpu_has_inclusive_pcaches) {
@@ -871,7 +872,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
- BUG_ON(size == 0);
+ if (WARN_ON(size == 0))
+ return;

preempt_disable();
if (cpu_has_inclusive_pcaches) {
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 63f007f2de7e..4b95bdde22aa 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -427,8 +427,9 @@ static int get_mmio_atsd_reg(struct npu *npu)
int i;

for (i = 0; i < npu->mmio_atsd_count; i++) {
- if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
- return i;
+ if (!test_bit(i, &npu->mmio_atsd_usage))
+ if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
+ return i;
}

return -ENOSPC;
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 4f1f5fc8139d..061906f98dc5 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -170,7 +170,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
return set_validity_icpt(scb_s, 0x0039U);

/* copy only the wrapping keys */
- if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
+ if (read_guest_real(vcpu, crycb_addr + 72,
+ vsie_page->crycb.dea_wrapping_key_mask, 56))
return set_validity_icpt(scb_s, 0x0035U);

scb_s->ecb3 |= ecb3_flags;
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 48179928ff38..9d33dbf2489e 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -504,6 +504,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
struct microcode_amd *mc_amd;
struct ucode_cpu_info *uci;
struct ucode_patch *p;
+ enum ucode_state ret;
u32 rev, dummy;

BUG_ON(raw_smp_processor_id() != cpu);
@@ -521,9 +522,8 @@ static enum ucode_state apply_microcode_amd(int cpu)

/* need to apply patch? */
if (rev >= mc_amd->hdr.patch_id) {
- c->microcode = rev;
- uci->cpu_sig.rev = rev;
- return UCODE_OK;
+ ret = UCODE_OK;
+ goto out;
}

if (__apply_microcode_amd(mc_amd)) {
@@ -531,13 +531,21 @@ static enum ucode_state apply_microcode_amd(int cpu)
cpu, mc_amd->hdr.patch_id);
return UCODE_ERROR;
}
- pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
- mc_amd->hdr.patch_id);

- uci->cpu_sig.rev = mc_amd->hdr.patch_id;
- c->microcode = mc_amd->hdr.patch_id;
+ rev = mc_amd->hdr.patch_id;
+ ret = UCODE_UPDATED;
+
+ pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);

- return UCODE_UPDATED;
+out:
+ uci->cpu_sig.rev = rev;
+ c->microcode = rev;
+
+ /* Update boot_cpu_data's revision too, if we're on the BSP: */
+ if (c->cpu_index == boot_cpu_data.cpu_index)
+ boot_cpu_data.microcode = rev;
+
+ return ret;
}

static int install_equiv_cpu_table(const u8 *buf)
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 97ccf4c3b45b..16936a24795c 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -795,6 +795,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
struct cpuinfo_x86 *c = &cpu_data(cpu);
struct microcode_intel *mc;
+ enum ucode_state ret;
static int prev_rev;
u32 rev;

@@ -817,9 +818,8 @@ static enum ucode_state apply_microcode_intel(int cpu)
*/
rev = intel_get_microcode_revision();
if (rev >= mc->hdr.rev) {
- uci->cpu_sig.rev = rev;
- c->microcode = rev;
- return UCODE_OK;
+ ret = UCODE_OK;
+ goto out;
}

/*
@@ -848,10 +848,17 @@ static enum ucode_state apply_microcode_intel(int cpu)
prev_rev = rev;
}

+ ret = UCODE_UPDATED;
+
+out:
uci->cpu_sig.rev = rev;
- c->microcode = rev;
+ c->microcode = rev;
+
+ /* Update boot_cpu_data's revision too, if we're on the BSP: */
+ if (c->cpu_index == boot_cpu_data.cpu_index)
+ boot_cpu_data.microcode = rev;

- return UCODE_UPDATED;
+ return ret;
}

static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4e5a8e30cc4e..fd46d890296c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6965,8 +6965,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
return kvm_skip_emulated_instruction(vcpu);
else
- return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
- NULL, 0) == EMULATE_DONE;
+ return emulate_instruction(vcpu, EMULTYPE_SKIP) ==
+ EMULATE_DONE;
}

ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index c2faff548f59..794c35c4ca73 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -317,8 +317,6 @@ static noinline int vmalloc_fault(unsigned long address)
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;

- WARN_ON_ONCE(in_nmi());
-
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 4b571f3ea009..afbbe5750a1f 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -224,9 +224,9 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg)

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
- bfqg_put(bfqg);
-
blkg_put(bfqg_to_blkg(bfqg));
+
+ bfqg_put(bfqg);
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 6714507aa6c7..3d2ab65d2dd1 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -416,8 +416,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
if (tdepth <= tags->nr_reserved_tags)
return -EINVAL;

- tdepth -= tags->nr_reserved_tags;
-
/*
* If we are allowed to grow beyond the original size, allocate
* a new set of tags before freeing the old one.
@@ -437,7 +435,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
if (tdepth > 16 * BLKDEV_MAX_RQ)
return -EINVAL;

- new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
+ new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
+ tags->nr_reserved_tags);
if (!new)
return -ENOMEM;
ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
@@ -454,7 +453,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
* Don't need (or can't) update reserved tags here, they
* remain static and should never need resizing.
*/
- sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
+ sbitmap_queue_resize(&tags->bitmap_tags,
+ tdepth - tags->nr_reserved_tags);
}

return 0;
diff --git a/block/partitions/aix.c b/block/partitions/aix.c
index 007f95eea0e1..903f3ed175d0 100644
--- a/block/partitions/aix.c
+++ b/block/partitions/aix.c
@@ -178,7 +178,7 @@ int aix_partition(struct parsed_partitions *state)
u32 vgda_sector = 0;
u32 vgda_len = 0;
int numlvs = 0;
- struct pvd *pvd;
+ struct pvd *pvd = NULL;
struct lv_info {
unsigned short pps_per_lv;
unsigned short pps_found;
@@ -232,10 +232,11 @@ int aix_partition(struct parsed_partitions *state)
if (lvip[i].pps_per_lv)
foundlvs += 1;
}
+ /* pvd loops depend on n[].name and lvip[].pps_per_lv */
+ pvd = alloc_pvd(state, vgda_sector + 17);
}
put_dev_sector(sect);
}
- pvd = alloc_pvd(state, vgda_sector + 17);
if (pvd) {
int numpps = be16_to_cpu(pvd->pp_count);
int psn_part1 = be32_to_cpu(pvd->psn_part1);
@@ -282,10 +283,14 @@ int aix_partition(struct parsed_partitions *state)
next_lp_ix += 1;
}
for (i = 0; i < state->limit; i += 1)
- if (lvip[i].pps_found && !lvip[i].lv_is_contiguous)
+ if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {
+ char tmp[sizeof(n[i].name) + 1]; // null char
+
+ snprintf(tmp, sizeof(tmp), "%s", n[i].name);
pr_warn("partition %s (%u pp's found) is "
"not contiguous\n",
- n[i].name, lvip[i].pps_found);
+ tmp, lvip[i].pps_found);
+ }
kfree(pvd);
}
kfree(n);
diff --git a/crypto/Makefile b/crypto/Makefile
index adaf2c63baeb..56282e2d75ad 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -98,7 +98,7 @@ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
-CFLAGS_aes_generic.o := $(call cc-ifversion, -ge, 0701, -Os) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
+CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o
obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 6cb148268676..58e4658f9dd6 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -324,6 +324,34 @@ err_no_vma:
return vma ? -ENOMEM : -ESRCH;
}

+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+ struct vm_area_struct *vma)
+{
+ if (vma)
+ alloc->vma_vm_mm = vma->vm_mm;
+ /*
+ * If we see alloc->vma is not NULL, buffer data structures set up
+ * completely. Look at smp_rmb side binder_alloc_get_vma.
+ * We also want to guarantee new alloc->vma_vm_mm is always visible
+ * if alloc->vma is set.
+ */
+ smp_wmb();
+ alloc->vma = vma;
+}
+
+static inline struct vm_area_struct *binder_alloc_get_vma(
+ struct binder_alloc *alloc)
+{
+ struct vm_area_struct *vma = NULL;
+
+ if (alloc->vma) {
+ /* Look at description in binder_alloc_set_vma */
+ smp_rmb();
+ vma = alloc->vma;
+ }
+ return vma;
+}
+
struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
@@ -339,7 +367,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
size_t size, data_offsets_size;
int ret;

- if (alloc->vma == NULL) {
+ if (!binder_alloc_get_vma(alloc)) {
pr_err("%d: binder_alloc_buf, no vma\n",
alloc->pid);
return ERR_PTR(-ESRCH);
@@ -712,9 +740,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
- barrier();
- alloc->vma = vma;
- alloc->vma_vm_mm = vma->vm_mm;
+ binder_alloc_set_vma(alloc, vma);
mmgrab(alloc->vma_vm_mm);

return 0;
@@ -741,10 +767,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int buffers, page_count;
struct binder_buffer *buffer;

- BUG_ON(alloc->vma);
-
buffers = 0;
mutex_lock(&alloc->mutex);
+ BUG_ON(alloc->vma);
+
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);

@@ -886,7 +912,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
- WRITE_ONCE(alloc->vma, NULL);
+ binder_alloc_set_vma(alloc, NULL);
}

/**
@@ -921,7 +947,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,

index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
- vma = alloc->vma;
+ vma = binder_alloc_get_vma(alloc);
if (vma) {
if (!mmget_not_zero(alloc->vma_vm_mm))
goto err_mmget;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index bc562fd2b0a0..cda9a0b5bdaa 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2096,7 +2096,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
struct ata_device *dev = ap->link.device;
- u32 devslp, dm, dito, mdat, deto;
+ u32 devslp, dm, dito, mdat, deto, dito_conf;
int rc;
unsigned int err_mask;

@@ -2120,8 +2120,15 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
return;
}

- /* device sleep was already enabled */
- if (devslp & PORT_DEVSLP_ADSE)
+ dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
+ dito = devslp_idle_timeout / (dm + 1);
+ if (dito > 0x3ff)
+ dito = 0x3ff;
+
+ dito_conf = (devslp >> PORT_DEVSLP_DITO_OFFSET) & 0x3FF;
+
+ /* device sleep was already enabled and same dito */
+ if ((devslp & PORT_DEVSLP_ADSE) && (dito_conf == dito))
return;

/* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
@@ -2129,11 +2136,6 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
if (rc)
return;

- dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
- dito = devslp_idle_timeout / (dm + 1);
- if (dito > 0x3ff)
- dito = 0x3ff;
-
/* Use the nominal value 10 ms if the read MDAT is zero,
* the nominal value of DETO is 20 ms.
*/
@@ -2151,6 +2153,8 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
deto = 20;
}

+ /* Make dito, mdat, deto bits to 0s */
+ devslp &= ~GENMASK_ULL(24, 2);
devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) |
(mdat << PORT_DEVSLP_MDAT_OFFSET) |
(deto << PORT_DEVSLP_DETO_OFFSET) |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 5e55d03d3d01..fe1414df0f33 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1228,6 +1228,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
case NBD_SET_SOCK:
return nbd_add_socket(nbd, arg, false);
case NBD_SET_BLKSIZE:
+ if (!arg || !is_power_of_2(arg) || arg < 512 ||
+ arg > PAGE_SIZE)
+ return -EINVAL;
nbd_size_set(nbd, arg,
div_s64(config->bytesize, arg));
return 0;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 531a0915066b..11ec92e47455 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -67,7 +67,7 @@
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
-
+#include <linux/nospec.h>
#include <linux/uaccess.h>

#define DRIVER_NAME "pktcdvd"
@@ -2231,6 +2231,8 @@ static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
if (dev_minor >= MAX_WRITERS)
return NULL;
+
+ dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
return pkt_devs[dev_minor];
}

diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index b33c8d6eb8c7..500d4d632e48 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -146,6 +146,7 @@ config BT_HCIUART_LL
config BT_HCIUART_3WIRE
bool "Three-wire UART (H5) protocol support"
depends on BT_HCIUART
+ depends on BT_HCIUART_SERDEV
help
The HCI Three-wire UART Transport Layer makes it possible to
user the Bluetooth HCI over a serial port interface. The HCI
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 86b526b7d990..a2070ab86c82 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -369,10 +369,13 @@ err_len:
return -EINVAL;
}

-static int tpm_request_locality(struct tpm_chip *chip)
+static int tpm_request_locality(struct tpm_chip *chip, unsigned int flags)
{
int rc;

+ if (flags & TPM_TRANSMIT_RAW)
+ return 0;
+
if (!chip->ops->request_locality)
return 0;

@@ -385,10 +388,13 @@ static int tpm_request_locality(struct tpm_chip *chip)
return 0;
}

-static void tpm_relinquish_locality(struct tpm_chip *chip)
+static void tpm_relinquish_locality(struct tpm_chip *chip, unsigned int flags)
{
int rc;

+ if (flags & TPM_TRANSMIT_RAW)
+ return;
+
if (!chip->ops->relinquish_locality)
return;

@@ -399,6 +405,28 @@ static void tpm_relinquish_locality(struct tpm_chip *chip)
chip->locality = -1;
}

+static int tpm_cmd_ready(struct tpm_chip *chip, unsigned int flags)
+{
+ if (flags & TPM_TRANSMIT_RAW)
+ return 0;
+
+ if (!chip->ops->cmd_ready)
+ return 0;
+
+ return chip->ops->cmd_ready(chip);
+}
+
+static int tpm_go_idle(struct tpm_chip *chip, unsigned int flags)
+{
+ if (flags & TPM_TRANSMIT_RAW)
+ return 0;
+
+ if (!chip->ops->go_idle)
+ return 0;
+
+ return chip->ops->go_idle(chip);
+}
+
static ssize_t tpm_try_transmit(struct tpm_chip *chip,
struct tpm_space *space,
u8 *buf, size_t bufsiz,
@@ -449,14 +477,15 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
/* Store the decision as chip->locality will be changed. */
need_locality = chip->locality == -1;

- if (!(flags & TPM_TRANSMIT_RAW) && need_locality) {
- rc = tpm_request_locality(chip);
+ if (need_locality) {
+ rc = tpm_request_locality(chip, flags);
if (rc < 0)
goto out_no_locality;
}

- if (chip->dev.parent)
- pm_runtime_get_sync(chip->dev.parent);
+ rc = tpm_cmd_ready(chip, flags);
+ if (rc)
+ goto out;

rc = tpm2_prepare_space(chip, space, ordinal, buf);
if (rc)
@@ -516,13 +545,16 @@ out_recv:
}

rc = tpm2_commit_space(chip, space, ordinal, buf, &len);
+ if (rc)
+ dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);

out:
- if (chip->dev.parent)
- pm_runtime_put_sync(chip->dev.parent);
+ rc = tpm_go_idle(chip, flags);
+ if (rc)
+ goto out;

if (need_locality)
- tpm_relinquish_locality(chip);
+ tpm_relinquish_locality(chip, flags);

out_no_locality:
if (chip->ops->clk_enable != NULL)
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index b83b30a3eea5..4bb9b4aa9b49 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -511,9 +511,17 @@ extern const struct file_operations tpm_fops;
extern const struct file_operations tpmrm_fops;
extern struct idr dev_nums_idr;

+/**
+ * enum tpm_transmit_flags
+ *
+ * @TPM_TRANSMIT_UNLOCKED: used to lock sequence of tpm_transmit calls.
+ * @TPM_TRANSMIT_RAW: prevent recursive calls into setup steps
+ * (go idle, locality,..). Always use with UNLOCKED
+ * as it will fail on double locking.
+ */
enum tpm_transmit_flags {
- TPM_TRANSMIT_UNLOCKED = BIT(0),
- TPM_TRANSMIT_RAW = BIT(1),
+ TPM_TRANSMIT_UNLOCKED = BIT(0),
+ TPM_TRANSMIT_RAW = BIT(1),
};

ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index d26ea7513226..dabb2ae4e779 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -39,7 +39,8 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++) {
if (space->session_tbl[i])
tpm2_flush_context_cmd(chip, space->session_tbl[i],
- TPM_TRANSMIT_UNLOCKED);
+ TPM_TRANSMIT_UNLOCKED |
+ TPM_TRANSMIT_RAW);
}
}

@@ -84,7 +85,7 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
tpm_buf_append(&tbuf, &buf[*offset], body_size);

rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 4,
- TPM_TRANSMIT_UNLOCKED, NULL);
+ TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW, NULL);
if (rc < 0) {
dev_warn(&chip->dev, "%s: failed with a system error %d\n",
__func__, rc);
@@ -133,7 +134,7 @@ static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
tpm_buf_append_u32(&tbuf, handle);

rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 0,
- TPM_TRANSMIT_UNLOCKED, NULL);
+ TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW, NULL);
if (rc < 0) {
dev_warn(&chip->dev, "%s: failed with a system error %d\n",
__func__, rc);
@@ -170,7 +171,8 @@ static void tpm2_flush_space(struct tpm_chip *chip)
for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
if (space->context_tbl[i] && ~space->context_tbl[i])
tpm2_flush_context_cmd(chip, space->context_tbl[i],
- TPM_TRANSMIT_UNLOCKED);
+ TPM_TRANSMIT_UNLOCKED |
+ TPM_TRANSMIT_RAW);

tpm2_flush_sessions(chip, space);
}
@@ -377,7 +379,8 @@ static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp,

return 0;
out_no_slots:
- tpm2_flush_context_cmd(chip, phandle, TPM_TRANSMIT_UNLOCKED);
+ tpm2_flush_context_cmd(chip, phandle,
+ TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW);
dev_warn(&chip->dev, "%s: out of slots for 0x%08X\n", __func__,
phandle);
return -ENOMEM;
@@ -465,7 +468,8 @@ static int tpm2_save_space(struct tpm_chip *chip)
return rc;

tpm2_flush_context_cmd(chip, space->context_tbl[i],
- TPM_TRANSMIT_UNLOCKED);
+ TPM_TRANSMIT_UNLOCKED |
+ TPM_TRANSMIT_RAW);
space->context_tbl[i] = ~0;
}

diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index bb756ad7897e..5c7ce5aaaf6f 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -137,7 +137,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
}

/**
- * crb_go_idle - request tpm crb device to go the idle state
+ * __crb_go_idle - request tpm crb device to go the idle state
*
* @dev: crb device
* @priv: crb private data
@@ -151,7 +151,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
*
* Return: 0 always
*/
-static int crb_go_idle(struct device *dev, struct crb_priv *priv)
+static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
{
if ((priv->flags & CRB_FL_ACPI_START) ||
(priv->flags & CRB_FL_CRB_SMC_START))
@@ -166,11 +166,20 @@ static int crb_go_idle(struct device *dev, struct crb_priv *priv)
dev_warn(dev, "goIdle timed out\n");
return -ETIME;
}
+
return 0;
}

+static int crb_go_idle(struct tpm_chip *chip)
+{
+ struct device *dev = &chip->dev;
+ struct crb_priv *priv = dev_get_drvdata(dev);
+
+ return __crb_go_idle(dev, priv);
+}
+
/**
- * crb_cmd_ready - request tpm crb device to enter ready state
+ * __crb_cmd_ready - request tpm crb device to enter ready state
*
* @dev: crb device
* @priv: crb private data
@@ -183,7 +192,7 @@ static int crb_go_idle(struct device *dev, struct crb_priv *priv)
*
* Return: 0 on success -ETIME on timeout;
*/
-static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
+static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
{
if ((priv->flags & CRB_FL_ACPI_START) ||
(priv->flags & CRB_FL_CRB_SMC_START))
@@ -201,6 +210,14 @@ static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
return 0;
}

+static int crb_cmd_ready(struct tpm_chip *chip)
+{
+ struct device *dev = &chip->dev;
+ struct crb_priv *priv = dev_get_drvdata(dev);
+
+ return __crb_cmd_ready(dev, priv);
+}
+
static int __crb_request_locality(struct device *dev,
struct crb_priv *priv, int loc)
{
@@ -393,6 +410,8 @@ static const struct tpm_class_ops tpm_crb = {
.send = crb_send,
.cancel = crb_cancel,
.req_canceled = crb_req_canceled,
+ .go_idle = crb_go_idle,
+ .cmd_ready = crb_cmd_ready,
.request_locality = crb_request_locality,
.relinquish_locality = crb_relinquish_locality,
.req_complete_mask = CRB_DRV_STS_COMPLETE,
@@ -508,7 +527,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
* PTT HW bug w/a: wake up the device to access
* possibly not retained registers.
*/
- ret = crb_cmd_ready(dev, priv);
+ ret = __crb_cmd_ready(dev, priv);
if (ret)
return ret;

@@ -553,7 +572,7 @@ out:
if (!ret)
priv->cmd_size = cmd_size;

- crb_go_idle(dev, priv);
+ __crb_go_idle(dev, priv);

__crb_relinquish_locality(dev, priv, 0);

@@ -624,32 +643,7 @@ static int crb_acpi_add(struct acpi_device *device)
chip->acpi_dev_handle = device->handle;
chip->flags = TPM_CHIP_FLAG_TPM2;

- rc = __crb_request_locality(dev, priv, 0);
- if (rc)
- return rc;
-
- rc = crb_cmd_ready(dev, priv);
- if (rc)
- goto out;
-
- pm_runtime_get_noresume(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
- rc = tpm_chip_register(chip);
- if (rc) {
- crb_go_idle(dev, priv);
- pm_runtime_put_noidle(dev);
- pm_runtime_disable(dev);
- goto out;
- }
-
- pm_runtime_put_sync(dev);
-
-out:
- __crb_relinquish_locality(dev, priv, 0);
-
- return rc;
+ return tpm_chip_register(chip);
}

static int crb_acpi_remove(struct acpi_device *device)
@@ -659,52 +653,11 @@ static int crb_acpi_remove(struct acpi_device *device)

tpm_chip_unregister(chip);

- pm_runtime_disable(dev);
-
return 0;
}

-static int __maybe_unused crb_pm_runtime_suspend(struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
- struct crb_priv *priv = dev_get_drvdata(&chip->dev);
-
- return crb_go_idle(dev, priv);
-}
-
-static int __maybe_unused crb_pm_runtime_resume(struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
- struct crb_priv *priv = dev_get_drvdata(&chip->dev);
-
- return crb_cmd_ready(dev, priv);
-}
-
-static int __maybe_unused crb_pm_suspend(struct device *dev)
-{
- int ret;
-
- ret = tpm_pm_suspend(dev);
- if (ret)
- return ret;
-
- return crb_pm_runtime_suspend(dev);
-}
-
-static int __maybe_unused crb_pm_resume(struct device *dev)
-{
- int ret;
-
- ret = crb_pm_runtime_resume(dev);
- if (ret)
- return ret;
-
- return tpm_pm_resume(dev);
-}
-
static const struct dev_pm_ops crb_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(crb_pm_suspend, crb_pm_resume)
- SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume)
};

static const struct acpi_device_id crb_device_ids[] = {
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index d5b44cadac56..c619e76ce827 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -117,7 +117,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
/* Lock the adapter for the duration of the whole sequence. */
if (!tpm_dev.client->adapter->algo->master_xfer)
return -EOPNOTSUPP;
- i2c_lock_adapter(tpm_dev.client->adapter);
+ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);

if (tpm_dev.chip_type == SLB9645) {
/* use a combined read for newer chips
@@ -192,7 +192,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
}

out:
- i2c_unlock_adapter(tpm_dev.client->adapter);
+ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
/* take care of 'guard time' */
usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);

@@ -224,7 +224,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,

if (!tpm_dev.client->adapter->algo->master_xfer)
return -EOPNOTSUPP;
- i2c_lock_adapter(tpm_dev.client->adapter);
+ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);

/* prepend the 'register address' to the buffer */
tpm_dev.buf[0] = addr;
@@ -243,7 +243,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
usleep_range(sleep_low, sleep_hi);
}

- i2c_unlock_adapter(tpm_dev.client->adapter);
+ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
/* take care of 'guard time' */
usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);

diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index 8ab0bd8445f6..b00388fc41c8 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -188,6 +188,7 @@ static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
static int tpm_tis_spi_probe(struct spi_device *dev)
{
struct tpm_tis_spi_phy *phy;
+ int irq;

phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
GFP_KERNEL);
@@ -200,7 +201,13 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
if (!phy->iobuf)
return -ENOMEM;

- return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
+ /* If the SPI device has an IRQ then use that */
+ if (dev->irq > 0)
+ irq = dev->irq;
+ else
+ irq = -1;
+
+ return tpm_tis_core_init(&dev->dev, &phy->priv, irq, &tpm_spi_phy_ops,
NULL);
}

diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index e4b40f2b4627..9c0f7cf920af 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -246,6 +246,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
kfree(sec->raw_name);
memunmap(sec->baseaddr);
+ sec->enabled = false;
}

return 0;
@@ -279,8 +280,10 @@ static int vpd_sections_init(phys_addr_t physaddr)
ret = vpd_section_init("rw", &rw_vpd,
physaddr + sizeof(struct vpd_cbmem) +
header.ro_size, header.rw_size);
- if (ret)
+ if (ret) {
+ vpd_section_destroy(&ro_vpd);
return ret;
+ }
}

return 0;
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 4b80e996d976..1022fe8d09c7 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -497,9 +497,10 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
return 0;

err_gpiochip_add:
+ chip = chip_save;
while (--i >= 0) {
- chip--;
gpiochip_remove(&chip->gpio);
+ chip++;
}
kfree(chip_save);

diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index fbaf974277df..1eb857e2f62f 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -728,4 +728,4 @@ static int __init tegra_gpio_init(void)
{
return platform_driver_register(&tegra_gpio_driver);
}
-postcore_initcall(tegra_gpio_init);
+subsys_initcall(tegra_gpio_init);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index be813b2738c1..2e706f1abe64 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8462,6 +8462,7 @@ enum skl_power_gate {
#define TRANS_MSA_10_BPC (2<<5)
#define TRANS_MSA_12_BPC (3<<5)
#define TRANS_MSA_16_BPC (4<<5)
+#define TRANS_MSA_CEA_RANGE (1<<3)

/* LCPLL Control */
#define LCPLL_CTL _MMIO(0x130040)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 5e5fe03b638c..3a4a581345c4 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1396,6 +1396,10 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
WARN_ON(transcoder_is_dsi(cpu_transcoder));

temp = TRANS_MSA_SYNC_CLK;
+
+ if (crtc_state->limited_color_range)
+ temp |= TRANS_MSA_CEA_RANGE;
+
switch (crtc_state->pipe_bpp) {
case 18:
temp |= TRANS_MSA_6_BPC;
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 658fa2d3e40c..2c8411b8d050 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1401,6 +1401,8 @@ static int ipu_probe(struct platform_device *pdev)
return -ENODEV;

ipu->id = of_alias_get_id(np, "ipu");
+ if (ipu->id < 0)
+ ipu->id = 0;

if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
IS_ENABLED(CONFIG_DRM)) {
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 8267439dd1ee..d8101cd28dfa 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -196,6 +196,10 @@ int hv_synic_alloc(void)

return 0;
err:
+ /*
+ * Any memory allocations that succeeded will be freed when
+ * the caller cleans up by calling hv_synic_free()
+ */
return -ENOMEM;
}

@@ -208,12 +212,10 @@ void hv_synic_free(void)
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);

- if (hv_cpu->synic_event_page)
- free_page((unsigned long)hv_cpu->synic_event_page);
- if (hv_cpu->synic_message_page)
- free_page((unsigned long)hv_cpu->synic_message_page);
- if (hv_cpu->post_msg_page)
- free_page((unsigned long)hv_cpu->post_msg_page);
+ kfree(hv_cpu->clk_evt);
+ free_page((unsigned long)hv_cpu->synic_event_page);
+ free_page((unsigned long)hv_cpu->synic_message_page);
+ free_page((unsigned long)hv_cpu->post_msg_page);
}

kfree(hv_context.hv_numa_map);
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 284f8670dbeb..2feae9a421e6 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -859,7 +859,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
if (!match)
bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val;
else
- bus->get_clk_reg_val = match->data;
+ bus->get_clk_reg_val = (u32 (*)(u32))match->data;

/* Initialize the I2C adapter */
spin_lock_init(&bus->lock);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index ba8df2fde1b2..67cbd9f61acc 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -138,6 +138,7 @@

#define SBREG_BAR 0x10
#define SBREG_SMBCTRL 0xc6000c
+#define SBREG_SMBCTRL_DNV 0xcf000c

/* Host status bits for SMBPCISTS */
#define SMBPCISTS_INTS BIT(3)
@@ -1395,7 +1396,11 @@ static void i801_add_tco(struct i801_priv *priv)
spin_unlock(&p2sb_spinlock);

res = &tco_res[ICH_RES_MEM_OFF];
- res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+ if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
+ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
+ else
+ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+
res->end = res->start + 3;
res->flags = IORESOURCE_MEM;

diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index ae6ed254e01d..732d6c456a6f 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -538,6 +538,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
{
u8 rx_watermark;
struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
+ unsigned long flags;

/* Clear and enable Rx full interrupt. */
xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
@@ -553,6 +554,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
rx_watermark = IIC_RX_FIFO_DEPTH;
xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);

+ local_irq_save(flags);
if (!(msg->flags & I2C_M_NOSTART))
/* write the address */
xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
@@ -563,6 +565,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c)

xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
+ local_irq_restore(flags);
+
if (i2c->nmsgs == 1)
/* very last, enable bus not busy as well */
xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 79843a3ca9dc..752dbc388c27 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1459,9 +1459,16 @@ static bool cma_match_net_dev(const struct rdma_cm_id *id,
(addr->src_addr.ss_family == AF_IB ||
cma_protocol_roce_dev_port(id->device, port_num));

- return !addr->dev_addr.bound_dev_if ||
- (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
- addr->dev_addr.bound_dev_if == net_dev->ifindex);
+ /*
+ * Net namespaces must match, and if the listner is listening
+ * on a specific netdevice than netdevice must match as well.
+ */
+ if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
+ (!!addr->dev_addr.bound_dev_if ==
+ (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
+ return true;
+ else
+ return false;
}

static struct rdma_id_private *cma_find_listener(
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index fc149ea64be7..59aaac43db91 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1647,10 +1647,11 @@ static int mxt_parse_object_table(struct mxt_data *data,
break;
case MXT_TOUCH_MULTI_T9:
data->multitouch = MXT_TOUCH_MULTI_T9;
+ /* Only handle messages from first T9 instance */
data->T9_reportid_min = min_id;
- data->T9_reportid_max = max_id;
- data->num_touchids = object->num_report_ids
- * mxt_obj_instances(object);
+ data->T9_reportid_max = min_id +
+ object->num_report_ids - 1;
+ data->num_touchids = object->num_report_ids;
break;
case MXT_SPT_MESSAGECOUNT_T44:
data->T44_address = object->start_address;
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 195d6e93ac71..5d0ba5f644c4 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -54,7 +54,7 @@ struct ipmmu_vmsa_domain {
struct io_pgtable_ops *iop;

unsigned int context_id;
- spinlock_t lock; /* Protects mappings */
+ struct mutex mutex; /* Protects mappings */
};

struct ipmmu_vmsa_iommu_priv {
@@ -523,7 +523,7 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
if (!domain)
return NULL;

- spin_lock_init(&domain->lock);
+ mutex_init(&domain->mutex);

return &domain->io_domain;
}
@@ -548,7 +548,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
struct ipmmu_vmsa_device *mmu = priv->mmu;
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
- unsigned long flags;
unsigned int i;
int ret = 0;

@@ -557,7 +556,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
return -ENXIO;
}

- spin_lock_irqsave(&domain->lock, flags);
+ mutex_lock(&domain->mutex);

if (!domain->mmu) {
/* The domain hasn't been used yet, initialize it. */
@@ -574,7 +573,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
} else
dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

- spin_unlock_irqrestore(&domain->lock, flags);
+ mutex_unlock(&domain->mutex);

if (ret < 0)
return ret;
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index c4c2b3b85ebc..f6e040fcad9a 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -532,8 +532,9 @@ init_pmu(void)
int timeout;
struct adb_request req;

- out_8(&via[B], via[B] | TREQ); /* negate TREQ */
- out_8(&via[DIRB], (via[DIRB] | TREQ) & ~TACK); /* TACK in, TREQ out */
+ /* Negate TREQ. Set TACK to input and TREQ to output. */
+ out_8(&via[B], in_8(&via[B]) | TREQ);
+ out_8(&via[DIRB], (in_8(&via[DIRB]) | TREQ) & ~TACK);

pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
timeout = 100000;
@@ -1455,8 +1456,8 @@ pmu_sr_intr(void)
struct adb_request *req;
int bite = 0;

- if (via[B] & TREQ) {
- printk(KERN_ERR "PMU: spurious SR intr (%x)\n", via[B]);
+ if (in_8(&via[B]) & TREQ) {
+ printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via[B]));
out_8(&via[IFR], SR_INT);
return NULL;
}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 71c3507df9a0..a4b7c2698096 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2330,7 +2330,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
{0, 2, "Invalid number of cache feature arguments"},
};

- int r;
+ int r, mode_ctr = 0;
unsigned argc;
const char *arg;
struct cache_features *cf = &ca->features;
@@ -2344,14 +2344,20 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
while (argc--) {
arg = dm_shift_arg(as);

- if (!strcasecmp(arg, "writeback"))
+ if (!strcasecmp(arg, "writeback")) {
cf->io_mode = CM_IO_WRITEBACK;
+ mode_ctr++;
+ }

- else if (!strcasecmp(arg, "writethrough"))
+ else if (!strcasecmp(arg, "writethrough")) {
cf->io_mode = CM_IO_WRITETHROUGH;
+ mode_ctr++;
+ }

- else if (!strcasecmp(arg, "passthrough"))
+ else if (!strcasecmp(arg, "passthrough")) {
cf->io_mode = CM_IO_PASSTHROUGH;
+ mode_ctr++;
+ }

else if (!strcasecmp(arg, "metadata2"))
cf->metadata_version = 2;
@@ -2362,6 +2368,11 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
}
}

+ if (mode_ctr > 1) {
+ *error = "Duplicate cache io_mode features requested";
+ return -EINVAL;
+ }
+
return 0;
}

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 07ca2fd10189..5018fb2352c2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4516,6 +4516,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
s->failed++;
if (rdev && !test_bit(Faulty, &rdev->flags))
do_recovery = 1;
+ else if (!rdev) {
+ rdev = rcu_dereference(
+ conf->disks[i].replacement);
+ if (rdev && !test_bit(Faulty, &rdev->flags))
+ do_recovery = 1;
+ }
}

if (test_bit(R5_InJournal, &dev->flags))
diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
index 2ab8d83e5576..fcfe658a4328 100644
--- a/drivers/media/dvb-frontends/helene.c
+++ b/drivers/media/dvb-frontends/helene.c
@@ -897,7 +897,10 @@ static int helene_x_pon(struct helene_priv *priv)
helene_write_regs(priv, 0x99, cdata, sizeof(cdata));

/* 0x81 - 0x94 */
- data[0] = 0x18; /* xtal 24 MHz */
+ if (priv->xtal == SONY_HELENE_XTAL_16000)
+ data[0] = 0x10; /* xtal 16 MHz */
+ else
+ data[0] = 0x18; /* xtal 24 MHz */
data[1] = (uint8_t)(0x80 | (0x04 & 0x1F)); /* 4 x 25 = 100uA */
data[2] = (uint8_t)(0x80 | (0x26 & 0x7F)); /* 38 x 0.25 = 9.5pF */
data[3] = 0x80; /* REFOUT signal output 500mVpp */
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 56fe4e5b396e..4a65861433d6 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -1114,6 +1114,14 @@ vpif_init_free_channel_objects:
return err;
}

+static void free_vpif_objs(void)
+{
+ int i;
+
+ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++)
+ kfree(vpif_obj.dev[i]);
+}
+
static int vpif_async_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_subdev *asd)
@@ -1250,11 +1258,6 @@ static __init int vpif_probe(struct platform_device *pdev)
return -EINVAL;
}

- if (!pdev->dev.platform_data) {
- dev_warn(&pdev->dev, "Missing platform data. Giving up.\n");
- return -EINVAL;
- }
-
vpif_dev = &pdev->dev;
err = initialize_vpif();

@@ -1266,7 +1269,7 @@ static __init int vpif_probe(struct platform_device *pdev)
err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
if (err) {
v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
- return err;
+ goto vpif_free;
}

while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
@@ -1309,7 +1312,10 @@ static __init int vpif_probe(struct platform_device *pdev)
if (vpif_obj.sd[i])
vpif_obj.sd[i]->grp_id = 1 << i;
}
- vpif_probe_complete();
+ err = vpif_probe_complete();
+ if (err) {
+ goto probe_subdev_out;
+ }
} else {
vpif_obj.notifier.subdevs = vpif_obj.config->asd;
vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
@@ -1330,6 +1336,8 @@ probe_subdev_out:
kfree(vpif_obj.sd);
vpif_unregister:
v4l2_device_unregister(&vpif_obj.v4l2_dev);
+vpif_free:
+ free_vpif_objs();

return err;
}
@@ -1351,8 +1359,8 @@ static int vpif_remove(struct platform_device *device)
ch = vpif_obj.dev[i];
/* Unregister video device */
video_unregister_device(&ch->video_dev);
- kfree(vpif_obj.dev[i]);
}
+ free_vpif_objs();

return 0;
}
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csid.c b/drivers/media/platform/qcom/camss-8x16/camss-csid.c
index 64df82817de3..4882ee25bd75 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-csid.c
+++ b/drivers/media/platform/qcom/camss-8x16/camss-csid.c
@@ -392,9 +392,6 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
!media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK]))
return -ENOLINK;
1552    
1553     - dt = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SRC].code)->
1554     - data_type;
1555     -
1556     if (tg->enabled) {
1557     /* Config Test Generator */
1558     struct v4l2_mbus_framefmt *f =
1559     @@ -416,6 +413,9 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
1560     writel_relaxed(val, csid->base +
1561     CAMSS_CSID_TG_DT_n_CGG_0(0));
1562    
1563     + dt = csid_get_fmt_entry(
1564     + csid->fmt[MSM_CSID_PAD_SRC].code)->data_type;
1565     +
1566     /* 5:0 data type */
1567     val = dt;
1568     writel_relaxed(val, csid->base +
1569     @@ -425,6 +425,9 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
1570     val = tg->payload_mode;
1571     writel_relaxed(val, csid->base +
1572     CAMSS_CSID_TG_DT_n_CGG_2(0));
1573     +
1574     + df = csid_get_fmt_entry(
1575     + csid->fmt[MSM_CSID_PAD_SRC].code)->decode_format;
1576     } else {
1577     struct csid_phy_config *phy = &csid->phy;
1578    
1579     @@ -439,13 +442,16 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
1580    
1581     writel_relaxed(val,
1582     csid->base + CAMSS_CSID_CORE_CTRL_1);
1583     +
1584     + dt = csid_get_fmt_entry(
1585     + csid->fmt[MSM_CSID_PAD_SINK].code)->data_type;
1586     + df = csid_get_fmt_entry(
1587     + csid->fmt[MSM_CSID_PAD_SINK].code)->decode_format;
1588     }
1589    
1590     /* Config LUT */
1591    
1592     dt_shift = (cid % 4) * 8;
1593     - df = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SINK].code)->
1594     - decode_format;
1595    
1596     val = readl_relaxed(csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));
1597     val &= ~(0xff << dt_shift);
1598     diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
1599     index 8e9531f7f83f..9942932ecbf9 100644
1600     --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
1601     +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
1602     @@ -254,24 +254,24 @@ static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
1603     static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
1604     {
1605     struct s5p_mfc_dev *dev = ctx->dev;
1606     - struct s5p_mfc_buf *dst_buf, *src_buf;
1607     - size_t dec_y_addr;
1608     + struct s5p_mfc_buf *dst_buf, *src_buf;
1609     + u32 dec_y_addr;
1610     unsigned int frame_type;
1611    
1612     /* Make sure we actually have a new frame before continuing. */
1613     frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
1614     if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED)
1615     return;
1616     - dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
1617     + dec_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
1618    
1619     /* Copy timestamp / timecode from decoded src to dst and set
1620     appropriate flags. */
1621     src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1622     list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
1623     - if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
1624     - == dec_y_addr) {
1625     - dst_buf->b->timecode =
1626     - src_buf->b->timecode;
1627     + u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
1628     +
1629     + if (addr == dec_y_addr) {
1630     + dst_buf->b->timecode = src_buf->b->timecode;
1631     dst_buf->b->vb2_buf.timestamp =
1632     src_buf->b->vb2_buf.timestamp;
1633     dst_buf->b->flags &=
1634     @@ -307,10 +307,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
1635     {
1636     struct s5p_mfc_dev *dev = ctx->dev;
1637     struct s5p_mfc_buf *dst_buf;
1638     - size_t dspl_y_addr;
1639     + u32 dspl_y_addr;
1640     unsigned int frame_type;
1641    
1642     - dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
1643     + dspl_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
1644     if (IS_MFCV6_PLUS(dev))
1645     frame_type = s5p_mfc_hw_call(dev->mfc_ops,
1646     get_disp_frame_type, ctx);
1647     @@ -329,9 +329,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
1648     /* The MFC returns address of the buffer, now we have to
1649     * check which videobuf does it correspond to */
1650     list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
1651     + u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
1652     +
1653     /* Check if this is the buffer we're looking for */
1654     - if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
1655     - == dspl_y_addr) {
1656     + if (addr == dspl_y_addr) {
1657     list_del(&dst_buf->list);
1658     ctx->dst_queue_cnt--;
1659     dst_buf->b->sequence = ctx->sequence;
1660     diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
1661     index b421329b21fa..3d09e1c87921 100644
1662     --- a/drivers/media/usb/dvb-usb/dw2102.c
1663     +++ b/drivers/media/usb/dvb-usb/dw2102.c
1664     @@ -2103,14 +2103,12 @@ static struct dvb_usb_device_properties s6x0_properties = {
1665     }
1666     };
1667    
1668     -static struct dvb_usb_device_properties *p1100;
1669     static const struct dvb_usb_device_description d1100 = {
1670     "Prof 1100 USB ",
1671     {&dw2102_table[PROF_1100], NULL},
1672     {NULL},
1673     };
1674    
1675     -static struct dvb_usb_device_properties *s660;
1676     static const struct dvb_usb_device_description d660 = {
1677     "TeVii S660 USB",
1678     {&dw2102_table[TEVII_S660], NULL},
1679     @@ -2129,14 +2127,12 @@ static const struct dvb_usb_device_description d480_2 = {
1680     {NULL},
1681     };
1682    
1683     -static struct dvb_usb_device_properties *p7500;
1684     static const struct dvb_usb_device_description d7500 = {
1685     "Prof 7500 USB DVB-S2",
1686     {&dw2102_table[PROF_7500], NULL},
1687     {NULL},
1688     };
1689    
1690     -static struct dvb_usb_device_properties *s421;
1691     static const struct dvb_usb_device_description d421 = {
1692     "TeVii S421 PCI",
1693     {&dw2102_table[TEVII_S421], NULL},
1694     @@ -2336,6 +2332,11 @@ static int dw2102_probe(struct usb_interface *intf,
1695     const struct usb_device_id *id)
1696     {
1697     int retval = -ENOMEM;
1698     + struct dvb_usb_device_properties *p1100;
1699     + struct dvb_usb_device_properties *s660;
1700     + struct dvb_usb_device_properties *p7500;
1701     + struct dvb_usb_device_properties *s421;
1702     +
1703     p1100 = kmemdup(&s6x0_properties,
1704     sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
1705     if (!p1100)
1706     @@ -2404,8 +2405,16 @@ static int dw2102_probe(struct usb_interface *intf,
1707     0 == dvb_usb_device_init(intf, &t220_properties,
1708     THIS_MODULE, NULL, adapter_nr) ||
1709     0 == dvb_usb_device_init(intf, &tt_s2_4600_properties,
1710     - THIS_MODULE, NULL, adapter_nr))
1711     + THIS_MODULE, NULL, adapter_nr)) {
1712     +
1713     + /* clean up copied properties */
1714     + kfree(s421);
1715     + kfree(p7500);
1716     + kfree(s660);
1717     + kfree(p1100);
1718     +
1719     return 0;
1720     + }
1721    
1722     retval = -ENODEV;
1723     kfree(s421);
1724     diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
1725     index 0f3fab47fe48..7dc1cbcd2fb8 100644
1726     --- a/drivers/mfd/ti_am335x_tscadc.c
1727     +++ b/drivers/mfd/ti_am335x_tscadc.c
1728     @@ -210,14 +210,13 @@ static int ti_tscadc_probe(struct platform_device *pdev)
1729     * The TSC_ADC_SS controller design assumes the OCP clock is
1730     * at least 6x faster than the ADC clock.
1731     */
1732     - clk = clk_get(&pdev->dev, "adc_tsc_fck");
1733     + clk = devm_clk_get(&pdev->dev, "adc_tsc_fck");
1734     if (IS_ERR(clk)) {
1735     dev_err(&pdev->dev, "failed to get TSC fck\n");
1736     err = PTR_ERR(clk);
1737     goto err_disable_clk;
1738     }
1739     clock_rate = clk_get_rate(clk);
1740     - clk_put(clk);
1741     tscadc->clk_div = clock_rate / ADC_CLK;
1742    
1743     /* TSCADC_CLKDIV needs to be configured to the value minus 1 */
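The ti_am335x_tscadc hunk above replaces clk_get()/clk_put() with devm_clk_get(), so the clock reference is released automatically when the device goes away and the error paths no longer need a manual clk_put(). A userspace analogy of that managed-resource idea is sketched below; none of this is the kernel's actual devres implementation, and every name is made up for illustration.

#include <stdio.h>
#include <stdlib.h>

/* Userspace analogy for the devm_* pattern: resources are registered
 * with their owner and released in bulk at teardown, so individual
 * error paths (like the clk_put() the patch deletes) need no manual
 * cleanup. */
struct devres { void *res; void (*release)(void *); struct devres *next; };
struct device { struct devres *res_list; };

static void *devm_alloc(struct device *dev, size_t n, void (*release)(void *))
{
    struct devres *dr = malloc(sizeof(*dr));

    if (!dr)
        return NULL;
    dr->res = malloc(n);
    if (!dr->res) {
        free(dr);
        return NULL;
    }
    dr->release = release;
    dr->next = dev->res_list;
    dev->res_list = dr;
    return dr->res;
}

static void device_teardown(struct device *dev)
{
    for (struct devres *dr = dev->res_list; dr; ) {
        struct devres *next = dr->next;

        dr->release(dr->res);   /* every managed resource freed here */
        free(dr);
        dr = next;
    }
}

int main(void)
{
    struct device dev = { 0 };
    char *buf = devm_alloc(&dev, 64, free);

    if (!buf)
        return 1;
    snprintf(buf, 64, "owned by dev");
    puts(buf);
    device_teardown(&dev);      /* no per-path cleanup needed */
    return 0;
}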
1744     diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
1745     index ddc9e4b08b5c..56efa9d18a9a 100644
1746     --- a/drivers/misc/mic/scif/scif_api.c
1747     +++ b/drivers/misc/mic/scif/scif_api.c
1748     @@ -370,11 +370,10 @@ int scif_bind(scif_epd_t epd, u16 pn)
1749     goto scif_bind_exit;
1750     }
1751     } else {
1752     - pn = scif_get_new_port();
1753     - if (!pn) {
1754     - ret = -ENOSPC;
1755     + ret = scif_get_new_port();
1756     + if (ret < 0)
1757     goto scif_bind_exit;
1758     - }
1759     + pn = ret;
1760     }
1761    
1762     ep->state = SCIFEP_BOUND;
1763     @@ -648,13 +647,12 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
1764     err = -EISCONN;
1765     break;
1766     case SCIFEP_UNBOUND:
1767     - ep->port.port = scif_get_new_port();
1768     - if (!ep->port.port) {
1769     - err = -ENOSPC;
1770     - } else {
1771     - ep->port.node = scif_info.nodeid;
1772     - ep->conn_async_state = ASYNC_CONN_IDLE;
1773     - }
1774     + err = scif_get_new_port();
1775     + if (err < 0)
1776     + break;
1777     + ep->port.port = err;
1778     + ep->port.node = scif_info.nodeid;
1779     + ep->conn_async_state = ASYNC_CONN_IDLE;
1780     /* Fall through */
1781     case SCIFEP_BOUND:
1782     /*
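The scif_api hunks above stop treating a zero port as the only failure signal: the allocator's return value carries either a valid port or a negative errno, and the caller propagates that code instead of substituting its own -ENOSPC. A small standalone sketch of the convention; the stand-in get_new_port() below is illustrative, not the real allocator.

#include <stdio.h>
#include <errno.h>

/* One int carries either a valid port (> 0) or a negative errno,
 * instead of overloading 0 as an ambiguous failure marker. */
static int next_port = 1024;

static int get_new_port(void)
{
    if (next_port > 65535)
        return -ENOSPC;     /* propagate the real reason */
    return next_port++;
}

int main(void)
{
    int ret = get_new_port();

    if (ret < 0) {
        fprintf(stderr, "no port: %d\n", ret);
        return 1;
    }
    printf("bound to port %d\n", ret);  /* ret doubles as the port */
    return 0;
}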
1783     diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
1784     index b77aacafc3fc..dda3ed72d05b 100644
1785     --- a/drivers/misc/ti-st/st_kim.c
1786     +++ b/drivers/misc/ti-st/st_kim.c
1787     @@ -756,14 +756,14 @@ static int kim_probe(struct platform_device *pdev)
1788     err = gpio_request(kim_gdata->nshutdown, "kim");
1789     if (unlikely(err)) {
1790     pr_err(" gpio %d request failed ", kim_gdata->nshutdown);
1791     - return err;
1792     + goto err_sysfs_group;
1793     }
1794    
1795     /* Configure nShutdown GPIO as output=0 */
1796     err = gpio_direction_output(kim_gdata->nshutdown, 0);
1797     if (unlikely(err)) {
1798     pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
1799     - return err;
1800     + goto err_sysfs_group;
1801     }
1802     /* get reference of pdev for request_firmware
1803     */
1804     diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
1805     index 23a6986d512b..a8f74d9bba4f 100644
1806     --- a/drivers/mtd/ubi/wl.c
1807     +++ b/drivers/mtd/ubi/wl.c
1808     @@ -1615,8 +1615,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1809     cond_resched();
1810    
1811     e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1812     - if (!e)
1813     + if (!e) {
1814     + err = -ENOMEM;
1815     goto out_free;
1816     + }
1817    
1818     e->pnum = aeb->pnum;
1819     e->ec = aeb->ec;
1820     @@ -1635,8 +1637,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1821     cond_resched();
1822    
1823     e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1824     - if (!e)
1825     + if (!e) {
1826     + err = -ENOMEM;
1827     goto out_free;
1828     + }
1829    
1830     e->pnum = aeb->pnum;
1831     e->ec = aeb->ec;
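The ubi_wl_init() fix above sets err = -ENOMEM before jumping to the shared error label; without that, the function could return whatever err held from an earlier, successful step. A minimal illustration of the pitfall, with made-up names:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* goto-based unwinding only works if err is set before every jump. */
static int init_two(void **a, void **b)
{
    int err = 0;

    *a = *b = NULL;
    *a = malloc(16);
    if (!*a) {
        err = -ENOMEM;      /* must be set before the jump */
        goto out_free;
    }
    *b = malloc(16);
    if (!*b) {
        err = -ENOMEM;      /* the line the patch adds, in spirit */
        goto out_free;
    }
    return 0;

out_free:
    free(*b);
    free(*a);
    *a = *b = NULL;
    return err;
}

int main(void)
{
    void *a, *b;

    printf("init_two: %d\n", init_two(&a, &b));
    free(a);
    free(b);
    return 0;
}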
1832     diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
1833     index 3deaa3413313..074a5b79d691 100644
1834     --- a/drivers/net/ethernet/marvell/mvneta.c
1835     +++ b/drivers/net/ethernet/marvell/mvneta.c
1836     @@ -3195,7 +3195,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
1837    
1838     on_each_cpu(mvneta_percpu_enable, pp, true);
1839     mvneta_start_dev(pp);
1840     - mvneta_port_up(pp);
1841    
1842     netdev_update_features(dev);
1843    
1844     diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
1845     index 0c5b68e7da51..9b3167054843 100644
1846     --- a/drivers/net/phy/mdio-mux-bcm-iproc.c
1847     +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
1848     @@ -22,7 +22,7 @@
1849     #include <linux/mdio-mux.h>
1850     #include <linux/delay.h>
1851    
1852     -#define MDIO_PARAM_OFFSET 0x00
1853     +#define MDIO_PARAM_OFFSET 0x23c
1854     #define MDIO_PARAM_MIIM_CYCLE 29
1855     #define MDIO_PARAM_INTERNAL_SEL 25
1856     #define MDIO_PARAM_BUS_ID 22
1857     @@ -30,20 +30,22 @@
1858     #define MDIO_PARAM_PHY_ID 16
1859     #define MDIO_PARAM_PHY_DATA 0
1860    
1861     -#define MDIO_READ_OFFSET 0x04
1862     +#define MDIO_READ_OFFSET 0x240
1863     #define MDIO_READ_DATA_MASK 0xffff
1864     -#define MDIO_ADDR_OFFSET 0x08
1865     +#define MDIO_ADDR_OFFSET 0x244
1866    
1867     -#define MDIO_CTRL_OFFSET 0x0C
1868     +#define MDIO_CTRL_OFFSET 0x248
1869     #define MDIO_CTRL_WRITE_OP 0x1
1870     #define MDIO_CTRL_READ_OP 0x2
1871    
1872     -#define MDIO_STAT_OFFSET 0x10
1873     +#define MDIO_STAT_OFFSET 0x24c
1874     #define MDIO_STAT_DONE 1
1875    
1876     #define BUS_MAX_ADDR 32
1877     #define EXT_BUS_START_ADDR 16
1878    
1879     +#define MDIO_REG_ADDR_SPACE_SIZE 0x250
1880     +
1881     struct iproc_mdiomux_desc {
1882     void *mux_handle;
1883     void __iomem *base;
1884     @@ -169,6 +171,14 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
1885     md->dev = &pdev->dev;
1886    
1887     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1888     + if (res->start & 0xfff) {
1889     + /* For backward compatibility in case the
1890     + * base address is specified with an offset.
1891     + */
1892     + dev_info(&pdev->dev, "fix base address in dt-blob\n");
1893     + res->start &= ~0xfff;
1894     + res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1;
1895     + }
1896     md->base = devm_ioremap_resource(&pdev->dev, res);
1897     if (IS_ERR(md->base)) {
1898     dev_err(&pdev->dev, "failed to ioremap register\n");
1899     diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1900     index cb17ffadfc30..e0baea2dfd3c 100644
1901     --- a/drivers/net/tun.c
1902     +++ b/drivers/net/tun.c
1903     @@ -534,14 +534,6 @@ static void tun_queue_purge(struct tun_file *tfile)
1904     skb_queue_purge(&tfile->sk.sk_error_queue);
1905     }
1906    
1907     -static void tun_cleanup_tx_array(struct tun_file *tfile)
1908     -{
1909     - if (tfile->tx_array.ring.queue) {
1910     - skb_array_cleanup(&tfile->tx_array);
1911     - memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
1912     - }
1913     -}
1914     -
1915     static void __tun_detach(struct tun_file *tfile, bool clean)
1916     {
1917     struct tun_file *ntfile;
1918     @@ -583,7 +575,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
1919     tun->dev->reg_state == NETREG_REGISTERED)
1920     unregister_netdevice(tun->dev);
1921     }
1922     - tun_cleanup_tx_array(tfile);
1923     + skb_array_cleanup(&tfile->tx_array);
1924     sock_put(&tfile->sk);
1925     }
1926     }
1927     @@ -623,13 +615,11 @@ static void tun_detach_all(struct net_device *dev)
1928     /* Drop read queue */
1929     tun_queue_purge(tfile);
1930     sock_put(&tfile->sk);
1931     - tun_cleanup_tx_array(tfile);
1932     }
1933     list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
1934     tun_enable_queue(tfile);
1935     tun_queue_purge(tfile);
1936     sock_put(&tfile->sk);
1937     - tun_cleanup_tx_array(tfile);
1938     }
1939     BUG_ON(tun->numdisabled != 0);
1940    
1941     @@ -675,7 +665,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
1942     }
1943    
1944     if (!tfile->detached &&
1945     - skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
1946     + skb_array_resize(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
1947     err = -ENOMEM;
1948     goto out;
1949     }
1950     @@ -2624,6 +2614,11 @@ static int tun_chr_open(struct inode *inode, struct file * file)
1951     &tun_proto, 0);
1952     if (!tfile)
1953     return -ENOMEM;
1954     + if (skb_array_init(&tfile->tx_array, 0, GFP_KERNEL)) {
1955     + sk_free(&tfile->sk);
1956     + return -ENOMEM;
1957     + }
1958     +
1959     RCU_INIT_POINTER(tfile->tun, NULL);
1960     tfile->flags = 0;
1961     tfile->ifindex = 0;
1962     @@ -2644,8 +2639,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
1963    
1964     sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
1965    
1966     - memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
1967     -
1968     return 0;
1969     }
1970    
1971     diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
1972     index 52ebed1f55a1..6fa9c223ff93 100644
1973     --- a/drivers/net/wireless/ath/ath10k/mac.c
1974     +++ b/drivers/net/wireless/ath/ath10k/mac.c
1975     @@ -3074,6 +3074,13 @@ static int ath10k_update_channel_list(struct ath10k *ar)
1976     passive = channel->flags & IEEE80211_CHAN_NO_IR;
1977     ch->passive = passive;
1978    
1979     + /* the firmware is ignoring the "radar" flag of the
1980     + * channel and is scanning actively using Probe Requests
1981     + * on "Radar detection"/DFS channels which are not
1982     + * marked as "available"
1983     + */
1984     + ch->passive |= ch->chan_radar;
1985     +
1986     ch->freq = channel->center_freq;
1987     ch->band_center_freq1 = channel->center_freq;
1988     ch->min_power = 0;
1989     diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
1990     index 7616c1c4bbd3..baec856af90f 100644
1991     --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
1992     +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
1993     @@ -1451,6 +1451,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
1994     cfg->keep_alive_pattern_size = __cpu_to_le32(0);
1995     cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
1996     cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
1997     + cfg->wmi_send_separate = __cpu_to_le32(0);
1998     + cfg->num_ocb_vdevs = __cpu_to_le32(0);
1999     + cfg->num_ocb_channels = __cpu_to_le32(0);
2000     + cfg->num_ocb_schedules = __cpu_to_le32(0);
2001     + cfg->host_capab = __cpu_to_le32(0);
2002    
2003     ath10k_wmi_put_host_mem_chunks(ar, chunks);
2004    
2005     diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
2006     index 22cf011e839a..e75bba0bbf67 100644
2007     --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
2008     +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
2009     @@ -1228,6 +1228,11 @@ struct wmi_tlv_resource_config {
2010     __le32 keep_alive_pattern_size;
2011     __le32 max_tdls_concurrent_sleep_sta;
2012     __le32 max_tdls_concurrent_buffer_sta;
2013     + __le32 wmi_send_separate;
2014     + __le32 num_ocb_vdevs;
2015     + __le32 num_ocb_channels;
2016     + __le32 num_ocb_schedules;
2017     + __le32 host_capab;
2018     } __packed;
2019    
2020     struct wmi_tlv_init_cmd {
2021     diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
2022     index 8c5c2dd8fa7f..a7f506eb7b36 100644
2023     --- a/drivers/net/wireless/ath/ath9k/hw.c
2024     +++ b/drivers/net/wireless/ath/ath9k/hw.c
2025     @@ -2915,16 +2915,19 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
2026     struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
2027     struct ieee80211_channel *channel;
2028     int chan_pwr, new_pwr;
2029     + u16 ctl = NO_CTL;
2030    
2031     if (!chan)
2032     return;
2033    
2034     + if (!test)
2035     + ctl = ath9k_regd_get_ctl(reg, chan);
2036     +
2037     channel = chan->chan;
2038     chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
2039     new_pwr = min_t(int, chan_pwr, reg->power_limit);
2040    
2041     - ah->eep_ops->set_txpower(ah, chan,
2042     - ath9k_regd_get_ctl(reg, chan),
2043     + ah->eep_ops->set_txpower(ah, chan, ctl,
2044     get_antenna_gain(ah, chan), new_pwr, test);
2045     }
2046    
2047     diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
2048     index d8b041f48ca8..fa64c1cc94ae 100644
2049     --- a/drivers/net/wireless/ath/ath9k/xmit.c
2050     +++ b/drivers/net/wireless/ath/ath9k/xmit.c
2051     @@ -86,7 +86,8 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
2052     struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2053     struct ieee80211_sta *sta = info->status.status_driver_data[0];
2054    
2055     - if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
2056     + if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
2057     + IEEE80211_TX_STATUS_EOSP)) {
2058     ieee80211_tx_status(hw, skb);
2059     return;
2060     }
2061     diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
2062     index 0f15696195f8..078a4940bc5c 100644
2063     --- a/drivers/net/wireless/ti/wlcore/rx.c
2064     +++ b/drivers/net/wireless/ti/wlcore/rx.c
2065     @@ -59,7 +59,7 @@ static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
2066     static void wl1271_rx_status(struct wl1271 *wl,
2067     struct wl1271_rx_descriptor *desc,
2068     struct ieee80211_rx_status *status,
2069     - u8 beacon)
2070     + u8 beacon, u8 probe_rsp)
2071     {
2072     memset(status, 0, sizeof(struct ieee80211_rx_status));
2073    
2074     @@ -106,6 +106,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
2075     }
2076     }
2077    
2078     + if (beacon || probe_rsp)
2079     + status->boottime_ns = ktime_get_boot_ns();
2080     +
2081     if (beacon)
2082     wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
2083     status->band);
2084     @@ -191,7 +194,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
2085     if (ieee80211_is_data_present(hdr->frame_control))
2086     is_data = 1;
2087    
2088     - wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
2089     + wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon,
2090     + ieee80211_is_probe_resp(hdr->frame_control));
2091     wlcore_hw_set_rx_csum(wl, desc, skb);
2092    
2093     seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
2094     diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
2095     index af81b2dec42e..620f5b995a12 100644
2096     --- a/drivers/pci/switch/switchtec.c
2097     +++ b/drivers/pci/switch/switchtec.c
2098     @@ -24,6 +24,8 @@
2099     #include <linux/cdev.h>
2100     #include <linux/wait.h>
2101    
2102     +#include <linux/nospec.h>
2103     +
2104     MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
2105     MODULE_VERSION("0.1");
2106     MODULE_LICENSE("GPL");
2107     @@ -1173,6 +1175,8 @@ static int ioctl_port_to_pff(struct switchtec_dev *stdev,
2108     default:
2109     if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
2110     return -EINVAL;
2111     + p.port = array_index_nospec(p.port,
2112     + ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
2113     p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
2114     break;
2115     }
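The switchtec hunk above re-clamps the already-validated, 1-based port index with array_index_nospec() so a mispredicted bounds check cannot be used to speculatively index past dsp_pff_inst_id. A conceptual userspace sketch of such a clamp; this shows the idea, not the kernel's exact array_index_nospec() implementation, and a compiler may still emit a branch for the comparison.

#include <stdio.h>
#include <stddef.h>

/* After the ordinary bounds check, clamp the index again with
 * arithmetic the CPU cannot mispredict, so even a wrongly speculated
 * path stays inside the array. */
static size_t index_nospec(size_t idx, size_t size)
{
    size_t mask = (size_t)0 - (size_t)(idx < size); /* all-ones or 0 */

    return idx & mask;  /* idx when in range, 0 otherwise */
}

int main(void)
{
    int table[4] = { 10, 20, 30, 40 };
    size_t idx = 2;

    if (idx >= sizeof(table) / sizeof(table[0]))
        return 1;                       /* architectural check */
    idx = index_nospec(idx, 4);         /* speculation-safe clamp */
    printf("%d\n", table[idx]);
    return 0;
}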
2116     diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
2117     index 6e472691d8ee..17f2c5a505b2 100644
2118     --- a/drivers/pinctrl/freescale/pinctrl-imx.c
2119     +++ b/drivers/pinctrl/freescale/pinctrl-imx.c
2120     @@ -389,7 +389,7 @@ static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
2121     const char *name;
2122     int i, ret;
2123    
2124     - if (group > pctldev->num_groups)
2125     + if (group >= pctldev->num_groups)
2126     return;
2127    
2128     seq_printf(s, "\n");
2129     diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
2130     index 433af328d981..b78f42abff2f 100644
2131     --- a/drivers/pinctrl/pinctrl-amd.c
2132     +++ b/drivers/pinctrl/pinctrl-amd.c
2133     @@ -530,7 +530,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
2134     /* Each status bit covers four pins */
2135     for (i = 0; i < 4; i++) {
2136     regval = readl(regs + i);
2137     - if (!(regval & PIN_IRQ_PENDING))
2138     + if (!(regval & PIN_IRQ_PENDING) ||
2139     + !(regval & BIT(INTERRUPT_MASK_OFF)))
2140     continue;
2141     irq = irq_find_mapping(gc->irqdomain, irqnr + i);
2142     generic_handle_irq(irq);
2143     diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
2144     index dffa3aab7178..cec4c3223044 100644
2145     --- a/drivers/rpmsg/rpmsg_core.c
2146     +++ b/drivers/rpmsg/rpmsg_core.c
2147     @@ -23,6 +23,7 @@
2148     #include <linux/module.h>
2149     #include <linux/rpmsg.h>
2150     #include <linux/of_device.h>
2151     +#include <linux/pm_domain.h>
2152     #include <linux/slab.h>
2153    
2154     #include "rpmsg_internal.h"
2155     @@ -418,6 +419,10 @@ static int rpmsg_dev_probe(struct device *dev)
2156     struct rpmsg_endpoint *ept = NULL;
2157     int err;
2158    
2159     + err = dev_pm_domain_attach(dev, true);
2160     + if (err)
2161     + goto out;
2162     +
2163     if (rpdrv->callback) {
2164     strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
2165     chinfo.src = rpdev->src;
2166     @@ -459,6 +464,8 @@ static int rpmsg_dev_remove(struct device *dev)
2167    
2168     rpdrv->remove(rpdev);
2169    
2170     + dev_pm_domain_detach(dev, true);
2171     +
2172     if (rpdev->ept)
2173     rpmsg_destroy_ept(rpdev->ept);
2174    
2175     diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
2176     index a1388842e17e..dd342207095a 100644
2177     --- a/drivers/scsi/3w-9xxx.c
2178     +++ b/drivers/scsi/3w-9xxx.c
2179     @@ -2042,6 +2042,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2180    
2181     if (twa_initialize_device_extension(tw_dev)) {
2182     TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2183     + retval = -ENOMEM;
2184     goto out_free_device_extension;
2185     }
2186    
2187     @@ -2064,6 +2065,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2188     tw_dev->base_addr = ioremap(mem_addr, mem_len);
2189     if (!tw_dev->base_addr) {
2190     TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2191     + retval = -ENOMEM;
2192     goto out_release_mem_region;
2193     }
2194    
2195     @@ -2071,8 +2073,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2196     TW_DISABLE_INTERRUPTS(tw_dev);
2197    
2198     /* Initialize the card */
2199     - if (twa_reset_sequence(tw_dev, 0))
2200     + if (twa_reset_sequence(tw_dev, 0)) {
2201     + retval = -ENOMEM;
2202     goto out_iounmap;
2203     + }
2204    
2205     /* Set host specific parameters */
2206     if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2207     diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
2208     index b150e131b2e7..aa317d6909e8 100644
2209     --- a/drivers/scsi/3w-sas.c
2210     +++ b/drivers/scsi/3w-sas.c
2211     @@ -1597,6 +1597,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2212    
2213     if (twl_initialize_device_extension(tw_dev)) {
2214     TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
2215     + retval = -ENOMEM;
2216     goto out_free_device_extension;
2217     }
2218    
2219     @@ -1611,6 +1612,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2220     tw_dev->base_addr = pci_iomap(pdev, 1, 0);
2221     if (!tw_dev->base_addr) {
2222     TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
2223     + retval = -ENOMEM;
2224     goto out_release_mem_region;
2225     }
2226    
2227     @@ -1620,6 +1622,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2228     /* Initialize the card */
2229     if (twl_reset_sequence(tw_dev, 0)) {
2230     TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
2231     + retval = -ENOMEM;
2232     goto out_iounmap;
2233     }
2234    
2235     diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
2236     index f6179e3d6953..961ea6f7def8 100644
2237     --- a/drivers/scsi/3w-xxxx.c
2238     +++ b/drivers/scsi/3w-xxxx.c
2239     @@ -2280,6 +2280,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2240    
2241     if (tw_initialize_device_extension(tw_dev)) {
2242     printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension.");
2243     + retval = -ENOMEM;
2244     goto out_free_device_extension;
2245     }
2246    
2247     @@ -2294,6 +2295,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2248     tw_dev->base_addr = pci_resource_start(pdev, 0);
2249     if (!tw_dev->base_addr) {
2250     printk(KERN_WARNING "3w-xxxx: Failed to get io address.");
2251     + retval = -ENOMEM;
2252     goto out_release_mem_region;
2253     }
2254    
2255     diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
2256     index 8eb3f96fe068..bc61cc8bc6f0 100644
2257     --- a/drivers/scsi/lpfc/lpfc.h
2258     +++ b/drivers/scsi/lpfc/lpfc.h
2259     @@ -676,7 +676,7 @@ struct lpfc_hba {
2260     #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
2261     #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
2262     #define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */
2263     -#define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */
2264     +#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */
2265    
2266     uint32_t hba_flag; /* hba generic flags */
2267     #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
2268     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
2269     index e6d51135d105..0d0be7d8b9d6 100644
2270     --- a/drivers/target/target_core_transport.c
2271     +++ b/drivers/target/target_core_transport.c
2272     @@ -317,6 +317,7 @@ void __transport_register_session(
2273     {
2274     const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
2275     unsigned char buf[PR_REG_ISID_LEN];
2276     + unsigned long flags;
2277    
2278     se_sess->se_tpg = se_tpg;
2279     se_sess->fabric_sess_ptr = fabric_sess_ptr;
2280     @@ -353,7 +354,7 @@ void __transport_register_session(
2281     se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
2282     }
2283    
2284     - spin_lock_irq(&se_nacl->nacl_sess_lock);
2285     + spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
2286     /*
2287     * The se_nacl->nacl_sess pointer will be set to the
2288     * last active I_T Nexus for each struct se_node_acl.
2289     @@ -362,7 +363,7 @@ void __transport_register_session(
2290    
2291     list_add_tail(&se_sess->sess_acl_list,
2292     &se_nacl->acl_sess_list);
2293     - spin_unlock_irq(&se_nacl->nacl_sess_lock);
2294     + spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
2295     }
2296     list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
2297    
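The target_core hunk above switches to spin_lock_irqsave() because __transport_register_session() can be entered with interrupts already disabled; the plain _irq variant would unconditionally re-enable them on unlock. A toy model of the difference follows; nothing here is the real kernel primitive, it only mimics the interrupt-state bookkeeping.

#include <stdio.h>

/* Toy model: the _irq pair re-enables "interrupts" unconditionally on
 * unlock, while the irqsave pair restores whatever state the caller
 * had. */
static int irqs_enabled = 1;

static void lock_irq(void)           { irqs_enabled = 0; }
static void unlock_irq(void)         { irqs_enabled = 1; } /* unconditional */
static void lock_irqsave(int *flags) { *flags = irqs_enabled; irqs_enabled = 0; }
static void unlock_irqrestore(int f) { irqs_enabled = f; }

int main(void)
{
    int flags;

    irqs_enabled = 0;                  /* caller runs with irqs off */
    lock_irq();
    unlock_irq();
    printf("after _irq pair:    irqs_enabled=%d (wrongly re-enabled)\n",
           irqs_enabled);

    irqs_enabled = 0;
    lock_irqsave(&flags);
    unlock_irqrestore(flags);
    printf("after irqsave pair: irqs_enabled=%d (caller state kept)\n",
           irqs_enabled);
    return 0;
}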
2298     diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
2299     index 20d79a6007d5..070733ca94d5 100644
2300     --- a/drivers/tty/rocket.c
2301     +++ b/drivers/tty/rocket.c
2302     @@ -1894,7 +1894,7 @@ static __init int register_PCI(int i, struct pci_dev *dev)
2303     ByteIO_t UPCIRingInd = 0;
2304    
2305     if (!dev || !pci_match_id(rocket_pci_ids, dev) ||
2306     - pci_enable_device(dev))
2307     + pci_enable_device(dev) || i >= NUM_BOARDS)
2308     return 0;
2309    
2310     rcktpt_io_addr[i] = pci_resource_start(dev, 0);
2311     diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
2312     index ff04b7f8549f..41784798c789 100644
2313     --- a/drivers/uio/uio.c
2314     +++ b/drivers/uio/uio.c
2315     @@ -841,8 +841,6 @@ int __uio_register_device(struct module *owner,
2316     if (ret)
2317     goto err_uio_dev_add_attributes;
2318    
2319     - info->uio_dev = idev;
2320     -
2321     if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
2322     /*
2323     * Note that we deliberately don't use devm_request_irq
2324     @@ -858,6 +856,7 @@ int __uio_register_device(struct module *owner,
2325     goto err_request_irq;
2326     }
2327    
2328     + info->uio_dev = idev;
2329     return 0;
2330    
2331     err_request_irq:
2332     diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
2333     index 4737615f0eaa..ce696d6c4641 100644
2334     --- a/fs/autofs4/autofs_i.h
2335     +++ b/fs/autofs4/autofs_i.h
2336     @@ -26,6 +26,7 @@
2337     #include <linux/list.h>
2338     #include <linux/completion.h>
2339     #include <asm/current.h>
2340     +#include <linux/magic.h>
2341    
2342     /* This is the range of ioctl() numbers we claim as ours */
2343     #define AUTOFS_IOC_FIRST AUTOFS_IOC_READY
2344     @@ -124,7 +125,8 @@ struct autofs_sb_info {
2345    
2346     static inline struct autofs_sb_info *autofs4_sbi(struct super_block *sb)
2347     {
2348     - return (struct autofs_sb_info *)(sb->s_fs_info);
2349     + return sb->s_magic != AUTOFS_SUPER_MAGIC ?
2350     + NULL : (struct autofs_sb_info *)(sb->s_fs_info);
2351     }
2352    
2353     static inline struct autofs_info *autofs4_dentry_ino(struct dentry *dentry)
2354     diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
2355     index 09e7d68dff02..3c7e727612fa 100644
2356     --- a/fs/autofs4/inode.c
2357     +++ b/fs/autofs4/inode.c
2358     @@ -14,7 +14,6 @@
2359     #include <linux/pagemap.h>
2360     #include <linux/parser.h>
2361     #include <linux/bitops.h>
2362     -#include <linux/magic.h>
2363     #include "autofs_i.h"
2364     #include <linux/module.h>
2365    
2366     diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
2367     index 7303ba108112..a507c0d25354 100644
2368     --- a/fs/btrfs/ioctl.c
2369     +++ b/fs/btrfs/ioctl.c
2370     @@ -3158,6 +3158,25 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
2371    
2372     same_lock_start = min_t(u64, loff, dst_loff);
2373     same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
2374     + } else {
2375     + /*
2376     + * If the source and destination inodes are different, the
2377     + * source's range end offset matches the source's i_size, that
2378     + * i_size is not a multiple of the sector size, and the
2379     + * destination range does not go past the destination's i_size,
2380     + * we must round down the length to the nearest sector size
2381     + * multiple. If we don't do this adjustment we end replacing
2382     + * with zeroes the bytes in the range that starts at the
2383     + * deduplication range's end offset and ends at the next sector
2384     + * size multiple.
2385     + */
2386     + if (loff + olen == i_size_read(src) &&
2387     + dst_loff + len < i_size_read(dst)) {
2388     + const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
2389     +
2390     + len = round_down(i_size_read(src), sz) - loff;
2391     + olen = len;
2392     + }
2393     }
2394    
2395     /* don't make the dst file partly checksummed */
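The comment added in the btrfs hunk above explains the length adjustment in words; the arithmetic is easiest to see with concrete numbers. A standalone worked example, assuming a 4096-byte sector size (all values below are illustrative):

#include <stdio.h>
#include <stdint.h>

#define ROUND_DOWN(x, a) ((x) / (a) * (a))

/* Dedup range ends exactly at a non-sector-aligned source i_size, so
 * the length is trimmed back to the previous sector boundary instead
 * of zeroing the destination's unaligned tail. */
int main(void)
{
    uint64_t sectorsize = 4096;
    uint64_t src_isize  = 10000;            /* not a multiple of 4096 */
    uint64_t loff       = 4096;             /* range is [4096, 10000) */
    uint64_t len        = src_isize - loff; /* 5904, ends at i_size */

    printf("requested len = %llu\n", (unsigned long long)len);
    len = ROUND_DOWN(src_isize, sectorsize) - loff; /* 8192 - 4096 */
    printf("adjusted  len = %llu\n", (unsigned long long)len); /* 4096 */
    return 0;
}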
2396     diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
2397     index caf9cf91b825..2cd0b3053439 100644
2398     --- a/fs/cifs/inode.c
2399     +++ b/fs/cifs/inode.c
2400     @@ -467,6 +467,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
2401     oparms.cifs_sb = cifs_sb;
2402     oparms.desired_access = GENERIC_READ;
2403     oparms.create_options = CREATE_NOT_DIR;
2404     + if (backup_cred(cifs_sb))
2405     + oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
2406     oparms.disposition = FILE_OPEN;
2407     oparms.path = path;
2408     oparms.fid = &fid;
2409     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2410     index e9f246fe9d80..759cbbf7b1af 100644
2411     --- a/fs/cifs/smb2ops.c
2412     +++ b/fs/cifs/smb2ops.c
2413     @@ -385,7 +385,10 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
2414     oparms.tcon = tcon;
2415     oparms.desired_access = FILE_READ_ATTRIBUTES;
2416     oparms.disposition = FILE_OPEN;
2417     - oparms.create_options = 0;
2418     + if (backup_cred(cifs_sb))
2419     + oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2420     + else
2421     + oparms.create_options = 0;
2422     oparms.fid = &fid;
2423     oparms.reconnect = false;
2424    
2425     @@ -534,7 +537,10 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
2426     oparms.tcon = tcon;
2427     oparms.desired_access = FILE_READ_EA;
2428     oparms.disposition = FILE_OPEN;
2429     - oparms.create_options = 0;
2430     + if (backup_cred(cifs_sb))
2431     + oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2432     + else
2433     + oparms.create_options = 0;
2434     oparms.fid = &fid;
2435     oparms.reconnect = false;
2436    
2437     @@ -613,7 +619,10 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
2438     oparms.tcon = tcon;
2439     oparms.desired_access = FILE_WRITE_EA;
2440     oparms.disposition = FILE_OPEN;
2441     - oparms.create_options = 0;
2442     + if (backup_cred(cifs_sb))
2443     + oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2444     + else
2445     + oparms.create_options = 0;
2446     oparms.fid = &fid;
2447     oparms.reconnect = false;
2448    
2449     @@ -1215,7 +1224,10 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
2450     oparms.tcon = tcon;
2451     oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
2452     oparms.disposition = FILE_OPEN;
2453     - oparms.create_options = 0;
2454     + if (backup_cred(cifs_sb))
2455     + oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2456     + else
2457     + oparms.create_options = 0;
2458     oparms.fid = fid;
2459     oparms.reconnect = false;
2460    
2461     @@ -1491,7 +1503,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
2462     oparms.tcon = tcon;
2463     oparms.desired_access = FILE_READ_ATTRIBUTES;
2464     oparms.disposition = FILE_OPEN;
2465     - oparms.create_options = 0;
2466     + if (backup_cred(cifs_sb))
2467     + oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2468     + else
2469     + oparms.create_options = 0;
2470     oparms.fid = &fid;
2471     oparms.reconnect = false;
2472    
2473     @@ -3200,7 +3215,7 @@ struct smb_version_values smb21_values = {
2474     struct smb_version_values smb3any_values = {
2475     .version_string = SMB3ANY_VERSION_STRING,
2476     .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
2477     - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
2478     + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
2479     .large_lock_type = 0,
2480     .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
2481     .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
2482     @@ -3220,7 +3235,7 @@ struct smb_version_values smb3any_values = {
2483     struct smb_version_values smbdefault_values = {
2484     .version_string = SMBDEFAULT_VERSION_STRING,
2485     .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
2486     - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
2487     + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
2488     .large_lock_type = 0,
2489     .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
2490     .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
2491     @@ -3240,7 +3255,7 @@ struct smb_version_values smbdefault_values = {
2492     struct smb_version_values smb30_values = {
2493     .version_string = SMB30_VERSION_STRING,
2494     .protocol_id = SMB30_PROT_ID,
2495     - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
2496     + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
2497     .large_lock_type = 0,
2498     .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
2499     .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
2500     @@ -3260,7 +3275,7 @@ struct smb_version_values smb30_values = {
2501     struct smb_version_values smb302_values = {
2502     .version_string = SMB302_VERSION_STRING,
2503     .protocol_id = SMB302_PROT_ID,
2504     - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
2505     + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
2506     .large_lock_type = 0,
2507     .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
2508     .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
2509     @@ -3281,7 +3296,7 @@ struct smb_version_values smb302_values = {
2510     struct smb_version_values smb311_values = {
2511     .version_string = SMB311_VERSION_STRING,
2512     .protocol_id = SMB311_PROT_ID,
2513     - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
2514     + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
2515     .large_lock_type = 0,
2516     .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
2517     .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
2518     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
2519     index 58842b36481d..078ec705a5cc 100644
2520     --- a/fs/cifs/smb2pdu.c
2521     +++ b/fs/cifs/smb2pdu.c
2522     @@ -1816,6 +1816,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
2523     if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
2524     *oplock == SMB2_OPLOCK_LEVEL_NONE)
2525     req->RequestedOplockLevel = *oplock;
2526     + else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
2527     + (oparms->create_options & CREATE_NOT_FILE))
2528     + req->RequestedOplockLevel = *oplock; /* no srv lease support */
2529     else {
2530     rc = add_lease_context(server, iov, &n_iov, oplock);
2531     if (rc) {
2532     diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
2533     index 3b34004a71c1..54f8520ad7a2 100644
2534     --- a/fs/f2fs/f2fs.h
2535     +++ b/fs/f2fs/f2fs.h
2536     @@ -1766,8 +1766,13 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
2537     pgoff_t index, bool for_write)
2538     {
2539     #ifdef CONFIG_F2FS_FAULT_INJECTION
2540     - struct page *page = find_lock_page(mapping, index);
2541     + struct page *page;
2542    
2543     + if (!for_write)
2544     + page = find_get_page_flags(mapping, index,
2545     + FGP_LOCK | FGP_ACCESSED);
2546     + else
2547     + page = find_lock_page(mapping, index);
2548     if (page)
2549     return page;
2550    
2551     diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
2552     index 87e654c53c31..6f589730782d 100644
2553     --- a/fs/f2fs/file.c
2554     +++ b/fs/f2fs/file.c
2555     @@ -1803,7 +1803,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2556     struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2557     struct super_block *sb = sbi->sb;
2558     __u32 in;
2559     - int ret;
2560     + int ret = 0;
2561    
2562     if (!capable(CAP_SYS_ADMIN))
2563     return -EPERM;
2564     diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
2565     index f2f897cd23c9..f22884418e92 100644
2566     --- a/fs/f2fs/gc.c
2567     +++ b/fs/f2fs/gc.c
2568     @@ -958,7 +958,13 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
2569     goto next;
2570    
2571     sum = page_address(sum_page);
2572     - f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
2573     + if (type != GET_SUM_TYPE((&sum->footer))) {
2574     + f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
2575     + "type [%d, %d] in SSA and SIT",
2576     + segno, type, GET_SUM_TYPE((&sum->footer)));
2577     + set_sbi_flag(sbi, SBI_NEED_FSCK);
2578     + goto next;
2579     + }
2580    
2581     /*
2582     * this is to avoid deadlock:
2583     diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
2584     index 8322e4e7bb3f..888a9dc13677 100644
2585     --- a/fs/f2fs/inline.c
2586     +++ b/fs/f2fs/inline.c
2587     @@ -128,6 +128,16 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
2588     if (err)
2589     return err;
2590    
2591     + if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
2592     + f2fs_put_dnode(dn);
2593     + set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
2594     + f2fs_msg(fio.sbi->sb, KERN_WARNING,
2595     + "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
2596     + "run fsck to fix.",
2597     + __func__, dn->inode->i_ino, dn->data_blkaddr);
2598     + return -EINVAL;
2599     + }
2600     +
2601     f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
2602    
2603     read_inline_data(page, dn->inode_page);
2604     @@ -365,6 +375,17 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
2605     if (err)
2606     goto out;
2607    
2608     + if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
2609     + f2fs_put_dnode(&dn);
2610     + set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
2611     + f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING,
2612     + "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
2613     + "run fsck to fix.",
2614     + __func__, dir->i_ino, dn.data_blkaddr);
2615     + err = -EINVAL;
2616     + goto out;
2617     + }
2618     +
2619     f2fs_wait_on_page_writeback(page, DATA, true);
2620     zero_user_segment(page, MAX_INLINE_DATA(dir), PAGE_SIZE);
2621    
2622     @@ -481,6 +502,7 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
2623     return 0;
2624     recover:
2625     lock_page(ipage);
2626     + f2fs_wait_on_page_writeback(ipage, NODE, true);
2627     memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
2628     f2fs_i_depth_write(dir, 0);
2629     f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
2630     diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
2631     index f623da26159f..712505ec5de4 100644
2632     --- a/fs/f2fs/node.c
2633     +++ b/fs/f2fs/node.c
2634     @@ -1610,7 +1610,9 @@ next_step:
2635     !is_cold_node(page)))
2636     continue;
2637     lock_node:
2638     - if (!trylock_page(page))
2639     + if (wbc->sync_mode == WB_SYNC_ALL)
2640     + lock_page(page);
2641     + else if (!trylock_page(page))
2642     continue;
2643    
2644     if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
2645     diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
2646     index 39ada30889b6..4dfb5080098f 100644
2647     --- a/fs/f2fs/segment.h
2648     +++ b/fs/f2fs/segment.h
2649     @@ -414,6 +414,8 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
2650     if (test_and_clear_bit(segno, free_i->free_segmap)) {
2651     free_i->free_segments++;
2652    
2653     + if (IS_CURSEC(sbi, secno))
2654     + goto skip_free;
2655     next = find_next_bit(free_i->free_segmap,
2656     start_segno + sbi->segs_per_sec, start_segno);
2657     if (next >= start_segno + sbi->segs_per_sec) {
2658     @@ -421,6 +423,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
2659     free_i->free_sections++;
2660     }
2661     }
2662     +skip_free:
2663     spin_unlock(&free_i->segmap_lock);
2664     }
2665    
2666     diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
2667     index 400c00058bad..eae35909fa51 100644
2668     --- a/fs/f2fs/super.c
2669     +++ b/fs/f2fs/super.c
2670     @@ -1883,12 +1883,17 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
2671     struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2672     unsigned int ovp_segments, reserved_segments;
2673     unsigned int main_segs, blocks_per_seg;
2674     + unsigned int sit_segs, nat_segs;
2675     + unsigned int sit_bitmap_size, nat_bitmap_size;
2676     + unsigned int log_blocks_per_seg;
2677     int i;
2678    
2679     total = le32_to_cpu(raw_super->segment_count);
2680     fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
2681     - fsmeta += le32_to_cpu(raw_super->segment_count_sit);
2682     - fsmeta += le32_to_cpu(raw_super->segment_count_nat);
2683     + sit_segs = le32_to_cpu(raw_super->segment_count_sit);
2684     + fsmeta += sit_segs;
2685     + nat_segs = le32_to_cpu(raw_super->segment_count_nat);
2686     + fsmeta += nat_segs;
2687     fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
2688     fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
2689    
2690     @@ -1919,6 +1924,18 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
2691     return 1;
2692     }
2693    
2694     + sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
2695     + nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
2696     + log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
2697     +
2698     + if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
2699     + nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
2700     + f2fs_msg(sbi->sb, KERN_ERR,
2701     + "Wrong bitmap size: sit: %u, nat:%u",
2702     + sit_bitmap_size, nat_bitmap_size);
2703     + return 1;
2704     + }
2705     +
2706     if (unlikely(f2fs_cp_error(sbi))) {
2707     f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
2708     return 1;
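The bitmap check added to sanity_check_ckpt() above derives the expected SIT/NAT bitmap sizes from the superblock: half of the segments hold the live copy, each segment carries 2^log_blocks_per_seg blocks, and the bitmap spends one bit per block (eight per byte). A worked instance of the same formula with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

/* Expected bitmap size = ((segs / 2) << log_blocks_per_seg) / 8,
 * exactly as the hunk above computes it. */
int main(void)
{
    uint32_t sit_segs = 4;
    uint32_t log_blocks_per_seg = 9;    /* 512 blocks per segment */
    uint32_t expected = ((sit_segs / 2) << log_blocks_per_seg) / 8;

    printf("expected sit_ver_bitmap_bytesize = %u\n", expected); /* 128 */
    return 0;
}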
2709     diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
2710     index e2c258f717cd..93af9d7dfcdc 100644
2711     --- a/fs/f2fs/sysfs.c
2712     +++ b/fs/f2fs/sysfs.c
2713     @@ -9,6 +9,7 @@
2714     * it under the terms of the GNU General Public License version 2 as
2715     * published by the Free Software Foundation.
2716     */
2717     +#include <linux/compiler.h>
2718     #include <linux/proc_fs.h>
2719     #include <linux/f2fs_fs.h>
2720     #include <linux/seq_file.h>
2721     @@ -381,7 +382,8 @@ static struct kobject f2fs_feat = {
2722     .kset = &f2fs_kset,
2723     };
2724    
2725     -static int segment_info_seq_show(struct seq_file *seq, void *offset)
2726     +static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
2727     + void *offset)
2728     {
2729     struct super_block *sb = seq->private;
2730     struct f2fs_sb_info *sbi = F2FS_SB(sb);
2731     @@ -408,7 +410,8 @@ static int segment_info_seq_show(struct seq_file *seq, void *offset)
2732     return 0;
2733     }
2734    
2735     -static int segment_bits_seq_show(struct seq_file *seq, void *offset)
2736     +static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
2737     + void *offset)
2738     {
2739     struct super_block *sb = seq->private;
2740     struct f2fs_sb_info *sbi = F2FS_SB(sb);
2741     @@ -432,7 +435,8 @@ static int segment_bits_seq_show(struct seq_file *seq, void *offset)
2742     return 0;
2743     }
2744    
2745     -static int iostat_info_seq_show(struct seq_file *seq, void *offset)
2746     +static int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
2747     + void *offset)
2748     {
2749     struct super_block *sb = seq->private;
2750     struct f2fs_sb_info *sbi = F2FS_SB(sb);
2751     diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
2752     index 2c3f398995f6..b8d55da2f04d 100644
2753     --- a/fs/nfs/callback_proc.c
2754     +++ b/fs/nfs/callback_proc.c
2755     @@ -213,9 +213,9 @@ static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
2756     {
2757     u32 oldseq, newseq;
2758    
2759     - /* Is the stateid still not initialised? */
2760     + /* Is the stateid not initialised? */
2761     if (!pnfs_layout_is_valid(lo))
2762     - return NFS4ERR_DELAY;
2763     + return NFS4ERR_NOMATCHING_LAYOUT;
2764    
2765     /* Mismatched stateid? */
2766     if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
2767     diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
2768     index 123c069429a7..57de914630bc 100644
2769     --- a/fs/nfs/callback_xdr.c
2770     +++ b/fs/nfs/callback_xdr.c
2771     @@ -904,16 +904,21 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
2772    
2773     if (hdr_arg.minorversion == 0) {
2774     cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
2775     - if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
2776     + if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) {
2777     + if (cps.clp)
2778     + nfs_put_client(cps.clp);
2779     goto out_invalidcred;
2780     + }
2781     }
2782    
2783     cps.minorversion = hdr_arg.minorversion;
2784     hdr_res.taglen = hdr_arg.taglen;
2785     hdr_res.tag = hdr_arg.tag;
2786     - if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
2787     + if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) {
2788     + if (cps.clp)
2789     + nfs_put_client(cps.clp);
2790     return rpc_system_err;
2791     -
2792     + }
2793     while (status == 0 && nops != hdr_arg.nops) {
2794     status = process_op(nops, rqstp, &xdr_in,
2795     rqstp->rq_argp, &xdr_out, rqstp->rq_resp,
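[Editor's note — illustrative sketch, not part of the patch: the two hunks above fix a client reference leak by ensuring every early-exit path taken after a successful nfs4_find_client_ident() drops the reference. A minimal user-space analog of that get/put discipline, with hypothetical get_client()/put_client() helpers:

	#include <stdlib.h>

	struct client { int refs; };

	static struct client *get_client(void)
	{
		struct client *c = calloc(1, sizeof(*c));
		if (c)
			c->refs = 1;
		return c;
	}

	static void put_client(struct client *c)
	{
		if (c && --c->refs == 0)
			free(c);
	}

	static int handle_request(int fail_early)
	{
		struct client *clp = get_client();

		if (!clp)
			return -1;
		if (fail_early) {
			put_client(clp);	/* the leak fixed above: drop on error, too */
			return -1;
		}
		/* ... normal processing ... */
		put_client(clp);
		return 0;
	}

	int main(void)
	{
		return handle_request(1) ? 1 : 0;
	}
]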
2796     diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
2797     index 9f0bb908e2b5..e41ef532c4ce 100644
2798     --- a/include/linux/mm_types.h
2799     +++ b/include/linux/mm_types.h
2800     @@ -354,7 +354,7 @@ struct kioctx_table;
2801     struct mm_struct {
2802     struct vm_area_struct *mmap; /* list of VMAs */
2803     struct rb_root mm_rb;
2804     - u32 vmacache_seqnum; /* per-thread vmacache */
2805     + u64 vmacache_seqnum; /* per-thread vmacache */
2806     #ifdef CONFIG_MMU
2807     unsigned long (*get_unmapped_area) (struct file *filp,
2808     unsigned long addr, unsigned long len,
2809     diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
2810     index 5fe87687664c..d7016dcb245e 100644
2811     --- a/include/linux/mm_types_task.h
2812     +++ b/include/linux/mm_types_task.h
2813     @@ -32,7 +32,7 @@
2814     #define VMACACHE_MASK (VMACACHE_SIZE - 1)
2815    
2816     struct vmacache {
2817     - u32 seqnum;
2818     + u64 seqnum;
2819     struct vm_area_struct *vmas[VMACACHE_SIZE];
2820     };
2821    
2822     diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
2823     index 7fd514f36e74..a4be6388a980 100644
2824     --- a/include/linux/rhashtable.h
2825     +++ b/include/linux/rhashtable.h
2826     @@ -152,25 +152,25 @@ struct rhashtable_params {
2827     /**
2828     * struct rhashtable - Hash table handle
2829     * @tbl: Bucket table
2830     - * @nelems: Number of elements in table
2831     * @key_len: Key length for hashfn
2832     - * @p: Configuration parameters
2833     * @max_elems: Maximum number of elements in table
2834     + * @p: Configuration parameters
2835     * @rhlist: True if this is an rhltable
2836     * @run_work: Deferred worker to expand/shrink asynchronously
2837     * @mutex: Mutex to protect current/future table swapping
2838     * @lock: Spin lock to protect walker list
2839     + * @nelems: Number of elements in table
2840     */
2841     struct rhashtable {
2842     struct bucket_table __rcu *tbl;
2843     - atomic_t nelems;
2844     unsigned int key_len;
2845     - struct rhashtable_params p;
2846     unsigned int max_elems;
2847     + struct rhashtable_params p;
2848     bool rhlist;
2849     struct work_struct run_work;
2850     struct mutex mutex;
2851     spinlock_t lock;
2852     + atomic_t nelems;
2853     };
2854    
2855     /**
2856     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
2857     index 6dd77767fd5b..f64e88444082 100644
2858     --- a/include/linux/skbuff.h
2859     +++ b/include/linux/skbuff.h
2860     @@ -663,21 +663,26 @@ struct sk_buff {
2861     struct sk_buff *prev;
2862    
2863     union {
2864     - ktime_t tstamp;
2865     - u64 skb_mstamp;
2866     + struct net_device *dev;
2867     + /* Some protocols might use this space to store information,
2868     + * while device pointer would be NULL.
2869     + * UDP receive path is one user.
2870     + */
2871     + unsigned long dev_scratch;
2872     };
2873     };
2874     - struct rb_node rbnode; /* used in netem & tcp stack */
2875     + struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
2876     + struct list_head list;
2877     };
2878     - struct sock *sk;
2879    
2880     union {
2881     - struct net_device *dev;
2882     - /* Some protocols might use this space to store information,
2883     - * while device pointer would be NULL.
2884     - * UDP receive path is one user.
2885     - */
2886     - unsigned long dev_scratch;
2887     + struct sock *sk;
2888     + int ip_defrag_offset;
2889     + };
2890     +
2891     + union {
2892     + ktime_t tstamp;
2893     + u64 skb_mstamp;
2894     };
2895     /*
2896     * This is the control buffer. It is free to use for every
2897     @@ -2580,7 +2585,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
2898     kfree_skb(skb);
2899     }
2900    
2901     -void skb_rbtree_purge(struct rb_root *root);
2902     +unsigned int skb_rbtree_purge(struct rb_root *root);
2903    
2904     void *netdev_alloc_frag(unsigned int fragsz);
2905    
2906     @@ -3134,6 +3139,7 @@ static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
2907     return skb->data;
2908     }
2909    
2910     +int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
2911     /**
2912     * pskb_trim_rcsum - trim received skb and update checksum
2913     * @skb: buffer to trim
2914     @@ -3147,9 +3153,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2915     {
2916     if (likely(len >= skb->len))
2917     return 0;
2918     - if (skb->ip_summed == CHECKSUM_COMPLETE)
2919     - skb->ip_summed = CHECKSUM_NONE;
2920     - return __pskb_trim(skb, len);
2921     + return pskb_trim_rcsum_slow(skb, len);
2922     }
2923    
2924     static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2925     @@ -3169,6 +3173,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
2926    
2927     #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
2928    
2929     +#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
2930     +#define skb_rb_first(root) rb_to_skb(rb_first(root))
2931     +#define skb_rb_last(root) rb_to_skb(rb_last(root))
2932     +#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
2933     +#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
2934     +
2935     #define skb_queue_walk(queue, skb) \
2936     for (skb = (queue)->next; \
2937     skb != (struct sk_buff *)(queue); \
2938     @@ -3183,6 +3193,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
2939     for (; skb != (struct sk_buff *)(queue); \
2940     skb = skb->next)
2941    
2942     +#define skb_rbtree_walk(skb, root) \
2943     + for (skb = skb_rb_first(root); skb != NULL; \
2944     + skb = skb_rb_next(skb))
2945     +
2946     +#define skb_rbtree_walk_from(skb) \
2947     + for (; skb != NULL; \
2948     + skb = skb_rb_next(skb))
2949     +
2950     +#define skb_rbtree_walk_from_safe(skb, tmp) \
2951     + for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
2952     + skb = tmp)
2953     +
2954     #define skb_queue_walk_from_safe(queue, skb, tmp) \
2955     for (tmp = skb->next; \
2956     skb != (struct sk_buff *)(queue); \
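[Editor's note — illustrative sketch, not part of the patch: the skb_rb_*() and skb_rbtree_walk*() helpers added above let rb-tree-backed skb queues be traversed like the existing list walkers. A kernel-style fragment (hypothetical function; not compilable in isolation) using only the macros introduced here:

	static unsigned int count_queued(struct rb_root *root)
	{
		struct sk_buff *skb;
		unsigned int n = 0;

		/* Visits skbs in rb_first()..rb_next() order. */
		skb_rbtree_walk(skb, root)
			n++;
		return n;
	}

skb_rbtree_walk_from_safe() is the variant to use when the body may erase the current skb, since it fetches the next node before the body runs.]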
2957     diff --git a/include/linux/tpm.h b/include/linux/tpm.h
2958     index 2a6c3d96b31f..7f7b29f86c59 100644
2959     --- a/include/linux/tpm.h
2960     +++ b/include/linux/tpm.h
2961     @@ -48,6 +48,8 @@ struct tpm_class_ops {
2962     u8 (*status) (struct tpm_chip *chip);
2963     bool (*update_timeouts)(struct tpm_chip *chip,
2964     unsigned long *timeout_cap);
2965     + int (*go_idle)(struct tpm_chip *chip);
2966     + int (*cmd_ready)(struct tpm_chip *chip);
2967     int (*request_locality)(struct tpm_chip *chip, int loc);
2968     int (*relinquish_locality)(struct tpm_chip *chip, int loc);
2969     void (*clk_enable)(struct tpm_chip *chip, bool value);
2970     diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
2971     index 5c7f010676a7..47a3441cf4c4 100644
2972     --- a/include/linux/vm_event_item.h
2973     +++ b/include/linux/vm_event_item.h
2974     @@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
2975     #ifdef CONFIG_DEBUG_VM_VMACACHE
2976     VMACACHE_FIND_CALLS,
2977     VMACACHE_FIND_HITS,
2978     - VMACACHE_FULL_FLUSHES,
2979     #endif
2980     #ifdef CONFIG_SWAP
2981     SWAP_RA,
2982     diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
2983     index a5b3aa8d281f..a09b28f76460 100644
2984     --- a/include/linux/vmacache.h
2985     +++ b/include/linux/vmacache.h
2986     @@ -16,7 +16,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
2987     memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
2988     }
2989    
2990     -extern void vmacache_flush_all(struct mm_struct *mm);
2991     extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
2992     extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
2993     unsigned long addr);
2994     @@ -30,10 +29,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
2995     static inline void vmacache_invalidate(struct mm_struct *mm)
2996     {
2997     mm->vmacache_seqnum++;
2998     -
2999     - /* deal with overflows */
3000     - if (unlikely(mm->vmacache_seqnum == 0))
3001     - vmacache_flush_all(mm);
3002     }
3003    
3004     #endif /* __LINUX_VMACACHE_H */
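[Editor's note — illustrative arithmetic, not part of the patch: widening vmacache_seqnum from u32 to u64 makes the wraparound that vmacache_flush_all() guarded against unreachable in practice, which is why the hunks above can delete it. A quick user-space check of the bound:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Assume one invalidation per nanosecond, a generous upper bound. */
		uint64_t per_sec = 1000000000ULL;
		uint64_t secs = UINT64_MAX / per_sec;

		/* Prints ~584: years until a u64 counter wraps at this rate. */
		printf("%llu years\n",
		       (unsigned long long)(secs / (365ULL * 24 * 3600)));
		return 0;
	}
]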
3005     diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
3006     index a6e4edd8d4a2..335cf7851f12 100644
3007     --- a/include/net/inet_frag.h
3008     +++ b/include/net/inet_frag.h
3009     @@ -2,14 +2,20 @@
3010     #ifndef __NET_FRAG_H__
3011     #define __NET_FRAG_H__
3012    
3013     +#include <linux/rhashtable.h>
3014     +
3015     struct netns_frags {
3016     - /* Keep atomic mem on separate cachelines in structs that include it */
3017     - atomic_t mem ____cacheline_aligned_in_smp;
3018     /* sysctls */
3019     + long high_thresh;
3020     + long low_thresh;
3021     int timeout;
3022     - int high_thresh;
3023     - int low_thresh;
3024     int max_dist;
3025     + struct inet_frags *f;
3026     +
3027     + struct rhashtable rhashtable ____cacheline_aligned_in_smp;
3028     +
3029     + /* Keep atomic mem on separate cachelines in structs that include it */
3030     + atomic_long_t mem ____cacheline_aligned_in_smp;
3031     };
3032    
3033     /**
3034     @@ -25,130 +31,115 @@ enum {
3035     INET_FRAG_COMPLETE = BIT(2),
3036     };
3037    
3038     +struct frag_v4_compare_key {
3039     + __be32 saddr;
3040     + __be32 daddr;
3041     + u32 user;
3042     + u32 vif;
3043     + __be16 id;
3044     + u16 protocol;
3045     +};
3046     +
3047     +struct frag_v6_compare_key {
3048     + struct in6_addr saddr;
3049     + struct in6_addr daddr;
3050     + u32 user;
3051     + __be32 id;
3052     + u32 iif;
3053     +};
3054     +
3055     /**
3056     * struct inet_frag_queue - fragment queue
3057     *
3058     - * @lock: spinlock protecting the queue
3059     + * @node: rhash node
3060     + * @key: keys identifying this frag.
3061     * @timer: queue expiration timer
3062     - * @list: hash bucket list
3063     + * @lock: spinlock protecting this frag
3064     * @refcnt: reference count of the queue
3065     * @fragments: received fragments head
3066     + * @rb_fragments: received fragments rb-tree root
3067     * @fragments_tail: received fragments tail
3068     + * @last_run_head: the head of the last "run". see ip_fragment.c
3069     * @stamp: timestamp of the last received fragment
3070     * @len: total length of the original datagram
3071     * @meat: length of received fragments so far
3072     * @flags: fragment queue flags
3073     * @max_size: maximum received fragment size
3074     * @net: namespace that this frag belongs to
3075     - * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
3076     + * @rcu: rcu head for freeing deferral

3077     */
3078     struct inet_frag_queue {
3079     - spinlock_t lock;
3080     + struct rhash_head node;
3081     + union {
3082     + struct frag_v4_compare_key v4;
3083     + struct frag_v6_compare_key v6;
3084     + } key;
3085     struct timer_list timer;
3086     - struct hlist_node list;
3087     + spinlock_t lock;
3088     refcount_t refcnt;
3089     - struct sk_buff *fragments;
3090     + struct sk_buff *fragments; /* Used in IPv6. */
3091     + struct rb_root rb_fragments; /* Used in IPv4. */
3092     struct sk_buff *fragments_tail;
3093     + struct sk_buff *last_run_head;
3094     ktime_t stamp;
3095     int len;
3096     int meat;
3097     __u8 flags;
3098     u16 max_size;
3099     - struct netns_frags *net;
3100     - struct hlist_node list_evictor;
3101     -};
3102     -
3103     -#define INETFRAGS_HASHSZ 1024
3104     -
3105     -/* averaged:
3106     - * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
3107     - * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
3108     - * struct frag_queue))
3109     - */
3110     -#define INETFRAGS_MAXDEPTH 128
3111     -
3112     -struct inet_frag_bucket {
3113     - struct hlist_head chain;
3114     - spinlock_t chain_lock;
3115     + struct netns_frags *net;
3116     + struct rcu_head rcu;
3117     };
3118    
3119     struct inet_frags {
3120     - struct inet_frag_bucket hash[INETFRAGS_HASHSZ];
3121     -
3122     - struct work_struct frags_work;
3123     - unsigned int next_bucket;
3124     - unsigned long last_rebuild_jiffies;
3125     - bool rebuild;
3126     -
3127     - /* The first call to hashfn is responsible to initialize
3128     - * rnd. This is best done with net_get_random_once.
3129     - *
3130     - * rnd_seqlock is used to let hash insertion detect
3131     - * when it needs to re-lookup the hash chain to use.
3132     - */
3133     - u32 rnd;
3134     - seqlock_t rnd_seqlock;
3135     unsigned int qsize;
3136    
3137     - unsigned int (*hashfn)(const struct inet_frag_queue *);
3138     - bool (*match)(const struct inet_frag_queue *q,
3139     - const void *arg);
3140     void (*constructor)(struct inet_frag_queue *q,
3141     const void *arg);
3142     void (*destructor)(struct inet_frag_queue *);
3143     - void (*frag_expire)(unsigned long data);
3144     + void (*frag_expire)(struct timer_list *t);
3145     struct kmem_cache *frags_cachep;
3146     const char *frags_cache_name;
3147     + struct rhashtable_params rhash_params;
3148     };
3149    
3150     int inet_frags_init(struct inet_frags *);
3151     void inet_frags_fini(struct inet_frags *);
3152    
3153     -static inline void inet_frags_init_net(struct netns_frags *nf)
3154     +static inline int inet_frags_init_net(struct netns_frags *nf)
3155     {
3156     - atomic_set(&nf->mem, 0);
3157     + atomic_long_set(&nf->mem, 0);
3158     + return rhashtable_init(&nf->rhashtable, &nf->f->rhash_params);
3159     }
3160     -void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
3161     +void inet_frags_exit_net(struct netns_frags *nf);
3162    
3163     -void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
3164     -void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
3165     -struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
3166     - struct inet_frags *f, void *key, unsigned int hash);
3167     +void inet_frag_kill(struct inet_frag_queue *q);
3168     +void inet_frag_destroy(struct inet_frag_queue *q);
3169     +struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);
3170    
3171     -void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
3172     - const char *prefix);
3173     +/* Free all skbs in the queue; return the sum of their truesizes. */
3174     +unsigned int inet_frag_rbtree_purge(struct rb_root *root);
3175    
3176     -static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
3177     +static inline void inet_frag_put(struct inet_frag_queue *q)
3178     {
3179     if (refcount_dec_and_test(&q->refcnt))
3180     - inet_frag_destroy(q, f);
3181     -}
3182     -
3183     -static inline bool inet_frag_evicting(struct inet_frag_queue *q)
3184     -{
3185     - return !hlist_unhashed(&q->list_evictor);
3186     + inet_frag_destroy(q);
3187     }
3188    
3189     /* Memory Tracking Functions. */
3190    
3191     -static inline int frag_mem_limit(struct netns_frags *nf)
3192     -{
3193     - return atomic_read(&nf->mem);
3194     -}
3195     -
3196     -static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
3197     +static inline long frag_mem_limit(const struct netns_frags *nf)
3198     {
3199     - atomic_sub(i, &nf->mem);
3200     + return atomic_long_read(&nf->mem);
3201     }
3202    
3203     -static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
3204     +static inline void sub_frag_mem_limit(struct netns_frags *nf, long val)
3205     {
3206     - atomic_add(i, &nf->mem);
3207     + atomic_long_sub(val, &nf->mem);
3208     }
3209    
3210     -static inline int sum_frag_mem_limit(struct netns_frags *nf)
3211     +static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
3212     {
3213     - return atomic_read(&nf->mem);
3214     + atomic_long_add(val, &nf->mem);
3215     }
3216    
3217     /* RFC 3168 support :
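[Editor's note — illustrative sketch, not part of the patch: inet_frags_init_net() now returns int because rhashtable_init() can fail, so every per-netns user must check the result and unwind on later errors. The shape each protocol follows after this rework (hypothetical example_frags and example_sysctl_register; the lowpan and ipv4 hunks later in this patch do the same):

	static int __net_init example_frags_init_net(struct net *net)
	{
		struct netns_frags *nf = &net->ipv4.frags;	/* any netns_frags */
		int res;

		nf->high_thresh = 4 * 1024 * 1024;
		nf->low_thresh  = 3 * 1024 * 1024;
		nf->f = &example_frags;		/* must be set before init */

		res = inet_frags_init_net(nf);	/* rhashtable_init() may fail */
		if (res < 0)
			return res;
		res = example_sysctl_register(net);
		if (res < 0)
			inet_frags_exit_net(nf);	/* unwind the rhashtable */
		return res;
	}
]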
3218     diff --git a/include/net/ip.h b/include/net/ip.h
3219     index 81da1123fc8e..7c430343176a 100644
3220     --- a/include/net/ip.h
3221     +++ b/include/net/ip.h
3222     @@ -570,7 +570,6 @@ static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *s
3223     return skb;
3224     }
3225     #endif
3226     -int ip_frag_mem(struct net *net);
3227    
3228     /*
3229     * Functions provided by ip_forward.c
3230     diff --git a/include/net/ipv6.h b/include/net/ipv6.h
3231     index f280c61e019a..fa87a62e9bd3 100644
3232     --- a/include/net/ipv6.h
3233     +++ b/include/net/ipv6.h
3234     @@ -331,13 +331,6 @@ static inline bool ipv6_accept_ra(struct inet6_dev *idev)
3235     idev->cnf.accept_ra;
3236     }
3237    
3238     -#if IS_ENABLED(CONFIG_IPV6)
3239     -static inline int ip6_frag_mem(struct net *net)
3240     -{
3241     - return sum_frag_mem_limit(&net->ipv6.frags);
3242     -}
3243     -#endif
3244     -
3245     #define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */
3246     #define IPV6_FRAG_LOW_THRESH (3 * 1024*1024) /* 3145728 */
3247     #define IPV6_FRAG_TIMEOUT (60 * HZ) /* 60 seconds */
3248     @@ -531,17 +524,8 @@ enum ip6_defrag_users {
3249     __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
3250     };
3251    
3252     -struct ip6_create_arg {
3253     - __be32 id;
3254     - u32 user;
3255     - const struct in6_addr *src;
3256     - const struct in6_addr *dst;
3257     - int iif;
3258     - u8 ecn;
3259     -};
3260     -
3261     void ip6_frag_init(struct inet_frag_queue *q, const void *a);
3262     -bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);
3263     +extern const struct rhashtable_params ip6_rhash_params;
3264    
3265     /*
3266     * Equivalent of ipv4 struct ip
3267     @@ -549,19 +533,13 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);
3268     struct frag_queue {
3269     struct inet_frag_queue q;
3270    
3271     - __be32 id; /* fragment id */
3272     - u32 user;
3273     - struct in6_addr saddr;
3274     - struct in6_addr daddr;
3275     -
3276     int iif;
3277     unsigned int csum;
3278     __u16 nhoffset;
3279     u8 ecn;
3280     };
3281    
3282     -void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
3283     - struct inet_frags *frags);
3284     +void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);
3285    
3286     static inline bool ipv6_addr_any(const struct in6_addr *a)
3287     {
3288     diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
3289     index ac71559314e7..9eae13eefc49 100644
3290     --- a/include/uapi/linux/ethtool.h
3291     +++ b/include/uapi/linux/ethtool.h
3292     @@ -898,13 +898,13 @@ struct ethtool_rx_flow_spec {
3293     static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
3294     {
3295     return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
3296     -};
3297     +}
3298    
3299     static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
3300     {
3301     return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
3302     ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
3303     -};
3304     +}
3305    
3306     /**
3307     * struct ethtool_rxnfc - command to get or set RX flow classification rules
3308     diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
3309     index 0d941cdd8e8c..f5d753e60836 100644
3310     --- a/include/uapi/linux/snmp.h
3311     +++ b/include/uapi/linux/snmp.h
3312     @@ -56,6 +56,7 @@ enum
3313     IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */
3314     IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
3315     IPSTATS_MIB_CEPKTS, /* InCEPkts */
3316     + IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */
3317     __IPSTATS_MIB_MAX
3318     };
3319    
3320     diff --git a/kernel/cpu.c b/kernel/cpu.c
3321     index 8f02f9b6e046..f3f389e33343 100644
3322     --- a/kernel/cpu.c
3323     +++ b/kernel/cpu.c
3324     @@ -612,15 +612,15 @@ static void cpuhp_thread_fun(unsigned int cpu)
3325     bool bringup = st->bringup;
3326     enum cpuhp_state state;
3327    
3328     + if (WARN_ON_ONCE(!st->should_run))
3329     + return;
3330     +
3331     /*
3332     * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
3333     * that if we see ->should_run we also see the rest of the state.
3334     */
3335     smp_mb();
3336    
3337     - if (WARN_ON_ONCE(!st->should_run))
3338     - return;
3339     -
3340     cpuhp_lock_acquire(bringup);
3341    
3342     if (st->single) {
3343     @@ -932,7 +932,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
3344     ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
3345     if (ret) {
3346     st->target = prev_state;
3347     - undo_cpu_down(cpu, st);
3348     + if (st->state < prev_state)
3349     + undo_cpu_down(cpu, st);
3350     break;
3351     }
3352     }
3353     @@ -985,7 +986,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
3354     * to do the further cleanups.
3355     */
3356     ret = cpuhp_down_callbacks(cpu, st, target);
3357     - if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
3358     + if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
3359     cpuhp_reset_state(st, prev_state);
3360     __cpuhp_kick_ap(st);
3361     }
3362     diff --git a/kernel/time/timer.c b/kernel/time/timer.c
3363     index 9fe525f410bf..f17c76a1a05f 100644
3364     --- a/kernel/time/timer.c
3365     +++ b/kernel/time/timer.c
3366     @@ -1609,6 +1609,22 @@ static inline void __run_timers(struct timer_base *base)
3367    
3368     raw_spin_lock_irq(&base->lock);
3369    
3370     + /*
3371     + * timer_base::must_forward_clk must be cleared before running
3372     + * timers so that any timer functions that call mod_timer() will
3373     + * not try to forward the base. Idle tracking / clock forwarding
3374     + * logic is only used with BASE_STD timers.
3375     + *
3376     + * The must_forward_clk flag is cleared unconditionally also for
3377     + * the deferrable base. The deferrable base is not affected by idle
3378     + * tracking and never forwarded, so clearing the flag is a NOOP.
3379     + *
3380     + * The fact that the deferrable base is never forwarded can cause
3381     + * large variations in granularity for deferrable timers, but they
3382     + * can be deferred for long periods due to idle anyway.
3383     + */
3384     + base->must_forward_clk = false;
3385     +
3386     while (time_after_eq(jiffies, base->clk)) {
3387    
3388     levels = collect_expired_timers(base, heads);
3389     @@ -1628,19 +1644,6 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
3390     {
3391     struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
3392    
3393     - /*
3394     - * must_forward_clk must be cleared before running timers so that any
3395     - * timer functions that call mod_timer will not try to forward the
3396     - * base. idle trcking / clock forwarding logic is only used with
3397     - * BASE_STD timers.
3398     - *
3399     - * The deferrable base does not do idle tracking at all, so we do
3400     - * not forward it. This can result in very large variations in
3401     - * granularity for deferrable timers, but they can be deferred for
3402     - * long periods due to idle.
3403     - */
3404     - base->must_forward_clk = false;
3405     -
3406     __run_timers(base);
3407     if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
3408     __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
3409     diff --git a/lib/rhashtable.c b/lib/rhashtable.c
3410     index 39215c724fc7..cebbcec877d7 100644
3411     --- a/lib/rhashtable.c
3412     +++ b/lib/rhashtable.c
3413     @@ -364,6 +364,7 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
3414     err = rhashtable_rehash_chain(ht, old_hash);
3415     if (err)
3416     return err;
3417     + cond_resched();
3418     }
3419    
3420     /* Publish the new table pointer. */
3421     @@ -1073,6 +1074,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
3422     for (i = 0; i < tbl->size; i++) {
3423     struct rhash_head *pos, *next;
3424    
3425     + cond_resched();
3426     for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
3427     next = !rht_is_a_nulls(pos) ?
3428     rht_dereference(pos->next, ht) : NULL;
3429     diff --git a/mm/debug.c b/mm/debug.c
3430     index 6726bec731c9..c55abc893fdc 100644
3431     --- a/mm/debug.c
3432     +++ b/mm/debug.c
3433     @@ -100,7 +100,7 @@ EXPORT_SYMBOL(dump_vma);
3434    
3435     void dump_mm(const struct mm_struct *mm)
3436     {
3437     - pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
3438     + pr_emerg("mm %p mmap %p seqnum %llu task_size %lu\n"
3439     #ifdef CONFIG_MMU
3440     "get_unmapped_area %p\n"
3441     #endif
3442     @@ -128,7 +128,7 @@ void dump_mm(const struct mm_struct *mm)
3443     "tlb_flush_pending %d\n"
3444     "def_flags: %#lx(%pGv)\n",
3445    
3446     - mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
3447     + mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
3448     #ifdef CONFIG_MMU
3449     mm->get_unmapped_area,
3450     #endif
3451     diff --git a/mm/vmacache.c b/mm/vmacache.c
3452     index db7596eb6132..f1729617dc85 100644
3453     --- a/mm/vmacache.c
3454     +++ b/mm/vmacache.c
3455     @@ -7,44 +7,6 @@
3456     #include <linux/mm.h>
3457     #include <linux/vmacache.h>
3458    
3459     -/*
3460     - * Flush vma caches for threads that share a given mm.
3461     - *
3462     - * The operation is safe because the caller holds the mmap_sem
3463     - * exclusively and other threads accessing the vma cache will
3464     - * have mmap_sem held at least for read, so no extra locking
3465     - * is required to maintain the vma cache.
3466     - */
3467     -void vmacache_flush_all(struct mm_struct *mm)
3468     -{
3469     - struct task_struct *g, *p;
3470     -
3471     - count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
3472     -
3473     - /*
3474     - * Single threaded tasks need not iterate the entire
3475     - * list of process. We can avoid the flushing as well
3476     - * since the mm's seqnum was increased and don't have
3477     - * to worry about other threads' seqnum. Current's
3478     - * flush will occur upon the next lookup.
3479     - */
3480     - if (atomic_read(&mm->mm_users) == 1)
3481     - return;
3482     -
3483     - rcu_read_lock();
3484     - for_each_process_thread(g, p) {
3485     - /*
3486     - * Only flush the vmacache pointers as the
3487     - * mm seqnum is already set and curr's will
3488     - * be set upon invalidation when the next
3489     - * lookup is done.
3490     - */
3491     - if (mm == p->mm)
3492     - vmacache_flush(p);
3493     - }
3494     - rcu_read_unlock();
3495     -}
3496     -
3497     /*
3498     * This task may be accessing a foreign mm via (for example)
3499     * get_user_pages()->find_vma(). The vmacache is task-local and this
3500     diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
3501     index cef3754408d4..b21fcc838784 100644
3502     --- a/net/bluetooth/hidp/core.c
3503     +++ b/net/bluetooth/hidp/core.c
3504     @@ -775,7 +775,7 @@ static int hidp_setup_hid(struct hidp_session *session,
3505     hid->version = req->version;
3506     hid->country = req->country;
3507    
3508     - strncpy(hid->name, req->name, sizeof(req->name) - 1);
3509     + strncpy(hid->name, req->name, sizeof(hid->name));
3510    
3511     snprintf(hid->phys, sizeof(hid->phys), "%pMR",
3512     &l2cap_pi(session->ctrl_sock->sk)->chan->src);
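[Editor's note — illustrative sketch, not part of the patch: the hunk above bounds strncpy() by the destination buffer rather than the source, which is what prevents overflowing hid->name. Keep in mind strncpy() does not NUL-terminate when the source fills the buffer; a user-space sketch of the safe pattern:

	#include <stdio.h>
	#include <string.h>

	#define NAME_LEN 16

	int main(void)
	{
		char src[64] = "a-rather-long-device-name";
		char dst[NAME_LEN];

		/* Bound by the destination, then terminate explicitly. */
		strncpy(dst, src, sizeof(dst) - 1);
		dst[sizeof(dst) - 1] = '\0';
		printf("%s\n", dst);
		return 0;
	}
]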
3513     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
3514     index 2e5eeba97de9..168a3e8883d4 100644
3515     --- a/net/core/skbuff.c
3516     +++ b/net/core/skbuff.c
3517     @@ -1839,6 +1839,20 @@ done:
3518     }
3519     EXPORT_SYMBOL(___pskb_trim);
3520    
3521     +/* Note : use pskb_trim_rcsum() instead of calling this directly
3522     + */
3523     +int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
3524     +{
3525     + if (skb->ip_summed == CHECKSUM_COMPLETE) {
3526     + int delta = skb->len - len;
3527     +
3528     + skb->csum = csum_sub(skb->csum,
3529     + skb_checksum(skb, len, delta, 0));
3530     + }
3531     + return __pskb_trim(skb, len);
3532     +}
3533     +EXPORT_SYMBOL(pskb_trim_rcsum_slow);
3534     +
3535     /**
3536     * __pskb_pull_tail - advance tail of skb header
3537     * @skb: buffer to reallocate
3538     @@ -2842,20 +2856,27 @@ EXPORT_SYMBOL(skb_queue_purge);
3539     /**
3540     * skb_rbtree_purge - empty a skb rbtree
3541     * @root: root of the rbtree to empty
3542     + * Return value: the sum of truesizes of all purged skbs.
3543     *
3544     * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
3545     * the list and one reference dropped. This function does not take
3546     * any lock. Synchronization should be handled by the caller (e.g., TCP
3547     * out-of-order queue is protected by the socket lock).
3548     */
3549     -void skb_rbtree_purge(struct rb_root *root)
3550     +unsigned int skb_rbtree_purge(struct rb_root *root)
3551     {
3552     - struct sk_buff *skb, *next;
3553     + struct rb_node *p = rb_first(root);
3554     + unsigned int sum = 0;
3555    
3556     - rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode)
3557     - kfree_skb(skb);
3558     + while (p) {
3559     + struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
3560    
3561     - *root = RB_ROOT;
3562     + p = rb_next(p);
3563     + rb_erase(&skb->rbnode, root);
3564     + sum += skb->truesize;
3565     + kfree_skb(skb);
3566     + }
3567     + return sum;
3568     }
3569    
3570     /**
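[Editor's note — illustrative sketch, not part of the patch: skb_rbtree_purge() now reports the total truesize it released, letting callers settle memory accounting in a single update instead of walking the tree twice. A kernel-style fragment of the intended use (hypothetical function; inet_frag_destroy() later in this patch does the equivalent via inet_frag_rbtree_purge()):

	static void drop_frag_queue(struct rb_root *root, struct netns_frags *nf)
	{
		unsigned int freed = skb_rbtree_purge(root);

		sub_frag_mem_limit(nf, freed);	/* one update, exact amount */
	}
]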
3571     diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
3572     index bae7d78aa068..fbeacbc2be5d 100644
3573     --- a/net/dcb/dcbnl.c
3574     +++ b/net/dcb/dcbnl.c
3575     @@ -1765,7 +1765,7 @@ static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
3576     if (itr->app.selector == app->selector &&
3577     itr->app.protocol == app->protocol &&
3578     itr->ifindex == ifindex &&
3579     - (!prio || itr->app.priority == prio))
3580     + ((prio == -1) || itr->app.priority == prio))
3581     return itr;
3582     }
3583    
3584     @@ -1800,7 +1800,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
3585     u8 prio = 0;
3586    
3587     spin_lock_bh(&dcb_lock);
3588     - if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
3589     + itr = dcb_app_lookup(app, dev->ifindex, -1);
3590     + if (itr)
3591     prio = itr->app.priority;
3592     spin_unlock_bh(&dcb_lock);
3593    
3594     @@ -1828,7 +1829,8 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
3595    
3596     spin_lock_bh(&dcb_lock);
3597     /* Search for existing match and replace */
3598     - if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
3599     + itr = dcb_app_lookup(new, dev->ifindex, -1);
3600     + if (itr) {
3601     if (new->priority)
3602     itr->app.priority = new->priority;
3603     else {
3604     @@ -1861,7 +1863,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
3605     u8 prio = 0;
3606    
3607     spin_lock_bh(&dcb_lock);
3608     - if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
3609     + itr = dcb_app_lookup(app, dev->ifindex, -1);
3610     + if (itr)
3611     prio |= 1 << itr->app.priority;
3612     spin_unlock_bh(&dcb_lock);
3613    
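[Editor's note — illustrative sketch, not part of the patch: priority 0 is a valid 802.1p value, so using 0 as the "match any" marker made dcb_app_lookup() unable to distinguish "no priority given" from a real priority of 0; -1 lies outside the valid 0..7 range and works as a sentinel. A user-space sketch of the distinction:

	#include <stdio.h>

	/* Valid priorities are 0..7; -1 means "match any priority". */
	static int match(int entry_prio, int wanted_prio)
	{
		return wanted_prio == -1 || entry_prio == wanted_prio;
	}

	int main(void)
	{
		printf("prio 0, want any: %d\n", match(0, -1));	/* 1: now matches */
		printf("prio 0, want 0:   %d\n", match(0, 0));	/* 1 */
		printf("prio 3, want 0:   %d\n", match(3, 0));	/* 0 */
		return 0;
	}
]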
3614     diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h
3615     index d8de3bcfb103..b8d95cb71c25 100644
3616     --- a/net/ieee802154/6lowpan/6lowpan_i.h
3617     +++ b/net/ieee802154/6lowpan/6lowpan_i.h
3618     @@ -17,37 +17,19 @@ typedef unsigned __bitwise lowpan_rx_result;
3619     #define LOWPAN_DISPATCH_FRAG1 0xc0
3620     #define LOWPAN_DISPATCH_FRAGN 0xe0
3621    
3622     -struct lowpan_create_arg {
3623     +struct frag_lowpan_compare_key {
3624     u16 tag;
3625     u16 d_size;
3626     - const struct ieee802154_addr *src;
3627     - const struct ieee802154_addr *dst;
3628     + const struct ieee802154_addr src;
3629     + const struct ieee802154_addr dst;
3630     };
3631    
3632     -/* Equivalent of ipv4 struct ip
3633     +/* Equivalent of ipv4 struct ipq
3634     */
3635     struct lowpan_frag_queue {
3636     struct inet_frag_queue q;
3637     -
3638     - u16 tag;
3639     - u16 d_size;
3640     - struct ieee802154_addr saddr;
3641     - struct ieee802154_addr daddr;
3642     };
3643    
3644     -static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
3645     -{
3646     - switch (a->mode) {
3647     - case IEEE802154_ADDR_LONG:
3648     - return (((__force u64)a->extended_addr) >> 32) ^
3649     - (((__force u64)a->extended_addr) & 0xffffffff);
3650     - case IEEE802154_ADDR_SHORT:
3651     - return (__force u32)(a->short_addr + (a->pan_id << 16));
3652     - default:
3653     - return 0;
3654     - }
3655     -}
3656     -
3657     int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
3658     void lowpan_net_frag_exit(void);
3659     int lowpan_net_frag_init(void);
3660     diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
3661     index f85b08baff16..1790b65944b3 100644
3662     --- a/net/ieee802154/6lowpan/reassembly.c
3663     +++ b/net/ieee802154/6lowpan/reassembly.c
3664     @@ -37,55 +37,24 @@ static struct inet_frags lowpan_frags;
3665     static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
3666     struct sk_buff *prev, struct net_device *ldev);
3667    
3668     -static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
3669     - const struct ieee802154_addr *saddr,
3670     - const struct ieee802154_addr *daddr)
3671     -{
3672     - net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
3673     - return jhash_3words(ieee802154_addr_hash(saddr),
3674     - ieee802154_addr_hash(daddr),
3675     - (__force u32)(tag + (d_size << 16)),
3676     - lowpan_frags.rnd);
3677     -}
3678     -
3679     -static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
3680     -{
3681     - const struct lowpan_frag_queue *fq;
3682     -
3683     - fq = container_of(q, struct lowpan_frag_queue, q);
3684     - return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
3685     -}
3686     -
3687     -static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
3688     -{
3689     - const struct lowpan_frag_queue *fq;
3690     - const struct lowpan_create_arg *arg = a;
3691     -
3692     - fq = container_of(q, struct lowpan_frag_queue, q);
3693     - return fq->tag == arg->tag && fq->d_size == arg->d_size &&
3694     - ieee802154_addr_equal(&fq->saddr, arg->src) &&
3695     - ieee802154_addr_equal(&fq->daddr, arg->dst);
3696     -}
3697     -
3698     static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
3699     {
3700     - const struct lowpan_create_arg *arg = a;
3701     + const struct frag_lowpan_compare_key *key = a;
3702     struct lowpan_frag_queue *fq;
3703    
3704     fq = container_of(q, struct lowpan_frag_queue, q);
3705    
3706     - fq->tag = arg->tag;
3707     - fq->d_size = arg->d_size;
3708     - fq->saddr = *arg->src;
3709     - fq->daddr = *arg->dst;
3710     + BUILD_BUG_ON(sizeof(*key) > sizeof(q->key));
3711     + memcpy(&q->key, key, sizeof(*key));
3712     }
3713    
3714     -static void lowpan_frag_expire(unsigned long data)
3715     +static void lowpan_frag_expire(struct timer_list *t)
3716     {
3717     + struct inet_frag_queue *frag = from_timer(frag, t, timer);
3718     struct frag_queue *fq;
3719     struct net *net;
3720    
3721     - fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
3722     + fq = container_of(frag, struct frag_queue, q);
3723     net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);
3724    
3725     spin_lock(&fq->q.lock);
3726     @@ -93,10 +62,10 @@ static void lowpan_frag_expire(unsigned long data)
3727     if (fq->q.flags & INET_FRAG_COMPLETE)
3728     goto out;
3729    
3730     - inet_frag_kill(&fq->q, &lowpan_frags);
3731     + inet_frag_kill(&fq->q);
3732     out:
3733     spin_unlock(&fq->q.lock);
3734     - inet_frag_put(&fq->q, &lowpan_frags);
3735     + inet_frag_put(&fq->q);
3736     }
3737    
3738     static inline struct lowpan_frag_queue *
3739     @@ -104,25 +73,20 @@ fq_find(struct net *net, const struct lowpan_802154_cb *cb,
3740     const struct ieee802154_addr *src,
3741     const struct ieee802154_addr *dst)
3742     {
3743     - struct inet_frag_queue *q;
3744     - struct lowpan_create_arg arg;
3745     - unsigned int hash;
3746     struct netns_ieee802154_lowpan *ieee802154_lowpan =
3747     net_ieee802154_lowpan(net);
3748     + struct frag_lowpan_compare_key key = {
3749     + .tag = cb->d_tag,
3750     + .d_size = cb->d_size,
3751     + .src = *src,
3752     + .dst = *dst,
3753     + };
3754     + struct inet_frag_queue *q;
3755    
3756     - arg.tag = cb->d_tag;
3757     - arg.d_size = cb->d_size;
3758     - arg.src = src;
3759     - arg.dst = dst;
3760     -
3761     - hash = lowpan_hash_frag(cb->d_tag, cb->d_size, src, dst);
3762     -
3763     - q = inet_frag_find(&ieee802154_lowpan->frags,
3764     - &lowpan_frags, &arg, hash);
3765     - if (IS_ERR_OR_NULL(q)) {
3766     - inet_frag_maybe_warn_overflow(q, pr_fmt());
3767     + q = inet_frag_find(&ieee802154_lowpan->frags, &key);
3768     + if (!q)
3769     return NULL;
3770     - }
3771     +
3772     return container_of(q, struct lowpan_frag_queue, q);
3773     }
3774    
3775     @@ -229,7 +193,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
3776     struct sk_buff *fp, *head = fq->q.fragments;
3777     int sum_truesize;
3778    
3779     - inet_frag_kill(&fq->q, &lowpan_frags);
3780     + inet_frag_kill(&fq->q);
3781    
3782     /* Make the one we just received the head. */
3783     if (prev) {
3784     @@ -437,7 +401,7 @@ int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
3785     ret = lowpan_frag_queue(fq, skb, frag_type);
3786     spin_unlock(&fq->q.lock);
3787    
3788     - inet_frag_put(&fq->q, &lowpan_frags);
3789     + inet_frag_put(&fq->q);
3790     return ret;
3791     }
3792    
3793     @@ -447,24 +411,22 @@ err:
3794     }
3795    
3796     #ifdef CONFIG_SYSCTL
3797     -static int zero;
3798    
3799     static struct ctl_table lowpan_frags_ns_ctl_table[] = {
3800     {
3801     .procname = "6lowpanfrag_high_thresh",
3802     .data = &init_net.ieee802154_lowpan.frags.high_thresh,
3803     - .maxlen = sizeof(int),
3804     + .maxlen = sizeof(unsigned long),
3805     .mode = 0644,
3806     - .proc_handler = proc_dointvec_minmax,
3807     + .proc_handler = proc_doulongvec_minmax,
3808     .extra1 = &init_net.ieee802154_lowpan.frags.low_thresh
3809     },
3810     {
3811     .procname = "6lowpanfrag_low_thresh",
3812     .data = &init_net.ieee802154_lowpan.frags.low_thresh,
3813     - .maxlen = sizeof(int),
3814     + .maxlen = sizeof(unsigned long),
3815     .mode = 0644,
3816     - .proc_handler = proc_dointvec_minmax,
3817     - .extra1 = &zero,
3818     + .proc_handler = proc_doulongvec_minmax,
3819     .extra2 = &init_net.ieee802154_lowpan.frags.high_thresh
3820     },
3821     {
3822     @@ -580,14 +542,20 @@ static int __net_init lowpan_frags_init_net(struct net *net)
3823     {
3824     struct netns_ieee802154_lowpan *ieee802154_lowpan =
3825     net_ieee802154_lowpan(net);
3826     + int res;
3827    
3828     ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
3829     ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
3830     ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
3831     + ieee802154_lowpan->frags.f = &lowpan_frags;
3832    
3833     - inet_frags_init_net(&ieee802154_lowpan->frags);
3834     -
3835     - return lowpan_frags_ns_sysctl_register(net);
3836     + res = inet_frags_init_net(&ieee802154_lowpan->frags);
3837     + if (res < 0)
3838     + return res;
3839     + res = lowpan_frags_ns_sysctl_register(net);
3840     + if (res < 0)
3841     + inet_frags_exit_net(&ieee802154_lowpan->frags);
3842     + return res;
3843     }
3844    
3845     static void __net_exit lowpan_frags_exit_net(struct net *net)
3846     @@ -596,7 +564,7 @@ static void __net_exit lowpan_frags_exit_net(struct net *net)
3847     net_ieee802154_lowpan(net);
3848    
3849     lowpan_frags_ns_sysctl_unregister(net);
3850     - inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
3851     + inet_frags_exit_net(&ieee802154_lowpan->frags);
3852     }
3853    
3854     static struct pernet_operations lowpan_frags_ops = {
3855     @@ -604,32 +572,63 @@ static struct pernet_operations lowpan_frags_ops = {
3856     .exit = lowpan_frags_exit_net,
3857     };
3858    
3859     -int __init lowpan_net_frag_init(void)
3860     +static u32 lowpan_key_hashfn(const void *data, u32 len, u32 seed)
3861     {
3862     - int ret;
3863     + return jhash2(data,
3864     + sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
3865     +}
3866    
3867     - ret = lowpan_frags_sysctl_register();
3868     - if (ret)
3869     - return ret;
3870     +static u32 lowpan_obj_hashfn(const void *data, u32 len, u32 seed)
3871     +{
3872     + const struct inet_frag_queue *fq = data;
3873    
3874     - ret = register_pernet_subsys(&lowpan_frags_ops);
3875     - if (ret)
3876     - goto err_pernet;
3877     + return jhash2((const u32 *)&fq->key,
3878     + sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
3879     +}
3880     +
3881     +static int lowpan_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
3882     +{
3883     + const struct frag_lowpan_compare_key *key = arg->key;
3884     + const struct inet_frag_queue *fq = ptr;
3885     +
3886     + return !!memcmp(&fq->key, key, sizeof(*key));
3887     +}
3888     +
3889     +static const struct rhashtable_params lowpan_rhash_params = {
3890     + .head_offset = offsetof(struct inet_frag_queue, node),
3891     + .hashfn = lowpan_key_hashfn,
3892     + .obj_hashfn = lowpan_obj_hashfn,
3893     + .obj_cmpfn = lowpan_obj_cmpfn,
3894     + .automatic_shrinking = true,
3895     +};
3896     +
3897     +int __init lowpan_net_frag_init(void)
3898     +{
3899     + int ret;
3900    
3901     - lowpan_frags.hashfn = lowpan_hashfn;
3902     lowpan_frags.constructor = lowpan_frag_init;
3903     lowpan_frags.destructor = NULL;
3904     lowpan_frags.qsize = sizeof(struct frag_queue);
3905     - lowpan_frags.match = lowpan_frag_match;
3906     lowpan_frags.frag_expire = lowpan_frag_expire;
3907     lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
3908     + lowpan_frags.rhash_params = lowpan_rhash_params;
3909     ret = inet_frags_init(&lowpan_frags);
3910     if (ret)
3911     - goto err_pernet;
3912     + goto out;
3913    
3914     + ret = lowpan_frags_sysctl_register();
3915     + if (ret)
3916     + goto err_sysctl;
3917     +
3918     + ret = register_pernet_subsys(&lowpan_frags_ops);
3919     + if (ret)
3920     + goto err_pernet;
3921     +out:
3922     return ret;
3923     err_pernet:
3924     lowpan_frags_sysctl_unregister();
3925     +err_sysctl:
3926     + inet_frags_fini(&lowpan_frags);
3927     return ret;
3928     }
3929    
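[Editor's note — illustrative sketch, not part of the patch: the jhash2()-based hash functions above read the key as an array of u32, so the compare key must occupy a whole number of 32-bit words and be fully initialized, or equal keys can hash differently; the BUILD_BUG_ON() in lowpan_frag_init separately guards its size against inet_frag_queue::key. A kernel-style sketch of the word-size invariant (hypothetical function; not compilable in isolation):

	static u32 example_key_hashfn(const void *data, u32 len, u32 seed)
	{
		/* jhash2() walks u32 words: the key struct must be a
		 * multiple of four bytes with no uninitialized padding. */
		BUILD_BUG_ON(sizeof(struct frag_lowpan_compare_key) % sizeof(u32));
		return jhash2(data,
			      sizeof(struct frag_lowpan_compare_key) / sizeof(u32),
			      seed);
	}
]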
3930     diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
3931     index ba4454ecdf0f..f6764537148c 100644
3932     --- a/net/ipv4/inet_fragment.c
3933     +++ b/net/ipv4/inet_fragment.c
3934     @@ -25,12 +25,6 @@
3935     #include <net/inet_frag.h>
3936     #include <net/inet_ecn.h>
3937    
3938     -#define INETFRAGS_EVICT_BUCKETS 128
3939     -#define INETFRAGS_EVICT_MAX 512
3940     -
3941     -/* don't rebuild inetfrag table with new secret more often than this */
3942     -#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)
3943     -
3944     /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
3945     * Value : 0xff if frame should be dropped.
3946     * 0 or INET_ECN_CE value, to be ORed in to final iph->tos field
3947     @@ -52,157 +46,8 @@ const u8 ip_frag_ecn_table[16] = {
3948     };
3949     EXPORT_SYMBOL(ip_frag_ecn_table);
3950    
3951     -static unsigned int
3952     -inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
3953     -{
3954     - return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
3955     -}
3956     -
3957     -static bool inet_frag_may_rebuild(struct inet_frags *f)
3958     -{
3959     - return time_after(jiffies,
3960     - f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
3961     -}
3962     -
3963     -static void inet_frag_secret_rebuild(struct inet_frags *f)
3964     -{
3965     - int i;
3966     -
3967     - write_seqlock_bh(&f->rnd_seqlock);
3968     -
3969     - if (!inet_frag_may_rebuild(f))
3970     - goto out;
3971     -
3972     - get_random_bytes(&f->rnd, sizeof(u32));
3973     -
3974     - for (i = 0; i < INETFRAGS_HASHSZ; i++) {
3975     - struct inet_frag_bucket *hb;
3976     - struct inet_frag_queue *q;
3977     - struct hlist_node *n;
3978     -
3979     - hb = &f->hash[i];
3980     - spin_lock(&hb->chain_lock);
3981     -
3982     - hlist_for_each_entry_safe(q, n, &hb->chain, list) {
3983     - unsigned int hval = inet_frag_hashfn(f, q);
3984     -
3985     - if (hval != i) {
3986     - struct inet_frag_bucket *hb_dest;
3987     -
3988     - hlist_del(&q->list);
3989     -
3990     - /* Relink to new hash chain. */
3991     - hb_dest = &f->hash[hval];
3992     -
3993     - /* This is the only place where we take
3994     - * another chain_lock while already holding
3995     - * one. As this will not run concurrently,
3996     - * we cannot deadlock on hb_dest lock below, if its
3997     - * already locked it will be released soon since
3998     - * other caller cannot be waiting for hb lock
3999     - * that we've taken above.
4000     - */
4001     - spin_lock_nested(&hb_dest->chain_lock,
4002     - SINGLE_DEPTH_NESTING);
4003     - hlist_add_head(&q->list, &hb_dest->chain);
4004     - spin_unlock(&hb_dest->chain_lock);
4005     - }
4006     - }
4007     - spin_unlock(&hb->chain_lock);
4008     - }
4009     -
4010     - f->rebuild = false;
4011     - f->last_rebuild_jiffies = jiffies;
4012     -out:
4013     - write_sequnlock_bh(&f->rnd_seqlock);
4014     -}
4015     -
4016     -static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
4017     -{
4018     - if (!hlist_unhashed(&q->list_evictor))
4019     - return false;
4020     -
4021     - return q->net->low_thresh == 0 ||
4022     - frag_mem_limit(q->net) >= q->net->low_thresh;
4023     -}
4024     -
4025     -static unsigned int
4026     -inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
4027     -{
4028     - struct inet_frag_queue *fq;
4029     - struct hlist_node *n;
4030     - unsigned int evicted = 0;
4031     - HLIST_HEAD(expired);
4032     -
4033     - spin_lock(&hb->chain_lock);
4034     -
4035     - hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
4036     - if (!inet_fragq_should_evict(fq))
4037     - continue;
4038     -
4039     - if (!del_timer(&fq->timer))
4040     - continue;
4041     -
4042     - hlist_add_head(&fq->list_evictor, &expired);
4043     - ++evicted;
4044     - }
4045     -
4046     - spin_unlock(&hb->chain_lock);
4047     -
4048     - hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
4049     - f->frag_expire((unsigned long) fq);
4050     -
4051     - return evicted;
4052     -}
4053     -
4054     -static void inet_frag_worker(struct work_struct *work)
4055     -{
4056     - unsigned int budget = INETFRAGS_EVICT_BUCKETS;
4057     - unsigned int i, evicted = 0;
4058     - struct inet_frags *f;
4059     -
4060     - f = container_of(work, struct inet_frags, frags_work);
4061     -
4062     - BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);
4063     -
4064     - local_bh_disable();
4065     -
4066     - for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
4067     - evicted += inet_evict_bucket(f, &f->hash[i]);
4068     - i = (i + 1) & (INETFRAGS_HASHSZ - 1);
4069     - if (evicted > INETFRAGS_EVICT_MAX)
4070     - break;
4071     - }
4072     -
4073     - f->next_bucket = i;
4074     -
4075     - local_bh_enable();
4076     -
4077     - if (f->rebuild && inet_frag_may_rebuild(f))
4078     - inet_frag_secret_rebuild(f);
4079     -}
4080     -
4081     -static void inet_frag_schedule_worker(struct inet_frags *f)
4082     -{
4083     - if (unlikely(!work_pending(&f->frags_work)))
4084     - schedule_work(&f->frags_work);
4085     -}
4086     -
4087     int inet_frags_init(struct inet_frags *f)
4088     {
4089     - int i;
4090     -
4091     - INIT_WORK(&f->frags_work, inet_frag_worker);
4092     -
4093     - for (i = 0; i < INETFRAGS_HASHSZ; i++) {
4094     - struct inet_frag_bucket *hb = &f->hash[i];
4095     -
4096     - spin_lock_init(&hb->chain_lock);
4097     - INIT_HLIST_HEAD(&hb->chain);
4098     - }
4099     -
4100     - seqlock_init(&f->rnd_seqlock);
4101     - f->last_rebuild_jiffies = 0;
4102     f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
4103     NULL);
4104     if (!f->frags_cachep)
4105     @@ -214,83 +59,75 @@ EXPORT_SYMBOL(inet_frags_init);
4106    
4107     void inet_frags_fini(struct inet_frags *f)
4108     {
4109     - cancel_work_sync(&f->frags_work);
4110     + /* We must wait until all inet_frag_destroy_rcu() calls have completed. */
4111     + rcu_barrier();
4112     +
4113     kmem_cache_destroy(f->frags_cachep);
4114     + f->frags_cachep = NULL;
4115     }
4116     EXPORT_SYMBOL(inet_frags_fini);
4117    
4118     -void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
4119     +static void inet_frags_free_cb(void *ptr, void *arg)
4120     {
4121     - unsigned int seq;
4122     - int i;
4123     -
4124     - nf->low_thresh = 0;
4125     + struct inet_frag_queue *fq = ptr;
4126    
4127     -evict_again:
4128     - local_bh_disable();
4129     - seq = read_seqbegin(&f->rnd_seqlock);
4130     -
4131     - for (i = 0; i < INETFRAGS_HASHSZ ; i++)
4132     - inet_evict_bucket(f, &f->hash[i]);
4133     -
4134     - local_bh_enable();
4135     - cond_resched();
4136     -
4137     - if (read_seqretry(&f->rnd_seqlock, seq) ||
4138     - sum_frag_mem_limit(nf))
4139     - goto evict_again;
4140     -}
4141     -EXPORT_SYMBOL(inet_frags_exit_net);
4142     -
4143     -static struct inet_frag_bucket *
4144     -get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
4145     -__acquires(hb->chain_lock)
4146     -{
4147     - struct inet_frag_bucket *hb;
4148     - unsigned int seq, hash;
4149     -
4150     - restart:
4151     - seq = read_seqbegin(&f->rnd_seqlock);
4152     -
4153     - hash = inet_frag_hashfn(f, fq);
4154     - hb = &f->hash[hash];
4155     + /* If we can not cancel the timer, it means this frag_queue
4156     + * is already disappearing, we have nothing to do.
4157     + * Otherwise, we own a refcount until the end of this function.
4158     + */
4159     + if (!del_timer(&fq->timer))
4160     + return;
4161    
4162     - spin_lock(&hb->chain_lock);
4163     - if (read_seqretry(&f->rnd_seqlock, seq)) {
4164     - spin_unlock(&hb->chain_lock);
4165     - goto restart;
4166     + spin_lock_bh(&fq->lock);
4167     + if (!(fq->flags & INET_FRAG_COMPLETE)) {
4168     + fq->flags |= INET_FRAG_COMPLETE;
4169     + refcount_dec(&fq->refcnt);
4170     }
4171     + spin_unlock_bh(&fq->lock);
4172    
4173     - return hb;
4174     + inet_frag_put(fq);
4175     }
4176    
4177     -static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
4178     +void inet_frags_exit_net(struct netns_frags *nf)
4179     {
4180     - struct inet_frag_bucket *hb;
4181     + nf->low_thresh = 0; /* prevent creation of new frags */
4182    
4183     - hb = get_frag_bucket_locked(fq, f);
4184     - hlist_del(&fq->list);
4185     - fq->flags |= INET_FRAG_COMPLETE;
4186     - spin_unlock(&hb->chain_lock);
4187     + rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
4188     }
4189     +EXPORT_SYMBOL(inet_frags_exit_net);
4190    
4191     -void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
4192     +void inet_frag_kill(struct inet_frag_queue *fq)
4193     {
4194     if (del_timer(&fq->timer))
4195     refcount_dec(&fq->refcnt);
4196    
4197     if (!(fq->flags & INET_FRAG_COMPLETE)) {
4198     - fq_unlink(fq, f);
4199     + struct netns_frags *nf = fq->net;
4200     +
4201     + fq->flags |= INET_FRAG_COMPLETE;
4202     + rhashtable_remove_fast(&nf->rhashtable, &fq->node, nf->f->rhash_params);
4203     refcount_dec(&fq->refcnt);
4204     }
4205     }
4206     EXPORT_SYMBOL(inet_frag_kill);
4207    
4208     -void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
4209     +static void inet_frag_destroy_rcu(struct rcu_head *head)
4210     +{
4211     + struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
4212     + rcu);
4213     + struct inet_frags *f = q->net->f;
4214     +
4215     + if (f->destructor)
4216     + f->destructor(q);
4217     + kmem_cache_free(f->frags_cachep, q);
4218     +}
4219     +
4220     +void inet_frag_destroy(struct inet_frag_queue *q)
4221     {
4222     struct sk_buff *fp;
4223     struct netns_frags *nf;
4224     unsigned int sum, sum_truesize = 0;
4225     + struct inet_frags *f;
4226    
4227     WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
4228     WARN_ON(del_timer(&q->timer) != 0);
4229     @@ -298,64 +135,35 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
4230     /* Release all fragment data. */
4231     fp = q->fragments;
4232     nf = q->net;
4233     - while (fp) {
4234     - struct sk_buff *xp = fp->next;
4235     -
4236     - sum_truesize += fp->truesize;
4237     - kfree_skb(fp);
4238     - fp = xp;
4239     + f = nf->f;
4240     + if (fp) {
4241     + do {
4242     + struct sk_buff *xp = fp->next;
4243     +
4244     + sum_truesize += fp->truesize;
4245     + kfree_skb(fp);
4246     + fp = xp;
4247     + } while (fp);
4248     + } else {
4249     + sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
4250     }
4251     sum = sum_truesize + f->qsize;
4252    
4253     - if (f->destructor)
4254     - f->destructor(q);
4255     - kmem_cache_free(f->frags_cachep, q);
4256     + call_rcu(&q->rcu, inet_frag_destroy_rcu);
4257    
4258     sub_frag_mem_limit(nf, sum);
4259     }
4260     EXPORT_SYMBOL(inet_frag_destroy);
4261    
4262     -static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
4263     - struct inet_frag_queue *qp_in,
4264     - struct inet_frags *f,
4265     - void *arg)
4266     -{
4267     - struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
4268     - struct inet_frag_queue *qp;
4269     -
4270     -#ifdef CONFIG_SMP
4271     - /* With SMP race we have to recheck hash table, because
4272     - * such entry could have been created on other cpu before
4273     - * we acquired hash bucket lock.
4274     - */
4275     - hlist_for_each_entry(qp, &hb->chain, list) {
4276     - if (qp->net == nf && f->match(qp, arg)) {
4277     - refcount_inc(&qp->refcnt);
4278     - spin_unlock(&hb->chain_lock);
4279     - qp_in->flags |= INET_FRAG_COMPLETE;
4280     - inet_frag_put(qp_in, f);
4281     - return qp;
4282     - }
4283     - }
4284     -#endif
4285     - qp = qp_in;
4286     - if (!mod_timer(&qp->timer, jiffies + nf->timeout))
4287     - refcount_inc(&qp->refcnt);
4288     -
4289     - refcount_inc(&qp->refcnt);
4290     - hlist_add_head(&qp->list, &hb->chain);
4291     -
4292     - spin_unlock(&hb->chain_lock);
4293     -
4294     - return qp;
4295     -}
4296     -
4297     static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
4298     struct inet_frags *f,
4299     void *arg)
4300     {
4301     struct inet_frag_queue *q;
4302    
4303     + if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
4304     + return NULL;
4305     +
4306     q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
4307     if (!q)
4308     return NULL;
4309     @@ -364,77 +172,53 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
4310     f->constructor(q, arg);
4311     add_frag_mem_limit(nf, f->qsize);
4312    
4313     - setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
4314     + timer_setup(&q->timer, f->frag_expire, 0);
4315     spin_lock_init(&q->lock);
4316     - refcount_set(&q->refcnt, 1);
4317     + refcount_set(&q->refcnt, 3);
4318    
4319     return q;
4320     }
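
Why does the backport initialize the refcount to 3 rather than 1? The usual reading is one reference for the pending timer, one for the rhashtable entry, and one handed back to the caller of inet_frag_create(); the queue is freed only once all three owners drop theirs. A toy userspace model of that ownership (hypothetical names, not kernel code):

#include <stdio.h>

/* One reference each for the caller, the timer and the hash table;
 * destruction happens only when the last owner puts its reference. */
static int refs = 3;

static void put(const char *owner)
{
	if (--refs == 0)
		printf("%s dropped the last reference: destroy\n", owner);
	else
		printf("%s put, %d left\n", owner, refs);
}

int main(void)
{
	put("caller");
	put("timer");
	put("hash table");
	return 0;
}
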
4321    
4322     static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
4323     - struct inet_frags *f,
4324     void *arg)
4325     {
4326     + struct inet_frags *f = nf->f;
4327     struct inet_frag_queue *q;
4328     + int err;
4329    
4330     q = inet_frag_alloc(nf, f, arg);
4331     if (!q)
4332     return NULL;
4333    
4334     - return inet_frag_intern(nf, q, f, arg);
4335     -}
4336     -
4337     -struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
4338     - struct inet_frags *f, void *key,
4339     - unsigned int hash)
4340     -{
4341     - struct inet_frag_bucket *hb;
4342     - struct inet_frag_queue *q;
4343     - int depth = 0;
4344     + mod_timer(&q->timer, jiffies + nf->timeout);
4345    
4346     - if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
4347     - inet_frag_schedule_worker(f);
4348     + err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
4349     + f->rhash_params);
4350     + if (err < 0) {
4351     + q->flags |= INET_FRAG_COMPLETE;
4352     + inet_frag_kill(q);
4353     + inet_frag_destroy(q);
4354     return NULL;
4355     }
4356     + return q;
4357     +}
4358    
4359     - if (frag_mem_limit(nf) > nf->low_thresh)
4360     - inet_frag_schedule_worker(f);
4361     -
4362     - hash &= (INETFRAGS_HASHSZ - 1);
4363     - hb = &f->hash[hash];
4364     -
4365     - spin_lock(&hb->chain_lock);
4366     - hlist_for_each_entry(q, &hb->chain, list) {
4367     - if (q->net == nf && f->match(q, key)) {
4368     - refcount_inc(&q->refcnt);
4369     - spin_unlock(&hb->chain_lock);
4370     - return q;
4371     - }
4372     - depth++;
4373     - }
4374     - spin_unlock(&hb->chain_lock);
4375     +/* TODO: call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
4376     +struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
4377     +{
4378     + struct inet_frag_queue *fq;
4379    
4380     - if (depth <= INETFRAGS_MAXDEPTH)
4381     - return inet_frag_create(nf, f, key);
4382     + rcu_read_lock();
4383    
4384     - if (inet_frag_may_rebuild(f)) {
4385     - if (!f->rebuild)
4386     - f->rebuild = true;
4387     - inet_frag_schedule_worker(f);
4388     + fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
4389     + if (fq) {
4390     + if (!refcount_inc_not_zero(&fq->refcnt))
4391     + fq = NULL;
4392     + rcu_read_unlock();
4393     + return fq;
4394     }
4395     + rcu_read_unlock();
4396    
4397     - return ERR_PTR(-ENOBUFS);
4398     + return inet_frag_create(nf, key);
4399     }
4400     EXPORT_SYMBOL(inet_frag_find);
4401     -
4402     -void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
4403     - const char *prefix)
4404     -{
4405     - static const char msg[] = "inet_frag_find: Fragment hash bucket"
4406     - " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
4407     - ". Dropping fragment.\n";
4408     -
4409     - if (PTR_ERR(q) == -ENOBUFS)
4410     - net_dbg_ratelimited("%s%s", prefix, msg);
4411     -}
4412     -EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
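
The lookup path above pairs rhashtable_lookup() under rcu_read_lock() with refcount_inc_not_zero(), so a concurrent reader can never resurrect a queue whose last reference has already been dropped (the TODO notes the refcount might eventually go away in favor of pure RCU). A runnable userspace model of that primitive using C11 atomics — a sketch, not the kernel's refcount_t:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Increment the counter only if it is still nonzero; the CAS loop keeps
 * the check and the increment atomic with respect to other threads. */
static bool refcount_inc_not_zero_toy(atomic_int *r)
{
	int old = atomic_load(r);

	do {
		if (old == 0)
			return false;
	} while (!atomic_compare_exchange_weak(r, &old, old + 1));
	return true;
}

int main(void)
{
	atomic_int live = 2, dying = 0;

	printf("live:  %d\n", refcount_inc_not_zero_toy(&live));  /* 1 */
	printf("dying: %d\n", refcount_inc_not_zero_toy(&dying)); /* 0 */
	return 0;
}
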
4413     diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
4414     index 4cb1befc3949..e7227128df2c 100644
4415     --- a/net/ipv4/ip_fragment.c
4416     +++ b/net/ipv4/ip_fragment.c
4417     @@ -57,27 +57,64 @@
4418     */
4419     static const char ip_frag_cache_name[] = "ip4-frags";
4420    
4421     -struct ipfrag_skb_cb
4422     -{
4423     +/* Use skb->cb to track consecutive/adjacent fragments coming at
4424     + * the end of the queue. Nodes in the rb-tree queue will
4425     + * contain "runs" of one or more adjacent fragments.
4426     + *
4427     + * Invariants:
4428     + * - next_frag is NULL at the tail of a "run";
4429     + * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
4430     + */
4431     +struct ipfrag_skb_cb {
4432     struct inet_skb_parm h;
4433     - int offset;
4434     + struct sk_buff *next_frag;
4435     + int frag_run_len;
4436     };
4437    
4438     -#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
4439     +#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
4440     +
4441     +static void ip4_frag_init_run(struct sk_buff *skb)
4442     +{
4443     + BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
4444     +
4445     + FRAG_CB(skb)->next_frag = NULL;
4446     + FRAG_CB(skb)->frag_run_len = skb->len;
4447     +}
4448     +
4449     +/* Append skb to the last "run". */
4450     +static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
4451     + struct sk_buff *skb)
4452     +{
4453     + RB_CLEAR_NODE(&skb->rbnode);
4454     + FRAG_CB(skb)->next_frag = NULL;
4455     +
4456     + FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
4457     + FRAG_CB(q->fragments_tail)->next_frag = skb;
4458     + q->fragments_tail = skb;
4459     +}
4460     +
4461     +/* Create a new "run" with the skb. */
4462     +static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
4463     +{
4464     + if (q->last_run_head)
4465     + rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
4466     + &q->last_run_head->rbnode.rb_right);
4467     + else
4468     + rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
4469     + rb_insert_color(&skb->rbnode, &q->rb_fragments);
4470     +
4471     + ip4_frag_init_run(skb);
4472     + q->fragments_tail = skb;
4473     + q->last_run_head = skb;
4474     +}
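
The helpers above maintain the two invariants spelled out in the ipfrag_skb_cb comment: next_frag is NULL at the tail of a run, and only the run head keeps the aggregate length in frag_run_len. A minimal userspace model of the bookkeeping (toy types, not kernel code):

#include <stdio.h>

struct toy_frag {
	int len;
	struct toy_frag *next_frag;	/* NULL at the tail of a run */
	int frag_run_len;		/* meaningful on the head only */
};

static void init_run(struct toy_frag *f)
{
	f->next_frag = NULL;
	f->frag_run_len = f->len;
}

static void append_to_run(struct toy_frag *head, struct toy_frag *tail,
			  struct toy_frag *f)
{
	f->next_frag = NULL;
	head->frag_run_len += f->len;	/* head tracks the run total */
	tail->next_frag = f;		/* old tail chains the newcomer */
}

int main(void)
{
	struct toy_frag a = { .len = 8 }, b = { .len = 16 };

	init_run(&a);
	append_to_run(&a, &a, &b);
	printf("run length: %d\n", a.frag_run_len);	/* 24 */
	return 0;
}
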
4475    
4476     /* Describe an entry in the "incomplete datagrams" queue. */
4477     struct ipq {
4478     struct inet_frag_queue q;
4479    
4480     - u32 user;
4481     - __be32 saddr;
4482     - __be32 daddr;
4483     - __be16 id;
4484     - u8 protocol;
4485     u8 ecn; /* RFC3168 support */
4486     u16 max_df_size; /* largest frag with DF set seen */
4487     int iif;
4488     - int vif; /* L3 master device index */
4489     unsigned int rid;
4490     struct inet_peer *peer;
4491     };
4492     @@ -89,49 +126,9 @@ static u8 ip4_frag_ecn(u8 tos)
4493    
4494     static struct inet_frags ip4_frags;
4495    
4496     -int ip_frag_mem(struct net *net)
4497     -{
4498     - return sum_frag_mem_limit(&net->ipv4.frags);
4499     -}
4500     -
4501     -static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
4502     - struct net_device *dev);
4503     -
4504     -struct ip4_create_arg {
4505     - struct iphdr *iph;
4506     - u32 user;
4507     - int vif;
4508     -};
4509     +static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
4510     + struct sk_buff *prev_tail, struct net_device *dev);
4511    
4512     -static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
4513     -{
4514     - net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
4515     - return jhash_3words((__force u32)id << 16 | prot,
4516     - (__force u32)saddr, (__force u32)daddr,
4517     - ip4_frags.rnd);
4518     -}
4519     -
4520     -static unsigned int ip4_hashfn(const struct inet_frag_queue *q)
4521     -{
4522     - const struct ipq *ipq;
4523     -
4524     - ipq = container_of(q, struct ipq, q);
4525     - return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
4526     -}
4527     -
4528     -static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
4529     -{
4530     - const struct ipq *qp;
4531     - const struct ip4_create_arg *arg = a;
4532     -
4533     - qp = container_of(q, struct ipq, q);
4534     - return qp->id == arg->iph->id &&
4535     - qp->saddr == arg->iph->saddr &&
4536     - qp->daddr == arg->iph->daddr &&
4537     - qp->protocol == arg->iph->protocol &&
4538     - qp->user == arg->user &&
4539     - qp->vif == arg->vif;
4540     -}
4541    
4542     static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
4543     {
4544     @@ -140,17 +137,12 @@ static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
4545     frags);
4546     struct net *net = container_of(ipv4, struct net, ipv4);
4547    
4548     - const struct ip4_create_arg *arg = a;
4549     + const struct frag_v4_compare_key *key = a;
4550    
4551     - qp->protocol = arg->iph->protocol;
4552     - qp->id = arg->iph->id;
4553     - qp->ecn = ip4_frag_ecn(arg->iph->tos);
4554     - qp->saddr = arg->iph->saddr;
4555     - qp->daddr = arg->iph->daddr;
4556     - qp->vif = arg->vif;
4557     - qp->user = arg->user;
4558     + q->key.v4 = *key;
4559     + qp->ecn = 0;
4560     qp->peer = q->net->max_dist ?
4561     - inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, arg->vif, 1) :
4562     + inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
4563     NULL;
4564     }
4565    
4566     @@ -168,7 +160,7 @@ static void ip4_frag_free(struct inet_frag_queue *q)
4567    
4568     static void ipq_put(struct ipq *ipq)
4569     {
4570     - inet_frag_put(&ipq->q, &ip4_frags);
4571     + inet_frag_put(&ipq->q);
4572     }
4573    
4574     /* Kill ipq entry. It is not destroyed immediately,
4575     @@ -176,7 +168,7 @@ static void ipq_put(struct ipq *ipq)
4576     */
4577     static void ipq_kill(struct ipq *ipq)
4578     {
4579     - inet_frag_kill(&ipq->q, &ip4_frags);
4580     + inet_frag_kill(&ipq->q);
4581     }
4582    
4583     static bool frag_expire_skip_icmp(u32 user)
4584     @@ -191,12 +183,16 @@ static bool frag_expire_skip_icmp(u32 user)
4585     /*
4586     * Oops, a fragment queue timed out. Kill it and send an ICMP reply.
4587     */
4588     -static void ip_expire(unsigned long arg)
4589     +static void ip_expire(struct timer_list *t)
4590     {
4591     - struct ipq *qp;
4592     + struct inet_frag_queue *frag = from_timer(frag, t, timer);
4593     + const struct iphdr *iph;
4594     + struct sk_buff *head = NULL;
4595     struct net *net;
4596     + struct ipq *qp;
4597     + int err;
4598    
4599     - qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
4600     + qp = container_of(frag, struct ipq, q);
4601     net = container_of(qp->q.net, struct net, ipv4.frags);
4602    
4603     rcu_read_lock();
4604     @@ -207,51 +203,65 @@ static void ip_expire(unsigned long arg)
4605    
4606     ipq_kill(qp);
4607     __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
4608     + __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
4609    
4610     - if (!inet_frag_evicting(&qp->q)) {
4611     - struct sk_buff *clone, *head = qp->q.fragments;
4612     - const struct iphdr *iph;
4613     - int err;
4614     -
4615     - __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
4616     + if (!(qp->q.flags & INET_FRAG_FIRST_IN))
4617     + goto out;
4618    
4619     - if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
4620     + /* sk_buff::dev and sk_buff::rbnode are unionized. So we
4621     + * pull the head out of the tree in order to be able to
4622     + * deal with head->dev.
4623     + */
4624     + if (qp->q.fragments) {
4625     + head = qp->q.fragments;
4626     + qp->q.fragments = head->next;
4627     + } else {
4628     + head = skb_rb_first(&qp->q.rb_fragments);
4629     + if (!head)
4630     goto out;
4631     + if (FRAG_CB(head)->next_frag)
4632     + rb_replace_node(&head->rbnode,
4633     + &FRAG_CB(head)->next_frag->rbnode,
4634     + &qp->q.rb_fragments);
4635     + else
4636     + rb_erase(&head->rbnode, &qp->q.rb_fragments);
4637     + memset(&head->rbnode, 0, sizeof(head->rbnode));
4638     + barrier();
4639     + }
4640     + if (head == qp->q.fragments_tail)
4641     + qp->q.fragments_tail = NULL;
4642    
4643     - head->dev = dev_get_by_index_rcu(net, qp->iif);
4644     - if (!head->dev)
4645     - goto out;
4646     + sub_frag_mem_limit(qp->q.net, head->truesize);
4647     +
4648     + head->dev = dev_get_by_index_rcu(net, qp->iif);
4649     + if (!head->dev)
4650     + goto out;
4651    
4652    
4653     - /* skb has no dst, perform route lookup again */
4654     - iph = ip_hdr(head);
4655     - err = ip_route_input_noref(head, iph->daddr, iph->saddr,
4656     + /* skb has no dst, perform route lookup again */
4657     + iph = ip_hdr(head);
4658     + err = ip_route_input_noref(head, iph->daddr, iph->saddr,
4659     iph->tos, head->dev);
4660     - if (err)
4661     - goto out;
4662     + if (err)
4663     + goto out;
4664    
4665     - /* Only an end host needs to send an ICMP
4666     - * "Fragment Reassembly Timeout" message, per RFC792.
4667     - */
4668     - if (frag_expire_skip_icmp(qp->user) &&
4669     - (skb_rtable(head)->rt_type != RTN_LOCAL))
4670     - goto out;
4671     + /* Only an end host needs to send an ICMP
4672     + * "Fragment Reassembly Timeout" message, per RFC792.
4673     + */
4674     + if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
4675     + (skb_rtable(head)->rt_type != RTN_LOCAL))
4676     + goto out;
4677    
4678     - clone = skb_clone(head, GFP_ATOMIC);
4679     + spin_unlock(&qp->q.lock);
4680     + icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
4681     + goto out_rcu_unlock;
4682    
4683     - /* Send an ICMP "Fragment Reassembly Timeout" message. */
4684     - if (clone) {
4685     - spin_unlock(&qp->q.lock);
4686     - icmp_send(clone, ICMP_TIME_EXCEEDED,
4687     - ICMP_EXC_FRAGTIME, 0);
4688     - consume_skb(clone);
4689     - goto out_rcu_unlock;
4690     - }
4691     - }
4692     out:
4693     spin_unlock(&qp->q.lock);
4694     out_rcu_unlock:
4695     rcu_read_unlock();
4696     + if (head)
4697     + kfree_skb(head);
4698     ipq_put(qp);
4699     }
4700    
4701     @@ -261,21 +271,20 @@ out_rcu_unlock:
4702     static struct ipq *ip_find(struct net *net, struct iphdr *iph,
4703     u32 user, int vif)
4704     {
4705     + struct frag_v4_compare_key key = {
4706     + .saddr = iph->saddr,
4707     + .daddr = iph->daddr,
4708     + .user = user,
4709     + .vif = vif,
4710     + .id = iph->id,
4711     + .protocol = iph->protocol,
4712     + };
4713     struct inet_frag_queue *q;
4714     - struct ip4_create_arg arg;
4715     - unsigned int hash;
4716     -
4717     - arg.iph = iph;
4718     - arg.user = user;
4719     - arg.vif = vif;
4720    
4721     - hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
4722     -
4723     - q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
4724     - if (IS_ERR_OR_NULL(q)) {
4725     - inet_frag_maybe_warn_overflow(q, pr_fmt());
4726     + q = inet_frag_find(&net->ipv4.frags, &key);
4727     + if (!q)
4728     return NULL;
4729     - }
4730     +
4731     return container_of(q, struct ipq, q);
4732     }
4733    
4734     @@ -295,7 +304,7 @@ static int ip_frag_too_far(struct ipq *qp)
4735     end = atomic_inc_return(&peer->rid);
4736     qp->rid = end;
4737    
4738     - rc = qp->q.fragments && (end - start) > max;
4739     + rc = qp->q.fragments_tail && (end - start) > max;
4740    
4741     if (rc) {
4742     struct net *net;
4743     @@ -309,7 +318,6 @@ static int ip_frag_too_far(struct ipq *qp)
4744    
4745     static int ip_frag_reinit(struct ipq *qp)
4746     {
4747     - struct sk_buff *fp;
4748     unsigned int sum_truesize = 0;
4749    
4750     if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
4751     @@ -317,21 +325,16 @@ static int ip_frag_reinit(struct ipq *qp)
4752     return -ETIMEDOUT;
4753     }
4754    
4755     - fp = qp->q.fragments;
4756     - do {
4757     - struct sk_buff *xp = fp->next;
4758     -
4759     - sum_truesize += fp->truesize;
4760     - kfree_skb(fp);
4761     - fp = xp;
4762     - } while (fp);
4763     + sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments);
4764     sub_frag_mem_limit(qp->q.net, sum_truesize);
4765    
4766     qp->q.flags = 0;
4767     qp->q.len = 0;
4768     qp->q.meat = 0;
4769     qp->q.fragments = NULL;
4770     + qp->q.rb_fragments = RB_ROOT;
4771     qp->q.fragments_tail = NULL;
4772     + qp->q.last_run_head = NULL;
4773     qp->iif = 0;
4774     qp->ecn = 0;
4775    
4776     @@ -341,7 +344,9 @@ static int ip_frag_reinit(struct ipq *qp)
4777     /* Add new segment to existing queue. */
4778     static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
4779     {
4780     - struct sk_buff *prev, *next;
4781     + struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
4782     + struct rb_node **rbn, *parent;
4783     + struct sk_buff *skb1, *prev_tail;
4784     struct net_device *dev;
4785     unsigned int fragsize;
4786     int flags, offset;
4787     @@ -404,99 +409,61 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
4788     if (err)
4789     goto err;
4790    
4791     - /* Find out which fragments are in front and at the back of us
4792     - * in the chain of fragments so far. We must know where to put
4793     - * this fragment, right?
4794     - */
4795     - prev = qp->q.fragments_tail;
4796     - if (!prev || FRAG_CB(prev)->offset < offset) {
4797     - next = NULL;
4798     - goto found;
4799     - }
4800     - prev = NULL;
4801     - for (next = qp->q.fragments; next != NULL; next = next->next) {
4802     - if (FRAG_CB(next)->offset >= offset)
4803     - break; /* bingo! */
4804     - prev = next;
4805     - }
4806     -
4807     -found:
4808     - /* We found where to put this one. Check for overlap with
4809     - * preceding fragment, and, if needed, align things so that
4810     - * any overlaps are eliminated.
4811     + /* Note: skb->rbnode and skb->dev share the same location. */
4812     + dev = skb->dev;
4813     + /* Make sure the compiler won't do silly aliasing games */
4814     + barrier();
4815     +
4816     + /* RFC5722, Section 4, amended by Errata ID: 3089
4817     + * When reassembling an IPv6 datagram, if
4818     + * one or more of its constituent fragments is determined to be an
4819     + * overlapping fragment, the entire datagram (and any constituent
4820     + * fragments) MUST be silently discarded.
4821     + *
4822     + * We do the same here for IPv4 (and increment an snmp counter).
4823     */
4824     - if (prev) {
4825     - int i = (FRAG_CB(prev)->offset + prev->len) - offset;
4826    
4827     - if (i > 0) {
4828     - offset += i;
4829     - err = -EINVAL;
4830     - if (end <= offset)
4831     - goto err;
4832     - err = -ENOMEM;
4833     - if (!pskb_pull(skb, i))
4834     - goto err;
4835     - if (skb->ip_summed != CHECKSUM_UNNECESSARY)
4836     - skb->ip_summed = CHECKSUM_NONE;
4837     - }
4838     - }
4839     -
4840     - err = -ENOMEM;
4841     -
4842     - while (next && FRAG_CB(next)->offset < end) {
4843     - int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
4844     -
4845     - if (i < next->len) {
4846     - int delta = -next->truesize;
4847     -
4848     - /* Eat head of the next overlapped fragment
4849     - * and leave the loop. The next ones cannot overlap.
4850     - */
4851     - if (!pskb_pull(next, i))
4852     - goto err;
4853     - delta += next->truesize;
4854     - if (delta)
4855     - add_frag_mem_limit(qp->q.net, delta);
4856     - FRAG_CB(next)->offset += i;
4857     - qp->q.meat -= i;
4858     - if (next->ip_summed != CHECKSUM_UNNECESSARY)
4859     - next->ip_summed = CHECKSUM_NONE;
4860     - break;
4861     - } else {
4862     - struct sk_buff *free_it = next;
4863     -
4864     - /* Old fragment is completely overridden with
4865     - * new one drop it.
4866     - */
4867     - next = next->next;
4868     -
4869     - if (prev)
4870     - prev->next = next;
4871     - else
4872     - qp->q.fragments = next;
4873     -
4874     - qp->q.meat -= free_it->len;
4875     - sub_frag_mem_limit(qp->q.net, free_it->truesize);
4876     - kfree_skb(free_it);
4877     - }
4878     + /* Find out where to put this fragment. */
4879     + prev_tail = qp->q.fragments_tail;
4880     + if (!prev_tail)
4881     + ip4_frag_create_run(&qp->q, skb); /* First fragment. */
4882     + else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
4883     + /* This is the common case: skb goes to the end. */
4884     + /* Detect and discard overlaps. */
4885     + if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
4886     + goto discard_qp;
4887     + if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
4888     + ip4_frag_append_to_last_run(&qp->q, skb);
4889     + else
4890     + ip4_frag_create_run(&qp->q, skb);
4891     + } else {
4892     + /* Binary search. Note that skb can become the first fragment,
4893     + * but not the last (covered above).
4894     + */
4895     + rbn = &qp->q.rb_fragments.rb_node;
4896     + do {
4897     + parent = *rbn;
4898     + skb1 = rb_to_skb(parent);
4899     + if (end <= skb1->ip_defrag_offset)
4900     + rbn = &parent->rb_left;
4901     + else if (offset >= skb1->ip_defrag_offset +
4902     + FRAG_CB(skb1)->frag_run_len)
4903     + rbn = &parent->rb_right;
4904     + else /* Found an overlap with skb1. */
4905     + goto discard_qp;
4906     + } while (*rbn);
4907     + /* Here we have parent properly set, and rbn pointing to
4908     + * one of its NULL left/right children. Insert skb.
4909     + */
4910     + ip4_frag_init_run(skb);
4911     + rb_link_node(&skb->rbnode, parent, rbn);
4912     + rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
4913     }
4914    
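
The binary search above boils down to a three-way interval test per run: the new fragment [offset, end) must fall strictly left or strictly right of each existing run [roff, roff + rlen), and any other outcome is an overlap that kills the whole queue — the RFC5722-style policy the patch adopts for IPv4. A runnable sketch of just that predicate (hypothetical names):

#include <stdio.h>

/* Classify a new fragment [offset, end) against a run [roff, roff + rlen). */
static const char *classify(int offset, int end, int roff, int rlen)
{
	if (end <= roff)
		return "left";		/* descend rb_left */
	if (offset >= roff + rlen)
		return "right";		/* descend rb_right */
	return "overlap";		/* discard the whole queue */
}

int main(void)
{
	printf("%s\n", classify(0, 8, 8, 8));	/* left */
	printf("%s\n", classify(16, 24, 8, 8));	/* right */
	printf("%s\n", classify(4, 12, 8, 8));	/* overlap */
	return 0;
}
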
4915     - FRAG_CB(skb)->offset = offset;
4916     -
4917     - /* Insert this fragment in the chain of fragments. */
4918     - skb->next = next;
4919     - if (!next)
4920     - qp->q.fragments_tail = skb;
4921     - if (prev)
4922     - prev->next = skb;
4923     - else
4924     - qp->q.fragments = skb;
4925     -
4926     - dev = skb->dev;
4927     - if (dev) {
4928     + if (dev)
4929     qp->iif = dev->ifindex;
4930     - skb->dev = NULL;
4931     - }
4932     + skb->ip_defrag_offset = offset;
4933     +
4934     qp->q.stamp = skb->tstamp;
4935     qp->q.meat += skb->len;
4936     qp->ecn |= ecn;
4937     @@ -518,7 +485,7 @@ found:
4938     unsigned long orefdst = skb->_skb_refdst;
4939    
4940     skb->_skb_refdst = 0UL;
4941     - err = ip_frag_reasm(qp, prev, dev);
4942     + err = ip_frag_reasm(qp, skb, prev_tail, dev);
4943     skb->_skb_refdst = orefdst;
4944     return err;
4945     }
4946     @@ -526,20 +493,24 @@ found:
4947     skb_dst_drop(skb);
4948     return -EINPROGRESS;
4949    
4950     +discard_qp:
4951     + inet_frag_kill(&qp->q);
4952     + err = -EINVAL;
4953     + __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
4954     err:
4955     kfree_skb(skb);
4956     return err;
4957     }
4958    
4959     -
4960     /* Build a new IP datagram from all its fragments. */
4961     -
4962     -static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
4963     - struct net_device *dev)
4964     +static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
4965     + struct sk_buff *prev_tail, struct net_device *dev)
4966     {
4967     struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
4968     struct iphdr *iph;
4969     - struct sk_buff *fp, *head = qp->q.fragments;
4970     + struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
4971     + struct sk_buff **nextp; /* To build frag_list. */
4972     + struct rb_node *rbn;
4973     int len;
4974     int ihlen;
4975     int err;
4976     @@ -553,26 +524,27 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
4977     goto out_fail;
4978     }
4979     /* Make the one we just received the head. */
4980     - if (prev) {
4981     - head = prev->next;
4982     - fp = skb_clone(head, GFP_ATOMIC);
4983     + if (head != skb) {
4984     + fp = skb_clone(skb, GFP_ATOMIC);
4985     if (!fp)
4986     goto out_nomem;
4987     -
4988     - fp->next = head->next;
4989     - if (!fp->next)
4990     + FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
4991     + if (RB_EMPTY_NODE(&skb->rbnode))
4992     + FRAG_CB(prev_tail)->next_frag = fp;
4993     + else
4994     + rb_replace_node(&skb->rbnode, &fp->rbnode,
4995     + &qp->q.rb_fragments);
4996     + if (qp->q.fragments_tail == skb)
4997     qp->q.fragments_tail = fp;
4998     - prev->next = fp;
4999     -
5000     - skb_morph(head, qp->q.fragments);
5001     - head->next = qp->q.fragments->next;
5002     -
5003     - consume_skb(qp->q.fragments);
5004     - qp->q.fragments = head;
5005     + skb_morph(skb, head);
5006     + FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
5007     + rb_replace_node(&head->rbnode, &skb->rbnode,
5008     + &qp->q.rb_fragments);
5009     + consume_skb(head);
5010     + head = skb;
5011     }
5012    
5013     - WARN_ON(!head);
5014     - WARN_ON(FRAG_CB(head)->offset != 0);
5015     + WARN_ON(head->ip_defrag_offset != 0);
5016    
5017     /* Allocate a new buffer for the datagram. */
5018     ihlen = ip_hdrlen(head);
5019     @@ -596,35 +568,61 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
5020     clone = alloc_skb(0, GFP_ATOMIC);
5021     if (!clone)
5022     goto out_nomem;
5023     - clone->next = head->next;
5024     - head->next = clone;
5025     skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
5026     skb_frag_list_init(head);
5027     for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
5028     plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
5029     clone->len = clone->data_len = head->data_len - plen;
5030     - head->data_len -= clone->len;
5031     - head->len -= clone->len;
5032     + head->truesize += clone->truesize;
5033     clone->csum = 0;
5034     clone->ip_summed = head->ip_summed;
5035     add_frag_mem_limit(qp->q.net, clone->truesize);
5036     + skb_shinfo(head)->frag_list = clone;
5037     + nextp = &clone->next;
5038     + } else {
5039     + nextp = &skb_shinfo(head)->frag_list;
5040     }
5041    
5042     - skb_shinfo(head)->frag_list = head->next;
5043     skb_push(head, head->data - skb_network_header(head));
5044    
5045     - for (fp=head->next; fp; fp = fp->next) {
5046     - head->data_len += fp->len;
5047     - head->len += fp->len;
5048     - if (head->ip_summed != fp->ip_summed)
5049     - head->ip_summed = CHECKSUM_NONE;
5050     - else if (head->ip_summed == CHECKSUM_COMPLETE)
5051     - head->csum = csum_add(head->csum, fp->csum);
5052     - head->truesize += fp->truesize;
5053     + /* Traverse the tree in order, to build frag_list. */
5054     + fp = FRAG_CB(head)->next_frag;
5055     + rbn = rb_next(&head->rbnode);
5056     + rb_erase(&head->rbnode, &qp->q.rb_fragments);
5057     + while (rbn || fp) {
5058     + /* fp points to the next sk_buff in the current run;
5059     + * rbn points to the next run.
5060     + */
5061     + /* Go through the current run. */
5062     + while (fp) {
5063     + *nextp = fp;
5064     + nextp = &fp->next;
5065     + fp->prev = NULL;
5066     + memset(&fp->rbnode, 0, sizeof(fp->rbnode));
5067     + fp->sk = NULL;
5068     + head->data_len += fp->len;
5069     + head->len += fp->len;
5070     + if (head->ip_summed != fp->ip_summed)
5071     + head->ip_summed = CHECKSUM_NONE;
5072     + else if (head->ip_summed == CHECKSUM_COMPLETE)
5073     + head->csum = csum_add(head->csum, fp->csum);
5074     + head->truesize += fp->truesize;
5075     + fp = FRAG_CB(fp)->next_frag;
5076     + }
5077     + /* Move to the next run. */
5078     + if (rbn) {
5079     + struct rb_node *rbnext = rb_next(rbn);
5080     +
5081     + fp = rb_to_skb(rbn);
5082     + rb_erase(rbn, &qp->q.rb_fragments);
5083     + rbn = rbnext;
5084     + }
5085     }
5086     sub_frag_mem_limit(qp->q.net, head->truesize);
5087    
5088     + *nextp = NULL;
5089     head->next = NULL;
5090     + head->prev = NULL;
5091     head->dev = dev;
5092     head->tstamp = qp->q.stamp;
5093     IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
5094     @@ -652,7 +650,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
5095    
5096     __IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
5097     qp->q.fragments = NULL;
5098     + qp->q.rb_fragments = RB_ROOT;
5099     qp->q.fragments_tail = NULL;
5100     + qp->q.last_run_head = NULL;
5101     return 0;
5102    
5103     out_nomem:
5104     @@ -660,7 +660,7 @@ out_nomem:
5105     err = -ENOMEM;
5106     goto out_fail;
5107     out_oversize:
5108     - net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
5109     + net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
5110     out_fail:
5111     __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
5112     return err;
5113     @@ -734,25 +734,46 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
5114     }
5115     EXPORT_SYMBOL(ip_check_defrag);
5116    
5117     +unsigned int inet_frag_rbtree_purge(struct rb_root *root)
5118     +{
5119     + struct rb_node *p = rb_first(root);
5120     + unsigned int sum = 0;
5121     +
5122     + while (p) {
5123     + struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
5124     +
5125     + p = rb_next(p);
5126     + rb_erase(&skb->rbnode, root);
5127     + while (skb) {
5128     + struct sk_buff *next = FRAG_CB(skb)->next_frag;
5129     +
5130     + sum += skb->truesize;
5131     + kfree_skb(skb);
5132     + skb = next;
5133     + }
5134     + }
5135     + return sum;
5136     +}
5137     +EXPORT_SYMBOL(inet_frag_rbtree_purge);
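
inet_frag_rbtree_purge() frees two levels of structure: every tree node is the head of a run, and each head chains further fragments through next_frag. A userspace caricature with the tree flattened to an array (toy types, unchecked malloc, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct toy_skb {
	unsigned int truesize;
	struct toy_skb *next_frag;
};

static struct toy_skb *mk(unsigned int truesize, struct toy_skb *next)
{
	struct toy_skb *s = malloc(sizeof(*s));

	s->truesize = truesize;
	s->next_frag = next;
	return s;
}

/* Free every run head and every fragment chained behind it, returning
 * the total truesize so the caller can adjust the memory accounting. */
static unsigned int purge_runs(struct toy_skb *heads[], int n)
{
	unsigned int sum = 0;

	for (int i = 0; i < n; i++) {
		struct toy_skb *skb = heads[i];

		while (skb) {
			struct toy_skb *next = skb->next_frag;

			sum += skb->truesize;
			free(skb);
			skb = next;
		}
	}
	return sum;
}

int main(void)
{
	struct toy_skb *runs[2] = {
		mk(100, mk(200, NULL)),	/* a run of two fragments */
		mk(300, NULL),		/* a run of one */
	};

	printf("released %u bytes\n", purge_runs(runs, 2));	/* 600 */
	return 0;
}
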
5138     +
5139     #ifdef CONFIG_SYSCTL
5140     -static int zero;
5141     +static int dist_min;
5142    
5143     static struct ctl_table ip4_frags_ns_ctl_table[] = {
5144     {
5145     .procname = "ipfrag_high_thresh",
5146     .data = &init_net.ipv4.frags.high_thresh,
5147     - .maxlen = sizeof(int),
5148     + .maxlen = sizeof(unsigned long),
5149     .mode = 0644,
5150     - .proc_handler = proc_dointvec_minmax,
5151     + .proc_handler = proc_doulongvec_minmax,
5152     .extra1 = &init_net.ipv4.frags.low_thresh
5153     },
5154     {
5155     .procname = "ipfrag_low_thresh",
5156     .data = &init_net.ipv4.frags.low_thresh,
5157     - .maxlen = sizeof(int),
5158     + .maxlen = sizeof(unsigned long),
5159     .mode = 0644,
5160     - .proc_handler = proc_dointvec_minmax,
5161     - .extra1 = &zero,
5162     + .proc_handler = proc_doulongvec_minmax,
5163     .extra2 = &init_net.ipv4.frags.high_thresh
5164     },
5165     {
5166     @@ -768,7 +789,7 @@ static struct ctl_table ip4_frags_ns_ctl_table[] = {
5167     .maxlen = sizeof(int),
5168     .mode = 0644,
5169     .proc_handler = proc_dointvec_minmax,
5170     - .extra1 = &zero
5171     + .extra1 = &dist_min,
5172     },
5173     { }
5174     };
5175     @@ -850,6 +871,8 @@ static void __init ip4_frags_ctl_register(void)
5176    
5177     static int __net_init ipv4_frags_init_net(struct net *net)
5178     {
5179     + int res;
5180     +
5181     /* Fragment cache limits.
5182     *
5183     * The fragment memory accounting code, (tries to) account for
5184     @@ -874,16 +897,21 @@ static int __net_init ipv4_frags_init_net(struct net *net)
5185     net->ipv4.frags.timeout = IP_FRAG_TIME;
5186    
5187     net->ipv4.frags.max_dist = 64;
5188     -
5189     - inet_frags_init_net(&net->ipv4.frags);
5190     -
5191     - return ip4_frags_ns_ctl_register(net);
5192     + net->ipv4.frags.f = &ip4_frags;
5193     +
5194     + res = inet_frags_init_net(&net->ipv4.frags);
5195     + if (res < 0)
5196     + return res;
5197     + res = ip4_frags_ns_ctl_register(net);
5198     + if (res < 0)
5199     + inet_frags_exit_net(&net->ipv4.frags);
5200     + return res;
5201     }
5202    
5203     static void __net_exit ipv4_frags_exit_net(struct net *net)
5204     {
5205     ip4_frags_ns_ctl_unregister(net);
5206     - inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
5207     + inet_frags_exit_net(&net->ipv4.frags);
5208     }
5209    
5210     static struct pernet_operations ip4_frags_ops = {
5211     @@ -891,17 +919,49 @@ static struct pernet_operations ip4_frags_ops = {
5212     .exit = ipv4_frags_exit_net,
5213     };
5214    
5215     +
5216     +static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
5217     +{
5218     + return jhash2(data,
5219     + sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
5220     +}
5221     +
5222     +static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
5223     +{
5224     + const struct inet_frag_queue *fq = data;
5225     +
5226     + return jhash2((const u32 *)&fq->key.v4,
5227     + sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
5228     +}
5229     +
5230     +static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
5231     +{
5232     + const struct frag_v4_compare_key *key = arg->key;
5233     + const struct inet_frag_queue *fq = ptr;
5234     +
5235     + return !!memcmp(&fq->key, key, sizeof(*key));
5236     +}
5237     +
5238     +static const struct rhashtable_params ip4_rhash_params = {
5239     + .head_offset = offsetof(struct inet_frag_queue, node),
5240     + .key_offset = offsetof(struct inet_frag_queue, key),
5241     + .key_len = sizeof(struct frag_v4_compare_key),
5242     + .hashfn = ip4_key_hashfn,
5243     + .obj_hashfn = ip4_obj_hashfn,
5244     + .obj_cmpfn = ip4_obj_cmpfn,
5245     + .automatic_shrinking = true,
5246     +};
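
The parameters above key the table on the whole frag_v4_compare_key and hash it word-by-word (jhash2() over sizeof(key)/sizeof(u32) words, with a seed). A runnable userspace analogue that hashes a key struct as an array of u32 words — both the layout and the FNV-1a mix below are illustrative stand-ins, not the kernel's actual struct or jhash2():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct v4_key_toy {			/* hypothetical layout */
	uint32_t saddr, daddr, user, vif;
	uint32_t id_proto;		/* id and protocol packed together */
};

/* Word-wise FNV-1a, standing in for jhash2(data, n_words, seed). */
static uint32_t hash_words(const uint32_t *w, size_t n, uint32_t seed)
{
	uint32_t h = 2166136261u ^ seed;

	for (size_t i = 0; i < n; i++) {
		h ^= w[i];
		h *= 16777619u;
	}
	return h;
}

int main(void)
{
	struct v4_key_toy k = { 0x0a000001, 0x0a000002, 0, 0, 0x12340006 };

	printf("hash: %08x\n",
	       hash_words((const uint32_t *)&k,
			  sizeof(k) / sizeof(uint32_t), 42));
	return 0;
}
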
5247     +
5248     void __init ipfrag_init(void)
5249     {
5250     - ip4_frags_ctl_register();
5251     - register_pernet_subsys(&ip4_frags_ops);
5252     - ip4_frags.hashfn = ip4_hashfn;
5253     ip4_frags.constructor = ip4_frag_init;
5254     ip4_frags.destructor = ip4_frag_free;
5255     ip4_frags.qsize = sizeof(struct ipq);
5256     - ip4_frags.match = ip4_frag_match;
5257     ip4_frags.frag_expire = ip_expire;
5258     ip4_frags.frags_cache_name = ip_frag_cache_name;
5259     + ip4_frags.rhash_params = ip4_rhash_params;
5260     if (inet_frags_init(&ip4_frags))
5261     panic("IP: failed to allocate ip4_frags cache\n");
5262     + ip4_frags_ctl_register();
5263     + register_pernet_subsys(&ip4_frags_ops);
5264     }
5265     diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
5266     index 127153f1ed8a..3fbf688a1943 100644
5267     --- a/net/ipv4/proc.c
5268     +++ b/net/ipv4/proc.c
5269     @@ -54,7 +54,6 @@
5270     static int sockstat_seq_show(struct seq_file *seq, void *v)
5271     {
5272     struct net *net = seq->private;
5273     - unsigned int frag_mem;
5274     int orphans, sockets;
5275    
5276     orphans = percpu_counter_sum_positive(&tcp_orphan_count);
5277     @@ -72,8 +71,9 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
5278     sock_prot_inuse_get(net, &udplite_prot));
5279     seq_printf(seq, "RAW: inuse %d\n",
5280     sock_prot_inuse_get(net, &raw_prot));
5281     - frag_mem = ip_frag_mem(net);
5282     - seq_printf(seq, "FRAG: inuse %u memory %u\n", !!frag_mem, frag_mem);
5283     + seq_printf(seq, "FRAG: inuse %u memory %lu\n",
5284     + atomic_read(&net->ipv4.frags.rhashtable.nelems),
5285     + frag_mem_limit(&net->ipv4.frags));
5286     return 0;
5287     }
5288    
5289     @@ -132,6 +132,7 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
5290     SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
5291     SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
5292     SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
5293     + SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS),
5294     SNMP_MIB_SENTINEL
5295     };
5296    
5297     diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
5298     index fbbeda647774..0567edb76522 100644
5299     --- a/net/ipv4/tcp_fastopen.c
5300     +++ b/net/ipv4/tcp_fastopen.c
5301     @@ -458,17 +458,15 @@ bool tcp_fastopen_active_should_disable(struct sock *sk)
5302     void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
5303     {
5304     struct tcp_sock *tp = tcp_sk(sk);
5305     - struct rb_node *p;
5306     - struct sk_buff *skb;
5307     struct dst_entry *dst;
5308     + struct sk_buff *skb;
5309    
5310     if (!tp->syn_fastopen)
5311     return;
5312    
5313     if (!tp->data_segs_in) {
5314     - p = rb_first(&tp->out_of_order_queue);
5315     - if (p && !rb_next(p)) {
5316     - skb = rb_entry(p, struct sk_buff, rbnode);
5317     + skb = skb_rb_first(&tp->out_of_order_queue);
5318     + if (skb && !skb_rb_next(skb)) {
5319     if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
5320     tcp_fastopen_active_disable(sk);
5321     return;
5322     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
5323     index bdabd748f4bc..991f382afc1b 100644
5324     --- a/net/ipv4/tcp_input.c
5325     +++ b/net/ipv4/tcp_input.c
5326     @@ -4372,7 +4372,7 @@ static void tcp_ofo_queue(struct sock *sk)
5327    
5328     p = rb_first(&tp->out_of_order_queue);
5329     while (p) {
5330     - skb = rb_entry(p, struct sk_buff, rbnode);
5331     + skb = rb_to_skb(p);
5332     if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
5333     break;
5334    
5335     @@ -4440,7 +4440,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
5336     static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
5337     {
5338     struct tcp_sock *tp = tcp_sk(sk);
5339     - struct rb_node **p, *q, *parent;
5340     + struct rb_node **p, *parent;
5341     struct sk_buff *skb1;
5342     u32 seq, end_seq;
5343     bool fragstolen;
5344     @@ -4503,7 +4503,7 @@ coalesce_done:
5345     parent = NULL;
5346     while (*p) {
5347     parent = *p;
5348     - skb1 = rb_entry(parent, struct sk_buff, rbnode);
5349     + skb1 = rb_to_skb(parent);
5350     if (before(seq, TCP_SKB_CB(skb1)->seq)) {
5351     p = &parent->rb_left;
5352     continue;
5353     @@ -4548,9 +4548,7 @@ insert:
5354    
5355     merge_right:
5356     /* Remove other segments covered by skb. */
5357     - while ((q = rb_next(&skb->rbnode)) != NULL) {
5358     - skb1 = rb_entry(q, struct sk_buff, rbnode);
5359     -
5360     + while ((skb1 = skb_rb_next(skb)) != NULL) {
5361     if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
5362     break;
5363     if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
5364     @@ -4565,7 +4563,7 @@ merge_right:
5365     tcp_drop(sk, skb1);
5366     }
5367     /* If there is no skb after us, we are the last_skb ! */
5368     - if (!q)
5369     + if (!skb1)
5370     tp->ooo_last_skb = skb;
5371    
5372     add_sack:
5373     @@ -4749,7 +4747,7 @@ static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *li
5374     if (list)
5375     return !skb_queue_is_last(list, skb) ? skb->next : NULL;
5376    
5377     - return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
5378     + return skb_rb_next(skb);
5379     }
5380    
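
The tcp_input.c churn around this hunk is mostly mechanical: open-coded rb_entry(..., struct sk_buff, rbnode) becomes the rb_to_skb()/skb_rb_first()/skb_rb_next() helpers, which are thin container_of wrappers over the rbtree API. A userspace toy of the underlying container_of trick (hypothetical types):

#include <stddef.h>
#include <stdio.h>

struct rbnode_toy { int color; };

struct toy_skb {
	int seq;
	struct rbnode_toy rbnode;	/* node embedded in the payload */
};

/* Recover the enclosing structure from a pointer to its embedded node. */
#define node_to_skb(n) \
	((struct toy_skb *)((char *)(n) - offsetof(struct toy_skb, rbnode)))

int main(void)
{
	struct toy_skb s = { .seq = 7 };
	struct rbnode_toy *n = &s.rbnode;

	printf("seq via node: %d\n", node_to_skb(n)->seq);	/* 7 */
	return 0;
}
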
5381     static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
5382     @@ -4778,7 +4776,7 @@ static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
5383    
5384     while (*p) {
5385     parent = *p;
5386     - skb1 = rb_entry(parent, struct sk_buff, rbnode);
5387     + skb1 = rb_to_skb(parent);
5388     if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
5389     p = &parent->rb_left;
5390     else
5391     @@ -4898,19 +4896,12 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
5392     struct tcp_sock *tp = tcp_sk(sk);
5393     u32 range_truesize, sum_tiny = 0;
5394     struct sk_buff *skb, *head;
5395     - struct rb_node *p;
5396     u32 start, end;
5397    
5398     - p = rb_first(&tp->out_of_order_queue);
5399     - skb = rb_entry_safe(p, struct sk_buff, rbnode);
5400     + skb = skb_rb_first(&tp->out_of_order_queue);
5401     new_range:
5402     if (!skb) {
5403     - p = rb_last(&tp->out_of_order_queue);
5404     - /* Note: This is possible p is NULL here. We do not
5405     - * use rb_entry_safe(), as ooo_last_skb is valid only
5406     - * if rbtree is not empty.
5407     - */
5408     - tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
5409     + tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
5410     return;
5411     }
5412     start = TCP_SKB_CB(skb)->seq;
5413     @@ -4918,7 +4909,7 @@ new_range:
5414     range_truesize = skb->truesize;
5415    
5416     for (head = skb;;) {
5417     - skb = tcp_skb_next(skb, NULL);
5418     + skb = skb_rb_next(skb);
5419    
5420     /* Range is terminated when we see a gap or when
5421     * we are at the queue end.
5422     @@ -4974,7 +4965,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
5423     prev = rb_prev(node);
5424     rb_erase(node, &tp->out_of_order_queue);
5425     goal -= rb_to_skb(node)->truesize;
5426     - tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
5427     + tcp_drop(sk, rb_to_skb(node));
5428     if (!prev || goal <= 0) {
5429     sk_mem_reclaim(sk);
5430     if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
5431     @@ -4984,7 +4975,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
5432     }
5433     node = prev;
5434     } while (node);
5435     - tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
5436     + tp->ooo_last_skb = rb_to_skb(prev);
5437    
5438     /* Reset SACK state. A conforming SACK implementation will
5439     * do the same at a timeout based retransmit. When a connection
5440     diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
5441     index ee33a6743f3b..2ed8536e10b6 100644
5442     --- a/net/ipv6/netfilter/nf_conntrack_reasm.c
5443     +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
5444     @@ -63,7 +63,6 @@ struct nf_ct_frag6_skb_cb
5445     static struct inet_frags nf_frags;
5446    
5447     #ifdef CONFIG_SYSCTL
5448     -static int zero;
5449    
5450     static struct ctl_table nf_ct_frag6_sysctl_table[] = {
5451     {
5452     @@ -76,18 +75,17 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
5453     {
5454     .procname = "nf_conntrack_frag6_low_thresh",
5455     .data = &init_net.nf_frag.frags.low_thresh,
5456     - .maxlen = sizeof(unsigned int),
5457     + .maxlen = sizeof(unsigned long),
5458     .mode = 0644,
5459     - .proc_handler = proc_dointvec_minmax,
5460     - .extra1 = &zero,
5461     + .proc_handler = proc_doulongvec_minmax,
5462     .extra2 = &init_net.nf_frag.frags.high_thresh
5463     },
5464     {
5465     .procname = "nf_conntrack_frag6_high_thresh",
5466     .data = &init_net.nf_frag.frags.high_thresh,
5467     - .maxlen = sizeof(unsigned int),
5468     + .maxlen = sizeof(unsigned long),
5469     .mode = 0644,
5470     - .proc_handler = proc_dointvec_minmax,
5471     + .proc_handler = proc_doulongvec_minmax,
5472     .extra1 = &init_net.nf_frag.frags.low_thresh
5473     },
5474     { }
5475     @@ -152,59 +150,35 @@ static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
5476     return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
5477     }
5478    
5479     -static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr,
5480     - const struct in6_addr *daddr)
5481     -{
5482     - net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd));
5483     - return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
5484     - (__force u32)id, nf_frags.rnd);
5485     -}
5486     -
5487     -
5488     -static unsigned int nf_hashfn(const struct inet_frag_queue *q)
5489     -{
5490     - const struct frag_queue *nq;
5491     -
5492     - nq = container_of(q, struct frag_queue, q);
5493     - return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr);
5494     -}
5495     -
5496     -static void nf_ct_frag6_expire(unsigned long data)
5497     +static void nf_ct_frag6_expire(struct timer_list *t)
5498     {
5499     + struct inet_frag_queue *frag = from_timer(frag, t, timer);
5500     struct frag_queue *fq;
5501     struct net *net;
5502    
5503     - fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
5504     + fq = container_of(frag, struct frag_queue, q);
5505     net = container_of(fq->q.net, struct net, nf_frag.frags);
5506    
5507     - ip6_expire_frag_queue(net, fq, &nf_frags);
5508     + ip6_expire_frag_queue(net, fq);
5509     }
5510    
5511     /* Creation primitives. */
5512     -static inline struct frag_queue *fq_find(struct net *net, __be32 id,
5513     - u32 user, struct in6_addr *src,
5514     - struct in6_addr *dst, int iif, u8 ecn)
5515     +static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
5516     + const struct ipv6hdr *hdr, int iif)
5517     {
5518     + struct frag_v6_compare_key key = {
5519     + .id = id,
5520     + .saddr = hdr->saddr,
5521     + .daddr = hdr->daddr,
5522     + .user = user,
5523     + .iif = iif,
5524     + };
5525     struct inet_frag_queue *q;
5526     - struct ip6_create_arg arg;
5527     - unsigned int hash;
5528     -
5529     - arg.id = id;
5530     - arg.user = user;
5531     - arg.src = src;
5532     - arg.dst = dst;
5533     - arg.iif = iif;
5534     - arg.ecn = ecn;
5535     -
5536     - local_bh_disable();
5537     - hash = nf_hash_frag(id, src, dst);
5538     -
5539     - q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
5540     - local_bh_enable();
5541     - if (IS_ERR_OR_NULL(q)) {
5542     - inet_frag_maybe_warn_overflow(q, pr_fmt());
5543     +
5544     + q = inet_frag_find(&net->nf_frag.frags, &key);
5545     + if (!q)
5546     return NULL;
5547     - }
5548     +
5549     return container_of(q, struct frag_queue, q);
5550     }
5551    
5552     @@ -263,7 +237,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
5553     * this case. -DaveM
5554     */
5555     pr_debug("end of fragment not rounded to 8 bytes.\n");
5556     - inet_frag_kill(&fq->q, &nf_frags);
5557     + inet_frag_kill(&fq->q);
5558     return -EPROTO;
5559     }
5560     if (end > fq->q.len) {
5561     @@ -356,7 +330,7 @@ found:
5562     return 0;
5563    
5564     discard_fq:
5565     - inet_frag_kill(&fq->q, &nf_frags);
5566     + inet_frag_kill(&fq->q);
5567     err:
5568     return -EINVAL;
5569     }
5570     @@ -378,7 +352,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
5571     int payload_len;
5572     u8 ecn;
5573    
5574     - inet_frag_kill(&fq->q, &nf_frags);
5575     + inet_frag_kill(&fq->q);
5576    
5577     WARN_ON(head == NULL);
5578     WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
5579     @@ -479,6 +453,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
5580     else if (head->ip_summed == CHECKSUM_COMPLETE)
5581     head->csum = csum_add(head->csum, fp->csum);
5582     head->truesize += fp->truesize;
5583     + fp->sk = NULL;
5584     }
5585     sub_frag_mem_limit(fq->q.net, head->truesize);
5586    
5587     @@ -497,6 +472,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
5588     head->csum);
5589    
5590     fq->q.fragments = NULL;
5591     + fq->q.rb_fragments = RB_ROOT;
5592     fq->q.fragments_tail = NULL;
5593    
5594     return true;
5595     @@ -591,9 +567,13 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
5596     hdr = ipv6_hdr(skb);
5597     fhdr = (struct frag_hdr *)skb_transport_header(skb);
5598    
5599     + if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
5600     + fhdr->frag_off & htons(IP6_MF))
5601     + return -EINVAL;
5602     +
5603     skb_orphan(skb);
5604     - fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
5605     - skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
5606     + fq = fq_find(net, fhdr->identification, user, hdr,
5607     + skb->dev ? skb->dev->ifindex : 0);
5608     if (fq == NULL) {
5609     pr_debug("Can't find and can't create new queue\n");
5610     return -ENOMEM;
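
The check added near the top of this hunk rejects any fragment that still has MF (more fragments) set yet is smaller than IPV6_MIN_MTU (1280 bytes), on the theory that no legitimate sender fragments below the minimum IPv6 MTU; such runts are dropped before a queue is even looked up. A runnable sketch of the predicate (hypothetical helper name):

#include <stdbool.h>
#include <stdio.h>

#define IPV6_MIN_MTU 1280

/* Only the last fragment of a datagram may be shorter than the
 * minimum IPv6 MTU; reject runt fragments that still have MF set. */
static bool frag_len_ok(unsigned int len, bool more_fragments)
{
	return !(more_fragments && len < IPV6_MIN_MTU);
}

int main(void)
{
	printf("%d\n", frag_len_ok(1280, true));	/* 1: big enough */
	printf("%d\n", frag_len_ok(600, true));		/* 0: runt, MF set */
	printf("%d\n", frag_len_ok(600, false));	/* 1: short last frag */
	return 0;
}
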
5611     @@ -623,25 +603,33 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
5612    
5613     out_unlock:
5614     spin_unlock_bh(&fq->q.lock);
5615     - inet_frag_put(&fq->q, &nf_frags);
5616     + inet_frag_put(&fq->q);
5617     return ret;
5618     }
5619     EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);
5620    
5621     static int nf_ct_net_init(struct net *net)
5622     {
5623     + int res;
5624     +
5625     net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
5626     net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
5627     net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
5628     - inet_frags_init_net(&net->nf_frag.frags);
5629     -
5630     - return nf_ct_frag6_sysctl_register(net);
5631     + net->nf_frag.frags.f = &nf_frags;
5632     +
5633     + res = inet_frags_init_net(&net->nf_frag.frags);
5634     + if (res < 0)
5635     + return res;
5636     + res = nf_ct_frag6_sysctl_register(net);
5637     + if (res < 0)
5638     + inet_frags_exit_net(&net->nf_frag.frags);
5639     + return res;
5640     }
5641    
5642     static void nf_ct_net_exit(struct net *net)
5643     {
5644     nf_ct_frags6_sysctl_unregister(net);
5645     - inet_frags_exit_net(&net->nf_frag.frags, &nf_frags);
5646     + inet_frags_exit_net(&net->nf_frag.frags);
5647     }
5648    
5649     static struct pernet_operations nf_ct_net_ops = {
5650     @@ -653,13 +641,12 @@ int nf_ct_frag6_init(void)
5651     {
5652     int ret = 0;
5653    
5654     - nf_frags.hashfn = nf_hashfn;
5655     nf_frags.constructor = ip6_frag_init;
5656     nf_frags.destructor = NULL;
5657     nf_frags.qsize = sizeof(struct frag_queue);
5658     - nf_frags.match = ip6_frag_match;
5659     nf_frags.frag_expire = nf_ct_frag6_expire;
5660     nf_frags.frags_cache_name = nf_frags_cache_name;
5661     + nf_frags.rhash_params = ip6_rhash_params;
5662     ret = inet_frags_init(&nf_frags);
5663     if (ret)
5664     goto out;
5665     diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
5666     index e88bcb8ff0fd..dc04c024986c 100644
5667     --- a/net/ipv6/proc.c
5668     +++ b/net/ipv6/proc.c
5669     @@ -38,7 +38,6 @@
5670     static int sockstat6_seq_show(struct seq_file *seq, void *v)
5671     {
5672     struct net *net = seq->private;
5673     - unsigned int frag_mem = ip6_frag_mem(net);
5674    
5675     seq_printf(seq, "TCP6: inuse %d\n",
5676     sock_prot_inuse_get(net, &tcpv6_prot));
5677     @@ -48,7 +47,9 @@ static int sockstat6_seq_show(struct seq_file *seq, void *v)
5678     sock_prot_inuse_get(net, &udplitev6_prot));
5679     seq_printf(seq, "RAW6: inuse %d\n",
5680     sock_prot_inuse_get(net, &rawv6_prot));
5681     - seq_printf(seq, "FRAG6: inuse %u memory %u\n", !!frag_mem, frag_mem);
5682     + seq_printf(seq, "FRAG6: inuse %u memory %lu\n",
5683     + atomic_read(&net->ipv6.frags.rhashtable.nelems),
5684     + frag_mem_limit(&net->ipv6.frags));
5685     return 0;
5686     }
5687    
5688     diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
5689     index 846012eae526..ede0061b6f5d 100644
5690     --- a/net/ipv6/reassembly.c
5691     +++ b/net/ipv6/reassembly.c
5692     @@ -79,130 +79,93 @@ static struct inet_frags ip6_frags;
5693     static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
5694     struct net_device *dev);
5695    
5696     -/*
5697     - * callers should be careful not to use the hash value outside the ipfrag_lock
5698     - * as doing so could race with ipfrag_hash_rnd being recalculated.
5699     - */
5700     -static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
5701     - const struct in6_addr *daddr)
5702     -{
5703     - net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd));
5704     - return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
5705     - (__force u32)id, ip6_frags.rnd);
5706     -}
5707     -
5708     -static unsigned int ip6_hashfn(const struct inet_frag_queue *q)
5709     -{
5710     - const struct frag_queue *fq;
5711     -
5712     - fq = container_of(q, struct frag_queue, q);
5713     - return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr);
5714     -}
5715     -
5716     -bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
5717     -{
5718     - const struct frag_queue *fq;
5719     - const struct ip6_create_arg *arg = a;
5720     -
5721     - fq = container_of(q, struct frag_queue, q);
5722     - return fq->id == arg->id &&
5723     - fq->user == arg->user &&
5724     - ipv6_addr_equal(&fq->saddr, arg->src) &&
5725     - ipv6_addr_equal(&fq->daddr, arg->dst) &&
5726     - (arg->iif == fq->iif ||
5727     - !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
5728     - IPV6_ADDR_LINKLOCAL)));
5729     -}
5730     -EXPORT_SYMBOL(ip6_frag_match);
5731     -
5732     void ip6_frag_init(struct inet_frag_queue *q, const void *a)
5733     {
5734     struct frag_queue *fq = container_of(q, struct frag_queue, q);
5735     - const struct ip6_create_arg *arg = a;
5736     + const struct frag_v6_compare_key *key = a;
5737    
5738     - fq->id = arg->id;
5739     - fq->user = arg->user;
5740     - fq->saddr = *arg->src;
5741     - fq->daddr = *arg->dst;
5742     - fq->ecn = arg->ecn;
5743     + q->key.v6 = *key;
5744     + fq->ecn = 0;
5745     }
5746     EXPORT_SYMBOL(ip6_frag_init);
5747    
5748     -void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
5749     - struct inet_frags *frags)
5750     +void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq)
5751     {
5752     struct net_device *dev = NULL;
5753     + struct sk_buff *head;
5754    
5755     + rcu_read_lock();
5756     spin_lock(&fq->q.lock);
5757    
5758     if (fq->q.flags & INET_FRAG_COMPLETE)
5759     goto out;
5760    
5761     - inet_frag_kill(&fq->q, frags);
5762     + inet_frag_kill(&fq->q);
5763    
5764     - rcu_read_lock();
5765     dev = dev_get_by_index_rcu(net, fq->iif);
5766     if (!dev)
5767     - goto out_rcu_unlock;
5768     + goto out;
5769    
5770     __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
5771     -
5772     - if (inet_frag_evicting(&fq->q))
5773     - goto out_rcu_unlock;
5774     -
5775     __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
5776    
5777     /* Don't send error if the first segment did not arrive. */
5778     - if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments)
5779     - goto out_rcu_unlock;
5780     + head = fq->q.fragments;
5781     + if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
5782     + goto out;
5783    
5784     /* But use as source device on which LAST ARRIVED
5785     * segment was received. And do not use fq->dev
5786     * pointer directly, device might already disappeared.
5787     */
5788     - fq->q.fragments->dev = dev;
5789     - icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
5790     -out_rcu_unlock:
5791     - rcu_read_unlock();
5792     + head->dev = dev;
5793     + skb_get(head);
5794     + spin_unlock(&fq->q.lock);
5795     +
5796     + icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
5797     + kfree_skb(head);
5798     + goto out_rcu_unlock;
5799     +
5800     out:
5801     spin_unlock(&fq->q.lock);
5802     - inet_frag_put(&fq->q, frags);
5803     +out_rcu_unlock:
5804     + rcu_read_unlock();
5805     + inet_frag_put(&fq->q);
5806     }
5807     EXPORT_SYMBOL(ip6_expire_frag_queue);
5808    
5809     -static void ip6_frag_expire(unsigned long data)
5810     +static void ip6_frag_expire(struct timer_list *t)
5811     {
5812     + struct inet_frag_queue *frag = from_timer(frag, t, timer);
5813     struct frag_queue *fq;
5814     struct net *net;
5815    
5816     - fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
5817     + fq = container_of(frag, struct frag_queue, q);
5818     net = container_of(fq->q.net, struct net, ipv6.frags);
5819    
5820     - ip6_expire_frag_queue(net, fq, &ip6_frags);
5821     + ip6_expire_frag_queue(net, fq);
5822     }
5823    
5824     static struct frag_queue *
5825     -fq_find(struct net *net, __be32 id, const struct in6_addr *src,
5826     - const struct in6_addr *dst, int iif, u8 ecn)
5827     +fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif)
5828     {
5829     + struct frag_v6_compare_key key = {
5830     + .id = id,
5831     + .saddr = hdr->saddr,
5832     + .daddr = hdr->daddr,
5833     + .user = IP6_DEFRAG_LOCAL_DELIVER,
5834     + .iif = iif,
5835     + };
5836     struct inet_frag_queue *q;
5837     - struct ip6_create_arg arg;
5838     - unsigned int hash;
5839    
5840     - arg.id = id;
5841     - arg.user = IP6_DEFRAG_LOCAL_DELIVER;
5842     - arg.src = src;
5843     - arg.dst = dst;
5844     - arg.iif = iif;
5845     - arg.ecn = ecn;
5846     + if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
5847     + IPV6_ADDR_LINKLOCAL)))
5848     + key.iif = 0;
5849    
5850     - hash = inet6_hash_frag(id, src, dst);
5851     -
5852     - q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
5853     - if (IS_ERR_OR_NULL(q)) {
5854     - inet_frag_maybe_warn_overflow(q, pr_fmt());
5855     + q = inet_frag_find(&net->ipv6.frags, &key);
5856     + if (!q)
5857     return NULL;
5858     - }
5859     +
5860     return container_of(q, struct frag_queue, q);
5861     }
5862    
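
Editor's note: the hunk above moves ip6_frag_expire() from the old unsigned-long-data timer callback to the timer_list API; from_timer() is just a container_of() that recovers the embedding object from the timer pointer. A minimal userspace sketch of that recovery step (toy struct layouts, not the kernel's):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of()/from_timer() idiom. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_list { int dummy; };               /* placeholder */
struct inet_frag_queue { struct timer_list timer; int id; };
struct frag_queue { struct inet_frag_queue q; };

static void expire(struct timer_list *t)
{
        /* from_timer(frag, t, timer) expands to container_of(t, ..., timer) */
        struct inet_frag_queue *frag =
                container_of(t, struct inet_frag_queue, timer);
        struct frag_queue *fq = container_of(frag, struct frag_queue, q);

        printf("expired queue id=%d\n", fq->q.id);
}

int main(void)
{
        struct frag_queue fq = { .q = { .id = 42 } };

        expire(&fq.q.timer);    /* the timer core would pass this pointer */
        return 0;
}

The double container_of() mirrors the callback above: it first recovers the inet_frag_queue from the timer, then the enclosing frag_queue from that.
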
5863     @@ -363,7 +326,7 @@ found:
5864     return -1;
5865    
5866     discard_fq:
5867     - inet_frag_kill(&fq->q, &ip6_frags);
5868     + inet_frag_kill(&fq->q);
5869     err:
5870     __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
5871     IPSTATS_MIB_REASMFAILS);
5872     @@ -390,7 +353,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
5873     int sum_truesize;
5874     u8 ecn;
5875    
5876     - inet_frag_kill(&fq->q, &ip6_frags);
5877     + inet_frag_kill(&fq->q);
5878    
5879     ecn = ip_frag_ecn_table[fq->ecn];
5880     if (unlikely(ecn == 0xff))
5881     @@ -509,6 +472,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
5882     __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
5883     rcu_read_unlock();
5884     fq->q.fragments = NULL;
5885     + fq->q.rb_fragments = RB_ROOT;
5886     fq->q.fragments_tail = NULL;
5887     return 1;
5888    
5889     @@ -530,6 +494,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
5890     struct frag_queue *fq;
5891     const struct ipv6hdr *hdr = ipv6_hdr(skb);
5892     struct net *net = dev_net(skb_dst(skb)->dev);
5893     + int iif;
5894    
5895     if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
5896     goto fail_hdr;
5897     @@ -558,17 +523,22 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
5898     return 1;
5899     }
5900    
5901     - fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
5902     - skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
5903     + if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
5904     + fhdr->frag_off & htons(IP6_MF))
5905     + goto fail_hdr;
5906     +
5907     + iif = skb->dev ? skb->dev->ifindex : 0;
5908     + fq = fq_find(net, fhdr->identification, hdr, iif);
5909     if (fq) {
5910     int ret;
5911    
5912     spin_lock(&fq->q.lock);
5913    
5914     + fq->iif = iif;
5915     ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
5916    
5917     spin_unlock(&fq->q.lock);
5918     - inet_frag_put(&fq->q, &ip6_frags);
5919     + inet_frag_put(&fq->q);
5920     return ret;
5921     }
5922    
5923     @@ -589,24 +559,22 @@ static const struct inet6_protocol frag_protocol = {
5924     };
5925    
5926     #ifdef CONFIG_SYSCTL
5927     -static int zero;
5928    
5929     static struct ctl_table ip6_frags_ns_ctl_table[] = {
5930     {
5931     .procname = "ip6frag_high_thresh",
5932     .data = &init_net.ipv6.frags.high_thresh,
5933     - .maxlen = sizeof(int),
5934     + .maxlen = sizeof(unsigned long),
5935     .mode = 0644,
5936     - .proc_handler = proc_dointvec_minmax,
5937     + .proc_handler = proc_doulongvec_minmax,
5938     .extra1 = &init_net.ipv6.frags.low_thresh
5939     },
5940     {
5941     .procname = "ip6frag_low_thresh",
5942     .data = &init_net.ipv6.frags.low_thresh,
5943     - .maxlen = sizeof(int),
5944     + .maxlen = sizeof(unsigned long),
5945     .mode = 0644,
5946     - .proc_handler = proc_dointvec_minmax,
5947     - .extra1 = &zero,
5948     + .proc_handler = proc_doulongvec_minmax,
5949     .extra2 = &init_net.ipv6.frags.high_thresh
5950     },
5951     {
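
Editor's note: the sysctl entries switch from int to unsigned long (with proc_doulongvec_minmax) because the reassembly thresholds are now long values; an int field silently loses anything at or past 2 GiB. A tiny illustration, assuming an LP64 target where unsigned long is 64-bit:

#include <stdio.h>

int main(void)
{
        unsigned long high_thresh = 4UL * 1024 * 1024 * 1024;  /* 4 GiB */
        int truncated = (int)high_thresh;          /* the old int field */

        printf("as unsigned long: %lu\n", high_thresh);
        printf("as int:           %d\n", truncated);  /* 0 on LP64: value lost */
        return 0;
}
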
5952     @@ -649,10 +617,6 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
5953     table[1].data = &net->ipv6.frags.low_thresh;
5954     table[1].extra2 = &net->ipv6.frags.high_thresh;
5955     table[2].data = &net->ipv6.frags.timeout;
5956     -
5957     - /* Don't export sysctls to unprivileged users */
5958     - if (net->user_ns != &init_user_ns)
5959     - table[0].procname = NULL;
5960     }
5961    
5962     hdr = register_net_sysctl(net, "net/ipv6", table);
5963     @@ -714,19 +678,27 @@ static void ip6_frags_sysctl_unregister(void)
5964    
5965     static int __net_init ipv6_frags_init_net(struct net *net)
5966     {
5967     + int res;
5968     +
5969     net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
5970     net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
5971     net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
5972     + net->ipv6.frags.f = &ip6_frags;
5973    
5974     - inet_frags_init_net(&net->ipv6.frags);
5975     + res = inet_frags_init_net(&net->ipv6.frags);
5976     + if (res < 0)
5977     + return res;
5978    
5979     - return ip6_frags_ns_sysctl_register(net);
5980     + res = ip6_frags_ns_sysctl_register(net);
5981     + if (res < 0)
5982     + inet_frags_exit_net(&net->ipv6.frags);
5983     + return res;
5984     }
5985    
5986     static void __net_exit ipv6_frags_exit_net(struct net *net)
5987     {
5988     ip6_frags_ns_sysctl_unregister(net);
5989     - inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
5990     + inet_frags_exit_net(&net->ipv6.frags);
5991     }
5992    
5993     static struct pernet_operations ip6_frags_ops = {
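
Editor's note: ipv6_frags_init_net() now checks the result of inet_frags_init_net() and unwinds it if the later sysctl registration fails. A self-contained sketch of that register-then-unwind pattern, with hypothetical stand-in stage names:

#include <stdio.h>

/* Hypothetical stand-ins for the two init stages in the hunk above. */
static int frags_init(void)   { puts("frags_init");  return 0; }
static void frags_exit(void)  { puts("frags_exit"); }
static int sysctl_init(void)  { puts("sysctl_init"); return -1; /* simulate failure */ }

static int init_net_sketch(void)
{
        int res;

        res = frags_init();
        if (res < 0)
                return res;     /* nothing to unwind yet */

        res = sysctl_init();
        if (res < 0)
                frags_exit();   /* unwind the earlier stage */
        return res;
}

int main(void)
{
        printf("init_net_sketch() = %d\n", init_net_sketch());
        return 0;
}
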
5994     @@ -734,14 +706,55 @@ static struct pernet_operations ip6_frags_ops = {
5995     .exit = ipv6_frags_exit_net,
5996     };
5997    
5998     +static u32 ip6_key_hashfn(const void *data, u32 len, u32 seed)
5999     +{
6000     + return jhash2(data,
6001     + sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
6002     +}
6003     +
6004     +static u32 ip6_obj_hashfn(const void *data, u32 len, u32 seed)
6005     +{
6006     + const struct inet_frag_queue *fq = data;
6007     +
6008     + return jhash2((const u32 *)&fq->key.v6,
6009     + sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
6010     +}
6011     +
6012     +static int ip6_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
6013     +{
6014     + const struct frag_v6_compare_key *key = arg->key;
6015     + const struct inet_frag_queue *fq = ptr;
6016     +
6017     + return !!memcmp(&fq->key, key, sizeof(*key));
6018     +}
6019     +
6020     +const struct rhashtable_params ip6_rhash_params = {
6021     + .head_offset = offsetof(struct inet_frag_queue, node),
6022     + .hashfn = ip6_key_hashfn,
6023     + .obj_hashfn = ip6_obj_hashfn,
6024     + .obj_cmpfn = ip6_obj_cmpfn,
6025     + .automatic_shrinking = true,
6026     +};
6027     +EXPORT_SYMBOL(ip6_rhash_params);
6028     +
6029     int __init ipv6_frag_init(void)
6030     {
6031     int ret;
6032    
6033     - ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
6034     + ip6_frags.constructor = ip6_frag_init;
6035     + ip6_frags.destructor = NULL;
6036     + ip6_frags.qsize = sizeof(struct frag_queue);
6037     + ip6_frags.frag_expire = ip6_frag_expire;
6038     + ip6_frags.frags_cache_name = ip6_frag_cache_name;
6039     + ip6_frags.rhash_params = ip6_rhash_params;
6040     + ret = inet_frags_init(&ip6_frags);
6041     if (ret)
6042     goto out;
6043    
6044     + ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
6045     + if (ret)
6046     + goto err_protocol;
6047     +
6048     ret = ip6_frags_sysctl_register();
6049     if (ret)
6050     goto err_sysctl;
6051     @@ -750,16 +763,6 @@ int __init ipv6_frag_init(void)
6052     if (ret)
6053     goto err_pernet;
6054    
6055     - ip6_frags.hashfn = ip6_hashfn;
6056     - ip6_frags.constructor = ip6_frag_init;
6057     - ip6_frags.destructor = NULL;
6058     - ip6_frags.qsize = sizeof(struct frag_queue);
6059     - ip6_frags.match = ip6_frag_match;
6060     - ip6_frags.frag_expire = ip6_frag_expire;
6061     - ip6_frags.frags_cache_name = ip6_frag_cache_name;
6062     - ret = inet_frags_init(&ip6_frags);
6063     - if (ret)
6064     - goto err_pernet;
6065     out:
6066     return ret;
6067    
6068     @@ -767,6 +770,8 @@ err_pernet:
6069     ip6_frags_sysctl_unregister();
6070     err_sysctl:
6071     inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
6072     +err_protocol:
6073     + inet_frags_fini(&ip6_frags);
6074     goto out;
6075     }
6076    
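
Editor's note: ip6_rhash_params hashes the whole frag_v6_compare_key as an array of 32-bit words with jhash2(), and the compare callback follows rhashtable's convention of returning 0 on a match, hence the !!memcmp(). A userspace sketch of both callbacks; the word-mixing hash is a toy stand-in for jhash2() and the key layout is simplified:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified shape of the lookup key: all fixed-size 32-bit fields, so it
 * can be hashed word by word the way jhash2() does. */
struct v6_key {
        uint32_t id;
        uint32_t saddr[4];
        uint32_t daddr[4];
        uint32_t user;
        uint32_t iif;
};

/* Toy stand-in for jhash2(): any mixing over 32-bit words will do here. */
static uint32_t hash_words(const uint32_t *w, size_t n, uint32_t seed)
{
        uint32_t h = seed;

        while (n--)
                h = (h ^ *w++) * 2654435761u;   /* multiplicative mix */
        return h;
}

/* rhashtable's obj_cmpfn convention: 0 on match, nonzero otherwise,
 * which is exactly what !!memcmp() gives for a flat key. */
static int key_cmp(const struct v6_key *a, const struct v6_key *b)
{
        return !!memcmp(a, b, sizeof(*a));
}

int main(void)
{
        struct v6_key a = { .id = 1, .user = 0, .iif = 2 };
        struct v6_key b = a;

        printf("hash=%08x cmp=%d\n",
               hash_words((const uint32_t *)&a, sizeof(a) / 4, 0),
               key_cmp(&a, &b));
        return 0;
}
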
6077     diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
6078     index 8c8df75dbead..2a2ab6bfe5d8 100644
6079     --- a/net/sched/sch_netem.c
6080     +++ b/net/sched/sch_netem.c
6081     @@ -149,12 +149,6 @@ struct netem_skb_cb {
6082     ktime_t tstamp_save;
6083     };
6084    
6085     -
6086     -static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
6087     -{
6088     - return rb_entry(rb, struct sk_buff, rbnode);
6089     -}
6090     -
6091     static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
6092     {
6093     /* we assume we can use skb next/prev/tstamp as storage for rb_node */
6094     @@ -365,7 +359,7 @@ static void tfifo_reset(struct Qdisc *sch)
6095     struct rb_node *p;
6096    
6097     while ((p = rb_first(&q->t_root))) {
6098     - struct sk_buff *skb = netem_rb_to_skb(p);
6099     + struct sk_buff *skb = rb_to_skb(p);
6100    
6101     rb_erase(p, &q->t_root);
6102     rtnl_kfree_skbs(skb, skb);
6103     @@ -382,7 +376,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
6104     struct sk_buff *skb;
6105    
6106     parent = *p;
6107     - skb = netem_rb_to_skb(parent);
6108     + skb = rb_to_skb(parent);
6109     if (tnext >= netem_skb_cb(skb)->time_to_send)
6110     p = &parent->rb_right;
6111     else
6112     @@ -538,7 +532,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
6113     struct sk_buff *t_skb;
6114     struct netem_skb_cb *t_last;
6115    
6116     - t_skb = netem_rb_to_skb(rb_last(&q->t_root));
6117     + t_skb = skb_rb_last(&q->t_root);
6118     t_last = netem_skb_cb(t_skb);
6119     if (!last ||
6120     t_last->time_to_send > last->time_to_send) {
6121     @@ -618,7 +612,7 @@ deliver:
6122     if (p) {
6123     psched_time_t time_to_send;
6124    
6125     - skb = netem_rb_to_skb(p);
6126     + skb = rb_to_skb(p);
6127    
6128     /* if more time remaining? */
6129     time_to_send = netem_skb_cb(skb)->time_to_send;
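
Editor's note: the netem hunks only swap the local netem_rb_to_skb() helper for the shared rb_to_skb(), but the surrounding tfifo_enqueue() logic is worth a sketch: packets are inserted into a tree ordered by time_to_send, with ties sent right so equal timestamps dequeue FIFO. A toy unbalanced tree stands in for the kernel rbtree:

#include <stdio.h>
#include <stdlib.h>

struct node {
        long time_to_send;
        struct node *left, *right;
};

/* Same ordering rule as tfifo_enqueue(): >= goes right, keeping ties FIFO. */
static void insert(struct node **p, struct node *n)
{
        while (*p) {
                if (n->time_to_send >= (*p)->time_to_send)
                        p = &(*p)->right;
                else
                        p = &(*p)->left;
        }
        *p = n;
}

static void walk(const struct node *n)
{
        if (!n)
                return;
        walk(n->left);
        printf("%ld ", n->time_to_send);
        walk(n->right);
}

int main(void)
{
        struct node *root = NULL;
        long t[] = { 30, 10, 20, 10 };

        for (unsigned i = 0; i < sizeof(t) / sizeof(t[0]); i++) {
                struct node *n = calloc(1, sizeof(*n));

                n->time_to_send = t[i];
                insert(&root, n);
        }
        walk(root);             /* prints 10 10 20 30 */
        putchar('\n');
        return 0;
}
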
6130     diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
6131     index 417abbb1f72c..8a027973f2ad 100644
6132     --- a/sound/pci/hda/hda_codec.c
6133     +++ b/sound/pci/hda/hda_codec.c
6134     @@ -3923,7 +3923,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus)
6135    
6136     list_for_each_codec(codec, bus) {
6137     /* FIXME: maybe a better way needed for forced reset */
6138     - cancel_delayed_work_sync(&codec->jackpoll_work);
6139     + if (current_work() != &codec->jackpoll_work.work)
6140     + cancel_delayed_work_sync(&codec->jackpoll_work);
6141     #ifdef CONFIG_PM
6142     if (hda_codec_is_power_on(codec)) {
6143     hda_call_codec_suspend(codec);
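
Editor's note: the guard added above keeps a codec's jackpoll work item from calling cancel_delayed_work_sync() on itself, which would wait forever for the very work that is running. A loose POSIX-threads analogy, with pthread_join() standing in for the synchronous cancel (build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_t worker_tid;
static pthread_barrier_t ready;

static void reset_from(const char *who)
{
        if (pthread_equal(pthread_self(), worker_tid)) {
                printf("%s: skip self-join (would deadlock)\n", who);
        } else {
                pthread_join(worker_tid, NULL);
                printf("%s: joined worker\n", who);
        }
}

static void *worker(void *arg)
{
        (void)arg;
        pthread_barrier_wait(&ready);   /* ensure worker_tid is published */
        reset_from("worker");           /* re-entrant path: must not wait on itself */
        return NULL;
}

int main(void)
{
        pthread_barrier_init(&ready, NULL, 2);
        pthread_create(&worker_tid, NULL, worker, NULL);
        pthread_barrier_wait(&ready);
        reset_from("main");             /* external path: waiting is fine */
        pthread_barrier_destroy(&ready);
        return 0;
}
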
6144     diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
6145     index 3479a1bc7caa..fb76423022e8 100644
6146     --- a/tools/perf/builtin-c2c.c
6147     +++ b/tools/perf/builtin-c2c.c
6148     @@ -2229,6 +2229,9 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
6149     " s Toggle full length of symbol and source line columns \n"

6150     " q Return back to cacheline list \n";
6151    
6152     + if (!he)
6153     + return 0;
6154     +
6155     /* Display compact version first. */
6156     c2c.symbol_full = false;
6157    
6158     diff --git a/tools/perf/perf.h b/tools/perf/perf.h
6159     index 55086389fc06..96f62dd7e3ed 100644
6160     --- a/tools/perf/perf.h
6161     +++ b/tools/perf/perf.h
6162     @@ -24,7 +24,9 @@ static inline unsigned long long rdclock(void)
6163     return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
6164     }
6165    
6166     +#ifndef MAX_NR_CPUS
6167     #define MAX_NR_CPUS 1024
6168     +#endif
6169    
6170     extern const char *input_name;
6171     extern bool perf_host, perf_guest;
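
Editor's note: wrapping the definition in #ifndef lets builders raise the CPU limit from the compiler command line without patching the header. Sketch:

#include <stdio.h>

#ifndef MAX_NR_CPUS
#define MAX_NR_CPUS 1024        /* default, overridable at build time */
#endif

int main(void)
{
        printf("MAX_NR_CPUS = %d\n", MAX_NR_CPUS);
        return 0;
}

Building with cc -DMAX_NR_CPUS=4096 demo.c prints 4096; without the flag it prints the 1024 default.
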
6172     diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
6173     index 226a9245d1db..2227ee92d8e2 100644
6174     --- a/tools/perf/util/evsel.c
6175     +++ b/tools/perf/util/evsel.c
6176     @@ -824,6 +824,12 @@ static void apply_config_terms(struct perf_evsel *evsel,
6177     }
6178     }
6179    
6180     +static bool is_dummy_event(struct perf_evsel *evsel)
6181     +{
6182     + return (evsel->attr.type == PERF_TYPE_SOFTWARE) &&
6183     + (evsel->attr.config == PERF_COUNT_SW_DUMMY);
6184     +}
6185     +
6186     /*
6187     * The enable_on_exec/disabled value strategy:
6188     *
6189     @@ -1054,6 +1060,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
6190     else
6191     perf_evsel__reset_sample_bit(evsel, PERIOD);
6192     }
6193     +
6194     + /*
6195     + * For initial_delay, a dummy event is added implicitly.
6196     + * The software event will error out with -EOPNOTSUPP
6197     + * if the BRANCH_STACK bit is set.
6198     + */
6199     + if (opts->initial_delay && is_dummy_event(evsel))
6200     + perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
6201     }
6202    
6203     static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
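
Editor's note: the new is_dummy_event() is a two-field check on the event attributes. The same predicate applied to the uapi struct, as a standalone Linux-only sketch:

#include <linux/perf_event.h>
#include <stdbool.h>
#include <stdio.h>

/* Same test as the is_dummy_event() helper added above. */
static bool is_dummy_event(const struct perf_event_attr *attr)
{
        return attr->type == PERF_TYPE_SOFTWARE &&
               attr->config == PERF_COUNT_SW_DUMMY;
}

int main(void)
{
        struct perf_event_attr attr = {
                .type   = PERF_TYPE_SOFTWARE,
                .config = PERF_COUNT_SW_DUMMY,
        };

        printf("dummy? %d\n", is_dummy_event(&attr));
        return 0;
}
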
6204     diff --git a/tools/testing/nvdimm/pmem-dax.c b/tools/testing/nvdimm/pmem-dax.c
6205     index b53596ad601b..2e7fd8227969 100644
6206     --- a/tools/testing/nvdimm/pmem-dax.c
6207     +++ b/tools/testing/nvdimm/pmem-dax.c
6208     @@ -31,17 +31,21 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
6209     if (get_nfit_res(pmem->phys_addr + offset)) {
6210     struct page *page;
6211    
6212     - *kaddr = pmem->virt_addr + offset;
6213     + if (kaddr)
6214     + *kaddr = pmem->virt_addr + offset;
6215     page = vmalloc_to_page(pmem->virt_addr + offset);
6216     - *pfn = page_to_pfn_t(page);
6217     + if (pfn)
6218     + *pfn = page_to_pfn_t(page);
6219     pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
6220     __func__, pmem, pgoff, page_to_pfn(page));
6221    
6222     return 1;
6223     }
6224    
6225     - *kaddr = pmem->virt_addr + offset;
6226     - *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
6227     + if (kaddr)
6228     + *kaddr = pmem->virt_addr + offset;
6229     + if (pfn)
6230     + *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
6231    
6232     /*
6233     * If badblocks are present, limit known good range to the
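
Editor's note: the pmem test shim now mirrors the driver in tolerating NULL out-pointers, so callers that only want one of kaddr/pfn can pass NULL for the other. A generic sketch of that convention (hypothetical function, not the driver's):

#include <stdio.h>

/* Write through out-pointers only when non-NULL, as in the hunk above. */
static int direct_access(long off, void **kaddr, unsigned long *pfn)
{
        static char buf[4096];

        if (kaddr)
                *kaddr = buf + off;
        if (pfn)
                *pfn = (unsigned long)off / 64;
        return 1;
}

int main(void)
{
        unsigned long pfn;

        direct_access(128, NULL, &pfn);   /* only the pfn is wanted here */
        printf("pfn=%lu\n", pfn);
        return 0;
}
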
6234     diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
6235     index 9167ee976314..041dbbb30ff0 100644
6236     --- a/tools/testing/selftests/bpf/test_verifier.c
6237     +++ b/tools/testing/selftests/bpf/test_verifier.c
6238     @@ -5895,7 +5895,7 @@ static struct bpf_test tests[] = {
6239     BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6240     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6241     BPF_FUNC_map_lookup_elem),
6242     - BPF_MOV64_REG(BPF_REG_0, 0),
6243     + BPF_MOV64_IMM(BPF_REG_0, 0),
6244     BPF_EXIT_INSN(),
6245     },
6246     .fixup_map_in_map = { 3 },
6247     @@ -5918,7 +5918,7 @@ static struct bpf_test tests[] = {
6248     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6249     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6250     BPF_FUNC_map_lookup_elem),
6251     - BPF_MOV64_REG(BPF_REG_0, 0),
6252     + BPF_MOV64_IMM(BPF_REG_0, 0),
6253     BPF_EXIT_INSN(),
6254     },
6255     .fixup_map_in_map = { 3 },
6256     @@ -5941,7 +5941,7 @@ static struct bpf_test tests[] = {
6257     BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6258     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6259     BPF_FUNC_map_lookup_elem),
6260     - BPF_MOV64_REG(BPF_REG_0, 0),
6261     + BPF_MOV64_IMM(BPF_REG_0, 0),
6262     BPF_EXIT_INSN(),
6263     },
6264     .fixup_map_in_map = { 3 },
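
Editor's note: the verifier-test fix matters because BPF_MOV64_REG(BPF_REG_0, 0) treats 0 as a source register number, encoding the no-op r0 = r0 and leaving the map-lookup pointer in r0, while BPF_MOV64_IMM really loads the constant. A sketch that prints the two encodings, with the insn-builder macros copied locally in the shape the kernel's tools/include/linux/filter.h uses:

#include <linux/bpf.h>
#include <stdio.h>

/* Local copies of the insn-building macros the selftests use. */
#define BPF_MOV64_REG(DST, SRC)                         \
        ((struct bpf_insn) {                            \
                .code  = BPF_ALU64 | BPF_MOV | BPF_X,   \
                .dst_reg = DST, .src_reg = SRC,         \
                .off = 0, .imm = 0 })

#define BPF_MOV64_IMM(DST, IMM)                         \
        ((struct bpf_insn) {                            \
                .code  = BPF_ALU64 | BPF_MOV | BPF_K,   \
                .dst_reg = DST, .src_reg = 0,           \
                .off = 0, .imm = IMM })

int main(void)
{
        /* Buggy form: "0" is read as a register, so this is r0 = r0. */
        struct bpf_insn buggy = BPF_MOV64_REG(BPF_REG_0, 0);
        /* Fixed form: loads the immediate value 0 into r0. */
        struct bpf_insn fixed = BPF_MOV64_IMM(BPF_REG_0, 0);

        printf("buggy: code=0x%02x src=r%d imm=%d\n",
               buggy.code, buggy.src_reg, buggy.imm);
        printf("fixed: code=0x%02x src=r%d imm=%d\n",
               fixed.code, fixed.src_reg, fixed.imm);
        return 0;
}
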