Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.19/0112-4.19.13-all-fixes.patch



Revision 3391
Fri Aug 2 11:47:26 2019 UTC by niro
File size: 74632 bytes
-linux-4.19.13
1 niro 3391 diff --git a/Makefile b/Makefile
2     index 9770f29a690a..892ff14cbc9d 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 19
9     -SUBLEVEL = 12
10     +SUBLEVEL = 13
11     EXTRAVERSION =
12     NAME = "People's Front"
13    
14     diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
15     index 92fd2c8a9af0..12659ce5c1f3 100644
16     --- a/arch/arm/include/asm/pgtable-2level.h
17     +++ b/arch/arm/include/asm/pgtable-2level.h
18     @@ -10,7 +10,7 @@
19     #ifndef _ASM_PGTABLE_2LEVEL_H
20     #define _ASM_PGTABLE_2LEVEL_H
21    
22     -#define __PAGETABLE_PMD_FOLDED
23     +#define __PAGETABLE_PMD_FOLDED 1
24    
25     /*
26     * Hardware-wise, we have a two level page table structure, where the first
27     diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
28     index 6181e4134483..fe3ddd73a0cc 100644
29     --- a/arch/m68k/include/asm/pgtable_mm.h
30     +++ b/arch/m68k/include/asm/pgtable_mm.h
31     @@ -55,12 +55,12 @@
32     */
33     #ifdef CONFIG_SUN3
34     #define PTRS_PER_PTE 16
35     -#define __PAGETABLE_PMD_FOLDED
36     +#define __PAGETABLE_PMD_FOLDED 1
37     #define PTRS_PER_PMD 1
38     #define PTRS_PER_PGD 2048
39     #elif defined(CONFIG_COLDFIRE)
40     #define PTRS_PER_PTE 512
41     -#define __PAGETABLE_PMD_FOLDED
42     +#define __PAGETABLE_PMD_FOLDED 1
43     #define PTRS_PER_PMD 1
44     #define PTRS_PER_PGD 1024
45     #else
46     diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
47     index 7b650ab14fa0..2ca598534cc7 100644
48     --- a/arch/microblaze/include/asm/pgtable.h
49     +++ b/arch/microblaze/include/asm/pgtable.h
50     @@ -63,7 +63,7 @@ extern int mem_init_done;
51    
52     #include <asm-generic/4level-fixup.h>
53    
54     -#define __PAGETABLE_PMD_FOLDED
55     +#define __PAGETABLE_PMD_FOLDED 1
56    
57     #ifdef __KERNEL__
58     #ifndef __ASSEMBLY__
59     diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
60     index d3e19a55cf53..9f52db930c00 100644
61     --- a/arch/nds32/include/asm/pgtable.h
62     +++ b/arch/nds32/include/asm/pgtable.h
63     @@ -4,7 +4,7 @@
64     #ifndef _ASMNDS32_PGTABLE_H
65     #define _ASMNDS32_PGTABLE_H
66    
67     -#define __PAGETABLE_PMD_FOLDED
68     +#define __PAGETABLE_PMD_FOLDED 1
69     #include <asm-generic/4level-fixup.h>
70     #include <asm-generic/sizes.h>
71    
72     diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
73     index fa6b7c78f18a..ff0860b2b21a 100644
74     --- a/arch/parisc/include/asm/pgtable.h
75     +++ b/arch/parisc/include/asm/pgtable.h
76     @@ -117,7 +117,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
77     #if CONFIG_PGTABLE_LEVELS == 3
78     #define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
79     #else
80     -#define __PAGETABLE_PMD_FOLDED
81     +#define __PAGETABLE_PMD_FOLDED 1
82     #define BITS_PER_PMD 0
83     #endif
84     #define PTRS_PER_PMD (1UL << BITS_PER_PMD)
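
These hunks give __PAGETABLE_PMD_FOLDED an explicit value of 1 instead of an empty expansion. An empty define can only be tested with #ifdef; defining it to 1 also lets it be used in arithmetic preprocessor tests and in the kernel's IS_ENABLED()-style checks, which look for a literal 1. A minimal sketch with a hypothetical FOO macro:

#define FOO 1           /* with "#define FOO" (empty), "#if FOO" fails */

#ifdef FOO
/* reached either way: #ifdef only asks whether FOO is defined */
#endif

#if FOO
/* reached only because FOO expands to a nonzero value */
#endif
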
85     diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
86     index 141d415a8c80..c3d7ccd25381 100644
87     --- a/arch/x86/entry/vdso/Makefile
88     +++ b/arch/x86/entry/vdso/Makefile
89     @@ -171,7 +171,8 @@ quiet_cmd_vdso = VDSO $@
90     sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
91    
92     VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
93     - $(call ld-option, --build-id) -Bsymbolic
94     + $(call ld-option, --build-id) $(call ld-option, --eh-frame-hdr) \
95     + -Bsymbolic
96     GCOV_PROFILE := n
97    
98     #
99     diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
100     index b3486c8b570a..1f9de7635bcb 100644
101     --- a/arch/x86/include/asm/msr-index.h
102     +++ b/arch/x86/include/asm/msr-index.h
103     @@ -389,6 +389,7 @@
104     #define MSR_F15H_NB_PERF_CTR 0xc0010241
105     #define MSR_F15H_PTSC 0xc0010280
106     #define MSR_F15H_IC_CFG 0xc0011021
107     +#define MSR_F15H_EX_CFG 0xc001102c
108    
109     /* Fam 10h MSRs */
110     #define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
111     diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
112     index 0f53049719cd..627e5c809b33 100644
113     --- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
114     +++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
115     @@ -23,6 +23,7 @@
116    
117     #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
118    
119     +#include <linux/cpu.h>
120     #include <linux/kernfs.h>
121     #include <linux/seq_file.h>
122     #include <linux/slab.h>
123     @@ -310,9 +311,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
124     return -EINVAL;
125     buf[nbytes - 1] = '\0';
126    
127     + cpus_read_lock();
128     rdtgrp = rdtgroup_kn_lock_live(of->kn);
129     if (!rdtgrp) {
130     rdtgroup_kn_unlock(of->kn);
131     + cpus_read_unlock();
132     return -ENOENT;
133     }
134     rdt_last_cmd_clear();
135     @@ -367,6 +370,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
136    
137     out:
138     rdtgroup_kn_unlock(of->kn);
139     + cpus_read_unlock();
140     return ret ?: nbytes;
141     }
142    
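
The rdtgroup_schemata_write() change above pins CPU hotplug for the duration of the update and keeps a fixed lock order: cpus_read_lock() is taken before the rdtgroup lock and released after it. A minimal sketch of that ordering, with group_mutex standing in for the rdtgroup locking:

#include <linux/cpu.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(group_mutex);

static int update_groups(void)
{
        cpus_read_lock();               /* CPUs cannot vanish mid-update */
        mutex_lock(&group_mutex);

        /* ... walk per-domain state that hotplug would otherwise mutate ... */

        mutex_unlock(&group_mutex);
        cpus_read_unlock();             /* release in reverse order */
        return 0;
}
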
143     diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
144     index 40eee6cc4124..254683b503a9 100644
145     --- a/arch/x86/kernel/cpu/mtrr/if.c
146     +++ b/arch/x86/kernel/cpu/mtrr/if.c
147     @@ -165,6 +165,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
148     struct mtrr_gentry gentry;
149     void __user *arg = (void __user *) __arg;
150    
151     + memset(&gentry, 0, sizeof(gentry));
152     +
153     switch (cmd) {
154     case MTRRIOC_ADD_ENTRY:
155     case MTRRIOC_SET_ENTRY:
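
The memset() closes a kernel-stack infoleak: some MTRR ioctl paths copy gentry back to userspace without writing every field, so padding and untouched members would otherwise carry stale stack bytes out. A minimal sketch of the pattern, with a hypothetical struct and handler:

#include <linux/string.h>
#include <linux/uaccess.h>

struct gentry_like {
        unsigned long base;
        unsigned int size;
        unsigned int type;      /* some commands never write this field */
};

static long ioctl_like(void __user *arg)
{
        struct gentry_like g;

        memset(&g, 0, sizeof(g));       /* nothing stale can reach userspace */
        g.base = 0x1000;                /* fill only what this command uses */
        if (copy_to_user(arg, &g, sizeof(g)))
                return -EFAULT;
        return 0;
}
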
156     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
157     index c97a9d60d305..33ffb6d17e73 100644
158     --- a/arch/x86/kvm/vmx.c
159     +++ b/arch/x86/kvm/vmx.c
160     @@ -11471,6 +11471,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
161     kunmap(vmx->nested.pi_desc_page);
162     kvm_release_page_dirty(vmx->nested.pi_desc_page);
163     vmx->nested.pi_desc_page = NULL;
164     + vmx->nested.pi_desc = NULL;
165     + vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull);
166     }
167     page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
168     if (is_error_page(page))
169     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
170     index 68b53f05a420..956eecd227f8 100644
171     --- a/arch/x86/kvm/x86.c
172     +++ b/arch/x86/kvm/x86.c
173     @@ -2343,6 +2343,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
174     case MSR_AMD64_PATCH_LOADER:
175     case MSR_AMD64_BU_CFG2:
176     case MSR_AMD64_DC_CFG:
177     + case MSR_F15H_EX_CFG:
178     break;
179    
180     case MSR_IA32_UCODE_REV:
181     @@ -2638,6 +2639,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
182     case MSR_AMD64_BU_CFG2:
183     case MSR_IA32_PERF_CTL:
184     case MSR_AMD64_DC_CFG:
185     + case MSR_F15H_EX_CFG:
186     msr_info->data = 0;
187     break;
188     case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
189     @@ -7304,7 +7306,7 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm)
190    
191     static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
192     {
193     - if (!kvm_apic_hw_enabled(vcpu->arch.apic))
194     + if (!kvm_apic_present(vcpu))
195     return;
196    
197     bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
198     diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
199     index 3d0c83ef6aab..a3c9ea29d7cc 100644
200     --- a/arch/x86/mm/pat.c
201     +++ b/arch/x86/mm/pat.c
202     @@ -519,8 +519,13 @@ static u64 sanitize_phys(u64 address)
203     * for a "decoy" virtual address (bit 63 clear) passed to
204     * set_memory_X(). __pa() on a "decoy" address results in a
205     * physical address with bit 63 set.
206     + *
207     + * Decoy addresses are not present for 32-bit builds, see
208     + * set_mce_nospec().
209     */
210     - return address & __PHYSICAL_MASK;
211     + if (IS_ENABLED(CONFIG_X86_64))
212     + return address & __PHYSICAL_MASK;
213     + return address;
214     }
215    
216     /*
217     @@ -546,7 +551,11 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
218    
219     start = sanitize_phys(start);
220     end = sanitize_phys(end);
221     - BUG_ON(start >= end); /* end is exclusive */
222     + if (start >= end) {
223     + WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
224     + start, end - 1, cattr_name(req_type));
225     + return -EINVAL;
226     + }
227    
228     if (!pat_enabled()) {
229     /* This is identical to page table setting without PAT */
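
The reserve_memtype() hunk above trades a BUG_ON() for a diagnosed failure: an inverted range now logs once through WARN() and returns -EINVAL rather than halting the machine. A minimal sketch of that recoverable-error pattern:

#include <linux/kernel.h>

static int reserve_range(u64 start, u64 end)    /* end is exclusive */
{
        if (start >= end) {
                WARN(1, "invalid range [mem %#010llx-%#010llx]\n",
                     start, end - 1);
                return -EINVAL;         /* the caller can recover */
        }
        /* ... perform the reservation ... */
        return 0;
}
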
230     diff --git a/drivers/gpio/gpio-max7301.c b/drivers/gpio/gpio-max7301.c
231     index 05813fbf3daf..647dfbbc4e1c 100644
232     --- a/drivers/gpio/gpio-max7301.c
233     +++ b/drivers/gpio/gpio-max7301.c
234     @@ -25,7 +25,7 @@ static int max7301_spi_write(struct device *dev, unsigned int reg,
235     struct spi_device *spi = to_spi_device(dev);
236     u16 word = ((reg & 0x7F) << 8) | (val & 0xFF);
237    
238     - return spi_write(spi, (const u8 *)&word, sizeof(word));
239     + return spi_write_then_read(spi, &word, sizeof(word), NULL, 0);
240     }
241    
242     /* A read from the MAX7301 means two transfers; here, one message each */
243     @@ -37,14 +37,8 @@ static int max7301_spi_read(struct device *dev, unsigned int reg)
244     struct spi_device *spi = to_spi_device(dev);
245    
246     word = 0x8000 | (reg << 8);
247     - ret = spi_write(spi, (const u8 *)&word, sizeof(word));
248     - if (ret)
249     - return ret;
250     - /*
251     - * This relies on the fact, that a transfer with NULL tx_buf shifts out
252     - * zero bytes (=NOOP for MAX7301)
253     - */
254     - ret = spi_read(spi, (u8 *)&word, sizeof(word));
255     + ret = spi_write_then_read(spi, &word, sizeof(word), &word,
256     + sizeof(word));
257     if (ret)
258     return ret;
259     return word & 0xff;
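
The max7301 conversion matters because spi_write()/spi_read() hand the caller's buffer straight to the controller, which may DMA from it, and a small stack variable like word is not DMA-safe (notably with CONFIG_VMAP_STACK); spi_write_then_read() instead copies through internally allocated, DMA-safe bounce buffers. A minimal sketch of a register read in this style (hypothetical helper):

#include <linux/spi/spi.h>

static int read_reg(struct spi_device *spi, u8 reg, u16 *out)
{
        u16 word = 0x8000 | (reg << 8); /* read command, as above */

        /* stack buffers are fine: the core bounces them internally */
        return spi_write_then_read(spi, &word, sizeof(word),
                                   out, sizeof(*out));
}
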
260     diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
261     index 8b9d7e42c600..c5e009f61021 100644
262     --- a/drivers/gpio/gpiolib-acpi.c
263     +++ b/drivers/gpio/gpiolib-acpi.c
264     @@ -23,11 +23,28 @@
265    
266     #include "gpiolib.h"
267    
268     +/**
269     + * struct acpi_gpio_event - ACPI GPIO event handler data
270     + *
271     + * @node: list-entry of the events list of the struct acpi_gpio_chip
272     + * @handle: handle of ACPI method to execute when the IRQ triggers
273     + * @handler: irq_handler to pass to request_irq when requesting the IRQ
274     + * @pin: GPIO pin number on the gpio_chip
275     + * @irq: Linux IRQ number for the event, for request_ / free_irq
276     + * @irqflags: flags to pass to request_irq when requesting the IRQ
277     + * @irq_is_wake: If the ACPI flags indicate the IRQ is a wakeup source
278     + * @is_requested: True if request_irq has been done
279     + * @desc: gpio_desc for the GPIO pin for this event
280     + */
281     struct acpi_gpio_event {
282     struct list_head node;
283     acpi_handle handle;
284     + irq_handler_t handler;
285     unsigned int pin;
286     unsigned int irq;
287     + unsigned long irqflags;
288     + bool irq_is_wake;
289     + bool irq_requested;
290     struct gpio_desc *desc;
291     };
292    
293     @@ -53,10 +70,10 @@ struct acpi_gpio_chip {
294    
295     /*
296     * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
297     - * (so builtin drivers) we register the ACPI GpioInt event handlers from a
298     + * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
299     * late_initcall_sync handler, so that other builtin drivers can register their
300     * OpRegions before the event handlers can run. This list contains gpiochips
301     - * for which the acpi_gpiochip_request_interrupts() has been deferred.
302     + * for which the acpi_gpiochip_request_irqs() call has been deferred.
303     */
304     static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
305     static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
306     @@ -137,8 +154,42 @@ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
307     }
308     EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource);
309    
310     -static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
311     - void *context)
312     +static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
313     + struct acpi_gpio_event *event)
314     +{
315     + int ret, value;
316     +
317     + ret = request_threaded_irq(event->irq, NULL, event->handler,
318     + event->irqflags, "ACPI:Event", event);
319     + if (ret) {
320     + dev_err(acpi_gpio->chip->parent,
321     + "Failed to setup interrupt handler for %d\n",
322     + event->irq);
323     + return;
324     + }
325     +
326     + if (event->irq_is_wake)
327     + enable_irq_wake(event->irq);
328     +
329     + event->irq_requested = true;
330     +
331     + /* Make sure we trigger the initial state of edge-triggered IRQs */
332     + value = gpiod_get_raw_value_cansleep(event->desc);
333     + if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
334     + ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
335     + event->handler(event->irq, event);
336     +}
337     +
338     +static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
339     +{
340     + struct acpi_gpio_event *event;
341     +
342     + list_for_each_entry(event, &acpi_gpio->events, node)
343     + acpi_gpiochip_request_irq(acpi_gpio, event);
344     +}
345     +
346     +static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
347     + void *context)
348     {
349     struct acpi_gpio_chip *acpi_gpio = context;
350     struct gpio_chip *chip = acpi_gpio->chip;
351     @@ -147,8 +198,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
352     struct acpi_gpio_event *event;
353     irq_handler_t handler = NULL;
354     struct gpio_desc *desc;
355     - unsigned long irqflags;
356     - int ret, pin, irq, value;
357     + int ret, pin, irq;
358    
359     if (!acpi_gpio_get_irq_resource(ares, &agpio))
360     return AE_OK;
361     @@ -179,8 +229,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
362    
363     gpiod_direction_input(desc);
364    
365     - value = gpiod_get_value_cansleep(desc);
366     -
367     ret = gpiochip_lock_as_irq(chip, pin);
368     if (ret) {
369     dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
370     @@ -193,64 +241,42 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
371     goto fail_unlock_irq;
372     }
373    
374     - irqflags = IRQF_ONESHOT;
375     + event = kzalloc(sizeof(*event), GFP_KERNEL);
376     + if (!event)
377     + goto fail_unlock_irq;
378     +
379     + event->irqflags = IRQF_ONESHOT;
380     if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
381     if (agpio->polarity == ACPI_ACTIVE_HIGH)
382     - irqflags |= IRQF_TRIGGER_HIGH;
383     + event->irqflags |= IRQF_TRIGGER_HIGH;
384     else
385     - irqflags |= IRQF_TRIGGER_LOW;
386     + event->irqflags |= IRQF_TRIGGER_LOW;
387     } else {
388     switch (agpio->polarity) {
389     case ACPI_ACTIVE_HIGH:
390     - irqflags |= IRQF_TRIGGER_RISING;
391     + event->irqflags |= IRQF_TRIGGER_RISING;
392     break;
393     case ACPI_ACTIVE_LOW:
394     - irqflags |= IRQF_TRIGGER_FALLING;
395     + event->irqflags |= IRQF_TRIGGER_FALLING;
396     break;
397     default:
398     - irqflags |= IRQF_TRIGGER_RISING |
399     - IRQF_TRIGGER_FALLING;
400     + event->irqflags |= IRQF_TRIGGER_RISING |
401     + IRQF_TRIGGER_FALLING;
402     break;
403     }
404     }
405    
406     - event = kzalloc(sizeof(*event), GFP_KERNEL);
407     - if (!event)
408     - goto fail_unlock_irq;
409     -
410     event->handle = evt_handle;
411     + event->handler = handler;
412     event->irq = irq;
413     + event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE;
414     event->pin = pin;
415     event->desc = desc;
416    
417     - ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
418     - "ACPI:Event", event);
419     - if (ret) {
420     - dev_err(chip->parent,
421     - "Failed to setup interrupt handler for %d\n",
422     - event->irq);
423     - goto fail_free_event;
424     - }
425     -
426     - if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
427     - enable_irq_wake(irq);
428     -
429     list_add_tail(&event->node, &acpi_gpio->events);
430    
431     - /*
432     - * Make sure we trigger the initial state of the IRQ when using RISING
433     - * or FALLING. Note we run the handlers on late_init, the AML code
434     - * may refer to OperationRegions from other (builtin) drivers which
435     - * may be probed after us.
436     - */
437     - if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
438     - ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
439     - handler(event->irq, event);
440     -
441     return AE_OK;
442    
443     -fail_free_event:
444     - kfree(event);
445     fail_unlock_irq:
446     gpiochip_unlock_as_irq(chip, pin);
447     fail_free_desc:
448     @@ -287,6 +313,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
449     if (ACPI_FAILURE(status))
450     return;
451    
452     + acpi_walk_resources(handle, "_AEI",
453     + acpi_gpiochip_alloc_event, acpi_gpio);
454     +
455     mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
456     defer = !acpi_gpio_deferred_req_irqs_done;
457     if (defer)
458     @@ -297,8 +326,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
459     if (defer)
460     return;
461    
462     - acpi_walk_resources(handle, "_AEI",
463     - acpi_gpiochip_request_interrupt, acpi_gpio);
464     + acpi_gpiochip_request_irqs(acpi_gpio);
465     }
466     EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts);
467    
468     @@ -335,10 +363,13 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
469     list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
470     struct gpio_desc *desc;
471    
472     - if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
473     - disable_irq_wake(event->irq);
474     + if (event->irq_requested) {
475     + if (event->irq_is_wake)
476     + disable_irq_wake(event->irq);
477     +
478     + free_irq(event->irq, event);
479     + }
480    
481     - free_irq(event->irq, event);
482     desc = event->desc;
483     if (WARN_ON(IS_ERR(desc)))
484     continue;
485     @@ -1204,23 +1235,16 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
486     return con_id == NULL;
487     }
488    
489     -/* Run deferred acpi_gpiochip_request_interrupts() */
490     -static int acpi_gpio_handle_deferred_request_interrupts(void)
491     +/* Run deferred acpi_gpiochip_request_irqs() */
492     +static int acpi_gpio_handle_deferred_request_irqs(void)
493     {
494     struct acpi_gpio_chip *acpi_gpio, *tmp;
495    
496     mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
497     list_for_each_entry_safe(acpi_gpio, tmp,
498     &acpi_gpio_deferred_req_irqs_list,
499     - deferred_req_irqs_list_entry) {
500     - acpi_handle handle;
501     -
502     - handle = ACPI_HANDLE(acpi_gpio->chip->parent);
503     - acpi_walk_resources(handle, "_AEI",
504     - acpi_gpiochip_request_interrupt, acpi_gpio);
505     -
506     - list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
507     - }
508     + deferred_req_irqs_list_entry)
509     + acpi_gpiochip_request_irqs(acpi_gpio);
510    
511     acpi_gpio_deferred_req_irqs_done = true;
512     mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
513     @@ -1228,4 +1252,4 @@ static int acpi_gpio_handle_deferred_request_interrupts(void)
514     return 0;
515     }
516     /* We must use _sync so that this runs after the first deferred_probe run */
517     -late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
518     +late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
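
The gpiolib-acpi rework splits event setup into two phases: at probe time the _AEI resources are only walked and recorded (acpi_gpiochip_alloc_event), and the IRQs are requested later, from the late_initcall_sync, once the OpRegions that the AML handlers may touch have been registered. A minimal sketch of the defer-the-request pattern, with hypothetical names:

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>

struct ev { struct list_head node; unsigned int irq; };
static LIST_HEAD(ev_list);

static irqreturn_t ev_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}

/* phase 1, at probe time: record the event, do not grab the IRQ yet */
static int ev_alloc(unsigned int irq)
{
        struct ev *e = kzalloc(sizeof(*e), GFP_KERNEL);

        if (!e)
                return -ENOMEM;
        e->irq = irq;
        list_add_tail(&e->node, &ev_list);
        return 0;
}

/* phase 2, from late_initcall_sync: dependencies are now in place */
static void ev_request_all(void)
{
        struct ev *e;

        list_for_each_entry(e, &ev_list, node)
                request_threaded_irq(e->irq, NULL, ev_handler,
                                     IRQF_ONESHOT, "example", e);
}
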
519     diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
520     index ea10e9a26aad..ba129b64b61f 100644
521     --- a/drivers/gpu/drm/drm_ioctl.c
522     +++ b/drivers/gpu/drm/drm_ioctl.c
523     @@ -37,6 +37,7 @@
524    
525     #include <linux/pci.h>
526     #include <linux/export.h>
527     +#include <linux/nospec.h>
528    
529     /**
530     * DOC: getunique and setversion story
531     @@ -794,13 +795,17 @@ long drm_ioctl(struct file *filp,
532    
533     if (is_driver_ioctl) {
534     /* driver ioctl */
535     - if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
536     + unsigned int index = nr - DRM_COMMAND_BASE;
537     +
538     + if (index >= dev->driver->num_ioctls)
539     goto err_i1;
540     - ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
541     + index = array_index_nospec(index, dev->driver->num_ioctls);
542     + ioctl = &dev->driver->ioctls[index];
543     } else {
544     /* core ioctl */
545     if (nr >= DRM_CORE_IOCTL_COUNT)
546     goto err_i1;
547     + nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
548     ioctl = &drm_ioctls[nr];
549     }
550    
551     @@ -882,6 +887,7 @@ bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
552    
553     if (nr >= DRM_CORE_IOCTL_COUNT)
554     return false;
555     + nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
556    
557     *flags = drm_ioctls[nr].flags;
558     return true;
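
array_index_nospec() addresses Spectre variant 1: even after the bounds check, a mispredicted branch can speculatively index the table with an out-of-range nr, so the index is clamped with a value barrier before the dereference. A minimal sketch of the pattern:

#include <linux/nospec.h>

static int table_lookup(const int *table, unsigned int nr_entries,
                        unsigned int idx)
{
        if (idx >= nr_entries)
                return -EINVAL;
        /* architecturally a no-op; bounds idx under speculation too */
        idx = array_index_nospec(idx, nr_entries);
        return table[idx];
}
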
559     diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
560     index c71cc857b649..2c6d5c7a4445 100644
561     --- a/drivers/hv/vmbus_drv.c
562     +++ b/drivers/hv/vmbus_drv.c
563     @@ -316,6 +316,8 @@ static ssize_t out_intr_mask_show(struct device *dev,
564    
565     if (!hv_dev->channel)
566     return -ENODEV;
567     + if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
568     + return -EINVAL;
569     hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
570     return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
571     }
572     @@ -329,6 +331,8 @@ static ssize_t out_read_index_show(struct device *dev,
573    
574     if (!hv_dev->channel)
575     return -ENODEV;
576     + if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
577     + return -EINVAL;
578     hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
579     return sprintf(buf, "%d\n", outbound.current_read_index);
580     }
581     @@ -343,6 +347,8 @@ static ssize_t out_write_index_show(struct device *dev,
582    
583     if (!hv_dev->channel)
584     return -ENODEV;
585     + if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
586     + return -EINVAL;
587     hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
588     return sprintf(buf, "%d\n", outbound.current_write_index);
589     }
590     @@ -357,6 +363,8 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
591    
592     if (!hv_dev->channel)
593     return -ENODEV;
594     + if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
595     + return -EINVAL;
596     hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
597     return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
598     }
599     @@ -371,6 +379,8 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
600    
601     if (!hv_dev->channel)
602     return -ENODEV;
603     + if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
604     + return -EINVAL;
605     hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
606     return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
607     }
608     @@ -384,6 +394,8 @@ static ssize_t in_intr_mask_show(struct device *dev,
609    
610     if (!hv_dev->channel)
611     return -ENODEV;
612     + if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
613     + return -EINVAL;
614     hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
615     return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
616     }
617     @@ -397,6 +409,8 @@ static ssize_t in_read_index_show(struct device *dev,
618    
619     if (!hv_dev->channel)
620     return -ENODEV;
621     + if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
622     + return -EINVAL;
623     hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
624     return sprintf(buf, "%d\n", inbound.current_read_index);
625     }
626     @@ -410,6 +424,8 @@ static ssize_t in_write_index_show(struct device *dev,
627    
628     if (!hv_dev->channel)
629     return -ENODEV;
630     + if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
631     + return -EINVAL;
632     hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
633     return sprintf(buf, "%d\n", inbound.current_write_index);
634     }
635     @@ -424,6 +440,8 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
636    
637     if (!hv_dev->channel)
638     return -ENODEV;
639     + if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
640     + return -EINVAL;
641     hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
642     return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
643     }
644     @@ -438,6 +456,8 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
645    
646     if (!hv_dev->channel)
647     return -ENODEV;
648     + if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
649     + return -EINVAL;
650     hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
651     return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
652     }
653     diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
654     index 2d95e8d93cc7..9fe075c137dc 100644
655     --- a/drivers/input/mouse/elantech.c
656     +++ b/drivers/input/mouse/elantech.c
657     @@ -1767,6 +1767,18 @@ static int elantech_smbus = IS_ENABLED(CONFIG_MOUSE_ELAN_I2C_SMBUS) ?
658     module_param_named(elantech_smbus, elantech_smbus, int, 0644);
659     MODULE_PARM_DESC(elantech_smbus, "Use a secondary bus for the Elantech device.");
660    
661     +static const char * const i2c_blacklist_pnp_ids[] = {
662     + /*
663     + * These are known to not be working properly as bits are missing
664     + * in elan_i2c.
665     + */
666     + "LEN2131", /* ThinkPad P52 w/ NFC */
667     + "LEN2132", /* ThinkPad P52 */
668     + "LEN2133", /* ThinkPad P72 w/ NFC */
669     + "LEN2134", /* ThinkPad P72 */
670     + NULL
671     +};
672     +
673     static int elantech_create_smbus(struct psmouse *psmouse,
674     struct elantech_device_info *info,
675     bool leave_breadcrumbs)
676     @@ -1802,10 +1814,12 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
677    
678     if (elantech_smbus == ELANTECH_SMBUS_NOT_SET) {
679     /*
680     - * New ICs are enabled by default.
681     + * New ICs are enabled by default, unless mentioned in
682     + * i2c_blacklist_pnp_ids.
683     * Old ICs are up to the user to decide.
684     */
685     - if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
686     + if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
687     + psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
688     return -ENXIO;
689     }
690    
691     diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
692     index 30b15e91d8be..8e7a2a59cd32 100644
693     --- a/drivers/media/i2c/ov5640.c
694     +++ b/drivers/media/i2c/ov5640.c
695     @@ -2020,6 +2020,7 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
696     struct ov5640_dev *sensor = to_ov5640_dev(sd);
697     const struct ov5640_mode_info *new_mode;
698     struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
699     + struct v4l2_mbus_framefmt *fmt;
700     int ret;
701    
702     if (format->pad != 0)
703     @@ -2037,22 +2038,20 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
704     if (ret)
705     goto out;
706    
707     - if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
708     - struct v4l2_mbus_framefmt *fmt =
709     - v4l2_subdev_get_try_format(sd, cfg, 0);
710     + if (format->which == V4L2_SUBDEV_FORMAT_TRY)
711     + fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
712     + else
713     + fmt = &sensor->fmt;
714    
715     - *fmt = *mbus_fmt;
716     - goto out;
717     - }
718     + *fmt = *mbus_fmt;
719    
720     if (new_mode != sensor->current_mode) {
721     sensor->current_mode = new_mode;
722     sensor->pending_mode_change = true;
723     }
724     - if (mbus_fmt->code != sensor->fmt.code) {
725     - sensor->fmt = *mbus_fmt;
726     + if (mbus_fmt->code != sensor->fmt.code)
727     sensor->pending_fmt_change = true;
728     - }
729     +
730     out:
731     mutex_unlock(&sensor->lock);
732     return ret;
733     diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
734     index bc1bd2c25613..55997cf84b39 100644
735     --- a/drivers/mmc/core/mmc.c
736     +++ b/drivers/mmc/core/mmc.c
737     @@ -30,6 +30,7 @@
738     #include "pwrseq.h"
739    
740     #define DEFAULT_CMD6_TIMEOUT_MS 500
741     +#define MIN_CACHE_EN_TIMEOUT_MS 1600
742    
743     static const unsigned int tran_exp[] = {
744     10000, 100000, 1000000, 10000000,
745     @@ -526,8 +527,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
746     card->cid.year += 16;
747    
748     /* check whether the eMMC card supports BKOPS */
749     - if (!mmc_card_broken_hpi(card) &&
750     - ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
751     + if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
752     card->ext_csd.bkops = 1;
753     card->ext_csd.man_bkops_en =
754     (ext_csd[EXT_CSD_BKOPS_EN] &
755     @@ -1782,20 +1782,26 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
756     if (err) {
757     pr_warn("%s: Enabling HPI failed\n",
758     mmc_hostname(card->host));
759     + card->ext_csd.hpi_en = 0;
760     err = 0;
761     - } else
762     + } else {
763     card->ext_csd.hpi_en = 1;
764     + }
765     }
766    
767     /*
768     - * If cache size is higher than 0, this indicates
769     - * the existence of cache and it can be turned on.
770     + * If the cache size is higher than 0, a cache exists and it can be
771     + * turned on. Note that some eMMCs from Micron have been reported to
772     + * need a ~800 ms timeout when enabling the cache after sudden power
773     + * failure tests. Let's extend the timeout to a minimum of
774     + * MIN_CACHE_EN_TIMEOUT_MS and do it for all cards.
775     */
776     - if (!mmc_card_broken_hpi(card) &&
777     - card->ext_csd.cache_size > 0) {
778     + if (card->ext_csd.cache_size > 0) {
779     + unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
780     +
781     + timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
782     err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
783     - EXT_CSD_CACHE_CTRL, 1,
784     - card->ext_csd.generic_cmd6_time);
785     + EXT_CSD_CACHE_CTRL, 1, timeout_ms);
786     if (err && err != -EBADMSG)
787     goto free_card;
788    
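
The cache-enable switch above now uses whichever is larger, the card's advertised generic CMD6 time or the new 1.6 s floor. A minimal sketch of the clamping (values in milliseconds):

#include <linux/kernel.h>

#define MIN_CACHE_EN_TIMEOUT_MS 1600

static unsigned int cache_en_timeout(unsigned int generic_cmd6_time)
{
        /* never trust a card-reported timeout below the observed worst case */
        return max(generic_cmd6_time, (unsigned int)MIN_CACHE_EN_TIMEOUT_MS);
}
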
789     diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
790     index 68760d4a5d3d..b23c57e07f36 100644
791     --- a/drivers/mmc/host/omap_hsmmc.c
792     +++ b/drivers/mmc/host/omap_hsmmc.c
793     @@ -2066,7 +2066,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
794     mmc->max_blk_size = 512; /* Block Length at max can be 1024 */
795     mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */
796     mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
797     - mmc->max_seg_size = mmc->max_req_size;
798    
799     mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
800     MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
801     @@ -2096,6 +2095,17 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
802     goto err_irq;
803     }
804    
805     + /*
806     + * Limit the maximum segment size to the lower of the request size
807     + * and the DMA engine device segment size limits. In reality, with
808     + * 32-bit transfers, the DMA engine can do longer segments than this
809     + * but there is no way to represent that in the DMA model - if we
810     + * increase this figure here, we get warnings from the DMA API debug.
811     + */
812     + mmc->max_seg_size = min3(mmc->max_req_size,
813     + dma_get_max_seg_size(host->rx_chan->device->dev),
814     + dma_get_max_seg_size(host->tx_chan->device->dev));
815     +
816     /* Request IRQ for MMC operations */
817     ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
818     mmc_hostname(mmc), host);
819     diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
820     index 184c24baca15..d6916f787fce 100644
821     --- a/drivers/net/usb/hso.c
822     +++ b/drivers/net/usb/hso.c
823     @@ -2807,6 +2807,12 @@ static int hso_get_config_data(struct usb_interface *interface)
824     return -EIO;
825     }
826    
827     + /* check if we have a valid interface */
828     + if (if_num > 16) {
829     + kfree(config_data);
830     + return -EINVAL;
831     + }
832     +
833     switch (config_data[if_num]) {
834     case 0x0:
835     result = 0;
836     @@ -2877,10 +2883,18 @@ static int hso_probe(struct usb_interface *interface,
837    
838     /* Get the interface/port specification from either driver_info or from
839     * the device itself */
840     - if (id->driver_info)
841     + if (id->driver_info) {
842     + /* if_num is controlled by the device; driver_info is a 0-terminated
843     + * array. Make sure the access is in bounds! */
844     + for (i = 0; i <= if_num; ++i)
845     + if (((u32 *)(id->driver_info))[i] == 0)
846     + goto exit;
847     port_spec = ((u32 *)(id->driver_info))[if_num];
848     - else
849     + } else {
850     port_spec = hso_get_config_data(interface);
851     + if (port_spec < 0)
852     + goto exit;
853     + }
854    
855     /* Check if we need to switch to alt interfaces prior to port
856     * configuration */
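
The hso probe fix bounds both lookups that the device-controlled if_num can reach: reads from config_data are capped, and the zero-terminated driver_info array is walked up to if_num so an index at or past the terminator is rejected. A minimal sketch of the driver_info check:

#include <linux/types.h>

static int port_spec_at(const u32 *spec, unsigned int if_num)
{
        unsigned int i;

        /* spec[] is zero-terminated; if_num comes from the device */
        for (i = 0; i <= if_num; i++)
                if (spec[i] == 0)
                        return -EINVAL; /* if_num is past the terminator */
        return spec[if_num];
}
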
857     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
858     index 4d49a1a3f504..16c6c7f921a8 100644
859     --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
860     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
861     @@ -868,6 +868,15 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
862     int ret, i, j;
863     u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
864    
865     + /*
866     + * This command is not supported on earlier firmware versions.
867     + * Unfortunately, we don't have a TLV API flag to rely on, so
868     + * rely on the major version which is in the first byte of
869     + * ucode_ver.
870     + */
871     + if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
872     + return 0;
873     +
874     ret = iwl_mvm_sar_get_wgds_table(mvm);
875     if (ret < 0) {
876     IWL_DEBUG_RADIO(mvm,
877     diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
878     index b150da4c6721..5d65500a8aa7 100644
879     --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
880     +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
881     @@ -518,6 +518,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
882     {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
883    
884     /* 9000 Series */
885     + {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)},
886     + {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
887     + {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)},
888     + {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)},
889     + {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
890     + {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
891     + {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
892     + {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)},
893     + {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)},
894     + {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
895     + {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
896     + {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
897     + {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
898     + {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
899     + {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
900     + {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
901     + {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
902     + {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
903     + {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)},
904     + {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)},
905     + {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)},
906     + {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)},
907     + {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
908     + {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
909     + {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
910     + {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)},
911     + {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
912     + {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)},
913     + {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)},
914     + {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
915     + {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
916     + {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
917     + {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)},
918     + {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)},
919     + {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
920     + {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
921     + {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
922     + {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
923     + {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
924     + {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
925     + {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
926     + {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
927     + {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
928     + {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)},
929     + {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)},
930     + {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)},
931     + {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)},
932     + {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
933     + {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
934     + {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
935     {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
936     {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
937     {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
938     diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
939     index e2addd8b878b..5d75c971004b 100644
940     --- a/drivers/net/wireless/marvell/mwifiex/11n.c
941     +++ b/drivers/net/wireless/marvell/mwifiex/11n.c
942     @@ -696,11 +696,10 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
943     "Send delba to tid=%d, %pM\n",
944     tid, rx_reor_tbl_ptr->ta);
945     mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
946     - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
947     - flags);
948     - return;
949     + goto exit;
950     }
951     }
952     +exit:
953     spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
954     }
955    
956     diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
957     index 8e63d14c1e1c..5380fba652cc 100644
958     --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
959     +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
960     @@ -103,8 +103,6 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
961     * There could be holes in the buffer, which are skipped by the function.
962     * Since the buffer is linear, the function uses rotation to simulate
963     * circular buffer.
964     - *
965     - * The caller must hold rx_reorder_tbl_lock spinlock.
966     */
967     static void
968     mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
969     @@ -113,21 +111,25 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
970     {
971     int pkt_to_send, i;
972     void *rx_tmp_ptr;
973     + unsigned long flags;
974    
975     pkt_to_send = (start_win > tbl->start_win) ?
976     min((start_win - tbl->start_win), tbl->win_size) :
977     tbl->win_size;
978    
979     for (i = 0; i < pkt_to_send; ++i) {
980     + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
981     rx_tmp_ptr = NULL;
982     if (tbl->rx_reorder_ptr[i]) {
983     rx_tmp_ptr = tbl->rx_reorder_ptr[i];
984     tbl->rx_reorder_ptr[i] = NULL;
985     }
986     + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
987     if (rx_tmp_ptr)
988     mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
989     }
990    
991     + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
992     /*
993     * We don't have a circular buffer, hence use rotation to simulate
994     * circular buffer
995     @@ -138,6 +140,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
996     }
997    
998     tbl->start_win = start_win;
999     + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1000     }
1001    
1002     /*
1003     @@ -147,8 +150,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
1004     * The start window is adjusted automatically when a hole is located.
1005     * Since the buffer is linear, the function uses rotation to simulate
1006     * circular buffer.
1007     - *
1008     - * The caller must hold rx_reorder_tbl_lock spinlock.
1009     */
1010     static void
1011     mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
1012     @@ -156,15 +157,22 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
1013     {
1014     int i, j, xchg;
1015     void *rx_tmp_ptr;
1016     + unsigned long flags;
1017    
1018     for (i = 0; i < tbl->win_size; ++i) {
1019     - if (!tbl->rx_reorder_ptr[i])
1020     + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
1021     + if (!tbl->rx_reorder_ptr[i]) {
1022     + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
1023     + flags);
1024     break;
1025     + }
1026     rx_tmp_ptr = tbl->rx_reorder_ptr[i];
1027     tbl->rx_reorder_ptr[i] = NULL;
1028     + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1029     mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
1030     }
1031    
1032     + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
1033     /*
1034     * We don't have a circular buffer, hence use rotation to simulate
1035     * circular buffer
1036     @@ -177,6 +185,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
1037     }
1038     }
1039     tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
1040     + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1041     }
1042    
1043     /*
1044     @@ -184,8 +193,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
1045     *
1046     * The function stops the associated timer and dispatches all the
1047     * pending packets in the Rx reorder table before deletion.
1048     - *
1049     - * The caller must hold rx_reorder_tbl_lock spinlock.
1050     */
1051     static void
1052     mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
1053     @@ -211,7 +218,11 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
1054    
1055     del_timer_sync(&tbl->timer_context.timer);
1056     tbl->timer_context.timer_is_set = false;
1057     +
1058     + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
1059     list_del(&tbl->list);
1060     + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1061     +
1062     kfree(tbl->rx_reorder_ptr);
1063     kfree(tbl);
1064    
1065     @@ -224,17 +235,22 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
1066     /*
1067     * This function returns the pointer to an entry in Rx reordering
1068     * table which matches the given TA/TID pair.
1069     - *
1070     - * The caller must hold rx_reorder_tbl_lock spinlock.
1071     */
1072     struct mwifiex_rx_reorder_tbl *
1073     mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
1074     {
1075     struct mwifiex_rx_reorder_tbl *tbl;
1076     + unsigned long flags;
1077    
1078     - list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
1079     - if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
1080     + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
1081     + list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
1082     + if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
1083     + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
1084     + flags);
1085     return tbl;
1086     + }
1087     + }
1088     + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1089    
1090     return NULL;
1091     }
1092     @@ -251,9 +267,14 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
1093     return;
1094    
1095     spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
1096     - list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list)
1097     - if (!memcmp(tbl->ta, ta, ETH_ALEN))
1098     + list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
1099     + if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
1100     + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
1101     + flags);
1102     mwifiex_del_rx_reorder_entry(priv, tbl);
1103     + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
1104     + }
1105     + }
1106     spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1107    
1108     return;
1109     @@ -262,18 +283,24 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
1110     /*
1111     * This function finds the last sequence number used in the packets
1112     * buffered in Rx reordering table.
1113     - *
1114     - * The caller must hold rx_reorder_tbl_lock spinlock.
1115     */
1116     static int
1117     mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
1118     {
1119     struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
1120     + struct mwifiex_private *priv = ctx->priv;
1121     + unsigned long flags;
1122     int i;
1123    
1124     - for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i)
1125     - if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
1126     + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
1127     + for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
1128     + if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
1129     + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
1130     + flags);
1131     return i;
1132     + }
1133     + }
1134     + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1135    
1136     return -1;
1137     }
1138     @@ -291,22 +318,17 @@ mwifiex_flush_data(struct timer_list *t)
1139     struct reorder_tmr_cnxt *ctx =
1140     from_timer(ctx, t, timer);
1141     int start_win, seq_num;
1142     - unsigned long flags;
1143    
1144     ctx->timer_is_set = false;
1145     - spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags);
1146     seq_num = mwifiex_11n_find_last_seq_num(ctx);
1147    
1148     - if (seq_num < 0) {
1149     - spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
1150     + if (seq_num < 0)
1151     return;
1152     - }
1153    
1154     mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
1155     start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
1156     mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
1157     start_win);
1158     - spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
1159     }
1160    
1161     /*
1162     @@ -333,14 +355,11 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
1163     * If we get a TID, TA pair which is already present, dispatch all
1164     * the packets and move the window up to the ssn
1165     */
1166     - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
1167     tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
1168     if (tbl) {
1169     mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
1170     - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1171     return;
1172     }
1173     - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1174     /* if !tbl then create one */
1175     new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
1176     if (!new_node)
1177     @@ -551,20 +570,16 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
1178     int prev_start_win, start_win, end_win, win_size;
1179     u16 pkt_index;
1180     bool init_window_shift = false;
1181     - unsigned long flags;
1182     int ret = 0;
1183    
1184     - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
1185     tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
1186     if (!tbl) {
1187     - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1188     if (pkt_type != PKT_TYPE_BAR)
1189     mwifiex_11n_dispatch_pkt(priv, payload);
1190     return ret;
1191     }
1192    
1193     if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
1194     - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1195     mwifiex_11n_dispatch_pkt(priv, payload);
1196     return ret;
1197     }
1198     @@ -651,8 +666,6 @@ done:
1199     if (!tbl->timer_context.timer_is_set ||
1200     prev_start_win != tbl->start_win)
1201     mwifiex_11n_rxreorder_timer_restart(tbl);
1202     -
1203     - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1204     return ret;
1205     }
1206    
1207     @@ -681,18 +694,14 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
1208     peer_mac, tid, initiator);
1209    
1210     if (cleanup_rx_reorder_tbl) {
1211     - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
1212     tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
1213     peer_mac);
1214     if (!tbl) {
1215     - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
1216     - flags);
1217     mwifiex_dbg(priv->adapter, EVENT,
1218     "event: TID, TA not found in table\n");
1219     return;
1220     }
1221     mwifiex_del_rx_reorder_entry(priv, tbl);
1222     - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1223     } else {
1224     ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
1225     if (!ptx_tbl) {
1226     @@ -726,7 +735,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
1227     int tid, win_size;
1228     struct mwifiex_rx_reorder_tbl *tbl;
1229     uint16_t block_ack_param_set;
1230     - unsigned long flags;
1231    
1232     block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
1233    
1234     @@ -740,20 +748,17 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
1235     mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
1236     add_ba_rsp->peer_mac_addr, tid);
1237    
1238     - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
1239     tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
1240     add_ba_rsp->peer_mac_addr);
1241     if (tbl)
1242     mwifiex_del_rx_reorder_entry(priv, tbl);
1243    
1244     - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1245     return 0;
1246     }
1247    
1248     win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
1249     >> BLOCKACKPARAM_WINSIZE_POS;
1250    
1251     - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
1252     tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
1253     add_ba_rsp->peer_mac_addr);
1254     if (tbl) {
1255     @@ -764,7 +769,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
1256     else
1257     tbl->amsdu = false;
1258     }
1259     - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1260    
1261     mwifiex_dbg(priv->adapter, CMD,
1262     "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
1263     @@ -804,8 +808,11 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
1264    
1265     spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
1266     list_for_each_entry_safe(del_tbl_ptr, tmp_node,
1267     - &priv->rx_reorder_tbl_ptr, list)
1268     + &priv->rx_reorder_tbl_ptr, list) {
1269     + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1270     mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
1271     + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
1272     + }
1273     INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
1274     spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1275    
1276     @@ -929,7 +936,6 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
1277     int tlv_buf_left = len;
1278     int ret;
1279     u8 *tmp;
1280     - unsigned long flags;
1281    
1282     mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
1283     event_buf, len);
1284     @@ -949,18 +955,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
1285     tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
1286     tlv_bitmap_len);
1287    
1288     - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
1289     rx_reor_tbl_ptr =
1290     mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
1291     tlv_rxba->mac);
1292     if (!rx_reor_tbl_ptr) {
1293     - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
1294     - flags);
1295     mwifiex_dbg(priv->adapter, ERROR,
1296     "Can not find rx_reorder_tbl!");
1297     return;
1298     }
1299     - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1300    
1301     for (i = 0; i < tlv_bitmap_len; i++) {
1302     for (j = 0 ; j < 8; j++) {
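
The mwifiex rework moves rx_reorder_tbl_lock acquisition out of the callers and into each helper, dropping the lock around operations such as packet dispatch and entry deletion that may take it again themselves. A minimal sketch of the resulting lookup shape:

#include <linux/list.h>
#include <linux/spinlock.h>

struct tbl { struct list_head list; int tid; };

static LIST_HEAD(tbl_list);
static DEFINE_SPINLOCK(tbl_lock);

/* the lookup locks internally instead of documenting "caller must hold" */
static struct tbl *find_tbl(int tid)
{
        struct tbl *t;
        unsigned long flags;

        spin_lock_irqsave(&tbl_lock, flags);
        list_for_each_entry(t, &tbl_list, list) {
                if (t->tid == tid) {
                        spin_unlock_irqrestore(&tbl_lock, flags);
                        return t;       /* lifetime guaranteed elsewhere */
                }
        }
        spin_unlock_irqrestore(&tbl_lock, flags);
        return NULL;
}
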
1303     diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
1304     index a83c5afc256a..5ce85d5727e4 100644
1305     --- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
1306     +++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
1307     @@ -421,15 +421,12 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
1308     spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
1309     }
1310    
1311     - spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
1312     if (!priv->ap_11n_enabled ||
1313     (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
1314     (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
1315     ret = mwifiex_handle_uap_rx_forward(priv, skb);
1316     - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1317     return ret;
1318     }
1319     - spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
1320    
1321     /* Reorder and send to kernel */
1322     pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
1323     diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
1324     index f4122c8fdd97..ef9b502ce576 100644
1325     --- a/drivers/net/wireless/realtek/rtlwifi/base.c
1326     +++ b/drivers/net/wireless/realtek/rtlwifi/base.c
1327     @@ -2289,6 +2289,7 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
1328    
1329     if (rtl_c2h_fast_cmd(hw, skb)) {
1330     rtl_c2h_content_parsing(hw, skb);
1331     + kfree_skb(skb);
1332     return;
1333     }
1334    
1335     diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
1336     index 4a57ffecc7e6..5c9acb634ff7 100644
1337     --- a/drivers/scsi/sd.c
1338     +++ b/drivers/scsi/sd.c
1339     @@ -132,6 +132,7 @@ static DEFINE_MUTEX(sd_ref_mutex);
1340    
1341     static struct kmem_cache *sd_cdb_cache;
1342     static mempool_t *sd_cdb_pool;
1343     +static mempool_t *sd_page_pool;
1344    
1345     static const char *sd_cache_types[] = {
1346     "write through", "none", "write back",
1347     @@ -758,9 +759,10 @@ static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
1348     unsigned int data_len = 24;
1349     char *buf;
1350    
1351     - rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
1352     + rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
1353     if (!rq->special_vec.bv_page)
1354     return BLKPREP_DEFER;
1355     + clear_highpage(rq->special_vec.bv_page);
1356     rq->special_vec.bv_offset = 0;
1357     rq->special_vec.bv_len = data_len;
1358     rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
1359     @@ -791,9 +793,10 @@ static int sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, bool unmap)
1360     u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
1361     u32 data_len = sdp->sector_size;
1362    
1363     - rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
1364     + rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
1365     if (!rq->special_vec.bv_page)
1366     return BLKPREP_DEFER;
1367     + clear_highpage(rq->special_vec.bv_page);
1368     rq->special_vec.bv_offset = 0;
1369     rq->special_vec.bv_len = data_len;
1370     rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
1371     @@ -821,9 +824,10 @@ static int sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, bool unmap)
1372     u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
1373     u32 data_len = sdp->sector_size;
1374    
1375     - rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
1376     + rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
1377     if (!rq->special_vec.bv_page)
1378     return BLKPREP_DEFER;
1379     + clear_highpage(rq->special_vec.bv_page);
1380     rq->special_vec.bv_offset = 0;
1381     rq->special_vec.bv_len = data_len;
1382     rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
1383     @@ -1287,7 +1291,7 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
1384     u8 *cmnd;
1385    
1386     if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1387     - __free_page(rq->special_vec.bv_page);
1388     + mempool_free(rq->special_vec.bv_page, sd_page_pool);
1389    
1390     if (SCpnt->cmnd != scsi_req(rq)->cmd) {
1391     cmnd = SCpnt->cmnd;
1392     @@ -3635,6 +3639,13 @@ static int __init init_sd(void)
1393     goto err_out_cache;
1394     }
1395    
1396     + sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
1397     + if (!sd_page_pool) {
1398     + printk(KERN_ERR "sd: can't init discard page pool\n");
1399     + err = -ENOMEM;
1400     + goto err_out_ppool;
1401     + }
1402     +
1403     err = scsi_register_driver(&sd_template.gendrv);
1404     if (err)
1405     goto err_out_driver;
1406     @@ -3642,6 +3653,9 @@ static int __init init_sd(void)
1407     return 0;
1408    
1409     err_out_driver:
1410     + mempool_destroy(sd_page_pool);
1411     +
1412     +err_out_ppool:
1413     mempool_destroy(sd_cdb_pool);
1414    
1415     err_out_cache:
1416     @@ -3668,6 +3682,7 @@ static void __exit exit_sd(void)
1417    
1418     scsi_unregister_driver(&sd_template.gendrv);
1419     mempool_destroy(sd_cdb_pool);
1420     + mempool_destroy(sd_page_pool);
1421     kmem_cache_destroy(sd_cdb_cache);
1422    
1423     class_unregister(&sd_disk_class);
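
The sd.c hunks above swap bare alloc_page(GFP_ATOMIC | __GFP_ZERO) for a small mempool so discard and WRITE SAME setup keeps working under memory pressure, zeroing explicitly because recycled pool elements are not pre-zeroed. A minimal standalone sketch of the same pattern, assuming an illustrative reserve size (DEMO_POOL_MIN) and names:

    #include <linux/mempool.h>
    #include <linux/highmem.h>
    #include <linux/gfp.h>

    #define DEMO_POOL_MIN 4                 /* illustrative reserve size */

    static mempool_t *demo_page_pool;

    static int demo_pool_init(void)
    {
            /* order 0: each pool element is a single page */
            demo_page_pool = mempool_create_page_pool(DEMO_POOL_MIN, 0);
            return demo_page_pool ? 0 : -ENOMEM;
    }

    static struct page *demo_get_zeroed(void)
    {
            struct page *page = mempool_alloc(demo_page_pool, GFP_ATOMIC);

            if (!page)
                    return NULL;
            clear_highpage(page);   /* recycled elements keep old contents */
            return page;
    }

    static void demo_put(struct page *page)
    {
            mempool_free(page, demo_page_pool);
    }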
1424     diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
1425     index 94aca1b5ac8a..01b5818a4be5 100644
1426     --- a/drivers/usb/host/xhci-hub.c
1427     +++ b/drivers/usb/host/xhci-hub.c
1428     @@ -1507,7 +1507,8 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1429     portsc_buf[port_index] = 0;
1430    
1431     /* Bail out if a USB3 port has a new device in link training */
1432     - if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
1433     + if ((hcd->speed >= HCD_USB3) &&
1434     + (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
1435     bus_state->bus_suspended = 0;
1436     spin_unlock_irqrestore(&xhci->lock, flags);
1437     xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
1438     diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
1439     index c3ed7d1c9f65..e88060ea1e33 100644
1440     --- a/drivers/usb/host/xhci.h
1441     +++ b/drivers/usb/host/xhci.h
1442     @@ -1860,6 +1860,8 @@ struct xhci_hcd {
1443     unsigned sw_lpm_support:1;
1444     /* support xHCI 1.0 spec USB2 hardware LPM */
1445     unsigned hw_lpm_support:1;
1446     + /* Broken-suspend flag for the SNPS suspend/resume issue */
1447     + unsigned broken_suspend:1;
1448     /* cached usb2 extened protocol capabilites */
1449     u32 *ext_caps;
1450     unsigned int num_ext_caps;
1451     @@ -1877,8 +1879,6 @@ struct xhci_hcd {
1452     void *dbc;
1453     /* platform-specific data -- must come last */
1454     unsigned long priv[0] __aligned(sizeof(s64));
1455     - /* Broken Suspend flag for SNPS Suspend resume issue */
1456     - u8 broken_suspend;
1457     };
1458    
1459     /* Platform specific overrides to generic XHCI hc_driver ops */
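
The xhci.h hunk above is a placement fix as much as a type change: priv[0] is overlay storage for platform glue and must stay the last member, so the quirk flag becomes a bitfield among the other :1 flags instead of a u8 trailing priv[]. A hedged sketch of the hazard, with illustrative names:

    #include <linux/types.h>

    struct demo_hcd {
            unsigned broken_suspend:1;      /* correct: before the overlay */
            /* platform-specific data -- must come last */
            unsigned long priv[0] __aligned(sizeof(s64));
            /* a real member declared here would alias whatever the
             * platform glue stores in priv[], corrupting it */
    };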
1460     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1461     index e72ad9f81c73..17787dc349f8 100644
1462     --- a/drivers/usb/serial/option.c
1463     +++ b/drivers/usb/serial/option.c
1464     @@ -1164,6 +1164,10 @@ static const struct usb_device_id option_ids[] = {
1465     { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
1466     { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
1467     .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
1468     + { USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */
1469     + .driver_info = NCTRL(0) | RSVD(1) },
1470     + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
1471     + .driver_info = NCTRL(0) },
1472     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
1473     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
1474     .driver_info = RSVD(1) },
1475     @@ -1328,6 +1332,7 @@ static const struct usb_device_id option_ids[] = {
1476     .driver_info = RSVD(4) },
1477     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
1478     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
1479     + { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) }, /* GosunCn ZTE WeLink ME3630 (MBIM mode) */
1480     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
1481     .driver_info = RSVD(4) },
1482     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
1483     @@ -1531,6 +1536,7 @@ static const struct usb_device_id option_ids[] = {
1484     .driver_info = RSVD(2) },
1485     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
1486     .driver_info = RSVD(2) },
1487     + { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
1488     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
1489     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
1490     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
1491     @@ -1758,6 +1764,7 @@ static const struct usb_device_id option_ids[] = {
1492     { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
1493     { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
1494     .driver_info = RSVD(5) | RSVD(6) },
1495     + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */
1496     { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
1497     .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
1498     { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
1499     @@ -1940,7 +1947,14 @@ static const struct usb_device_id option_ids[] = {
1500     { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
1501     { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
1502     { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
1503     - { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
1504     + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
1505     + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x10) }, /* HP lt4132 (Huawei ME906s-158) */
1506     + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x12) },
1507     + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
1508     + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
1509     + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
1510     + { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
1511     + .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
1512     { } /* Terminating entry */
1513     };
1514     MODULE_DEVICE_TABLE(usb, option_ids);
1515     diff --git a/fs/iomap.c b/fs/iomap.c
1516     index 37da7a61a6c5..ec15cf2ec696 100644
1517     --- a/fs/iomap.c
1518     +++ b/fs/iomap.c
1519     @@ -117,12 +117,6 @@ iomap_page_create(struct inode *inode, struct page *page)
1520     atomic_set(&iop->read_count, 0);
1521     atomic_set(&iop->write_count, 0);
1522     bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
1523     -
1524     - /*
1525     - * migrate_page_move_mapping() assumes that pages with private data have
1526     - * their count elevated by 1.
1527     - */
1528     - get_page(page);
1529     set_page_private(page, (unsigned long)iop);
1530     SetPagePrivate(page);
1531     return iop;
1532     @@ -139,7 +133,6 @@ iomap_page_release(struct page *page)
1533     WARN_ON_ONCE(atomic_read(&iop->write_count));
1534     ClearPagePrivate(page);
1535     set_page_private(page, 0);
1536     - put_page(page);
1537     kfree(iop);
1538     }
1539    
1540     diff --git a/fs/namei.c b/fs/namei.c
1541     index 0cab6494978c..914178cdbe94 100644
1542     --- a/fs/namei.c
1543     +++ b/fs/namei.c
1544     @@ -3701,8 +3701,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
1545     if (error)
1546     return error;
1547    
1548     - if ((S_ISCHR(mode) || S_ISBLK(mode)) &&
1549     - !ns_capable(dentry->d_sb->s_user_ns, CAP_MKNOD))
1550     + if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
1551     return -EPERM;
1552    
1553     if (!dir->i_op->mknod)
1554     diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
1555     index 89921a0d2ebb..4d598a399bbf 100644
1556     --- a/fs/proc/proc_sysctl.c
1557     +++ b/fs/proc/proc_sysctl.c
1558     @@ -464,7 +464,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
1559    
1560     inode = new_inode(sb);
1561     if (!inode)
1562     - goto out;
1563     + return ERR_PTR(-ENOMEM);
1564    
1565     inode->i_ino = get_next_ino();
1566    
1567     @@ -474,8 +474,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
1568     if (unlikely(head->unregistering)) {
1569     spin_unlock(&sysctl_lock);
1570     iput(inode);
1571     - inode = NULL;
1572     - goto out;
1573     + return ERR_PTR(-ENOENT);
1574     }
1575     ei->sysctl = head;
1576     ei->sysctl_entry = table;
1577     @@ -500,7 +499,6 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
1578     if (root->set_ownership)
1579     root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
1580    
1581     -out:
1582     return inode;
1583     }
1584    
1585     @@ -549,10 +547,11 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
1586     goto out;
1587     }
1588    
1589     - err = ERR_PTR(-ENOMEM);
1590     inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
1591     - if (!inode)
1592     + if (IS_ERR(inode)) {
1593     + err = ERR_CAST(inode);
1594     goto out;
1595     + }
1596    
1597     d_set_d_op(dentry, &proc_sys_dentry_operations);
1598     err = d_splice_alias(inode, dentry);
1599     @@ -685,7 +684,7 @@ static bool proc_sys_fill_cache(struct file *file,
1600     if (d_in_lookup(child)) {
1601     struct dentry *res;
1602     inode = proc_sys_make_inode(dir->d_sb, head, table);
1603     - if (!inode) {
1604     + if (IS_ERR(inode)) {
1605     d_lookup_done(child);
1606     dput(child);
1607     return false;
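
The proc_sysctl hunks above replace the ambiguous "NULL means some error" convention with ERR_PTR encoding, so proc_sys_lookup() can report -ENOENT from the unregistering race instead of a blanket -ENOMEM. A minimal sketch of the convention, under made-up names (struct demo_obj, demo_make, demo_use):

    #include <linux/err.h>
    #include <linux/slab.h>

    struct demo_obj { int id; };

    static struct demo_obj *demo_make(bool lost_race)
    {
            struct demo_obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

            if (!o)
                    return ERR_PTR(-ENOMEM);        /* allocation failure */
            if (lost_race) {
                    kfree(o);
                    return ERR_PTR(-ENOENT);        /* object went away under us */
            }
            return o;
    }

    static int demo_use(void)
    {
            struct demo_obj *o = demo_make(false);

            if (IS_ERR(o))
                    return PTR_ERR(o);      /* callers see the precise errno */
            kfree(o);
            return 0;
    }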
1608     diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
1609     index 4844538eb926..c6f9b2225387 100644
1610     --- a/fs/ubifs/replay.c
1611     +++ b/fs/ubifs/replay.c
1612     @@ -209,6 +209,38 @@ static int trun_remove_range(struct ubifs_info *c, struct replay_entry *r)
1613     return ubifs_tnc_remove_range(c, &min_key, &max_key);
1614     }
1615    
1616     +/**
1617     + * inode_still_linked - check whether inode in question will be re-linked.
1618     + * @c: UBIFS file-system description object
1619     + * @rino: replay entry to test
1620     + *
1621     + * O_TMPFILE files can be re-linked; their link count goes from 0 to 1.
1622     + * This case needs special care; otherwise, all references to the inode
1623     + * would be removed as soon as the first replay entry for an inode with
1624     + * link count 0 is found.
1625     + */
1626     +static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino)
1627     +{
1628     + struct replay_entry *r;
1629     +
1630     + ubifs_assert(c, rino->deletion);
1631     + ubifs_assert(c, key_type(c, &rino->key) == UBIFS_INO_KEY);
1632     +
1633     + /*
1634     + * Find the most recent entry for the inode behind @rino and check
1635     + * whether it is a deletion.
1636     + */
1637     + list_for_each_entry_reverse(r, &c->replay_list, list) {
1638     + ubifs_assert(c, r->sqnum >= rino->sqnum);
1639     + if (key_inum(c, &r->key) == key_inum(c, &rino->key))
1640     + return r->deletion == 0;
1641     +
1642     + }
1643     +
1644     + ubifs_assert(c, 0);
1645     + return false;
1646     +}
1647     +
1648     /**
1649     * apply_replay_entry - apply a replay entry to the TNC.
1650     * @c: UBIFS file-system description object
1651     @@ -236,6 +268,11 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
1652     {
1653     ino_t inum = key_inum(c, &r->key);
1654    
1655     + if (inode_still_linked(c, r)) {
1656     + err = 0;
1657     + break;
1658     + }
1659     +
1660     err = ubifs_tnc_remove_ino(c, inum);
1661     break;
1662     }
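
For context on the case inode_still_linked() guards: from userspace, an O_TMPFILE inode is born with link count 0 and only gains a link if linkat() runs, which is exactly the 0 -> 1 transition replay must not discard. A hedged userspace sketch; "/mnt/ubifs" is an illustrative mount point.

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char path[64];
            /* anonymous file: on-disk link count is 0 at this point */
            int fd = open("/mnt/ubifs", O_TMPFILE | O_WRONLY, 0600);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
            /* give it a name: link count goes 0 -> 1 */
            if (linkat(AT_FDCWD, path, AT_FDCWD, "/mnt/ubifs/file",
                       AT_SYMLINK_FOLLOW) < 0) {
                    perror("linkat");
                    close(fd);
                    return 1;
            }
            return close(fd) ? 1 : 0;
    }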
1663     diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
1664     index 89f3b03b1445..e3667c9a33a5 100644
1665     --- a/include/asm-generic/4level-fixup.h
1666     +++ b/include/asm-generic/4level-fixup.h
1667     @@ -3,7 +3,7 @@
1668     #define _4LEVEL_FIXUP_H
1669    
1670     #define __ARCH_HAS_4LEVEL_HACK
1671     -#define __PAGETABLE_PUD_FOLDED
1672     +#define __PAGETABLE_PUD_FOLDED 1
1673    
1674     #define PUD_SHIFT PGDIR_SHIFT
1675     #define PUD_SIZE PGDIR_SIZE
1676     diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
1677     index 9c2e0708eb82..73474bb52344 100644
1678     --- a/include/asm-generic/5level-fixup.h
1679     +++ b/include/asm-generic/5level-fixup.h
1680     @@ -3,7 +3,7 @@
1681     #define _5LEVEL_FIXUP_H
1682    
1683     #define __ARCH_HAS_5LEVEL_HACK
1684     -#define __PAGETABLE_P4D_FOLDED
1685     +#define __PAGETABLE_P4D_FOLDED 1
1686    
1687     #define P4D_SHIFT PGDIR_SHIFT
1688     #define P4D_SIZE PGDIR_SIZE
1689     diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
1690     index 0c34215263b8..1d6dd38c0e5e 100644
1691     --- a/include/asm-generic/pgtable-nop4d-hack.h
1692     +++ b/include/asm-generic/pgtable-nop4d-hack.h
1693     @@ -5,7 +5,7 @@
1694     #ifndef __ASSEMBLY__
1695     #include <asm-generic/5level-fixup.h>
1696    
1697     -#define __PAGETABLE_PUD_FOLDED
1698     +#define __PAGETABLE_PUD_FOLDED 1
1699    
1700     /*
1701     * Having the pud type consist of a pgd gets the size right, and allows
1702     diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
1703     index 1a29b2a0282b..04cb913797bc 100644
1704     --- a/include/asm-generic/pgtable-nop4d.h
1705     +++ b/include/asm-generic/pgtable-nop4d.h
1706     @@ -4,7 +4,7 @@
1707    
1708     #ifndef __ASSEMBLY__
1709    
1710     -#define __PAGETABLE_P4D_FOLDED
1711     +#define __PAGETABLE_P4D_FOLDED 1
1712    
1713     typedef struct { pgd_t pgd; } p4d_t;
1714    
1715     diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
1716     index f35f6e8149e4..b85b8271a73d 100644
1717     --- a/include/asm-generic/pgtable-nopmd.h
1718     +++ b/include/asm-generic/pgtable-nopmd.h
1719     @@ -8,7 +8,7 @@
1720    
1721     struct mm_struct;
1722    
1723     -#define __PAGETABLE_PMD_FOLDED
1724     +#define __PAGETABLE_PMD_FOLDED 1
1725    
1726     /*
1727     * Having the pmd type consist of a pud gets the size right, and allows
1728     diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
1729     index e950b9c50f34..9bef475db6fe 100644
1730     --- a/include/asm-generic/pgtable-nopud.h
1731     +++ b/include/asm-generic/pgtable-nopud.h
1732     @@ -9,7 +9,7 @@
1733     #else
1734     #include <asm-generic/pgtable-nop4d.h>
1735    
1736     -#define __PAGETABLE_PUD_FOLDED
1737     +#define __PAGETABLE_PUD_FOLDED 1
1738    
1739     /*
1740     * Having the pud type consist of a p4d gets the size right, and allows
1741     diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
1742     index 88ebc6102c7c..15fd0277ffa6 100644
1743     --- a/include/asm-generic/pgtable.h
1744     +++ b/include/asm-generic/pgtable.h
1745     @@ -1127,4 +1127,20 @@ static inline bool arch_has_pfn_modify_check(void)
1746     #endif
1747     #endif
1748    
1749     +/*
1750     + * On some architectures, whether the p4d/pud or pmd layer of the page
1751     + * table hierarchy is folded depends on the mm.
1752     + */
1753     +#ifndef mm_p4d_folded
1754     +#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
1755     +#endif
1756     +
1757     +#ifndef mm_pud_folded
1758     +#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
1759     +#endif
1760     +
1761     +#ifndef mm_pmd_folded
1762     +#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
1763     +#endif
1764     +
1765     #endif /* _ASM_GENERIC_PGTABLE_H */
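
The reason this series turns every empty "#define __PAGETABLE_*_FOLDED" into "#define ... 1" is the __is_defined() trick used above: the token-paste machinery only collapses to 1 when the macro expands to the literal 1. A standalone sketch of the mechanism, mirroring the kernel's kconfig.h macros with an illustrative macro name (__DEMO_FOLDED):

    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define __is_defined(x) ___is_defined(x)

    #define __DEMO_FOLDED 1     /* stands in for __PAGETABLE_PMD_FOLDED */

    int demo_folded(void)
    {
            /* __DEMO_FOLDED expands to 1: the placeholder pastes to "0,"
             * and the second argument (1) is selected */
            return __is_defined(__DEMO_FOLDED);     /* 1 */
    }

    int demo_unfolded(void)
    {
            /* __DEMO_OTHER is undefined: the paste yields a junk token,
             * so the trailing default (0) is selected */
            return __is_defined(__DEMO_OTHER);      /* 0 */
    }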
1766     diff --git a/include/linux/math64.h b/include/linux/math64.h
1767     index 837f2f2d1d34..bb2c84afb80c 100644
1768     --- a/include/linux/math64.h
1769     +++ b/include/linux/math64.h
1770     @@ -281,4 +281,7 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
1771     }
1772     #endif /* mul_u64_u32_div */
1773    
1774     +#define DIV64_U64_ROUND_UP(ll, d) \
1775     + ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
1776     +
1777     #endif /* _LINUX_MATH64_H */
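
DIV64_U64_ROUND_UP() is the classic (n + d - 1) / d round-up, wrapped in a statement expression so the divisor is evaluated only once; mm/vmscan.c later in this patch uses it so a fractional scan target cannot round down and miss the last page. A trivial worked sketch:

    #include <linux/math64.h>

    static u64 demo_round_up(void)
    {
            /* plain div64_u64(7, 3) truncates to 2; the round-up form
             * computes (7 + 3 - 1) / 3 = 3 */
            return DIV64_U64_ROUND_UP(7, 3);
    }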
1778     diff --git a/include/linux/mm.h b/include/linux/mm.h
1779     index 0416a7204be3..e899460f1bc5 100644
1780     --- a/include/linux/mm.h
1781     +++ b/include/linux/mm.h
1782     @@ -1724,11 +1724,15 @@ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
1783    
1784     static inline void mm_inc_nr_puds(struct mm_struct *mm)
1785     {
1786     + if (mm_pud_folded(mm))
1787     + return;
1788     atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
1789     }
1790    
1791     static inline void mm_dec_nr_puds(struct mm_struct *mm)
1792     {
1793     + if (mm_pud_folded(mm))
1794     + return;
1795     atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
1796     }
1797     #endif
1798     @@ -1748,11 +1752,15 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
1799    
1800     static inline void mm_inc_nr_pmds(struct mm_struct *mm)
1801     {
1802     + if (mm_pmd_folded(mm))
1803     + return;
1804     atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
1805     }
1806    
1807     static inline void mm_dec_nr_pmds(struct mm_struct *mm)
1808     {
1809     + if (mm_pmd_folded(mm))
1810     + return;
1811     atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
1812     }
1813     #endif
1814     diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
1815     index b9626aa7e90c..3e2a80cc7b56 100644
1816     --- a/include/linux/t10-pi.h
1817     +++ b/include/linux/t10-pi.h
1818     @@ -39,12 +39,13 @@ struct t10_pi_tuple {
1819    
1820     static inline u32 t10_pi_ref_tag(struct request *rq)
1821     {
1822     + unsigned int shift = ilog2(queue_logical_block_size(rq->q));
1823     +
1824     #ifdef CONFIG_BLK_DEV_INTEGRITY
1825     - return blk_rq_pos(rq) >>
1826     - (rq->q->integrity.interval_exp - 9) & 0xffffffff;
1827     -#else
1828     - return -1U;
1829     + if (rq->q->integrity.interval_exp)
1830     + shift = rq->q->integrity.interval_exp;
1831     #endif
1832     + return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
1833     }
1834    
1835     extern const struct blk_integrity_profile t10_pi_type1_crc;
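
The t10_pi_ref_tag() rewrite above derives the shift from the logical block size instead of assuming an integrity interval is configured. Worked numbers, as a hedged aside: with 512-byte blocks, shift = ilog2(512) = 9 and the reference tag is the 512-byte sector number itself; with 4096-byte blocks, shift = 12 and the sector number is scaled down by 8. A standalone sketch of the arithmetic with illustrative names:

    #include <linux/log2.h>
    #include <linux/types.h>

    #define DEMO_SECTOR_SHIFT 9     /* 512-byte units, as in SECTOR_SHIFT */

    static u32 demo_ref_tag(sector_t pos, unsigned int block_size)
    {
            unsigned int shift = ilog2(block_size);

            /* pos is in 512-byte sectors; scale to logical blocks and
             * truncate to the 32-bit reference tag */
            return pos >> (shift - DEMO_SECTOR_SHIFT) & 0xffffffff;
    }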
1836     diff --git a/include/net/xfrm.h b/include/net/xfrm.h
1837     index 0eb390c205af..da588def3c61 100644
1838     --- a/include/net/xfrm.h
1839     +++ b/include/net/xfrm.h
1840     @@ -1552,6 +1552,7 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1841     int (*func)(struct xfrm_state *, int, void*), void *);
1842     void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
1843     struct xfrm_state *xfrm_state_alloc(struct net *net);
1844     +void xfrm_state_free(struct xfrm_state *x);
1845     struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
1846     const xfrm_address_t *saddr,
1847     const struct flowi *fl,
1848     diff --git a/kernel/futex.c b/kernel/futex.c
1849     index 11fc3bb456d6..f89abca89513 100644
1850     --- a/kernel/futex.c
1851     +++ b/kernel/futex.c
1852     @@ -1148,11 +1148,65 @@ out_error:
1853     return ret;
1854     }
1855    
1856     +static int handle_exit_race(u32 __user *uaddr, u32 uval,
1857     + struct task_struct *tsk)
1858     +{
1859     + u32 uval2;
1860     +
1861     + /*
1862     + * If PF_EXITPIDONE is not yet set, then try again.
1863     + */
1864     + if (tsk && !(tsk->flags & PF_EXITPIDONE))
1865     + return -EAGAIN;
1866     +
1867     + /*
1868     + * Reread the user space value to handle the following situation:
1869     + *
1870     + * CPU0 CPU1
1871     + *
1872     + * sys_exit() sys_futex()
1873     + * do_exit() futex_lock_pi()
1874     + * futex_lock_pi_atomic()
1875     + * exit_signals(tsk) No waiters:
1876     + * tsk->flags |= PF_EXITING; *uaddr == 0x00000PID
1877     + * mm_release(tsk) Set waiter bit
1878     + * exit_robust_list(tsk) { *uaddr = 0x80000PID;
1879     + * Set owner died attach_to_pi_owner() {
1880     + * *uaddr = 0xC0000000; tsk = get_task(PID);
1881     + * } if (!tsk->flags & PF_EXITING) {
1882     + * ... attach();
1883     + * tsk->flags |= PF_EXITPIDONE; } else {
1884     + * if (!(tsk->flags & PF_EXITPIDONE))
1885     + * return -EAGAIN;
1886     + * return -ESRCH; <--- FAIL
1887     + * }
1888     + *
1889     + * Returning ESRCH unconditionally is wrong here because the
1890     + * user space value has been changed by the exiting task.
1891     + *
1892     + * The same logic applies to the case where the exiting task is
1893     + * already gone.
1894     + */
1895     + if (get_futex_value_locked(&uval2, uaddr))
1896     + return -EFAULT;
1897     +
1898     + /* If the user space value has changed, try again. */
1899     + if (uval2 != uval)
1900     + return -EAGAIN;
1901     +
1902     + /*
1903     + * The exiting task did not have a robust list, the robust list was
1904     + * corrupted or the user space value in *uaddr is simply bogus.
1905     + * Give up and tell user space.
1906     + */
1907     + return -ESRCH;
1908     +}
1909     +
1910     /*
1911     * Lookup the task for the TID provided from user space and attach to
1912     * it after doing proper sanity checks.
1913     */
1914     -static int attach_to_pi_owner(u32 uval, union futex_key *key,
1915     +static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
1916     struct futex_pi_state **ps)
1917     {
1918     pid_t pid = uval & FUTEX_TID_MASK;
1919     @@ -1162,12 +1216,15 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
1920     /*
1921     * We are the first waiter - try to look up the real owner and attach
1922     * the new pi_state to it, but bail out when TID = 0 [1]
1923     + *
1924     + * The !pid check is paranoid. None of the call sites should end up
1925     + * with pid == 0, but better safe than sorry. Let the caller retry
1926     */
1927     if (!pid)
1928     - return -ESRCH;
1929     + return -EAGAIN;
1930     p = find_get_task_by_vpid(pid);
1931     if (!p)
1932     - return -ESRCH;
1933     + return handle_exit_race(uaddr, uval, NULL);
1934    
1935     if (unlikely(p->flags & PF_KTHREAD)) {
1936     put_task_struct(p);
1937     @@ -1187,7 +1244,7 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
1938     * set, we know that the task has finished the
1939     * cleanup:
1940     */
1941     - int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
1942     + int ret = handle_exit_race(uaddr, uval, p);
1943    
1944     raw_spin_unlock_irq(&p->pi_lock);
1945     put_task_struct(p);
1946     @@ -1244,7 +1301,7 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
1947     * We are the first waiter - try to look up the owner based on
1948     * @uval and attach to it.
1949     */
1950     - return attach_to_pi_owner(uval, key, ps);
1951     + return attach_to_pi_owner(uaddr, uval, key, ps);
1952     }
1953    
1954     static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
1955     @@ -1352,7 +1409,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
1956     * attach to the owner. If that fails, no harm done, we only
1957     * set the FUTEX_WAITERS bit in the user space variable.
1958     */
1959     - return attach_to_pi_owner(uval, key, ps);
1960     + return attach_to_pi_owner(uaddr, newval, key, ps);
1961     }
1962    
1963     /**
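
Distilled from the race diagram above, the contract the futex hunks establish is: while the exiting owner may still be mutating the futex word, or the word demonstrably changed under us, report -EAGAIN so the caller retries; only a stable value with no live owner earns -ESRCH. A hedged, userspace-flavored sketch of that classification (names are illustrative):

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static int classify_owner_lookup(_Atomic unsigned int *word,
                                     unsigned int seen, bool owner_exiting)
    {
            if (owner_exiting)
                    return -EAGAIN; /* exit path may still write the word */
            if (atomic_load(word) != seen)
                    return -EAGAIN; /* value moved under us: retry the op */
            return -ESRCH;          /* stable and unexplained: give up */
    }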
1964     diff --git a/kernel/panic.c b/kernel/panic.c
1965     index 8b2e002d52eb..6a6df23acd1a 100644
1966     --- a/kernel/panic.c
1967     +++ b/kernel/panic.c
1968     @@ -14,6 +14,7 @@
1969     #include <linux/kmsg_dump.h>
1970     #include <linux/kallsyms.h>
1971     #include <linux/notifier.h>
1972     +#include <linux/vt_kern.h>
1973     #include <linux/module.h>
1974     #include <linux/random.h>
1975     #include <linux/ftrace.h>
1976     @@ -233,7 +234,10 @@ void panic(const char *fmt, ...)
1977     if (_crash_kexec_post_notifiers)
1978     __crash_kexec(NULL);
1979    
1980     - bust_spinlocks(0);
1981     +#ifdef CONFIG_VT
1982     + unblank_screen();
1983     +#endif
1984     + console_unblank();
1985    
1986     /*
1987     * We may have ended up stopping the CPU holding the lock (in
1988     diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
1989     index 4b9127e95430..5a01c4fdbfef 100644
1990     --- a/kernel/time/posix-timers.c
1991     +++ b/kernel/time/posix-timers.c
1992     @@ -289,9 +289,6 @@ static void common_hrtimer_rearm(struct k_itimer *timr)
1993     {
1994     struct hrtimer *timer = &timr->it.real.timer;
1995    
1996     - if (!timr->it_interval)
1997     - return;
1998     -
1999     timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
2000     timr->it_interval);
2001     hrtimer_restart(timer);
2002     @@ -317,7 +314,7 @@ void posixtimer_rearm(struct siginfo *info)
2003     if (!timr)
2004     return;
2005    
2006     - if (timr->it_requeue_pending == info->si_sys_private) {
2007     + if (timr->it_interval && timr->it_requeue_pending == info->si_sys_private) {
2008     timr->kclock->timer_rearm(timr);
2009    
2010     timr->it_active = 1;
2011     diff --git a/mm/huge_memory.c b/mm/huge_memory.c
2012     index 15310f14c25e..d2cd70cfaa90 100644
2013     --- a/mm/huge_memory.c
2014     +++ b/mm/huge_memory.c
2015     @@ -2127,23 +2127,25 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2016     */
2017     old_pmd = pmdp_invalidate(vma, haddr, pmd);
2018    
2019     -#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2020     pmd_migration = is_pmd_migration_entry(old_pmd);
2021     - if (pmd_migration) {
2022     + if (unlikely(pmd_migration)) {
2023     swp_entry_t entry;
2024    
2025     entry = pmd_to_swp_entry(old_pmd);
2026     page = pfn_to_page(swp_offset(entry));
2027     - } else
2028     -#endif
2029     + write = is_write_migration_entry(entry);
2030     + young = false;
2031     + soft_dirty = pmd_swp_soft_dirty(old_pmd);
2032     + } else {
2033     page = pmd_page(old_pmd);
2034     + if (pmd_dirty(old_pmd))
2035     + SetPageDirty(page);
2036     + write = pmd_write(old_pmd);
2037     + young = pmd_young(old_pmd);
2038     + soft_dirty = pmd_soft_dirty(old_pmd);
2039     + }
2040     VM_BUG_ON_PAGE(!page_count(page), page);
2041     page_ref_add(page, HPAGE_PMD_NR - 1);
2042     - if (pmd_dirty(old_pmd))
2043     - SetPageDirty(page);
2044     - write = pmd_write(old_pmd);
2045     - young = pmd_young(old_pmd);
2046     - soft_dirty = pmd_soft_dirty(old_pmd);
2047    
2048     /*
2049     * Withdraw the table only after we mark the pmd entry invalid.
2050     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2051     index 6a62b2421cdf..93e73ccb4dec 100644
2052     --- a/mm/page_alloc.c
2053     +++ b/mm/page_alloc.c
2054     @@ -5538,6 +5538,18 @@ not_early:
2055     cond_resched();
2056     }
2057     }
2058     +#ifdef CONFIG_SPARSEMEM
2059     + /*
2060     + * If the zone does not span the rest of the section then
2061     + * we should at least initialize those pages. Otherwise we
2062     + * could blow up on a poisoned page in some paths which depend
2063     + * on full sections being initialized (e.g. memory hotplug).
2064     + */
2065     + while (end_pfn % PAGES_PER_SECTION) {
2066     + __init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid);
2067     + end_pfn++;
2068     + }
2069     +#endif
2070     }
2071    
2072     static void __meminit zone_init_free_lists(struct zone *zone)
2073     @@ -7704,11 +7716,14 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
2074     * handle each tail page individually in migration.
2075     */
2076     if (PageHuge(page)) {
2077     + struct page *head = compound_head(page);
2078     + unsigned int skip_pages;
2079    
2080     - if (!hugepage_migration_supported(page_hstate(page)))
2081     + if (!hugepage_migration_supported(page_hstate(head)))
2082     goto unmovable;
2083    
2084     - iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
2085     + skip_pages = (1 << compound_order(head)) - (page - head);
2086     + iter += skip_pages - 1;
2087     continue;
2088     }
2089    
2090     diff --git a/mm/vmscan.c b/mm/vmscan.c
2091     index c5ef7240cbcb..961401c46334 100644
2092     --- a/mm/vmscan.c
2093     +++ b/mm/vmscan.c
2094     @@ -2456,9 +2456,11 @@ out:
2095     /*
2096     * Scan types proportional to swappiness and
2097     * their relative recent reclaim efficiency.
2098     + * Make sure we don't miss the last page
2099     + * because of a round-off error.
2100     */
2101     - scan = div64_u64(scan * fraction[file],
2102     - denominator);
2103     + scan = DIV64_U64_ROUND_UP(scan * fraction[file],
2104     + denominator);
2105     break;
2106     case SCAN_FILE:
2107     case SCAN_ANON:
2108     diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
2109     index b669262682c9..7a34990a68b1 100644
2110     --- a/net/xfrm/xfrm_state.c
2111     +++ b/net/xfrm/xfrm_state.c
2112     @@ -426,6 +426,12 @@ static void xfrm_put_mode(struct xfrm_mode *mode)
2113     module_put(mode->owner);
2114     }
2115    
2116     +void xfrm_state_free(struct xfrm_state *x)
2117     +{
2118     + kmem_cache_free(xfrm_state_cache, x);
2119     +}
2120     +EXPORT_SYMBOL(xfrm_state_free);
2121     +
2122     static void xfrm_state_gc_destroy(struct xfrm_state *x)
2123     {
2124     tasklet_hrtimer_cancel(&x->mtimer);
2125     @@ -452,7 +458,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
2126     }
2127     xfrm_dev_state_free(x);
2128     security_xfrm_state_free(x);
2129     - kmem_cache_free(xfrm_state_cache, x);
2130     + xfrm_state_free(x);
2131     }
2132    
2133     static void xfrm_state_gc_task(struct work_struct *work)
2134     diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
2135     index df7ca2dabc48..566919838d5e 100644
2136     --- a/net/xfrm/xfrm_user.c
2137     +++ b/net/xfrm/xfrm_user.c
2138     @@ -2288,13 +2288,13 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
2139    
2140     }
2141    
2142     - kfree(x);
2143     + xfrm_state_free(x);
2144     kfree(xp);
2145    
2146     return 0;
2147    
2148     free_state:
2149     - kfree(x);
2150     + xfrm_state_free(x);
2151     nomem:
2152     return err;
2153     }
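
The xfrm_user hunks above exist because xfrm_state objects come from a dedicated kmem_cache, so freeing one with plain kfree() is mismatched; exporting xfrm_state_free() lets xfrm_add_acquire() return the object to the cache it came from. A minimal sketch of the pairing rule, with illustrative names:

    #include <linux/slab.h>

    struct demo_state { int v; };
    static struct kmem_cache *demo_cache;

    static int demo(void)
    {
            struct demo_state *s;

            demo_cache = kmem_cache_create("demo_state",
                                           sizeof(struct demo_state),
                                           0, 0, NULL);
            if (!demo_cache)
                    return -ENOMEM;
            s = kmem_cache_alloc(demo_cache, GFP_KERNEL);
            if (s)
                    kmem_cache_free(demo_cache, s); /* not kfree(s) */
            kmem_cache_destroy(demo_cache);
            return 0;
    }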