Magellan Linux

Annotation of /trunk/kernel-alx-legacy/patches-4.9/0100-4.9.1-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3608 - (hide annotations) (download)
Fri Aug 14 07:34:29 2020 UTC (3 years, 8 months ago) by niro
File size: 114367 byte(s)
-added kernel-alx-legacy pkg
1 niro 3608 diff --git a/Makefile b/Makefile
2     index b1037774e8e8..ab3cd5128889 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 0
9     +SUBLEVEL = 1
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
14     index 940dfb406591..04abdec7f496 100644
15     --- a/arch/alpha/kernel/ptrace.c
16     +++ b/arch/alpha/kernel/ptrace.c
17     @@ -283,7 +283,7 @@ long arch_ptrace(struct task_struct *child, long request,
18     /* When I and D space are separate, these will need to be fixed. */
19     case PTRACE_PEEKTEXT: /* read word at location addr. */
20     case PTRACE_PEEKDATA:
21     - copied = access_process_vm(child, addr, &tmp, sizeof(tmp),
22     + copied = ptrace_access_vm(child, addr, &tmp, sizeof(tmp),
23     FOLL_FORCE);
24     ret = -EIO;
25     if (copied != sizeof(tmp))
26     diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
27     index f193414d0f6f..4986dc0c1dff 100644
28     --- a/arch/arm/xen/enlighten.c
29     +++ b/arch/arm/xen/enlighten.c
30     @@ -372,8 +372,7 @@ static int __init xen_guest_init(void)
31     * for secondary CPUs as they are brought up.
32     * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
33     */
34     - xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
35     - sizeof(struct vcpu_info));
36     + xen_vcpu_info = alloc_percpu(struct vcpu_info);
37     if (xen_vcpu_info == NULL)
38     return -ENOMEM;
39    
40     diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
41     index 8d79286ee4e8..360d99645163 100644
42     --- a/arch/blackfin/kernel/ptrace.c
43     +++ b/arch/blackfin/kernel/ptrace.c
44     @@ -270,7 +270,7 @@ long arch_ptrace(struct task_struct *child, long request,
45     switch (bfin_mem_access_type(addr, to_copy)) {
46     case BFIN_MEM_ACCESS_CORE:
47     case BFIN_MEM_ACCESS_CORE_ONLY:
48     - copied = access_process_vm(child, addr, &tmp,
49     + copied = ptrace_access_vm(child, addr, &tmp,
50     to_copy, FOLL_FORCE);
51     if (copied)
52     break;
53     @@ -323,7 +323,7 @@ long arch_ptrace(struct task_struct *child, long request,
54     switch (bfin_mem_access_type(addr, to_copy)) {
55     case BFIN_MEM_ACCESS_CORE:
56     case BFIN_MEM_ACCESS_CORE_ONLY:
57     - copied = access_process_vm(child, addr, &data,
58     + copied = ptrace_access_vm(child, addr, &data,
59     to_copy,
60     FOLL_FORCE | FOLL_WRITE);
61     break;
62     diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
63     index f0df654ac6fc..fe1f9cf7b391 100644
64     --- a/arch/cris/arch-v32/kernel/ptrace.c
65     +++ b/arch/cris/arch-v32/kernel/ptrace.c
66     @@ -147,7 +147,7 @@ long arch_ptrace(struct task_struct *child, long request,
67     /* The trampoline page is globally mapped, no page table to traverse.*/
68     tmp = *(unsigned long*)addr;
69     } else {
70     - copied = access_process_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE);
71     + copied = ptrace_access_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE);
72    
73     if (copied != sizeof(tmp))
74     break;
75     diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
76     index 31aa8c0f68e1..36f660da8124 100644
77     --- a/arch/ia64/kernel/ptrace.c
78     +++ b/arch/ia64/kernel/ptrace.c
79     @@ -1159,7 +1159,7 @@ arch_ptrace (struct task_struct *child, long request,
80     case PTRACE_PEEKTEXT:
81     case PTRACE_PEEKDATA:
82     /* read word at location addr */
83     - if (access_process_vm(child, addr, &data, sizeof(data),
84     + if (ptrace_access_vm(child, addr, &data, sizeof(data),
85     FOLL_FORCE)
86     != sizeof(data))
87     return -EIO;
88     diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
89     index 7e71a4e0281b..5fcbdcd7abd0 100644
90     --- a/arch/mips/kernel/ptrace32.c
91     +++ b/arch/mips/kernel/ptrace32.c
92     @@ -69,7 +69,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
93     if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0)
94     break;
95    
96     - copied = access_process_vm(child, (u64)addrOthers, &tmp,
97     + copied = ptrace_access_vm(child, (u64)addrOthers, &tmp,
98     sizeof(tmp), FOLL_FORCE);
99     if (copied != sizeof(tmp))
100     break;
101     @@ -178,7 +178,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
102     if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0)
103     break;
104     ret = 0;
105     - if (access_process_vm(child, (u64)addrOthers, &data,
106     + if (ptrace_access_vm(child, (u64)addrOthers, &data,
107     sizeof(data),
108     FOLL_FORCE | FOLL_WRITE) == sizeof(data))
109     break;
110     diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
111     index 010b7b310237..1e887f3a61a6 100644
112     --- a/arch/powerpc/kernel/ptrace32.c
113     +++ b/arch/powerpc/kernel/ptrace32.c
114     @@ -73,7 +73,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
115     if (get_user(addrOthers, (u32 __user * __user *)addr) != 0)
116     break;
117    
118     - copied = access_process_vm(child, (u64)addrOthers, &tmp,
119     + copied = ptrace_access_vm(child, (u64)addrOthers, &tmp,
120     sizeof(tmp), FOLL_FORCE);
121     if (copied != sizeof(tmp))
122     break;
123     @@ -178,7 +178,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
124     if (get_user(addrOthers, (u32 __user * __user *)addr) != 0)
125     break;
126     ret = 0;
127     - if (access_process_vm(child, (u64)addrOthers, &tmp,
128     + if (ptrace_access_vm(child, (u64)addrOthers, &tmp,
129     sizeof(tmp),
130     FOLL_FORCE | FOLL_WRITE) == sizeof(tmp))
131     break;
132     diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
133     new file mode 100644
134     index 000000000000..44b8762fa0c7
135     --- /dev/null
136     +++ b/arch/x86/include/asm/asm-prototypes.h
137     @@ -0,0 +1,16 @@
138     +#include <asm/ftrace.h>
139     +#include <asm/uaccess.h>
140     +#include <asm/string.h>
141     +#include <asm/page.h>
142     +#include <asm/checksum.h>
143     +
144     +#include <asm-generic/asm-prototypes.h>
145     +
146     +#include <asm/page.h>
147     +#include <asm/pgtable.h>
148     +#include <asm/special_insns.h>
149     +#include <asm/preempt.h>
150     +
151     +#ifndef CONFIG_X86_CMPXCHG64
152     +extern void cmpxchg8b_emu(void);
153     +#endif
154     diff --git a/block/blk-mq.c b/block/blk-mq.c
155     index f3d27a6dee09..ad459e4e8071 100644
156     --- a/block/blk-mq.c
157     +++ b/block/blk-mq.c
158     @@ -1332,9 +1332,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
159     blk_mq_put_ctx(data.ctx);
160     if (!old_rq)
161     goto done;
162     - if (!blk_mq_direct_issue_request(old_rq, &cookie))
163     - goto done;
164     - blk_mq_insert_request(old_rq, false, true, true);
165     + if (test_bit(BLK_MQ_S_STOPPED, &data.hctx->state) ||
166     + blk_mq_direct_issue_request(old_rq, &cookie) != 0)
167     + blk_mq_insert_request(old_rq, false, true, true);
168     goto done;
169     }
170    
171     diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
172     index 4c7c6da7a989..6441dfda489f 100644
173     --- a/drivers/base/power/opp/core.c
174     +++ b/drivers/base/power/opp/core.c
175     @@ -584,6 +584,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
176     struct clk *clk;
177     unsigned long freq, old_freq;
178     unsigned long u_volt, u_volt_min, u_volt_max;
179     + unsigned long old_u_volt, old_u_volt_min, old_u_volt_max;
180     int ret;
181    
182     if (unlikely(!target_freq)) {
183     @@ -633,6 +634,14 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
184     return ret;
185     }
186    
187     + if (IS_ERR(old_opp)) {
188     + old_u_volt = 0;
189     + } else {
190     + old_u_volt = old_opp->u_volt;
191     + old_u_volt_min = old_opp->u_volt_min;
192     + old_u_volt_max = old_opp->u_volt_max;
193     + }
194     +
195     u_volt = opp->u_volt;
196     u_volt_min = opp->u_volt_min;
197     u_volt_max = opp->u_volt_max;
198     @@ -677,9 +686,10 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
199     __func__, old_freq);
200     restore_voltage:
201     /* This shouldn't harm even if the voltages weren't updated earlier */
202     - if (!IS_ERR(old_opp))
203     - _set_opp_voltage(dev, reg, old_opp->u_volt,
204     - old_opp->u_volt_min, old_opp->u_volt_max);
205     + if (old_u_volt) {
206     + _set_opp_voltage(dev, reg, old_u_volt, old_u_volt_min,
207     + old_u_volt_max);
208     + }
209    
210     return ret;
211     }
212     @@ -1316,7 +1326,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
213     * that this function is *NOT* called under RCU protection or in contexts where
214     * mutex cannot be locked.
215     */
216     -int dev_pm_opp_set_regulator(struct device *dev, const char *name)
217     +struct opp_table *dev_pm_opp_set_regulator(struct device *dev, const char *name)
218     {
219     struct opp_table *opp_table;
220     struct regulator *reg;
221     @@ -1354,20 +1364,20 @@ int dev_pm_opp_set_regulator(struct device *dev, const char *name)
222     opp_table->regulator = reg;
223    
224     mutex_unlock(&opp_table_lock);
225     - return 0;
226     + return opp_table;
227    
228     err:
229     _remove_opp_table(opp_table);
230     unlock:
231     mutex_unlock(&opp_table_lock);
232    
233     - return ret;
234     + return ERR_PTR(ret);
235     }
236     EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
237    
238     /**
239     * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
240     - * @dev: Device for which regulator was set.
241     + * @opp_table: OPP table returned from dev_pm_opp_set_regulator().
242     *
243     * Locking: The internal opp_table and opp structures are RCU protected.
244     * Hence this function internally uses RCU updater strategy with mutex locks
245     @@ -1375,22 +1385,12 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
246     * that this function is *NOT* called under RCU protection or in contexts where
247     * mutex cannot be locked.
248     */
249     -void dev_pm_opp_put_regulator(struct device *dev)
250     +void dev_pm_opp_put_regulator(struct opp_table *opp_table)
251     {
252     - struct opp_table *opp_table;
253     -
254     mutex_lock(&opp_table_lock);
255    
256     - /* Check for existing table for 'dev' first */
257     - opp_table = _find_opp_table(dev);
258     - if (IS_ERR(opp_table)) {
259     - dev_err(dev, "Failed to find opp_table: %ld\n",
260     - PTR_ERR(opp_table));
261     - goto unlock;
262     - }
263     -
264     if (IS_ERR(opp_table->regulator)) {
265     - dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
266     + pr_err("%s: Doesn't have regulator set\n", __func__);
267     goto unlock;
268     }
269    
270     diff --git a/drivers/block/loop.c b/drivers/block/loop.c
271     index fa1b7a90ba11..4af818766797 100644
272     --- a/drivers/block/loop.c
273     +++ b/drivers/block/loop.c
274     @@ -1646,7 +1646,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
275     blk_mq_start_request(bd->rq);
276    
277     if (lo->lo_state != Lo_bound)
278     - return -EIO;
279     + return BLK_MQ_RQ_QUEUE_ERROR;
280    
281     switch (req_op(cmd->rq)) {
282     case REQ_OP_FLUSH:
283     diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
284     index 62028f483bba..a2ab00831df1 100644
285     --- a/drivers/char/tpm/xen-tpmfront.c
286     +++ b/drivers/char/tpm/xen-tpmfront.c
287     @@ -307,7 +307,6 @@ static int tpmfront_probe(struct xenbus_device *dev,
288     rv = setup_ring(dev, priv);
289     if (rv) {
290     chip = dev_get_drvdata(&dev->dev);
291     - tpm_chip_unregister(chip);
292     ring_free(priv);
293     return rv;
294     }
295     diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
296     index 8831e1a05367..11d8aa3ec186 100644
297     --- a/drivers/clk/ti/clk-3xxx.c
298     +++ b/drivers/clk/ti/clk-3xxx.c
299     @@ -22,13 +22,6 @@
300    
301     #include "clock.h"
302    
303     -/*
304     - * DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
305     - * that are sourced by DPLL5, and both of these require this clock
306     - * to be at 120 MHz for proper operation.
307     - */
308     -#define DPLL5_FREQ_FOR_USBHOST 120000000
309     -
310     #define OMAP3430ES2_ST_DSS_IDLE_SHIFT 1
311     #define OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT 5
312     #define OMAP3430ES2_ST_SSI_IDLE_SHIFT 8
313     @@ -546,14 +539,21 @@ void __init omap3_clk_lock_dpll5(void)
314     struct clk *dpll5_clk;
315     struct clk *dpll5_m2_clk;
316    
317     + /*
318     + * Errata sprz319f advisory 2.1 documents a USB host clock drift issue
319     + * that can be worked around using specially crafted dpll5 settings
320     + * with a dpll5_m2 divider set to 8. Set the dpll5 rate to 8x the USB
321     + * host clock rate, its .set_rate handler() will detect that frequency
322     + * and use the errata settings.
323     + */
324     dpll5_clk = clk_get(NULL, "dpll5_ck");
325     - clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST);
326     + clk_set_rate(dpll5_clk, OMAP3_DPLL5_FREQ_FOR_USBHOST * 8);
327     clk_prepare_enable(dpll5_clk);
328    
329     - /* Program dpll5_m2_clk divider for no division */
330     + /* Program dpll5_m2_clk divider */
331     dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck");
332     clk_prepare_enable(dpll5_m2_clk);
333     - clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST);
334     + clk_set_rate(dpll5_m2_clk, OMAP3_DPLL5_FREQ_FOR_USBHOST);
335    
336     clk_disable_unprepare(dpll5_m2_clk);
337     clk_disable_unprepare(dpll5_clk);
338     diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
339     index 90f3f472ae1c..13c37f48d9d6 100644
340     --- a/drivers/clk/ti/clock.h
341     +++ b/drivers/clk/ti/clock.h
342     @@ -257,11 +257,20 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
343     unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
344     unsigned long parent_rate);
345    
346     +/*
347     + * OMAP3_DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
348     + * that are sourced by DPLL5, and both of these require this clock
349     + * to be at 120 MHz for proper operation.
350     + */
351     +#define OMAP3_DPLL5_FREQ_FOR_USBHOST 120000000
352     +
353     unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
354     int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
355     unsigned long parent_rate);
356     int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
357     unsigned long parent_rate, u8 index);
358     +int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
359     + unsigned long parent_rate);
360     void omap3_clk_lock_dpll5(void);
361    
362     unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
363     diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
364     index 9fc8754a6e61..4b9a419d8e14 100644
365     --- a/drivers/clk/ti/dpll.c
366     +++ b/drivers/clk/ti/dpll.c
367     @@ -114,6 +114,18 @@ static const struct clk_ops omap3_dpll_ck_ops = {
368     .round_rate = &omap2_dpll_round_rate,
369     };
370    
371     +static const struct clk_ops omap3_dpll5_ck_ops = {
372     + .enable = &omap3_noncore_dpll_enable,
373     + .disable = &omap3_noncore_dpll_disable,
374     + .get_parent = &omap2_init_dpll_parent,
375     + .recalc_rate = &omap3_dpll_recalc,
376     + .set_rate = &omap3_dpll5_set_rate,
377     + .set_parent = &omap3_noncore_dpll_set_parent,
378     + .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
379     + .determine_rate = &omap3_noncore_dpll_determine_rate,
380     + .round_rate = &omap2_dpll_round_rate,
381     +};
382     +
383     static const struct clk_ops omap3_dpll_per_ck_ops = {
384     .enable = &omap3_noncore_dpll_enable,
385     .disable = &omap3_noncore_dpll_disable,
386     @@ -474,7 +486,12 @@ static void __init of_ti_omap3_dpll_setup(struct device_node *node)
387     .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
388     };
389    
390     - of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
391     + if ((of_machine_is_compatible("ti,omap3630") ||
392     + of_machine_is_compatible("ti,omap36xx")) &&
393     + !strcmp(node->name, "dpll5_ck"))
394     + of_ti_dpll_setup(node, &omap3_dpll5_ck_ops, &dd);
395     + else
396     + of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
397     }
398     CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
399     of_ti_omap3_dpll_setup);
400     diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
401     index 88f2ce81ba55..4cdd28a25584 100644
402     --- a/drivers/clk/ti/dpll3xxx.c
403     +++ b/drivers/clk/ti/dpll3xxx.c
404     @@ -838,3 +838,70 @@ int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
405     return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate,
406     index);
407     }
408     +
409     +/* Apply DM3730 errata sprz319 advisory 2.1. */
410     +static bool omap3_dpll5_apply_errata(struct clk_hw *hw,
411     + unsigned long parent_rate)
412     +{
413     + struct omap3_dpll5_settings {
414     + unsigned int rate, m, n;
415     + };
416     +
417     + static const struct omap3_dpll5_settings precomputed[] = {
418     + /*
419     + * From DM3730 errata advisory 2.1, table 35 and 36.
420     + * The N value is increased by 1 compared to the tables as the
421     + * errata lists register values while last_rounded_field is the
422     + * real divider value.
423     + */
424     + { 12000000, 80, 0 + 1 },
425     + { 13000000, 443, 5 + 1 },
426     + { 19200000, 50, 0 + 1 },
427     + { 26000000, 443, 11 + 1 },
428     + { 38400000, 25, 0 + 1 }
429     + };
430     +
431     + const struct omap3_dpll5_settings *d;
432     + struct clk_hw_omap *clk = to_clk_hw_omap(hw);
433     + struct dpll_data *dd;
434     + unsigned int i;
435     +
436     + for (i = 0; i < ARRAY_SIZE(precomputed); ++i) {
437     + if (parent_rate == precomputed[i].rate)
438     + break;
439     + }
440     +
441     + if (i == ARRAY_SIZE(precomputed))
442     + return false;
443     +
444     + d = &precomputed[i];
445     +
446     + /* Update the M, N and rounded rate values and program the DPLL. */
447     + dd = clk->dpll_data;
448     + dd->last_rounded_m = d->m;
449     + dd->last_rounded_n = d->n;
450     + dd->last_rounded_rate = div_u64((u64)parent_rate * d->m, d->n);
451     + omap3_noncore_dpll_program(clk, 0);
452     +
453     + return true;
454     +}
455     +
456     +/**
457     + * omap3_dpll5_set_rate - set rate for omap3 dpll5
458     + * @hw: clock to change
459     + * @rate: target rate for clock
460     + * @parent_rate: rate of the parent clock
461     + *
462     + * Set rate for the DPLL5 clock. Apply the sprz319 advisory 2.1 on OMAP36xx if
463     + * the DPLL is used for USB host (detected through the requested rate).
464     + */
465     +int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
466     + unsigned long parent_rate)
467     +{
468     + if (rate == OMAP3_DPLL5_FREQ_FOR_USBHOST * 8) {
469     + if (omap3_dpll5_apply_errata(hw, parent_rate))
470     + return 0;
471     + }
472     +
473     + return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
474     +}
475     diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
476     index 5c07ae05d69a..4d3ec92cbabf 100644
477     --- a/drivers/cpufreq/cpufreq-dt.c
478     +++ b/drivers/cpufreq/cpufreq-dt.c
479     @@ -28,6 +28,7 @@
480     #include "cpufreq-dt.h"
481    
482     struct private_data {
483     + struct opp_table *opp_table;
484     struct device *cpu_dev;
485     struct thermal_cooling_device *cdev;
486     const char *reg_name;
487     @@ -143,6 +144,7 @@ static int resources_available(void)
488     static int cpufreq_init(struct cpufreq_policy *policy)
489     {
490     struct cpufreq_frequency_table *freq_table;
491     + struct opp_table *opp_table = NULL;
492     struct private_data *priv;
493     struct device *cpu_dev;
494     struct clk *cpu_clk;
495     @@ -186,8 +188,9 @@ static int cpufreq_init(struct cpufreq_policy *policy)
496     */
497     name = find_supply_name(cpu_dev);
498     if (name) {
499     - ret = dev_pm_opp_set_regulator(cpu_dev, name);
500     - if (ret) {
501     + opp_table = dev_pm_opp_set_regulator(cpu_dev, name);
502     + if (IS_ERR(opp_table)) {
503     + ret = PTR_ERR(opp_table);
504     dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
505     policy->cpu, ret);
506     goto out_put_clk;
507     @@ -237,6 +240,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
508     }
509    
510     priv->reg_name = name;
511     + priv->opp_table = opp_table;
512    
513     ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
514     if (ret) {
515     @@ -285,7 +289,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
516     out_free_opp:
517     dev_pm_opp_of_cpumask_remove_table(policy->cpus);
518     if (name)
519     - dev_pm_opp_put_regulator(cpu_dev);
520     + dev_pm_opp_put_regulator(opp_table);
521     out_put_clk:
522     clk_put(cpu_clk);
523    
524     @@ -300,7 +304,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
525     dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
526     dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
527     if (priv->reg_name)
528     - dev_pm_opp_put_regulator(priv->cpu_dev);
529     + dev_pm_opp_put_regulator(priv->opp_table);
530    
531     clk_put(policy->clk);
532     kfree(priv);
533     diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
534     index 954a64c7757b..c310318b34dd 100644
535     --- a/drivers/crypto/caam/caamalg.c
536     +++ b/drivers/crypto/caam/caamalg.c
537     @@ -736,7 +736,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
538    
539     /* Will read cryptlen */
540     append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
541     - aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
542     + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
543     + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
544     + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
545    
546     /* Write ICV */
547     append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
548     diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
549     index a2768835d394..0aedd0ebccec 100644
550     --- a/drivers/md/dm-crypt.c
551     +++ b/drivers/md/dm-crypt.c
552     @@ -1503,12 +1503,15 @@ static int crypt_set_key(struct crypt_config *cc, char *key)
553     if (!cc->key_size && strcmp(key, "-"))
554     goto out;
555    
556     + /* clear the flag since following operations may invalidate previously valid key */
557     + clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
558     +
559     if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
560     goto out;
561    
562     - set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
563     -
564     r = crypt_setkey_allcpus(cc);
565     + if (!r)
566     + set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
567    
568     out:
569     /* Hex key string not needed after here, so wipe it. */
570     diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
571     index 6a2e8dd44a1b..3643cba71351 100644
572     --- a/drivers/md/dm-flakey.c
573     +++ b/drivers/md/dm-flakey.c
574     @@ -200,11 +200,13 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
575    
576     if (!(fc->up_interval + fc->down_interval)) {
577     ti->error = "Total (up + down) interval is zero";
578     + r = -EINVAL;
579     goto bad;
580     }
581    
582     if (fc->up_interval + fc->down_interval < fc->up_interval) {
583     ti->error = "Interval overflow";
584     + r = -EINVAL;
585     goto bad;
586     }
587    
588     diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
589     index 6d53810963f7..af2d79b52484 100644
590     --- a/drivers/md/dm-raid.c
591     +++ b/drivers/md/dm-raid.c
592     @@ -2994,6 +2994,9 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
593     }
594     }
595    
596     + /* Disable/enable discard support on raid set. */
597     + configure_discard_support(rs);
598     +
599     mddev_unlock(&rs->md);
600     return 0;
601    
602     @@ -3580,12 +3583,6 @@ static int raid_preresume(struct dm_target *ti)
603     if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
604     rs_update_sbs(rs);
605    
606     - /*
607     - * Disable/enable discard support on raid set after any
608     - * conversion, because devices can have been added
609     - */
610     - configure_discard_support(rs);
611     -
612     /* Load the bitmap from disk unless raid0 */
613     r = __load_dirty_region_bitmap(rs);
614     if (r)
615     diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
616     index 1d0d2adc050a..31a89c8832c0 100644
617     --- a/drivers/md/dm-rq.c
618     +++ b/drivers/md/dm-rq.c
619     @@ -226,6 +226,9 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
620     */
621     static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
622     {
623     + struct request_queue *q = md->queue;
624     + unsigned long flags;
625     +
626     atomic_dec(&md->pending[rw]);
627    
628     /* nudge anyone waiting on suspend queue */
629     @@ -238,8 +241,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
630     * back into ->request_fn() could deadlock attempting to grab the
631     * queue lock again.
632     */
633     - if (!md->queue->mq_ops && run_queue)
634     - blk_run_queue_async(md->queue);
635     + if (!q->mq_ops && run_queue) {
636     + spin_lock_irqsave(q->queue_lock, flags);
637     + blk_run_queue_async(q);
638     + spin_unlock_irqrestore(q->queue_lock, flags);
639     + }
640    
641     /*
642     * dm_put() must be at the end of this function. See the comment above
643     diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
644     index c4b53b332607..5ac239d0f787 100644
645     --- a/drivers/md/dm-table.c
646     +++ b/drivers/md/dm-table.c
647     @@ -924,12 +924,6 @@ static int dm_table_determine_type(struct dm_table *t)
648    
649     BUG_ON(!request_based); /* No targets in this table */
650    
651     - if (list_empty(devices) && __table_type_request_based(live_md_type)) {
652     - /* inherit live MD type */
653     - t->type = live_md_type;
654     - return 0;
655     - }
656     -
657     /*
658     * The only way to establish DM_TYPE_MQ_REQUEST_BASED is by
659     * having a compatible target use dm_table_set_type.
660     @@ -948,6 +942,19 @@ static int dm_table_determine_type(struct dm_table *t)
661     return -EINVAL;
662     }
663    
664     + if (list_empty(devices)) {
665     + int srcu_idx;
666     + struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
667     +
668     + /* inherit live table's type and all_blk_mq */
669     + if (live_table) {
670     + t->type = live_table->type;
671     + t->all_blk_mq = live_table->all_blk_mq;
672     + }
673     + dm_put_live_table(t->md, srcu_idx);
674     + return 0;
675     + }
676     +
677     /* Non-request-stackable devices can't be used for request-based dm */
678     list_for_each_entry(dd, devices, list) {
679     struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
680     @@ -974,6 +981,11 @@ static int dm_table_determine_type(struct dm_table *t)
681     t->all_blk_mq = true;
682     }
683    
684     + if (t->type == DM_TYPE_MQ_REQUEST_BASED && !t->all_blk_mq) {
685     + DMERR("table load rejected: all devices are not blk-mq request-stackable");
686     + return -EINVAL;
687     + }
688     +
689     return 0;
690     }
691    
692     diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
693     index 7e44005595c1..20557e2c60c6 100644
694     --- a/drivers/md/persistent-data/dm-space-map-metadata.c
695     +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
696     @@ -775,17 +775,15 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
697     memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
698    
699     r = sm_ll_new_metadata(&smm->ll, tm);
700     + if (!r) {
701     + if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
702     + nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
703     + r = sm_ll_extend(&smm->ll, nr_blocks);
704     + }
705     + memcpy(&smm->sm, &ops, sizeof(smm->sm));
706     if (r)
707     return r;
708    
709     - if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
710     - nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
711     - r = sm_ll_extend(&smm->ll, nr_blocks);
712     - if (r)
713     - return r;
714     -
715     - memcpy(&smm->sm, &ops, sizeof(smm->sm));
716     -
717     /*
718     * Now we need to update the newly created data structures with the
719     * allocated blocks that they were built from.
720     diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
721     index af5e2dc4a3d5..011f88e5663e 100644
722     --- a/drivers/nvme/target/configfs.c
723     +++ b/drivers/nvme/target/configfs.c
724     @@ -271,7 +271,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
725    
726     mutex_lock(&subsys->lock);
727     ret = -EBUSY;
728     - if (nvmet_ns_enabled(ns))
729     + if (ns->enabled)
730     goto out_unlock;
731    
732     kfree(ns->device_path);
733     @@ -307,7 +307,7 @@ static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
734     int ret = 0;
735    
736     mutex_lock(&subsys->lock);
737     - if (nvmet_ns_enabled(ns)) {
738     + if (ns->enabled) {
739     ret = -EBUSY;
740     goto out_unlock;
741     }
742     @@ -339,7 +339,7 @@ CONFIGFS_ATTR(nvmet_ns_, device_nguid);
743    
744     static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
745     {
746     - return sprintf(page, "%d\n", nvmet_ns_enabled(to_nvmet_ns(item)));
747     + return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
748     }
749    
750     static ssize_t nvmet_ns_enable_store(struct config_item *item,
751     diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
752     index a21437a33adb..55ce769cecee 100644
753     --- a/drivers/nvme/target/core.c
754     +++ b/drivers/nvme/target/core.c
755     @@ -264,7 +264,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
756     int ret = 0;
757    
758     mutex_lock(&subsys->lock);
759     - if (!list_empty(&ns->dev_link))
760     + if (ns->enabled)
761     goto out_unlock;
762    
763     ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
764     @@ -309,6 +309,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
765     list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
766     nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
767    
768     + ns->enabled = true;
769     ret = 0;
770     out_unlock:
771     mutex_unlock(&subsys->lock);
772     @@ -325,11 +326,11 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
773     struct nvmet_ctrl *ctrl;
774    
775     mutex_lock(&subsys->lock);
776     - if (list_empty(&ns->dev_link)) {
777     - mutex_unlock(&subsys->lock);
778     - return;
779     - }
780     - list_del_init(&ns->dev_link);
781     + if (!ns->enabled)
782     + goto out_unlock;
783     +
784     + ns->enabled = false;
785     + list_del_rcu(&ns->dev_link);
786     mutex_unlock(&subsys->lock);
787    
788     /*
789     @@ -351,6 +352,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
790    
791     if (ns->bdev)
792     blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
793     +out_unlock:
794     mutex_unlock(&subsys->lock);
795     }
796    
797     diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
798     index 76b6eedccaf9..7655a351320f 100644
799     --- a/drivers/nvme/target/nvmet.h
800     +++ b/drivers/nvme/target/nvmet.h
801     @@ -47,6 +47,7 @@ struct nvmet_ns {
802     loff_t size;
803     u8 nguid[16];
804    
805     + bool enabled;
806     struct nvmet_subsys *subsys;
807     const char *device_path;
808    
809     @@ -61,11 +62,6 @@ static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
810     return container_of(to_config_group(item), struct nvmet_ns, group);
811     }
812    
813     -static inline bool nvmet_ns_enabled(struct nvmet_ns *ns)
814     -{
815     - return !list_empty_careful(&ns->dev_link);
816     -}
817     -
818     struct nvmet_cq {
819     u16 qid;
820     u16 size;
821     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
822     index fada988512a1..c5ff13f22b24 100644
823     --- a/drivers/usb/class/cdc-acm.c
824     +++ b/drivers/usb/class/cdc-acm.c
825     @@ -1719,6 +1719,7 @@ static const struct usb_device_id acm_ids[] = {
826     { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
827     .driver_info = QUIRK_CONTROL_LINE_STATE, },
828     { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
829     + { USB_DEVICE(0x2184, 0x0036) }, /* GW Instek AFG-125 */
830     { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
831     },
832     /* Motorola H24 HSPA module: */
833     diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
834     index cbb146736f57..0d81436c94bd 100644
835     --- a/drivers/usb/core/hub.c
836     +++ b/drivers/usb/core/hub.c
837     @@ -101,6 +101,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
838    
839     static void hub_release(struct kref *kref);
840     static int usb_reset_and_verify_device(struct usb_device *udev);
841     +static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
842     + struct usb_port *port_dev);
843    
844     static inline char *portspeed(struct usb_hub *hub, int portstatus)
845     {
846     @@ -899,82 +901,28 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1,
847     }
848    
849     /*
850     - * If USB 3.0 ports are placed into the Disabled state, they will no longer
851     - * detect any device connects or disconnects. This is generally not what the
852     - * USB core wants, since it expects a disabled port to produce a port status
853     - * change event when a new device connects.
854     - *
855     - * Instead, set the link state to Disabled, wait for the link to settle into
856     - * that state, clear any change bits, and then put the port into the RxDetect
857     - * state.
858     + * USB-3 does not have a similar link state as USB-2 that will avoid negotiating
859     + * a connection with a plugged-in cable but will signal the host when the cable
860     + * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices
861     */
862     -static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
863     -{
864     - int ret;
865     - int total_time;
866     - u16 portchange, portstatus;
867     -
868     - if (!hub_is_superspeed(hub->hdev))
869     - return -EINVAL;
870     -
871     - ret = hub_port_status(hub, port1, &portstatus, &portchange);
872     - if (ret < 0)
873     - return ret;
874     -
875     - /*
876     - * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
877     - * Controller [1022:7814] will have spurious result making the following
878     - * usb 3.0 device hotplugging route to the 2.0 root hub and recognized
879     - * as high-speed device if we set the usb 3.0 port link state to
880     - * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we
881     - * check the state here to avoid the bug.
882     - */
883     - if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
884     - USB_SS_PORT_LS_RX_DETECT) {
885     - dev_dbg(&hub->ports[port1 - 1]->dev,
886     - "Not disabling port; link state is RxDetect\n");
887     - return ret;
888     - }
889     -
890     - ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
891     - if (ret)
892     - return ret;
893     -
894     - /* Wait for the link to enter the disabled state. */
895     - for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
896     - ret = hub_port_status(hub, port1, &portstatus, &portchange);
897     - if (ret < 0)
898     - return ret;
899     -
900     - if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
901     - USB_SS_PORT_LS_SS_DISABLED)
902     - break;
903     - if (total_time >= HUB_DEBOUNCE_TIMEOUT)
904     - break;
905     - msleep(HUB_DEBOUNCE_STEP);
906     - }
907     - if (total_time >= HUB_DEBOUNCE_TIMEOUT)
908     - dev_warn(&hub->ports[port1 - 1]->dev,
909     - "Could not disable after %d ms\n", total_time);
910     -
911     - return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
912     -}
913     -
914     static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
915     {
916     struct usb_port *port_dev = hub->ports[port1 - 1];
917     struct usb_device *hdev = hub->hdev;
918     int ret = 0;
919    
920     - if (port_dev->child && set_state)
921     - usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
922     if (!hub->error) {
923     - if (hub_is_superspeed(hub->hdev))
924     - ret = hub_usb3_port_disable(hub, port1);
925     - else
926     + if (hub_is_superspeed(hub->hdev)) {
927     + hub_usb3_port_prepare_disable(hub, port_dev);
928     + ret = hub_set_port_link_state(hub, port_dev->portnum,
929     + USB_SS_PORT_LS_U3);
930     + } else {
931     ret = usb_clear_port_feature(hdev, port1,
932     USB_PORT_FEAT_ENABLE);
933     + }
934     }
935     + if (port_dev->child && set_state)
936     + usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
937     if (ret && ret != -ENODEV)
938     dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret);
939     return ret;
940     @@ -4140,6 +4088,26 @@ void usb_unlocked_enable_lpm(struct usb_device *udev)
941     }
942     EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
943    
944     +/* usb3 devices use U3 for disabled, make sure remote wakeup is disabled */
945     +static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
946     + struct usb_port *port_dev)
947     +{
948     + struct usb_device *udev = port_dev->child;
949     + int ret;
950     +
951     + if (udev && udev->port_is_suspended && udev->do_remote_wakeup) {
952     + ret = hub_set_port_link_state(hub, port_dev->portnum,
953     + USB_SS_PORT_LS_U0);
954     + if (!ret) {
955     + msleep(USB_RESUME_TIMEOUT);
956     + ret = usb_disable_remote_wakeup(udev);
957     + }
958     + if (ret)
959     + dev_warn(&udev->dev,
960     + "Port disable: can't disable remote wake\n");
961     + udev->do_remote_wakeup = 0;
962     + }
963     +}
964    
965     #else /* CONFIG_PM */
966    
967     @@ -4147,6 +4115,9 @@ EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
968     #define hub_resume NULL
969     #define hub_reset_resume NULL
970    
971     +static inline void hub_usb3_port_prepare_disable(struct usb_hub *hub,
972     + struct usb_port *port_dev) { }
973     +
974     int usb_disable_lpm(struct usb_device *udev)
975     {
976     return 0;
977     diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
978     index 3ed5162677ad..1713248ab15a 100644
979     --- a/drivers/usb/core/ledtrig-usbport.c
980     +++ b/drivers/usb/core/ledtrig-usbport.c
981     @@ -74,8 +74,7 @@ static void usbport_trig_update_count(struct usbport_trig_data *usbport_data)
982    
983     usbport_data->count = 0;
984     usb_for_each_dev(usbport_data, usbport_trig_usb_dev_check);
985     - led_cdev->brightness_set(led_cdev,
986     - usbport_data->count ? LED_FULL : LED_OFF);
987     + led_set_brightness(led_cdev, usbport_data->count ? LED_FULL : LED_OFF);
988     }
989    
990     /***************************************
991     @@ -228,12 +227,12 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action,
992     case USB_DEVICE_ADD:
993     usbport_trig_add_usb_dev_ports(usb_dev, usbport_data);
994     if (observed && usbport_data->count++ == 0)
995     - led_cdev->brightness_set(led_cdev, LED_FULL);
996     + led_set_brightness(led_cdev, LED_FULL);
997     return NOTIFY_OK;
998     case USB_DEVICE_REMOVE:
999     usbport_trig_remove_usb_dev_ports(usbport_data, usb_dev);
1000     if (observed && --usbport_data->count == 0)
1001     - led_cdev->brightness_set(led_cdev, LED_OFF);
1002     + led_set_brightness(led_cdev, LED_OFF);
1003     return NOTIFY_OK;
1004     }
1005    
1006     diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
1007     index 1dfa56a5f1c5..b3687e223e00 100644
1008     --- a/drivers/usb/dwc3/gadget.c
1009     +++ b/drivers/usb/dwc3/gadget.c
1010     @@ -771,6 +771,9 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
1011     unsigned length, unsigned chain, unsigned node)
1012     {
1013     struct dwc3_trb *trb;
1014     + struct dwc3 *dwc = dep->dwc;
1015     + struct usb_gadget *gadget = &dwc->gadget;
1016     + enum usb_device_speed speed = gadget->speed;
1017    
1018     dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s",
1019     dep->name, req, (unsigned long long) dma,
1020     @@ -798,10 +801,16 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
1021     break;
1022    
1023     case USB_ENDPOINT_XFER_ISOC:
1024     - if (!node)
1025     + if (!node) {
1026     trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
1027     - else
1028     +
1029     + if (speed == USB_SPEED_HIGH) {
1030     + struct usb_ep *ep = &dep->endpoint;
1031     + trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1);
1032     + }
1033     + } else {
1034     trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
1035     + }
1036    
1037     /* always enable Interrupt on Missed ISOC */
1038     trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
1039     diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
1040     index 32176f779861..e38b21087d26 100644
1041     --- a/drivers/usb/gadget/composite.c
1042     +++ b/drivers/usb/gadget/composite.c
1043     @@ -197,11 +197,16 @@ int config_ep_by_speed(struct usb_gadget *g,
1044    
1045     ep_found:
1046     /* commit results */
1047     - _ep->maxpacket = usb_endpoint_maxp(chosen_desc);
1048     + _ep->maxpacket = usb_endpoint_maxp(chosen_desc) & 0x7ff;
1049     _ep->desc = chosen_desc;
1050     _ep->comp_desc = NULL;
1051     _ep->maxburst = 0;
1052     - _ep->mult = 0;
1053     + _ep->mult = 1;
1054     +
1055     + if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) ||
1056     + usb_endpoint_xfer_int(_ep->desc)))
1057     + _ep->mult = usb_endpoint_maxp(_ep->desc) & 0x7ff;
1058     +
1059     if (!want_comp_desc)
1060     return 0;
1061    
1062     @@ -218,7 +223,7 @@ int config_ep_by_speed(struct usb_gadget *g,
1063     switch (usb_endpoint_type(_ep->desc)) {
1064     case USB_ENDPOINT_XFER_ISOC:
1065     /* mult: bits 1:0 of bmAttributes */
1066     - _ep->mult = comp_desc->bmAttributes & 0x3;
1067     + _ep->mult = (comp_desc->bmAttributes & 0x3) + 1;
1068     case USB_ENDPOINT_XFER_BULK:
1069     case USB_ENDPOINT_XFER_INT:
1070     _ep->maxburst = comp_desc->bMaxBurst + 1;
1071     diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
1072     index cd214ec8a601..969cfe741380 100644
1073     --- a/drivers/usb/gadget/function/f_uac2.c
1074     +++ b/drivers/usb/gadget/function/f_uac2.c
1075     @@ -1067,13 +1067,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
1076     agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
1077     if (!agdev->out_ep) {
1078     dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
1079     - goto err;
1080     + return ret;
1081     }
1082    
1083     agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
1084     if (!agdev->in_ep) {
1085     dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
1086     - goto err;
1087     + return ret;
1088     }
1089    
1090     uac2->p_prm.uac2 = uac2;
1091     @@ -1091,7 +1091,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
1092     ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL,
1093     NULL);
1094     if (ret)
1095     - goto err;
1096     + return ret;
1097    
1098     prm = &agdev->uac2.c_prm;
1099     prm->max_psize = hs_epout_desc.wMaxPacketSize;
1100     @@ -1106,19 +1106,19 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
1101     prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL);
1102     if (!prm->rbuf) {
1103     prm->max_psize = 0;
1104     - goto err_free_descs;
1105     + goto err;
1106     }
1107    
1108     ret = alsa_uac2_init(agdev);
1109     if (ret)
1110     - goto err_free_descs;
1111     + goto err;
1112     return 0;
1113    
1114     -err_free_descs:
1115     - usb_free_all_descriptors(fn);
1116     err:
1117     kfree(agdev->uac2.p_prm.rbuf);
1118     kfree(agdev->uac2.c_prm.rbuf);
1119     +err_free_descs:
1120     + usb_free_all_descriptors(fn);
1121     return -EINVAL;
1122     }
1123    
1124     diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
1125     index 3d0d5d94a62f..0f01c04d7cbd 100644
1126     --- a/drivers/usb/gadget/function/uvc_video.c
1127     +++ b/drivers/usb/gadget/function/uvc_video.c
1128     @@ -243,7 +243,7 @@ uvc_video_alloc_requests(struct uvc_video *video)
1129    
1130     req_size = video->ep->maxpacket
1131     * max_t(unsigned int, video->ep->maxburst, 1)
1132     - * (video->ep->mult + 1);
1133     + * (video->ep->mult);
1134    
1135     for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
1136     video->req_buffer[i] = kmalloc(req_size, GFP_KERNEL);
1137     diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
1138     index 940304c33224..02260cfdedb1 100644
1139     --- a/drivers/usb/host/uhci-pci.c
1140     +++ b/drivers/usb/host/uhci-pci.c
1141     @@ -129,6 +129,10 @@ static int uhci_pci_init(struct usb_hcd *hcd)
1142     if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP)
1143     uhci->wait_for_hp = 1;
1144    
1145     + /* Intel controllers use non-PME wakeup signalling */
1146     + if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL)
1147     + device_set_run_wake(uhci_dev(uhci), 1);
1148     +
1149     /* Set up pointers to PCI-specific functions */
1150     uhci->reset_hc = uhci_pci_reset_hc;
1151     uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc;
1152     diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
1153     index fc5d3a791e08..6f29bfadbe33 100644
1154     --- a/drivers/usb/serial/kl5kusb105.c
1155     +++ b/drivers/usb/serial/kl5kusb105.c
1156     @@ -296,7 +296,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
1157     rc = usb_serial_generic_open(tty, port);
1158     if (rc) {
1159     retval = rc;
1160     - goto exit;
1161     + goto err_free_cfg;
1162     }
1163    
1164     rc = usb_control_msg(port->serial->dev,
1165     @@ -315,17 +315,32 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
1166     dev_dbg(&port->dev, "%s - enabled reading\n", __func__);
1167    
1168     rc = klsi_105_get_line_state(port, &line_state);
1169     - if (rc >= 0) {
1170     - spin_lock_irqsave(&priv->lock, flags);
1171     - priv->line_state = line_state;
1172     - spin_unlock_irqrestore(&priv->lock, flags);
1173     - dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, line_state);
1174     - retval = 0;
1175     - } else
1176     + if (rc < 0) {
1177     retval = rc;
1178     + goto err_disable_read;
1179     + }
1180     +
1181     + spin_lock_irqsave(&priv->lock, flags);
1182     + priv->line_state = line_state;
1183     + spin_unlock_irqrestore(&priv->lock, flags);
1184     + dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__,
1185     + line_state);
1186     +
1187     + return 0;
1188    
1189     -exit:
1190     +err_disable_read:
1191     + usb_control_msg(port->serial->dev,
1192     + usb_sndctrlpipe(port->serial->dev, 0),
1193     + KL5KUSB105A_SIO_CONFIGURE,
1194     + USB_TYPE_VENDOR | USB_DIR_OUT,
1195     + KL5KUSB105A_SIO_CONFIGURE_READ_OFF,
1196     + 0, /* index */
1197     + NULL, 0,
1198     + KLSI_TIMEOUT);
1199     + usb_serial_generic_close(port);
1200     +err_free_cfg:
1201     kfree(cfg);
1202     +
1203     return retval;
1204     }
1205    
1206     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1207     index 9894e341c6ac..7ce31a4c7e7f 100644
1208     --- a/drivers/usb/serial/option.c
1209     +++ b/drivers/usb/serial/option.c
1210     @@ -268,6 +268,8 @@ static void option_instat_callback(struct urb *urb);
1211     #define TELIT_PRODUCT_CC864_SINGLE 0x1006
1212     #define TELIT_PRODUCT_DE910_DUAL 0x1010
1213     #define TELIT_PRODUCT_UE910_V2 0x1012
1214     +#define TELIT_PRODUCT_LE922_USBCFG1 0x1040
1215     +#define TELIT_PRODUCT_LE922_USBCFG2 0x1041
1216     #define TELIT_PRODUCT_LE922_USBCFG0 0x1042
1217     #define TELIT_PRODUCT_LE922_USBCFG3 0x1043
1218     #define TELIT_PRODUCT_LE922_USBCFG5 0x1045
1219     @@ -1210,6 +1212,10 @@ static const struct usb_device_id option_ids[] = {
1220     { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
1221     { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
1222     .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
1223     + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
1224     + .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
1225     + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
1226     + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
1227     { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
1228     .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
1229     { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
1230     @@ -1989,6 +1995,7 @@ static const struct usb_device_id option_ids[] = {
1231     { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
1232     { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
1233     { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
1234     + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
1235     { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
1236     .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1237     { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1238     diff --git a/drivers/usb/usbip/vudc_transfer.c b/drivers/usb/usbip/vudc_transfer.c
1239     index aba6bd478045..bc0296d937d0 100644
1240     --- a/drivers/usb/usbip/vudc_transfer.c
1241     +++ b/drivers/usb/usbip/vudc_transfer.c
1242     @@ -339,6 +339,8 @@ static void v_timer(unsigned long _vudc)
1243     total = timer->frame_limit;
1244     }
1245    
1246     + /* We have to clear ep0 flags separately as it's not on the list */
1247     + udc->ep[0].already_seen = 0;
1248     list_for_each_entry(_ep, &udc->gadget.ep_list, ep_list) {
1249     ep = to_vep(_ep);
1250     ep->already_seen = 0;
1251     diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c
1252     index 630bd189f167..2a9d5cdedea2 100644
1253     --- a/drivers/watchdog/mei_wdt.c
1254     +++ b/drivers/watchdog/mei_wdt.c
1255     @@ -389,6 +389,8 @@ static int mei_wdt_register(struct mei_wdt *wdt)
1256     wdt->wdd.max_timeout = MEI_WDT_MAX_TIMEOUT;
1257    
1258     watchdog_set_drvdata(&wdt->wdd, wdt);
1259     + watchdog_stop_on_reboot(&wdt->wdd);
1260     +
1261     ret = watchdog_register_device(&wdt->wdd);
1262     if (ret) {
1263     dev_err(dev, "unable to register watchdog device = %d.\n", ret);
1264     diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
1265     index 5796b5d1b3f2..4f47b5e90956 100644
1266     --- a/drivers/watchdog/qcom-wdt.c
1267     +++ b/drivers/watchdog/qcom-wdt.c
1268     @@ -209,7 +209,7 @@ static int qcom_wdt_probe(struct platform_device *pdev)
1269     wdt->wdd.parent = &pdev->dev;
1270     wdt->layout = regs;
1271    
1272     - if (readl(wdt->base + WDT_STS) & 1)
1273     + if (readl(wdt_addr(wdt, WDT_STS)) & 1)
1274     wdt->wdd.bootstatus = WDIOF_CARDRESET;
1275    
1276     /*
1277     diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
1278     index bb952121ea94..2ef2b61b69df 100644
1279     --- a/drivers/xen/gntdev.c
1280     +++ b/drivers/xen/gntdev.c
1281     @@ -1007,7 +1007,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
1282    
1283     vma->vm_ops = &gntdev_vmops;
1284    
1285     - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
1286     + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
1287    
1288     if (use_ptemod)
1289     vma->vm_flags |= VM_DONTCOPY;
1290     diff --git a/fs/block_dev.c b/fs/block_dev.c
1291     index 05b553368bb4..9166b9f63d33 100644
1292     --- a/fs/block_dev.c
1293     +++ b/fs/block_dev.c
1294     @@ -832,7 +832,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
1295     return true; /* already a holder */
1296     else if (bdev->bd_holder != NULL)
1297     return false; /* held by someone else */
1298     - else if (bdev->bd_contains == bdev)
1299     + else if (whole == bdev)
1300     return true; /* is a whole device which isn't held */
1301    
1302     else if (whole->bd_holder == bd_may_claim)
1303     diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
1304     index e0f071f6b5a7..63d197724519 100644
1305     --- a/fs/btrfs/async-thread.c
1306     +++ b/fs/btrfs/async-thread.c
1307     @@ -86,6 +86,20 @@ btrfs_work_owner(struct btrfs_work *work)
1308     return work->wq->fs_info;
1309     }
1310    
1311     +bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq)
1312     +{
1313     + /*
1314     + * We could compare wq->normal->pending with num_online_cpus()
1315     + * to support "thresh == NO_THRESHOLD" case, but it requires
1316     + * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
1317     + * postpone it until someone needs the support of that case.
1318     + */
1319     + if (wq->normal->thresh == NO_THRESHOLD)
1320     + return false;
1321     +
1322     + return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
1323     +}
1324     +
1325     BTRFS_WORK_HELPER(worker_helper);
1326     BTRFS_WORK_HELPER(delalloc_helper);
1327     BTRFS_WORK_HELPER(flush_delalloc_helper);
1328     diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
1329     index 8e52484cd461..1f9597355c9d 100644
1330     --- a/fs/btrfs/async-thread.h
1331     +++ b/fs/btrfs/async-thread.h
1332     @@ -84,4 +84,5 @@ void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
1333     void btrfs_set_work_high_priority(struct btrfs_work *work);
1334     struct btrfs_fs_info *btrfs_work_owner(struct btrfs_work *work);
1335     struct btrfs_fs_info *btrfs_workqueue_owner(struct __btrfs_workqueue *wq);
1336     +bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq);
1337     #endif
1338     diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
1339     index 0b8ce2b9f7d0..86245b884fce 100644
1340     --- a/fs/btrfs/ctree.h
1341     +++ b/fs/btrfs/ctree.h
1342     @@ -2210,6 +2210,8 @@ btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
1343     cpu->target = le64_to_cpu(disk->target);
1344     cpu->flags = le64_to_cpu(disk->flags);
1345     cpu->limit = le64_to_cpu(disk->limit);
1346     + cpu->stripes_min = le32_to_cpu(disk->stripes_min);
1347     + cpu->stripes_max = le32_to_cpu(disk->stripes_max);
1348     }
1349    
1350     static inline void
1351     @@ -2228,6 +2230,8 @@ btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk,
1352     disk->target = cpu_to_le64(cpu->target);
1353     disk->flags = cpu_to_le64(cpu->flags);
1354     disk->limit = cpu_to_le64(cpu->limit);
1355     + disk->stripes_min = cpu_to_le32(cpu->stripes_min);
1356     + disk->stripes_max = cpu_to_le32(cpu->stripes_max);
1357     }
1358    
1359     /* struct btrfs_super_block */
1360     diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
1361     index 0fcf5f25d524..4d8f8a8c9c90 100644
1362     --- a/fs/btrfs/delayed-inode.c
1363     +++ b/fs/btrfs/delayed-inode.c
1364     @@ -1353,7 +1353,8 @@ static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1365     total_done++;
1366    
1367     btrfs_release_prepared_delayed_node(delayed_node);
1368     - if (async_work->nr == 0 || total_done < async_work->nr)
1369     + if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
1370     + total_done < async_work->nr)
1371     goto again;
1372    
1373     free_path:
1374     @@ -1369,7 +1370,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1375     {
1376     struct btrfs_async_delayed_work *async_work;
1377    
1378     - if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1379     + if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
1380     + btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1381     return 0;
1382    
1383     async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1384     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
1385     index 3a57f99d96aa..1cd325765aaa 100644
1386     --- a/fs/btrfs/disk-io.c
1387     +++ b/fs/btrfs/disk-io.c
1388     @@ -559,7 +559,15 @@ static noinline int check_leaf(struct btrfs_root *root,
1389     u32 nritems = btrfs_header_nritems(leaf);
1390     int slot;
1391    
1392     - if (nritems == 0) {
1393     + /*
1394     + * Extent buffers from a relocation tree have a owner field that
1395     + * corresponds to the subvolume tree they are based on. So just from an
1396     + * extent buffer alone we can not find out what is the id of the
1397     + * corresponding subvolume tree, so we can not figure out if the extent
1398     + * buffer corresponds to the root of the relocation tree or not. So skip
1399     + * this check for relocation trees.
1400     + */
1401     + if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
1402     struct btrfs_root *check_root;
1403    
1404     key.objectid = btrfs_header_owner(leaf);
1405     @@ -572,17 +580,24 @@ static noinline int check_leaf(struct btrfs_root *root,
1406     * open_ctree() some roots has not yet been set up.
1407     */
1408     if (!IS_ERR_OR_NULL(check_root)) {
1409     + struct extent_buffer *eb;
1410     +
1411     + eb = btrfs_root_node(check_root);
1412     /* if leaf is the root, then it's fine */
1413     - if (leaf->start !=
1414     - btrfs_root_bytenr(&check_root->root_item)) {
1415     + if (leaf != eb) {
1416     CORRUPT("non-root leaf's nritems is 0",
1417     - leaf, root, 0);
1418     + leaf, check_root, 0);
1419     + free_extent_buffer(eb);
1420     return -EIO;
1421     }
1422     + free_extent_buffer(eb);
1423     }
1424     return 0;
1425     }
1426    
1427     + if (nritems == 0)
1428     + return 0;
1429     +
1430     /* Check the 0 item */
1431     if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
1432     BTRFS_LEAF_DATA_SIZE(root)) {
1433     diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
1434     index 11f4fffe503e..dfd99867ff4d 100644
1435     --- a/fs/btrfs/qgroup.c
1436     +++ b/fs/btrfs/qgroup.c
1437     @@ -2335,10 +2335,6 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
1438     int err = -ENOMEM;
1439     int ret = 0;
1440    
1441     - mutex_lock(&fs_info->qgroup_rescan_lock);
1442     - fs_info->qgroup_rescan_running = true;
1443     - mutex_unlock(&fs_info->qgroup_rescan_lock);
1444     -
1445     path = btrfs_alloc_path();
1446     if (!path)
1447     goto out;
1448     @@ -2449,6 +2445,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
1449     sizeof(fs_info->qgroup_rescan_progress));
1450     fs_info->qgroup_rescan_progress.objectid = progress_objectid;
1451     init_completion(&fs_info->qgroup_rescan_completion);
1452     + fs_info->qgroup_rescan_running = true;
1453    
1454     spin_unlock(&fs_info->qgroup_lock);
1455     mutex_unlock(&fs_info->qgroup_rescan_lock);
1456     diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
1457     index c4af0cdb783d..2cf5e142675e 100644
1458     --- a/fs/btrfs/relocation.c
1459     +++ b/fs/btrfs/relocation.c
1460     @@ -1395,14 +1395,23 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
1461     root_key.offset = objectid;
1462    
1463     if (root->root_key.objectid == objectid) {
1464     + u64 commit_root_gen;
1465     +
1466     /* called by btrfs_init_reloc_root */
1467     ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
1468     BTRFS_TREE_RELOC_OBJECTID);
1469     BUG_ON(ret);
1470     -
1471     last_snap = btrfs_root_last_snapshot(&root->root_item);
1472     - btrfs_set_root_last_snapshot(&root->root_item,
1473     - trans->transid - 1);
1474     + /*
1475     + * Set the last_snapshot field to the generation of the commit
1476     + * root - like this ctree.c:btrfs_block_can_be_shared() behaves
1477     + * correctly (returns true) when the relocation root is created
1478     + * either inside the critical section of a transaction commit
1479     + * (through transaction.c:qgroup_account_snapshot()) and when
1480     + * it's created before the transaction commit is started.
1481     + */
1482     + commit_root_gen = btrfs_header_generation(root->commit_root);
1483     + btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
1484     } else {
1485     /*
1486     * called by btrfs_reloc_post_snapshot_hook.
1487     diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
1488     index 3d33c4e41e5f..b89004513c09 100644
1489     --- a/fs/btrfs/tree-log.c
1490     +++ b/fs/btrfs/tree-log.c
1491     @@ -1940,12 +1940,11 @@ static noinline int find_dir_range(struct btrfs_root *root,
1492     next:
1493     /* check the next slot in the tree to see if it is a valid item */
1494     nritems = btrfs_header_nritems(path->nodes[0]);
1495     + path->slots[0]++;
1496     if (path->slots[0] >= nritems) {
1497     ret = btrfs_next_leaf(root, path);
1498     if (ret)
1499     goto out;
1500     - } else {
1501     - path->slots[0]++;
1502     }
1503    
1504     btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1505     @@ -5205,6 +5204,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
1506     if (di_key.type == BTRFS_ROOT_ITEM_KEY)
1507     continue;
1508    
1509     + btrfs_release_path(path);
1510     di_inode = btrfs_iget(root->fs_info->sb, &di_key,
1511     root, NULL);
1512     if (IS_ERR(di_inode)) {
1513     @@ -5214,13 +5214,12 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
1514    
1515     if (btrfs_inode_in_log(di_inode, trans->transid)) {
1516     iput(di_inode);
1517     - continue;
1518     + break;
1519     }
1520    
1521     ctx->log_new_dentries = false;
1522     if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
1523     log_mode = LOG_INODE_ALL;
1524     - btrfs_release_path(path);
1525     ret = btrfs_log_inode(trans, root, di_inode,
1526     log_mode, 0, LLONG_MAX, ctx);
1527     if (!ret &&
1528     diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
1529     index 1f17f6bd7a60..203287f86525 100644
1530     --- a/fs/cifs/cifsglob.h
1531     +++ b/fs/cifs/cifsglob.h
1532     @@ -646,6 +646,8 @@ struct TCP_Server_Info {
1533     unsigned int max_read;
1534     unsigned int max_write;
1535     __u8 preauth_hash[512];
1536     + struct delayed_work reconnect; /* reconnect workqueue job */
1537     + struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
1538     #endif /* CONFIG_CIFS_SMB2 */
1539     unsigned long echo_interval;
1540     };
1541     @@ -849,6 +851,7 @@ cap_unix(struct cifs_ses *ses)
1542     struct cifs_tcon {
1543     struct list_head tcon_list;
1544     int tc_count;
1545     + struct list_head rlist; /* reconnect list */
1546     struct list_head openFileList;
1547     spinlock_t open_file_lock; /* protects list above */
1548     struct cifs_ses *ses; /* pointer to session associated with */
1549     @@ -922,6 +925,7 @@ struct cifs_tcon {
1550     bool broken_posix_open; /* e.g. Samba server versions < 3.3.2, 3.2.9 */
1551     bool broken_sparse_sup; /* if server or share does not support sparse */
1552     bool need_reconnect:1; /* connection reset, tid now invalid */
1553     + bool need_reopen_files:1; /* need to reopen tcon file handles */
1554     bool use_resilient:1; /* use resilient instead of durable handles */
1555     bool use_persistent:1; /* use persistent instead of durable handles */
1556     #ifdef CONFIG_CIFS_SMB2
1557     diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
1558     index ced0e42ce460..cd8025a249bb 100644
1559     --- a/fs/cifs/cifsproto.h
1560     +++ b/fs/cifs/cifsproto.h
1561     @@ -206,6 +206,9 @@ extern void cifs_add_pending_open_locked(struct cifs_fid *fid,
1562     struct tcon_link *tlink,
1563     struct cifs_pending_open *open);
1564     extern void cifs_del_pending_open(struct cifs_pending_open *open);
1565     +extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
1566     + int from_reconnect);
1567     +extern void cifs_put_tcon(struct cifs_tcon *tcon);
1568    
1569     #if IS_ENABLED(CONFIG_CIFS_DFS_UPCALL)
1570     extern void cifs_dfs_release_automount_timer(void);
1571     diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
1572     index 4547aeddd12b..893be0722643 100644
1573     --- a/fs/cifs/connect.c
1574     +++ b/fs/cifs/connect.c
1575     @@ -52,6 +52,9 @@
1576     #include "nterr.h"
1577     #include "rfc1002pdu.h"
1578     #include "fscache.h"
1579     +#ifdef CONFIG_CIFS_SMB2
1580     +#include "smb2proto.h"
1581     +#endif
1582    
1583     #define CIFS_PORT 445
1584     #define RFC1001_PORT 139
1585     @@ -2100,8 +2103,8 @@ cifs_find_tcp_session(struct smb_vol *vol)
1586     return NULL;
1587     }
1588    
1589     -static void
1590     -cifs_put_tcp_session(struct TCP_Server_Info *server)
1591     +void
1592     +cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
1593     {
1594     struct task_struct *task;
1595    
1596     @@ -2118,6 +2121,19 @@ cifs_put_tcp_session(struct TCP_Server_Info *server)
1597    
1598     cancel_delayed_work_sync(&server->echo);
1599    
1600     +#ifdef CONFIG_CIFS_SMB2
1601     + if (from_reconnect)
1602     + /*
1603     + * Avoid deadlock here: reconnect work calls
1604     + * cifs_put_tcp_session() at its end. Need to be sure
1605     + * that reconnect work does nothing with server pointer after
1606     + * that step.
1607     + */
1608     + cancel_delayed_work(&server->reconnect);
1609     + else
1610     + cancel_delayed_work_sync(&server->reconnect);
1611     +#endif
1612     +
1613     spin_lock(&GlobalMid_Lock);
1614     server->tcpStatus = CifsExiting;
1615     spin_unlock(&GlobalMid_Lock);
1616     @@ -2182,6 +2198,10 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
1617     INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
1618     INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
1619     INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
1620     +#ifdef CONFIG_CIFS_SMB2
1621     + INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
1622     + mutex_init(&tcp_ses->reconnect_mutex);
1623     +#endif
1624     memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr,
1625     sizeof(tcp_ses->srcaddr));
1626     memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
1627     @@ -2340,7 +2360,7 @@ cifs_put_smb_ses(struct cifs_ses *ses)
1628     spin_unlock(&cifs_tcp_ses_lock);
1629    
1630     sesInfoFree(ses);
1631     - cifs_put_tcp_session(server);
1632     + cifs_put_tcp_session(server, 0);
1633     }
1634    
1635     #ifdef CONFIG_KEYS
1636     @@ -2514,7 +2534,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
1637     mutex_unlock(&ses->session_mutex);
1638    
1639     /* existing SMB ses has a server reference already */
1640     - cifs_put_tcp_session(server);
1641     + cifs_put_tcp_session(server, 0);
1642     free_xid(xid);
1643     return ses;
1644     }
1645     @@ -2604,7 +2624,7 @@ cifs_find_tcon(struct cifs_ses *ses, const char *unc)
1646     return NULL;
1647     }
1648    
1649     -static void
1650     +void
1651     cifs_put_tcon(struct cifs_tcon *tcon)
1652     {
1653     unsigned int xid;
1654     @@ -3792,7 +3812,7 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
1655     else if (ses)
1656     cifs_put_smb_ses(ses);
1657     else
1658     - cifs_put_tcp_session(server);
1659     + cifs_put_tcp_session(server, 0);
1660     bdi_destroy(&cifs_sb->bdi);
1661     }
1662    
1663     @@ -4103,7 +4123,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
1664     ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info);
1665     if (IS_ERR(ses)) {
1666     tcon = (struct cifs_tcon *)ses;
1667     - cifs_put_tcp_session(master_tcon->ses->server);
1668     + cifs_put_tcp_session(master_tcon->ses->server, 0);
1669     goto out;
1670     }
1671    
1672     diff --git a/fs/cifs/file.c b/fs/cifs/file.c
1673     index 7f5f6176c6f1..18a1e1d6671f 100644
1674     --- a/fs/cifs/file.c
1675     +++ b/fs/cifs/file.c
1676     @@ -777,6 +777,11 @@ cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
1677     struct list_head *tmp1;
1678     struct list_head tmp_list;
1679    
1680     + if (!tcon->use_persistent || !tcon->need_reopen_files)
1681     + return;
1682     +
1683     + tcon->need_reopen_files = false;
1684     +
1685     cifs_dbg(FYI, "Reopen persistent handles");
1686     INIT_LIST_HEAD(&tmp_list);
1687    
1688     @@ -793,7 +798,8 @@ cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
1689    
1690     list_for_each_safe(tmp, tmp1, &tmp_list) {
1691     open_file = list_entry(tmp, struct cifsFileInfo, rlist);
1692     - cifs_reopen_file(open_file, false /* do not flush */);
1693     + if (cifs_reopen_file(open_file, false /* do not flush */))
1694     + tcon->need_reopen_files = true;
1695     list_del_init(&open_file->rlist);
1696     cifsFileInfo_put(open_file);
1697     }
1698     diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
1699     index 9f51b81119f2..001528781b6b 100644
1700     --- a/fs/cifs/ioctl.c
1701     +++ b/fs/cifs/ioctl.c
1702     @@ -189,7 +189,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
1703     xid = get_xid();
1704    
1705     cifs_sb = CIFS_SB(inode->i_sb);
1706     - cifs_dbg(VFS, "cifs ioctl 0x%x\n", command);
1707     + cifs_dbg(FYI, "cifs ioctl 0x%x\n", command);
1708     switch (command) {
1709     case FS_IOC_GETFLAGS:
1710     if (pSMBFile == NULL)
1711     diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
1712     index f9e766f464be..b2aff0c6f22c 100644
1713     --- a/fs/cifs/smb2file.c
1714     +++ b/fs/cifs/smb2file.c
1715     @@ -260,7 +260,7 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
1716     * and check it for zero before using.
1717     */
1718     max_buf = tlink_tcon(cfile->tlink)->ses->server->maxBuf;
1719     - if (!max_buf) {
1720     + if (max_buf < sizeof(struct smb2_lock_element)) {
1721     free_xid(xid);
1722     return -EINVAL;
1723     }
1724     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
1725     index 5ca5ea4668a1..87457227812c 100644
1726     --- a/fs/cifs/smb2pdu.c
1727     +++ b/fs/cifs/smb2pdu.c
1728     @@ -250,16 +250,19 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
1729     }
1730    
1731     cifs_mark_open_files_invalid(tcon);
1732     + if (tcon->use_persistent)
1733     + tcon->need_reopen_files = true;
1734    
1735     rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage);
1736     mutex_unlock(&tcon->ses->session_mutex);
1737    
1738     - if (tcon->use_persistent)
1739     - cifs_reopen_persistent_handles(tcon);
1740     -
1741     cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
1742     if (rc)
1743     goto out;
1744     +
1745     + if (smb2_command != SMB2_INTERNAL_CMD)
1746     + queue_delayed_work(cifsiod_wq, &server->reconnect, 0);
1747     +
1748     atomic_inc(&tconInfoReconnectCount);
1749     out:
1750     /*
1751     @@ -280,7 +283,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
1752     case SMB2_CHANGE_NOTIFY:
1753     case SMB2_QUERY_INFO:
1754     case SMB2_SET_INFO:
1755     - return -EAGAIN;
1756     + rc = -EAGAIN;
1757     }
1758     unload_nls(nls_codepage);
1759     return rc;
1760     @@ -1972,6 +1975,55 @@ smb2_echo_callback(struct mid_q_entry *mid)
1761     add_credits(server, credits_received, CIFS_ECHO_OP);
1762     }
1763    
1764     +void smb2_reconnect_server(struct work_struct *work)
1765     +{
1766     + struct TCP_Server_Info *server = container_of(work,
1767     + struct TCP_Server_Info, reconnect.work);
1768     + struct cifs_ses *ses;
1769     + struct cifs_tcon *tcon, *tcon2;
1770     + struct list_head tmp_list;
1771     + int tcon_exist = false;
1772     +
1773     + /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
1774     + mutex_lock(&server->reconnect_mutex);
1775     +
1776     + INIT_LIST_HEAD(&tmp_list);
1777     + cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
1778     +
1779     + spin_lock(&cifs_tcp_ses_lock);
1780     + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
1781     + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
1782     + if (tcon->need_reconnect || tcon->need_reopen_files) {
1783     + tcon->tc_count++;
1784     + list_add_tail(&tcon->rlist, &tmp_list);
1785     + tcon_exist = true;
1786     + }
1787     + }
1788     + }
1789     + /*
1790     + * Get the reference to server struct to be sure that the last call of
1791     + * cifs_put_tcon() in the loop below won't release the server pointer.
1792     + */
1793     + if (tcon_exist)
1794     + server->srv_count++;
1795     +
1796     + spin_unlock(&cifs_tcp_ses_lock);
1797     +
1798     + list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
1799     + if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon))
1800     + cifs_reopen_persistent_handles(tcon);
1801     + list_del_init(&tcon->rlist);
1802     + cifs_put_tcon(tcon);
1803     + }
1804     +
1805     + cifs_dbg(FYI, "Reconnecting tcons finished\n");
1806     + mutex_unlock(&server->reconnect_mutex);
1807     +
1808     + /* now we can safely release srv struct */
1809     + if (tcon_exist)
1810     + cifs_put_tcp_session(server, 1);
1811     +}
1812     +
1813     int
1814     SMB2_echo(struct TCP_Server_Info *server)
1815     {
1816     @@ -1984,32 +2036,11 @@ SMB2_echo(struct TCP_Server_Info *server)
1817     cifs_dbg(FYI, "In echo request\n");
1818    
1819     if (server->tcpStatus == CifsNeedNegotiate) {
1820     - struct list_head *tmp, *tmp2;
1821     - struct cifs_ses *ses;
1822     - struct cifs_tcon *tcon;
1823     -
1824     - cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
1825     - spin_lock(&cifs_tcp_ses_lock);
1826     - list_for_each(tmp, &server->smb_ses_list) {
1827     - ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
1828     - list_for_each(tmp2, &ses->tcon_list) {
1829     - tcon = list_entry(tmp2, struct cifs_tcon,
1830     - tcon_list);
1831     - /* add check for persistent handle reconnect */
1832     - if (tcon && tcon->need_reconnect) {
1833     - spin_unlock(&cifs_tcp_ses_lock);
1834     - rc = smb2_reconnect(SMB2_ECHO, tcon);
1835     - spin_lock(&cifs_tcp_ses_lock);
1836     - }
1837     - }
1838     - }
1839     - spin_unlock(&cifs_tcp_ses_lock);
1840     + /* No need to send echo on newly established connections */
1841     + queue_delayed_work(cifsiod_wq, &server->reconnect, 0);
1842     + return rc;
1843     }
1844    
1845     - /* if no session, renegotiate failed above */
1846     - if (server->tcpStatus == CifsNeedNegotiate)
1847     - return -EIO;
1848     -
1849     rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
1850     if (rc)
1851     return rc;
1852     diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
1853     index fd3709e8de33..dc0d141f33e2 100644
1854     --- a/fs/cifs/smb2pdu.h
1855     +++ b/fs/cifs/smb2pdu.h
1856     @@ -80,6 +80,8 @@
1857     #define SMB2_SET_INFO cpu_to_le16(SMB2_SET_INFO_HE)
1858     #define SMB2_OPLOCK_BREAK cpu_to_le16(SMB2_OPLOCK_BREAK_HE)
1859    
1860     +#define SMB2_INTERNAL_CMD cpu_to_le16(0xFFFF)
1861     +
1862     #define NUMBER_OF_SMB2_COMMANDS 0x0013
1863    
1864     /* BB FIXME - analyze following length BB */
1865     diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
1866     index eb2cde2f64ba..f2d511a6971b 100644
1867     --- a/fs/cifs/smb2proto.h
1868     +++ b/fs/cifs/smb2proto.h
1869     @@ -96,6 +96,7 @@ extern int smb2_open_file(const unsigned int xid,
1870     extern int smb2_unlock_range(struct cifsFileInfo *cfile,
1871     struct file_lock *flock, const unsigned int xid);
1872     extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile);
1873     +extern void smb2_reconnect_server(struct work_struct *work);
1874    
1875     /*
1876     * SMB2 Worker functions - most of protocol specific implementation details
1877     diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
1878     index 699b7868108f..c12bffefa3c9 100644
1879     --- a/fs/cifs/smbencrypt.c
1880     +++ b/fs/cifs/smbencrypt.c
1881     @@ -23,7 +23,7 @@
1882     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
1883     */
1884    
1885     -#include <crypto/skcipher.h>
1886     +#include <linux/crypto.h>
1887     #include <linux/module.h>
1888     #include <linux/slab.h>
1889     #include <linux/fs.h>
1890     @@ -69,46 +69,22 @@ str_to_key(unsigned char *str, unsigned char *key)
1891     static int
1892     smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
1893     {
1894     - int rc;
1895     unsigned char key2[8];
1896     - struct crypto_skcipher *tfm_des;
1897     - struct scatterlist sgin, sgout;
1898     - struct skcipher_request *req;
1899     + struct crypto_cipher *tfm_des;
1900    
1901     str_to_key(key, key2);
1902    
1903     - tfm_des = crypto_alloc_skcipher("ecb(des)", 0, CRYPTO_ALG_ASYNC);
1904     + tfm_des = crypto_alloc_cipher("des", 0, 0);
1905     if (IS_ERR(tfm_des)) {
1906     - rc = PTR_ERR(tfm_des);
1907     - cifs_dbg(VFS, "could not allocate des crypto API\n");
1908     - goto smbhash_err;
1909     - }
1910     -
1911     - req = skcipher_request_alloc(tfm_des, GFP_KERNEL);
1912     - if (!req) {
1913     - rc = -ENOMEM;
1914     cifs_dbg(VFS, "could not allocate des crypto API\n");
1915     - goto smbhash_free_skcipher;
1916     + return PTR_ERR(tfm_des);
1917     }
1918    
1919     - crypto_skcipher_setkey(tfm_des, key2, 8);
1920     -
1921     - sg_init_one(&sgin, in, 8);
1922     - sg_init_one(&sgout, out, 8);
1923     + crypto_cipher_setkey(tfm_des, key2, 8);
1924     + crypto_cipher_encrypt_one(tfm_des, out, in);
1925     + crypto_free_cipher(tfm_des);
1926    
1927     - skcipher_request_set_callback(req, 0, NULL, NULL);
1928     - skcipher_request_set_crypt(req, &sgin, &sgout, 8, NULL);
1929     -
1930     - rc = crypto_skcipher_encrypt(req);
1931     - if (rc)
1932     - cifs_dbg(VFS, "could not encrypt crypt key rc: %d\n", rc);
1933     -
1934     - skcipher_request_free(req);
1935     -
1936     -smbhash_free_skcipher:
1937     - crypto_free_skcipher(tfm_des);
1938     -smbhash_err:
1939     - return rc;
1940     + return 0;
1941     }
1942    
1943     static int
1944     diff --git a/fs/exec.c b/fs/exec.c
1945     index 4e497b9ee71e..67e86571685a 100644
1946     --- a/fs/exec.c
1947     +++ b/fs/exec.c
1948     @@ -19,7 +19,7 @@
1949     * current->executable is only used by the procfs. This allows a dispatch
1950     * table to check for several different types of binary formats. We keep
1951     * trying until we recognize the file or we run out of supported binary
1952     - * formats.
1953     + * formats.
1954     */
1955    
1956     #include <linux/slab.h>
1957     @@ -1266,6 +1266,13 @@ int flush_old_exec(struct linux_binprm * bprm)
1958     flush_thread();
1959     current->personality &= ~bprm->per_clear;
1960    
1961     + /*
1962     + * We have to apply CLOEXEC before we change whether the process is
1963     + * dumpable (in setup_new_exec) to avoid a race with a process in userspace
1964     + * trying to access the should-be-closed file descriptors of a process
1965     + * undergoing exec(2).
1966     + */
1967     + do_close_on_exec(current->files);
1968     return 0;
1969    
1970     out:
1971     @@ -1275,8 +1282,22 @@ EXPORT_SYMBOL(flush_old_exec);
1972    
1973     void would_dump(struct linux_binprm *bprm, struct file *file)
1974     {
1975     - if (inode_permission(file_inode(file), MAY_READ) < 0)
1976     + struct inode *inode = file_inode(file);
1977     + if (inode_permission(inode, MAY_READ) < 0) {
1978     + struct user_namespace *old, *user_ns;
1979     bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
1980     +
1981     + /* Ensure mm->user_ns contains the executable */
1982     + user_ns = old = bprm->mm->user_ns;
1983     + while ((user_ns != &init_user_ns) &&
1984     + !privileged_wrt_inode_uidgid(user_ns, inode))
1985     + user_ns = user_ns->parent;
1986     +
1987     + if (old != user_ns) {
1988     + bprm->mm->user_ns = get_user_ns(user_ns);
1989     + put_user_ns(old);
1990     + }
1991     + }
1992     }
1993     EXPORT_SYMBOL(would_dump);
1994    
1995     @@ -1306,7 +1327,6 @@ void setup_new_exec(struct linux_binprm * bprm)
1996     !gid_eq(bprm->cred->gid, current_egid())) {
1997     current->pdeath_signal = 0;
1998     } else {
1999     - would_dump(bprm, bprm->file);
2000     if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
2001     set_dumpable(current->mm, suid_dumpable);
2002     }
2003     @@ -1315,7 +1335,6 @@ void setup_new_exec(struct linux_binprm * bprm)
2004     group */
2005     current->self_exec_id++;
2006     flush_signal_handlers(current, 0);
2007     - do_close_on_exec(current->files);
2008     }
2009     EXPORT_SYMBOL(setup_new_exec);
2010    
2011     @@ -1406,7 +1425,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
2012     unsigned n_fs;
2013    
2014     if (p->ptrace) {
2015     - if (p->ptrace & PT_PTRACE_CAP)
2016     + if (ptracer_capable(p, current_user_ns()))
2017     bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
2018     else
2019     bprm->unsafe |= LSM_UNSAFE_PTRACE;
2020     @@ -1741,6 +1760,8 @@ static int do_execveat_common(int fd, struct filename *filename,
2021     if (retval < 0)
2022     goto out;
2023    
2024     + would_dump(bprm, bprm->file);
2025     +
2026     retval = exec_binprm(bprm);
2027     if (retval < 0)
2028     goto out;
2029     diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
2030     index b1d52c14098e..f97611171023 100644
2031     --- a/fs/ext4/ext4_jbd2.h
2032     +++ b/fs/ext4/ext4_jbd2.h
2033     @@ -414,17 +414,19 @@ static inline int ext4_inode_journal_mode(struct inode *inode)
2034     return EXT4_INODE_WRITEBACK_DATA_MODE; /* writeback */
2035     /* We do not support data journalling with delayed allocation */
2036     if (!S_ISREG(inode->i_mode) ||
2037     - test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
2038     - return EXT4_INODE_JOURNAL_DATA_MODE; /* journal data */
2039     - if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
2040     - !test_opt(inode->i_sb, DELALLOC))
2041     + test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
2042     + (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
2043     + !test_opt(inode->i_sb, DELALLOC))) {
2044     + /* We do not support data journalling for encrypted data */
2045     + if (S_ISREG(inode->i_mode) && ext4_encrypted_inode(inode))
2046     + return EXT4_INODE_ORDERED_DATA_MODE; /* ordered */
2047     return EXT4_INODE_JOURNAL_DATA_MODE; /* journal data */
2048     + }
2049     if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
2050     return EXT4_INODE_ORDERED_DATA_MODE; /* ordered */
2051     if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
2052     return EXT4_INODE_WRITEBACK_DATA_MODE; /* writeback */
2053     - else
2054     - BUG();
2055     + BUG();
2056     }
2057    
2058     static inline int ext4_should_journal_data(struct inode *inode)
2059     diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
2060     index f74d5ee2cdec..d8ca4b9f9dd6 100644
2061     --- a/fs/ext4/inline.c
2062     +++ b/fs/ext4/inline.c
2063     @@ -336,8 +336,10 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
2064    
2065     len -= EXT4_MIN_INLINE_DATA_SIZE;
2066     value = kzalloc(len, GFP_NOFS);
2067     - if (!value)
2068     + if (!value) {
2069     + error = -ENOMEM;
2070     goto out;
2071     + }
2072    
2073     error = ext4_xattr_ibody_get(inode, i.name_index, i.name,
2074     value, len);
2075     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
2076     index 9c064727ed62..33a509c876ee 100644
2077     --- a/fs/ext4/inode.c
2078     +++ b/fs/ext4/inode.c
2079     @@ -4434,6 +4434,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
2080     struct inode *inode;
2081     journal_t *journal = EXT4_SB(sb)->s_journal;
2082     long ret;
2083     + loff_t size;
2084     int block;
2085     uid_t i_uid;
2086     gid_t i_gid;
2087     @@ -4534,6 +4535,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
2088     ei->i_file_acl |=
2089     ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
2090     inode->i_size = ext4_isize(raw_inode);
2091     + if ((size = i_size_read(inode)) < 0) {
2092     + EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
2093     + ret = -EFSCORRUPTED;
2094     + goto bad_inode;
2095     + }
2096     ei->i_disksize = inode->i_size;
2097     #ifdef CONFIG_QUOTA
2098     ei->i_reserved_quota = 0;
2099     diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
2100     index f418f55c2bbe..7ae43c59bc79 100644
2101     --- a/fs/ext4/mballoc.c
2102     +++ b/fs/ext4/mballoc.c
2103     @@ -669,7 +669,7 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
2104     ext4_grpblk_t min;
2105     ext4_grpblk_t max;
2106     ext4_grpblk_t chunk;
2107     - unsigned short border;
2108     + unsigned int border;
2109    
2110     BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
2111    
2112     @@ -2287,7 +2287,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2113     struct ext4_group_info *grinfo;
2114     struct sg {
2115     struct ext4_group_info info;
2116     - ext4_grpblk_t counters[16];
2117     + ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
2118     } sg;
2119    
2120     group--;
2121     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2122     index 52b0530c5d65..478630af0d19 100644
2123     --- a/fs/ext4/super.c
2124     +++ b/fs/ext4/super.c
2125     @@ -3193,10 +3193,15 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
2126     ext4_set_bit(s++, buf);
2127     count++;
2128     }
2129     - for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
2130     - ext4_set_bit(EXT4_B2C(sbi, s++), buf);
2131     - count++;
2132     + j = ext4_bg_num_gdb(sb, grp);
2133     + if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
2134     + ext4_error(sb, "Invalid number of block group "
2135     + "descriptor blocks: %d", j);
2136     + j = EXT4_BLOCKS_PER_GROUP(sb) - s;
2137     }
2138     + count += j;
2139     + for (; j > 0; j--)
2140     + ext4_set_bit(EXT4_B2C(sbi, s++), buf);
2141     }
2142     if (!count)
2143     return 0;
2144     @@ -3301,7 +3306,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2145     char *orig_data = kstrdup(data, GFP_KERNEL);
2146     struct buffer_head *bh;
2147     struct ext4_super_block *es = NULL;
2148     - struct ext4_sb_info *sbi;
2149     + struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
2150     ext4_fsblk_t block;
2151     ext4_fsblk_t sb_block = get_sb_block(&data);
2152     ext4_fsblk_t logical_sb_block;
2153     @@ -3320,16 +3325,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2154     unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
2155     ext4_group_t first_not_zeroed;
2156    
2157     - sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
2158     - if (!sbi)
2159     - goto out_free_orig;
2160     + if ((data && !orig_data) || !sbi)
2161     + goto out_free_base;
2162    
2163     sbi->s_blockgroup_lock =
2164     kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
2165     - if (!sbi->s_blockgroup_lock) {
2166     - kfree(sbi);
2167     - goto out_free_orig;
2168     - }
2169     + if (!sbi->s_blockgroup_lock)
2170     + goto out_free_base;
2171     +
2172     sb->s_fs_info = sbi;
2173     sbi->s_sb = sb;
2174     sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
2175     @@ -3475,11 +3478,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2176     */
2177     sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
2178    
2179     - if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
2180     - &journal_devnum, &journal_ioprio, 0)) {
2181     - ext4_msg(sb, KERN_WARNING,
2182     - "failed to parse options in superblock: %s",
2183     - sbi->s_es->s_mount_opts);
2184     + if (sbi->s_es->s_mount_opts[0]) {
2185     + char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
2186     + sizeof(sbi->s_es->s_mount_opts),
2187     + GFP_KERNEL);
2188     + if (!s_mount_opts)
2189     + goto failed_mount;
2190     + if (!parse_options(s_mount_opts, sb, &journal_devnum,
2191     + &journal_ioprio, 0)) {
2192     + ext4_msg(sb, KERN_WARNING,
2193     + "failed to parse options in superblock: %s",
2194     + s_mount_opts);
2195     + }
2196     + kfree(s_mount_opts);
2197     }
2198     sbi->s_def_mount_opt = sbi->s_mount_opt;
2199     if (!parse_options((char *) data, sb, &journal_devnum,
2200     @@ -3505,6 +3516,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2201     "both data=journal and dax");
2202     goto failed_mount;
2203     }
2204     + if (ext4_has_feature_encrypt(sb)) {
2205     + ext4_msg(sb, KERN_WARNING,
2206     + "encrypted files will use data=ordered "
2207     + "instead of data journaling mode");
2208     + }
2209     if (test_opt(sb, DELALLOC))
2210     clear_opt(sb, DELALLOC);
2211     } else {
2212     @@ -3660,12 +3676,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2213    
2214     sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
2215     sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
2216     - if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
2217     - goto cantfind_ext4;
2218    
2219     sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
2220     if (sbi->s_inodes_per_block == 0)
2221     goto cantfind_ext4;
2222     + if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
2223     + sbi->s_inodes_per_group > blocksize * 8) {
2224     + ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
2225     + sbi->s_blocks_per_group);
2226     + goto failed_mount;
2227     + }
2228     sbi->s_itb_per_group = sbi->s_inodes_per_group /
2229     sbi->s_inodes_per_block;
2230     sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
2231     @@ -3748,13 +3768,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2232     }
2233     sbi->s_cluster_ratio = clustersize / blocksize;
2234    
2235     - if (sbi->s_inodes_per_group > blocksize * 8) {
2236     - ext4_msg(sb, KERN_ERR,
2237     - "#inodes per group too big: %lu",
2238     - sbi->s_inodes_per_group);
2239     - goto failed_mount;
2240     - }
2241     -
2242     /* Do we have standard group size of clustersize * 8 blocks ? */
2243     if (sbi->s_blocks_per_group == clustersize << 3)
2244     set_opt2(sb, STD_GROUP_SIZE);
2245     @@ -4160,7 +4173,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2246    
2247     if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
2248     ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
2249     - "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
2250     + "Opts: %.*s%s%s", descr,
2251     + (int) sizeof(sbi->s_es->s_mount_opts),
2252     + sbi->s_es->s_mount_opts,
2253     *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
2254    
2255     if (es->s_error_count)
2256     @@ -4239,8 +4254,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2257     out_fail:
2258     sb->s_fs_info = NULL;
2259     kfree(sbi->s_blockgroup_lock);
2260     +out_free_base:
2261     kfree(sbi);
2262     -out_free_orig:
2263     kfree(orig_data);
2264     return err ? err : ret;
2265     }
2266     @@ -4550,7 +4565,8 @@ static int ext4_commit_super(struct super_block *sb, int sync)
2267     &EXT4_SB(sb)->s_freeinodes_counter));
2268     BUFFER_TRACE(sbh, "marking dirty");
2269     ext4_superblock_csum_set(sb);
2270     - lock_buffer(sbh);
2271     + if (sync)
2272     + lock_buffer(sbh);
2273     if (buffer_write_io_error(sbh)) {
2274     /*
2275     * Oh, dear. A previous attempt to write the
2276     @@ -4566,8 +4582,8 @@ static int ext4_commit_super(struct super_block *sb, int sync)
2277     set_buffer_uptodate(sbh);
2278     }
2279     mark_buffer_dirty(sbh);
2280     - unlock_buffer(sbh);
2281     if (sync) {
2282     + unlock_buffer(sbh);
2283     error = __sync_dirty_buffer(sbh,
2284     test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC);
2285     if (error)
2286     diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
2287     index 7e9b504bd8b2..b4dbc2f59656 100644
2288     --- a/fs/f2fs/checkpoint.c
2289     +++ b/fs/f2fs/checkpoint.c
2290     @@ -772,6 +772,11 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
2291     if (sanity_check_ckpt(sbi))
2292     goto fail_no_cp;
2293    
2294     + if (cur_page == cp1)
2295     + sbi->cur_cp_pack = 1;
2296     + else
2297     + sbi->cur_cp_pack = 2;
2298     +
2299     if (cp_blks <= 1)
2300     goto done;
2301    
2302     @@ -1123,7 +1128,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2303     le32_to_cpu(ckpt->checksum_offset)))
2304     = cpu_to_le32(crc32);
2305    
2306     - start_blk = __start_cp_addr(sbi);
2307     + start_blk = __start_cp_next_addr(sbi);
2308    
2309     /* need to wait for end_io results */
2310     wait_on_all_pages_writeback(sbi);
2311     @@ -1187,6 +1192,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2312     clear_prefree_segments(sbi, cpc);
2313     clear_sbi_flag(sbi, SBI_IS_DIRTY);
2314     clear_sbi_flag(sbi, SBI_NEED_CP);
2315     + __set_cp_next_pack(sbi);
2316    
2317     /*
2318     * redirty superblock if metadata like node page or inode cache is
2319     diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
2320     index fb245bd302e4..1c35e80732e0 100644
2321     --- a/fs/f2fs/debug.c
2322     +++ b/fs/f2fs/debug.c
2323     @@ -373,6 +373,7 @@ static int stat_open(struct inode *inode, struct file *file)
2324     }
2325    
2326     static const struct file_operations stat_fops = {
2327     + .owner = THIS_MODULE,
2328     .open = stat_open,
2329     .read = seq_read,
2330     .llseek = seq_lseek,
2331     diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
2332     index 9e8de18a168a..6dd03115789b 100644
2333     --- a/fs/f2fs/f2fs.h
2334     +++ b/fs/f2fs/f2fs.h
2335     @@ -428,7 +428,7 @@ struct f2fs_inode_info {
2336     /* Use below internally in f2fs*/
2337     unsigned long flags; /* use to pass per-file flags */
2338     struct rw_semaphore i_sem; /* protect fi info */
2339     - struct percpu_counter dirty_pages; /* # of dirty pages */
2340     + atomic_t dirty_pages; /* # of dirty pages */
2341     f2fs_hash_t chash; /* hash value of given file name */
2342     unsigned int clevel; /* maximum level of given file name */
2343     nid_t i_xattr_nid; /* node id that contains xattrs */
2344     @@ -764,6 +764,7 @@ struct f2fs_sb_info {
2345    
2346     /* for checkpoint */
2347     struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
2348     + int cur_cp_pack; /* remain current cp pack */
2349     spinlock_t cp_lock; /* for flag in ckpt */
2350     struct inode *meta_inode; /* cache meta blocks */
2351     struct mutex cp_mutex; /* checkpoint procedure lock */
2352     @@ -1242,7 +1243,7 @@ static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
2353    
2354     static inline void inode_inc_dirty_pages(struct inode *inode)
2355     {
2356     - percpu_counter_inc(&F2FS_I(inode)->dirty_pages);
2357     + atomic_inc(&F2FS_I(inode)->dirty_pages);
2358     inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2359     F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2360     }
2361     @@ -1258,7 +1259,7 @@ static inline void inode_dec_dirty_pages(struct inode *inode)
2362     !S_ISLNK(inode->i_mode))
2363     return;
2364    
2365     - percpu_counter_dec(&F2FS_I(inode)->dirty_pages);
2366     + atomic_dec(&F2FS_I(inode)->dirty_pages);
2367     dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2368     F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2369     }
2370     @@ -1268,9 +1269,9 @@ static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
2371     return percpu_counter_sum_positive(&sbi->nr_pages[count_type]);
2372     }
2373    
2374     -static inline s64 get_dirty_pages(struct inode *inode)
2375     +static inline int get_dirty_pages(struct inode *inode)
2376     {
2377     - return percpu_counter_sum_positive(&F2FS_I(inode)->dirty_pages);
2378     + return atomic_read(&F2FS_I(inode)->dirty_pages);
2379     }
2380    
2381     static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
2382     @@ -1329,22 +1330,27 @@ static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
2383    
2384     static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
2385     {
2386     - block_t start_addr;
2387     - struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2388     - unsigned long long ckpt_version = cur_cp_version(ckpt);
2389     -
2390     - start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2391     + block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2392    
2393     - /*
2394     - * odd numbered checkpoint should at cp segment 0
2395     - * and even segment must be at cp segment 1
2396     - */
2397     - if (!(ckpt_version & 1))
2398     + if (sbi->cur_cp_pack == 2)
2399     start_addr += sbi->blocks_per_seg;
2400     + return start_addr;
2401     +}
2402    
2403     +static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
2404     +{
2405     + block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2406     +
2407     + if (sbi->cur_cp_pack == 1)
2408     + start_addr += sbi->blocks_per_seg;
2409     return start_addr;
2410     }
2411    
2412     +static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
2413     +{
2414     + sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
2415     +}
2416     +
2417     static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
2418     {
2419     return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
2420     diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
2421     index c7865073cd26..801111e1f8ef 100644
2422     --- a/fs/f2fs/file.c
2423     +++ b/fs/f2fs/file.c
2424     @@ -967,7 +967,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
2425     new_size = (dst + i) << PAGE_SHIFT;
2426     if (dst_inode->i_size < new_size)
2427     f2fs_i_size_write(dst_inode, new_size);
2428     - } while ((do_replace[i] || blkaddr[i] == NULL_ADDR) && --ilen);
2429     + } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
2430    
2431     f2fs_put_dnode(&dn);
2432     } else {
2433     @@ -1526,7 +1526,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
2434     goto out;
2435    
2436     f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
2437     - "Unexpected flush for atomic writes: ino=%lu, npages=%lld",
2438     + "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2439     inode->i_ino, get_dirty_pages(inode));
2440     ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2441     if (ret)
2442     diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
2443     index 6132b4ce4e4c..8021d35df7b0 100644
2444     --- a/fs/f2fs/super.c
2445     +++ b/fs/f2fs/super.c
2446     @@ -558,13 +558,9 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
2447    
2448     init_once((void *) fi);
2449    
2450     - if (percpu_counter_init(&fi->dirty_pages, 0, GFP_NOFS)) {
2451     - kmem_cache_free(f2fs_inode_cachep, fi);
2452     - return NULL;
2453     - }
2454     -
2455     /* Initialize f2fs-specific inode info */
2456     fi->vfs_inode.i_version = 1;
2457     + atomic_set(&fi->dirty_pages, 0);
2458     fi->i_current_depth = 1;
2459     fi->i_advise = 0;
2460     init_rwsem(&fi->i_sem);
2461     @@ -687,7 +683,6 @@ static void f2fs_i_callback(struct rcu_head *head)
2462    
2463     static void f2fs_destroy_inode(struct inode *inode)
2464     {
2465     - percpu_counter_destroy(&F2FS_I(inode)->dirty_pages);
2466     call_rcu(&inode->i_rcu, f2fs_i_callback);
2467     }
2468    
2469     diff --git a/fs/splice.c b/fs/splice.c
2470     index 5a7750bd2eea..63b8f54485dc 100644
2471     --- a/fs/splice.c
2472     +++ b/fs/splice.c
2473     @@ -1086,7 +1086,13 @@ EXPORT_SYMBOL(do_splice_direct);
2474    
2475     static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags)
2476     {
2477     - while (pipe->nrbufs == pipe->buffers) {
2478     + for (;;) {
2479     + if (unlikely(!pipe->readers)) {
2480     + send_sig(SIGPIPE, current, 0);
2481     + return -EPIPE;
2482     + }
2483     + if (pipe->nrbufs != pipe->buffers)
2484     + return 0;
2485     if (flags & SPLICE_F_NONBLOCK)
2486     return -EAGAIN;
2487     if (signal_pending(current))
2488     @@ -1095,7 +1101,6 @@ static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags)
2489     pipe_wait(pipe);
2490     pipe->waiting_writers--;
2491     }
2492     - return 0;
2493     }
2494    
2495     static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
2496     diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
2497     index 552465e011ec..47074e0c33f3 100644
2498     --- a/fs/xfs/xfs_bmap_util.c
2499     +++ b/fs/xfs/xfs_bmap_util.c
2500     @@ -1792,6 +1792,7 @@ xfs_swap_extent_forks(
2501     struct xfs_ifork tempifp, *ifp, *tifp;
2502     int aforkblks = 0;
2503     int taforkblks = 0;
2504     + xfs_extnum_t nextents;
2505     __uint64_t tmp;
2506     int error;
2507    
2508     @@ -1881,7 +1882,8 @@ xfs_swap_extent_forks(
2509     * pointer. Otherwise it's already NULL or
2510     * pointing to the extent.
2511     */
2512     - if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
2513     + nextents = ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2514     + if (nextents <= XFS_INLINE_EXTS) {
2515     ifp->if_u1.if_extents =
2516     ifp->if_u2.if_inline_ext;
2517     }
2518     @@ -1900,7 +1902,8 @@ xfs_swap_extent_forks(
2519     * pointer. Otherwise it's already NULL or
2520     * pointing to the extent.
2521     */
2522     - if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
2523     + nextents = tip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2524     + if (nextents <= XFS_INLINE_EXTS) {
2525     tifp->if_u1.if_extents =
2526     tifp->if_u2.if_inline_ext;
2527     }
2528     diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
2529     index 9b3d7c76915d..2d91f5ab7538 100644
2530     --- a/fs/xfs/xfs_log_recover.c
2531     +++ b/fs/xfs/xfs_log_recover.c
2532     @@ -4929,6 +4929,7 @@ xlog_recover_clear_agi_bucket(
2533     agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
2534     offset = offsetof(xfs_agi_t, agi_unlinked) +
2535     (sizeof(xfs_agino_t) * bucket);
2536     + xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
2537     xfs_trans_log_buf(tp, agibp, offset,
2538     (offset + sizeof(xfs_agino_t) - 1));
2539    
2540     diff --git a/include/asm-generic/asm-prototypes.h b/include/asm-generic/asm-prototypes.h
2541     new file mode 100644
2542     index 000000000000..df13637e4017
2543     --- /dev/null
2544     +++ b/include/asm-generic/asm-prototypes.h
2545     @@ -0,0 +1,7 @@
2546     +#include <linux/bitops.h>
2547     +extern void *__memset(void *, int, __kernel_size_t);
2548     +extern void *__memcpy(void *, const void *, __kernel_size_t);
2549     +extern void *__memmove(void *, const void *, __kernel_size_t);
2550     +extern void *memset(void *, int, __kernel_size_t);
2551     +extern void *memcpy(void *, const void *, __kernel_size_t);
2552     +extern void *memmove(void *, const void *, __kernel_size_t);
2553     diff --git a/include/linux/capability.h b/include/linux/capability.h
2554     index dbc21c719ce6..6ffb67e10c06 100644
2555     --- a/include/linux/capability.h
2556     +++ b/include/linux/capability.h
2557     @@ -240,8 +240,10 @@ static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap)
2558     return true;
2559     }
2560     #endif /* CONFIG_MULTIUSER */
2561     +extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode);
2562     extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
2563     extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
2564     +extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns);
2565    
2566     /* audit system wants to get cap info from files as well */
2567     extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
2568     diff --git a/include/linux/cpu.h b/include/linux/cpu.h
2569     index b886dc17f2f3..e571128ad99a 100644
2570     --- a/include/linux/cpu.h
2571     +++ b/include/linux/cpu.h
2572     @@ -93,22 +93,16 @@ extern bool cpuhp_tasks_frozen;
2573     { .notifier_call = fn, .priority = pri }; \
2574     __register_cpu_notifier(&fn##_nb); \
2575     }
2576     -#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
2577     -#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
2578     -#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
2579     -#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
2580    
2581     -#ifdef CONFIG_HOTPLUG_CPU
2582     extern int register_cpu_notifier(struct notifier_block *nb);
2583     extern int __register_cpu_notifier(struct notifier_block *nb);
2584     extern void unregister_cpu_notifier(struct notifier_block *nb);
2585     extern void __unregister_cpu_notifier(struct notifier_block *nb);
2586     -#else
2587    
2588     -#ifndef MODULE
2589     -extern int register_cpu_notifier(struct notifier_block *nb);
2590     -extern int __register_cpu_notifier(struct notifier_block *nb);
2591     -#else
2592     +#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
2593     +#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
2594     +#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
2595     +
2596     static inline int register_cpu_notifier(struct notifier_block *nb)
2597     {
2598     return 0;
2599     @@ -118,7 +112,6 @@ static inline int __register_cpu_notifier(struct notifier_block *nb)
2600     {
2601     return 0;
2602     }
2603     -#endif
2604    
2605     static inline void unregister_cpu_notifier(struct notifier_block *nb)
2606     {
2607     diff --git a/include/linux/mm.h b/include/linux/mm.h
2608     index a92c8d73aeaf..0b5b2e4df14e 100644
2609     --- a/include/linux/mm.h
2610     +++ b/include/linux/mm.h
2611     @@ -1270,6 +1270,8 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
2612     unsigned int gup_flags);
2613     extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
2614     void *buf, int len, unsigned int gup_flags);
2615     +extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
2616     + unsigned long addr, void *buf, int len, unsigned int gup_flags);
2617    
2618     long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
2619     unsigned long start, unsigned long nr_pages,
2620     diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
2621     index 4a8acedf4b7d..08d947fc4c59 100644
2622     --- a/include/linux/mm_types.h
2623     +++ b/include/linux/mm_types.h
2624     @@ -473,6 +473,7 @@ struct mm_struct {
2625     */
2626     struct task_struct __rcu *owner;
2627     #endif
2628     + struct user_namespace *user_ns;
2629    
2630     /* store ref to file /proc/<pid>/exe symlink points to */
2631     struct file __rcu *exe_file;
2632     diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
2633     index bca26157f5b6..f6bc76501912 100644
2634     --- a/include/linux/pm_opp.h
2635     +++ b/include/linux/pm_opp.h
2636     @@ -19,6 +19,7 @@
2637    
2638     struct dev_pm_opp;
2639     struct device;
2640     +struct opp_table;
2641    
2642     enum dev_pm_opp_event {
2643     OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
2644     @@ -62,8 +63,8 @@ int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
2645     void dev_pm_opp_put_supported_hw(struct device *dev);
2646     int dev_pm_opp_set_prop_name(struct device *dev, const char *name);
2647     void dev_pm_opp_put_prop_name(struct device *dev);
2648     -int dev_pm_opp_set_regulator(struct device *dev, const char *name);
2649     -void dev_pm_opp_put_regulator(struct device *dev);
2650     +struct opp_table *dev_pm_opp_set_regulator(struct device *dev, const char *name);
2651     +void dev_pm_opp_put_regulator(struct opp_table *opp_table);
2652     int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
2653     int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
2654     int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
2655     @@ -170,12 +171,12 @@ static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
2656    
2657     static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
2658    
2659     -static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name)
2660     +static inline struct opp_table *dev_pm_opp_set_regulator(struct device *dev, const char *name)
2661     {
2662     - return -ENOTSUPP;
2663     + return ERR_PTR(-ENOTSUPP);
2664     }
2665    
2666     -static inline void dev_pm_opp_put_regulator(struct device *dev) {}
2667     +static inline void dev_pm_opp_put_regulator(struct opp_table *opp_table) {}
2668    
2669     static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
2670     {
2671     diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
2672     index 504c98a278d4..e0e539321ab9 100644
2673     --- a/include/linux/ptrace.h
2674     +++ b/include/linux/ptrace.h
2675     @@ -8,6 +8,9 @@
2676     #include <linux/pid_namespace.h> /* For task_active_pid_ns. */
2677     #include <uapi/linux/ptrace.h>
2678    
2679     +extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
2680     + void *buf, int len, unsigned int gup_flags);
2681     +
2682     /*
2683     * Ptrace flags
2684     *
2685     @@ -19,7 +22,6 @@
2686     #define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */
2687     #define PT_PTRACED 0x00000001
2688     #define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
2689     -#define PT_PTRACE_CAP 0x00000004 /* ptracer can follow suid-exec */
2690    
2691     #define PT_OPT_FLAG_SHIFT 3
2692     /* PT_TRACE_* event enable flags */
2693     diff --git a/include/linux/sched.h b/include/linux/sched.h
2694     index e9c009dc3a4a..75d9a57e212e 100644
2695     --- a/include/linux/sched.h
2696     +++ b/include/linux/sched.h
2697     @@ -1656,6 +1656,7 @@ struct task_struct {
2698     struct list_head cpu_timers[3];
2699    
2700     /* process credentials */
2701     + const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
2702     const struct cred __rcu *real_cred; /* objective and real subjective task
2703     * credentials (COW) */
2704     const struct cred __rcu *cred; /* effective (overridable) subjective task
2705     diff --git a/kernel/capability.c b/kernel/capability.c
2706     index 00411c82dac5..4984e1f552eb 100644
2707     --- a/kernel/capability.c
2708     +++ b/kernel/capability.c
2709     @@ -457,6 +457,19 @@ bool file_ns_capable(const struct file *file, struct user_namespace *ns,
2710     EXPORT_SYMBOL(file_ns_capable);
2711    
2712     /**
2713     + * privileged_wrt_inode_uidgid - Do capabilities in the namespace work over the inode?
2714     + * @ns: The user namespace in question
2715     + * @inode: The inode in question
2716     + *
2717     + * Return true if the inode uid and gid are within the namespace.
2718     + */
2719     +bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode)
2720     +{
2721     + return kuid_has_mapping(ns, inode->i_uid) &&
2722     + kgid_has_mapping(ns, inode->i_gid);
2723     +}
2724     +
2725     +/**
2726     * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
2727     * @inode: The inode in question
2728     * @cap: The capability in question
2729     @@ -469,7 +482,26 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
2730     {
2731     struct user_namespace *ns = current_user_ns();
2732    
2733     - return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
2734     - kgid_has_mapping(ns, inode->i_gid);
2735     + return ns_capable(ns, cap) && privileged_wrt_inode_uidgid(ns, inode);
2736     }
2737     EXPORT_SYMBOL(capable_wrt_inode_uidgid);
2738     +
2739     +/**
2740     + * ptracer_capable - Determine if the ptracer holds CAP_SYS_PTRACE in the namespace
2741     + * @tsk: The task that may be ptraced
2742     + * @ns: The user namespace to search for CAP_SYS_PTRACE in
2743     + *
2744     + * Return true if the task that is ptracing the current task had CAP_SYS_PTRACE
2745     + * in the specified user namespace.
2746     + */
2747     +bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns)
2748     +{
2749     + int ret = 0; /* An absent tracer adds no restrictions */
2750     + const struct cred *cred;
2751     + rcu_read_lock();
2752     + cred = rcu_dereference(tsk->ptracer_cred);
2753     + if (cred)
2754     + ret = security_capable_noaudit(cred, ns, CAP_SYS_PTRACE);
2755     + rcu_read_unlock();
2756     + return (ret == 0);
2757     +}
2758     diff --git a/kernel/cpu.c b/kernel/cpu.c
2759     index 29de1a9352c0..217fd2e7f435 100644
2760     --- a/kernel/cpu.c
2761     +++ b/kernel/cpu.c
2762     @@ -659,7 +659,6 @@ void __init cpuhp_threads_init(void)
2763     kthread_unpark(this_cpu_read(cpuhp_state.thread));
2764     }
2765    
2766     -#ifdef CONFIG_HOTPLUG_CPU
2767     EXPORT_SYMBOL(register_cpu_notifier);
2768     EXPORT_SYMBOL(__register_cpu_notifier);
2769     void unregister_cpu_notifier(struct notifier_block *nb)
2770     @@ -676,6 +675,7 @@ void __unregister_cpu_notifier(struct notifier_block *nb)
2771     }
2772     EXPORT_SYMBOL(__unregister_cpu_notifier);
2773    
2774     +#ifdef CONFIG_HOTPLUG_CPU
2775     /**
2776     * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
2777     * @cpu: a CPU id
2778     diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
2779     index 0874e2edd275..79517e5549f1 100644
2780     --- a/kernel/debug/debug_core.c
2781     +++ b/kernel/debug/debug_core.c
2782     @@ -598,11 +598,11 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
2783     /*
2784     * Wait for the other CPUs to be notified and be waiting for us:
2785     */
2786     - time_left = loops_per_jiffy * HZ;
2787     + time_left = MSEC_PER_SEC;
2788     while (kgdb_do_roundup && --time_left &&
2789     (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
2790     online_cpus)
2791     - cpu_relax();
2792     + udelay(1000);
2793     if (!time_left)
2794     pr_crit("Timed out waiting for secondary CPUs.\n");
2795    
2796     diff --git a/kernel/fork.c b/kernel/fork.c
2797     index 997ac1d584f7..ba8a01564985 100644
2798     --- a/kernel/fork.c
2799     +++ b/kernel/fork.c
2800     @@ -745,7 +745,8 @@ static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2801     #endif
2802     }
2803    
2804     -static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
2805     +static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
2806     + struct user_namespace *user_ns)
2807     {
2808     mm->mmap = NULL;
2809     mm->mm_rb = RB_ROOT;
2810     @@ -785,6 +786,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
2811     if (init_new_context(p, mm))
2812     goto fail_nocontext;
2813    
2814     + mm->user_ns = get_user_ns(user_ns);
2815     return mm;
2816    
2817     fail_nocontext:
2818     @@ -830,7 +832,7 @@ struct mm_struct *mm_alloc(void)
2819     return NULL;
2820    
2821     memset(mm, 0, sizeof(*mm));
2822     - return mm_init(mm, current);
2823     + return mm_init(mm, current, current_user_ns());
2824     }
2825    
2826     /*
2827     @@ -845,6 +847,7 @@ void __mmdrop(struct mm_struct *mm)
2828     destroy_context(mm);
2829     mmu_notifier_mm_destroy(mm);
2830     check_mm(mm);
2831     + put_user_ns(mm->user_ns);
2832     free_mm(mm);
2833     }
2834     EXPORT_SYMBOL_GPL(__mmdrop);
2835     @@ -1126,7 +1129,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
2836    
2837     memcpy(mm, oldmm, sizeof(*mm));
2838    
2839     - if (!mm_init(mm, tsk))
2840     + if (!mm_init(mm, tsk, mm->user_ns))
2841     goto fail_nomem;
2842    
2843     err = dup_mmap(mm, oldmm);
2844     diff --git a/kernel/ptrace.c b/kernel/ptrace.c
2845     index e6474f7272ec..49ba7c1ade9d 100644
2846     --- a/kernel/ptrace.c
2847     +++ b/kernel/ptrace.c
2848     @@ -27,6 +27,35 @@
2849     #include <linux/cn_proc.h>
2850     #include <linux/compat.h>
2851    
2852     +/*
2853     + * Access another process' address space via ptrace.
2854     + * Source/target buffer must be kernel space,
2855     + * Do not walk the page table directly, use get_user_pages
2856     + */
2857     +int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
2858     + void *buf, int len, unsigned int gup_flags)
2859     +{
2860     + struct mm_struct *mm;
2861     + int ret;
2862     +
2863     + mm = get_task_mm(tsk);
2864     + if (!mm)
2865     + return 0;
2866     +
2867     + if (!tsk->ptrace ||
2868     + (current != tsk->parent) ||
2869     + ((get_dumpable(mm) != SUID_DUMP_USER) &&
2870     + !ptracer_capable(tsk, mm->user_ns))) {
2871     + mmput(mm);
2872     + return 0;
2873     + }
2874     +
2875     + ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
2876     + mmput(mm);
2877     +
2878     + return ret;
2879     +}
2880     +
2881    
2882     /*
2883     * ptrace a task: make the debugger its new parent and
2884     @@ -39,6 +68,9 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
2885     BUG_ON(!list_empty(&child->ptrace_entry));
2886     list_add(&child->ptrace_entry, &new_parent->ptraced);
2887     child->parent = new_parent;
2888     + rcu_read_lock();
2889     + child->ptracer_cred = get_cred(__task_cred(new_parent));
2890     + rcu_read_unlock();
2891     }
2892    
2893     /**
2894     @@ -71,12 +103,16 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
2895     */
2896     void __ptrace_unlink(struct task_struct *child)
2897     {
2898     + const struct cred *old_cred;
2899     BUG_ON(!child->ptrace);
2900    
2901     clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
2902    
2903     child->parent = child->real_parent;
2904     list_del_init(&child->ptrace_entry);
2905     + old_cred = child->ptracer_cred;
2906     + child->ptracer_cred = NULL;
2907     + put_cred(old_cred);
2908    
2909     spin_lock(&child->sighand->siglock);
2910     child->ptrace = 0;
2911     @@ -220,7 +256,7 @@ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
2912     static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
2913     {
2914     const struct cred *cred = current_cred(), *tcred;
2915     - int dumpable = 0;
2916     + struct mm_struct *mm;
2917     kuid_t caller_uid;
2918     kgid_t caller_gid;
2919    
2920     @@ -271,16 +307,11 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
2921     return -EPERM;
2922     ok:
2923     rcu_read_unlock();
2924     - smp_rmb();
2925     - if (task->mm)
2926     - dumpable = get_dumpable(task->mm);
2927     - rcu_read_lock();
2928     - if (dumpable != SUID_DUMP_USER &&
2929     - !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
2930     - rcu_read_unlock();
2931     - return -EPERM;
2932     - }
2933     - rcu_read_unlock();
2934     + mm = task->mm;
2935     + if (mm &&
2936     + ((get_dumpable(mm) != SUID_DUMP_USER) &&
2937     + !ptrace_has_cap(mm->user_ns, mode)))
2938     + return -EPERM;
2939    
2940     return security_ptrace_access_check(task, mode);
2941     }
2942     @@ -344,10 +375,6 @@ static int ptrace_attach(struct task_struct *task, long request,
2943    
2944     if (seize)
2945     flags |= PT_SEIZED;
2946     - rcu_read_lock();
2947     - if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
2948     - flags |= PT_PTRACE_CAP;
2949     - rcu_read_unlock();
2950     task->ptrace = flags;
2951    
2952     __ptrace_link(task, current);
2953     @@ -537,7 +564,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
2954     int this_len, retval;
2955    
2956     this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
2957     - retval = access_process_vm(tsk, src, buf, this_len, FOLL_FORCE);
2958     + retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);
2959     +
2960     if (!retval) {
2961     if (copied)
2962     break;
2963     @@ -564,7 +592,7 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
2964     this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
2965     if (copy_from_user(buf, src, this_len))
2966     return -EFAULT;
2967     - retval = access_process_vm(tsk, dst, buf, this_len,
2968     + retval = ptrace_access_vm(tsk, dst, buf, this_len,
2969     FOLL_FORCE | FOLL_WRITE);
2970     if (!retval) {
2971     if (copied)
2972     @@ -1128,7 +1156,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
2973     unsigned long tmp;
2974     int copied;
2975    
2976     - copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
2977     + copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
2978     if (copied != sizeof(tmp))
2979     return -EIO;
2980     return put_user(tmp, (unsigned long __user *)data);
2981     @@ -1139,7 +1167,7 @@ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
2982     {
2983     int copied;
2984    
2985     - copied = access_process_vm(tsk, addr, &data, sizeof(data),
2986     + copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
2987     FOLL_FORCE | FOLL_WRITE);
2988     return (copied == sizeof(data)) ? 0 : -EIO;
2989     }
2990     @@ -1157,7 +1185,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
2991     switch (request) {
2992     case PTRACE_PEEKTEXT:
2993     case PTRACE_PEEKDATA:
2994     - ret = access_process_vm(child, addr, &word, sizeof(word),
2995     + ret = ptrace_access_vm(child, addr, &word, sizeof(word),
2996     FOLL_FORCE);
2997     if (ret != sizeof(word))
2998     ret = -EIO;
2999     @@ -1167,7 +1195,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
3000    
3001     case PTRACE_POKETEXT:
3002     case PTRACE_POKEDATA:
3003     - ret = access_process_vm(child, addr, &data, sizeof(data),
3004     + ret = ptrace_access_vm(child, addr, &data, sizeof(data),
3005     FOLL_FORCE | FOLL_WRITE);
3006     ret = (ret != sizeof(data) ? -EIO : 0);
3007     break;
3008     diff --git a/kernel/watchdog.c b/kernel/watchdog.c
3009     index 9acb29f280ec..6d1020c03d41 100644
3010     --- a/kernel/watchdog.c
3011     +++ b/kernel/watchdog.c
3012     @@ -344,7 +344,6 @@ static void watchdog_overflow_callback(struct perf_event *event,
3013     */
3014     if (is_hardlockup()) {
3015     int this_cpu = smp_processor_id();
3016     - struct pt_regs *regs = get_irq_regs();
3017    
3018     /* only print hardlockups once */
3019     if (__this_cpu_read(hard_watchdog_warn) == true)
3020     diff --git a/mm/filemap.c b/mm/filemap.c
3021     index 50b52fe51937..9a50acecc473 100644
3022     --- a/mm/filemap.c
3023     +++ b/mm/filemap.c
3024     @@ -1686,7 +1686,7 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
3025     int error = 0;
3026    
3027     if (unlikely(*ppos >= inode->i_sb->s_maxbytes))
3028     - return -EINVAL;
3029     + return 0;
3030     iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
3031    
3032     index = *ppos >> PAGE_SHIFT;
3033     diff --git a/mm/init-mm.c b/mm/init-mm.c
3034     index a56a851908d2..975e49f00f34 100644
3035     --- a/mm/init-mm.c
3036     +++ b/mm/init-mm.c
3037     @@ -6,6 +6,7 @@
3038     #include <linux/cpumask.h>
3039    
3040     #include <linux/atomic.h>
3041     +#include <linux/user_namespace.h>
3042     #include <asm/pgtable.h>
3043     #include <asm/mmu.h>
3044    
3045     @@ -21,5 +22,6 @@ struct mm_struct init_mm = {
3046     .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),
3047     .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
3048     .mmlist = LIST_HEAD_INIT(init_mm.mmlist),
3049     + .user_ns = &init_user_ns,
3050     INIT_MM_CONTEXT(init_mm)
3051     };
3052     diff --git a/mm/memory.c b/mm/memory.c
3053     index e18c57bdc75c..cbb1e5e5f791 100644
3054     --- a/mm/memory.c
3055     +++ b/mm/memory.c
3056     @@ -3868,7 +3868,7 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
3057     * Access another process' address space as given in mm. If non-NULL, use the
3058     * given task for page fault accounting.
3059     */
3060     -static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
3061     +int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
3062     unsigned long addr, void *buf, int len, unsigned int gup_flags)
3063     {
3064     struct vm_area_struct *vma;
3065     diff --git a/mm/nommu.c b/mm/nommu.c
3066     index 8b8faaf2a9e9..44265e00b701 100644
3067     --- a/mm/nommu.c
3068     +++ b/mm/nommu.c
3069     @@ -1808,7 +1808,7 @@ void filemap_map_pages(struct fault_env *fe,
3070     }
3071     EXPORT_SYMBOL(filemap_map_pages);
3072    
3073     -static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
3074     +int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
3075     unsigned long addr, void *buf, int len, unsigned int gup_flags)
3076     {
3077     struct vm_area_struct *vma;
3078     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
3079     index 6de9440e3ae2..34ada718ef47 100644
3080     --- a/mm/page_alloc.c
3081     +++ b/mm/page_alloc.c
3082     @@ -2192,7 +2192,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
3083     unsigned long count, struct list_head *list,
3084     int migratetype, bool cold)
3085     {
3086     - int i;
3087     + int i, alloced = 0;
3088    
3089     spin_lock(&zone->lock);
3090     for (i = 0; i < count; ++i) {
3091     @@ -2217,13 +2217,21 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
3092     else
3093     list_add_tail(&page->lru, list);
3094     list = &page->lru;
3095     + alloced++;
3096     if (is_migrate_cma(get_pcppage_migratetype(page)))
3097     __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
3098     -(1 << order));
3099     }
3100     +
3101     + /*
3102     + * i pages were removed from the buddy list even if some leak due
3103     + * to check_pcp_refill failing so adjust NR_FREE_PAGES based
3104     + * on i. Do not confuse with 'alloced' which is the number of
3105     + * pages added to the pcp list.
3106     + */
3107     __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
3108     spin_unlock(&zone->lock);
3109     - return i;
3110     + return alloced;
3111     }
3112    
3113     #ifdef CONFIG_NUMA
3114     diff --git a/mm/vmscan.c b/mm/vmscan.c
3115     index d75cdf360730..c4abf08861d2 100644
3116     --- a/mm/vmscan.c
3117     +++ b/mm/vmscan.c
3118     @@ -291,6 +291,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
3119     int nid = shrinkctl->nid;
3120     long batch_size = shrinker->batch ? shrinker->batch
3121     : SHRINK_BATCH;
3122     + long scanned = 0, next_deferred;
3123    
3124     freeable = shrinker->count_objects(shrinker, shrinkctl);
3125     if (freeable == 0)
3126     @@ -312,7 +313,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
3127     pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
3128     shrinker->scan_objects, total_scan);
3129     total_scan = freeable;
3130     - }
3131     + next_deferred = nr;
3132     + } else
3133     + next_deferred = total_scan;
3134    
3135     /*
3136     * We need to avoid excessive windup on filesystem shrinkers
3137     @@ -369,17 +372,22 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
3138    
3139     count_vm_events(SLABS_SCANNED, nr_to_scan);
3140     total_scan -= nr_to_scan;
3141     + scanned += nr_to_scan;
3142    
3143     cond_resched();
3144     }
3145    
3146     + if (next_deferred >= scanned)
3147     + next_deferred -= scanned;
3148     + else
3149     + next_deferred = 0;
3150     /*
3151     * move the unused scan count back into the shrinker in a
3152     * manner that handles concurrent updates. If we exhausted the
3153     * scan, there is no need to do an update.
3154     */
3155     - if (total_scan > 0)
3156     - new_nr = atomic_long_add_return(total_scan,
3157     + if (next_deferred > 0)
3158     + new_nr = atomic_long_add_return(next_deferred,
3159     &shrinker->nr_deferred[nid]);
3160     else
3161     new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
3162     diff --git a/scripts/package/builddeb b/scripts/package/builddeb
3163     index 8ea9fd2b6573..3c575cd07888 100755
3164     --- a/scripts/package/builddeb
3165     +++ b/scripts/package/builddeb
3166     @@ -51,7 +51,7 @@ set_debarch() {
3167     debarch=hppa ;;
3168     mips*)
3169     debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el || true) ;;
3170     - arm64)
3171     + aarch64|arm64)
3172     debarch=arm64 ;;
3173     arm*)
3174     if grep -q CONFIG_AEABI=y $KCONFIG_CONFIG; then
3175     diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
3176     index 7f57a145a47e..a03cf68d0bcd 100644
3177     --- a/sound/pci/hda/hda_auto_parser.c
3178     +++ b/sound/pci/hda/hda_auto_parser.c
3179     @@ -884,6 +884,8 @@ void snd_hda_apply_fixup(struct hda_codec *codec, int action)
3180     }
3181     EXPORT_SYMBOL_GPL(snd_hda_apply_fixup);
3182    
3183     +#define IGNORE_SEQ_ASSOC (~(AC_DEFCFG_SEQUENCE | AC_DEFCFG_DEF_ASSOC))
3184     +
3185     static bool pin_config_match(struct hda_codec *codec,
3186     const struct hda_pintbl *pins)
3187     {
3188     @@ -901,7 +903,7 @@ static bool pin_config_match(struct hda_codec *codec,
3189     for (; t_pins->nid; t_pins++) {
3190     if (t_pins->nid == nid) {
3191     found = 1;
3192     - if (t_pins->val == cfg)
3193     + if ((t_pins->val & IGNORE_SEQ_ASSOC) == (cfg & IGNORE_SEQ_ASSOC))
3194     break;
3195     else if ((cfg & 0xf0000000) == 0x40000000 && (t_pins->val & 0xf0000000) == 0x40000000)
3196     break;
3197     diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
3198     index ad06866d7c69..11b9b2f17a2e 100644
3199     --- a/sound/pci/hda/patch_ca0132.c
3200     +++ b/sound/pci/hda/patch_ca0132.c
3201     @@ -780,6 +780,7 @@ static const struct hda_pintbl alienware_pincfgs[] = {
3202     static const struct snd_pci_quirk ca0132_quirks[] = {
3203     SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE),
3204     SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE),
3205     + SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
3206     {}
3207     };
3208    
3209     diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
3210     index ed62748a6d55..c15c51bea26d 100644
3211     --- a/sound/pci/hda/patch_conexant.c
3212     +++ b/sound/pci/hda/patch_conexant.c
3213     @@ -262,6 +262,7 @@ enum {
3214     CXT_FIXUP_CAP_MIX_AMP_5047,
3215     CXT_FIXUP_MUTE_LED_EAPD,
3216     CXT_FIXUP_HP_SPECTRE,
3217     + CXT_FIXUP_HP_GATE_MIC,
3218     };
3219    
3220     /* for hda_fixup_thinkpad_acpi() */
3221     @@ -633,6 +634,17 @@ static void cxt_fixup_cap_mix_amp_5047(struct hda_codec *codec,
3222     (1 << AC_AMPCAP_MUTE_SHIFT));
3223     }
3224    
3225     +static void cxt_fixup_hp_gate_mic_jack(struct hda_codec *codec,
3226     + const struct hda_fixup *fix,
3227     + int action)
3228     +{
3229     + /* the mic pin (0x19) doesn't give an unsolicited event;
3230     + * probe the mic pin together with the headphone pin (0x16)
3231     + */
3232     + if (action == HDA_FIXUP_ACT_PROBE)
3233     + snd_hda_jack_set_gating_jack(codec, 0x19, 0x16);
3234     +}
3235     +
3236     /* ThinkPad X200 & co with cxt5051 */
3237     static const struct hda_pintbl cxt_pincfg_lenovo_x200[] = {
3238     { 0x16, 0x042140ff }, /* HP (seq# overridden) */
3239     @@ -774,6 +786,10 @@ static const struct hda_fixup cxt_fixups[] = {
3240     { }
3241     }
3242     },
3243     + [CXT_FIXUP_HP_GATE_MIC] = {
3244     + .type = HDA_FIXUP_FUNC,
3245     + .v.func = cxt_fixup_hp_gate_mic_jack,
3246     + },
3247     };
3248    
3249     static const struct snd_pci_quirk cxt5045_fixups[] = {
3250     @@ -824,6 +840,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
3251     SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
3252     SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
3253     SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
3254     + SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
3255     SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
3256     SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
3257     SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
3258     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3259     index ea81c08ddc7a..3f75d1b83bf2 100644
3260     --- a/sound/pci/hda/patch_realtek.c
3261     +++ b/sound/pci/hda/patch_realtek.c
3262     @@ -5917,6 +5917,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
3263     {0x12, 0x90a60180},
3264     {0x14, 0x90170120},
3265     {0x21, 0x02211030}),
3266     + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
3267     + {0x1b, 0x01011020},
3268     + {0x21, 0x02211010}),
3269     SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
3270     {0x12, 0x90a60160},
3271     {0x14, 0x90170120},
3272     diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
3273     index 25c6d87c818e..f5a8050351b5 100644
3274     --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
3275     +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
3276     @@ -771,6 +771,9 @@ static int sst_soc_prepare(struct device *dev)
3277     struct sst_data *drv = dev_get_drvdata(dev);
3278     struct snd_soc_pcm_runtime *rtd;
3279    
3280     + if (!drv->soc_card)
3281     + return 0;
3282     +
3283     /* suspend all pcms first */
3284     snd_soc_suspend(drv->soc_card->dev);
3285     snd_soc_poweroff(drv->soc_card->dev);
3286     @@ -793,6 +796,9 @@ static void sst_soc_complete(struct device *dev)
3287     struct sst_data *drv = dev_get_drvdata(dev);
3288     struct snd_soc_pcm_runtime *rtd;
3289    
3290     + if (!drv->soc_card)
3291     + return;
3292     +
3293     /* restart SSPs */
3294     list_for_each_entry(rtd, &drv->soc_card->rtd_list, list) {
3295     struct snd_soc_dai *dai = rtd->cpu_dai;
3296     diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
3297     index 2c44139b4041..33db205dd12b 100644
3298     --- a/sound/usb/hiface/pcm.c
3299     +++ b/sound/usb/hiface/pcm.c
3300     @@ -445,6 +445,8 @@ static int hiface_pcm_prepare(struct snd_pcm_substream *alsa_sub)
3301    
3302     mutex_lock(&rt->stream_mutex);
3303    
3304     + hiface_pcm_stream_stop(rt);
3305     +
3306     sub->dma_off = 0;
3307     sub->period_off = 0;
3308    
3309     diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
3310     index 2f8c388ef84f..4703caea56b2 100644
3311     --- a/sound/usb/mixer.c
3312     +++ b/sound/usb/mixer.c
3313     @@ -932,9 +932,10 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
3314     case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
3315     case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
3316     case USB_ID(0x046d, 0x0991):
3317     + case USB_ID(0x046d, 0x09a2): /* QuickCam Communicate Deluxe/S7500 */
3318     /* Most audio usb devices lie about volume resolution.
3319     * Most Logitech webcams have res = 384.
3320     - * Proboly there is some logitech magic behind this number --fishor
3321     + * Probably there is some logitech magic behind this number --fishor
3322     */
3323     if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
3324     usb_audio_info(chip,