Magellan Linux

Contents of /trunk/kernel-alx/patches-4.4/0139-4.4.40-all-fixes.patch



Revision 2875
Mon Mar 27 13:49:16 2017 UTC by niro
File size: 75130 bytes
linux-4.4.40
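
The log message above is just the upstream release tag; the body below is the verbatim incremental diff taking a Linux 4.4.39 tree to 4.4.40 (note the first Makefile hunk bumping SUBLEVEL from 39 to 40). As a minimal sketch of how such a stable patch is consumed, assuming the raw file has been saved as 0139-4.4.40-all-fixes.patch (without the line numbers the viewer prepends) next to an unmodified 4.4.39 source tree, and with paths that are illustrative rather than taken from this repository's build scripts, it can be applied from the top of the kernel tree with:

    # assumed filename and location; adjust to your checkout
    patch -p1 < ../0139-4.4.40-all-fixes.patch

    # or, in a git checkout at v4.4.39:
    git apply --stat ../0139-4.4.40-all-fixes.patch   # preview the diffstat first
    git apply ../0139-4.4.40-all-fixes.patch
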
1 diff --git a/Makefile b/Makefile
2 index 88d26a632bef..5b5937780408 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 4
8 -SUBLEVEL = 39
9 +SUBLEVEL = 40
10 EXTRAVERSION =
11 NAME = Blurry Fish Butt
12
13 diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
14 index fc7ea529f462..52c8c1f642fe 100644
15 --- a/arch/arm/xen/enlighten.c
16 +++ b/arch/arm/xen/enlighten.c
17 @@ -239,8 +239,7 @@ static int __init xen_guest_init(void)
18 * for secondary CPUs as they are brought up.
19 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
20 */
21 - xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
22 - sizeof(struct vcpu_info));
23 + xen_vcpu_info = alloc_percpu(struct vcpu_info);
24 if (xen_vcpu_info == NULL)
25 return -ENOMEM;
26
27 diff --git a/block/blk-mq.c b/block/blk-mq.c
28 index c3e461ec40e4..9f99a01b00e8 100644
29 --- a/block/blk-mq.c
30 +++ b/block/blk-mq.c
31 @@ -1313,9 +1313,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
32 blk_mq_put_ctx(data.ctx);
33 if (!old_rq)
34 goto done;
35 - if (!blk_mq_direct_issue_request(old_rq, &cookie))
36 - goto done;
37 - blk_mq_insert_request(old_rq, false, true, true);
38 + if (test_bit(BLK_MQ_S_STOPPED, &data.hctx->state) ||
39 + blk_mq_direct_issue_request(old_rq, &cookie) != 0)
40 + blk_mq_insert_request(old_rq, false, true, true);
41 goto done;
42 }
43
44 diff --git a/drivers/base/core.c b/drivers/base/core.c
45 index b7d56c5ea3c6..f18856f5954b 100644
46 --- a/drivers/base/core.c
47 +++ b/drivers/base/core.c
48 @@ -836,11 +836,29 @@ static struct kobject *get_device_parent(struct device *dev,
49 return NULL;
50 }
51
52 +static inline bool live_in_glue_dir(struct kobject *kobj,
53 + struct device *dev)
54 +{
55 + if (!kobj || !dev->class ||
56 + kobj->kset != &dev->class->p->glue_dirs)
57 + return false;
58 + return true;
59 +}
60 +
61 +static inline struct kobject *get_glue_dir(struct device *dev)
62 +{
63 + return dev->kobj.parent;
64 +}
65 +
66 +/*
67 + * make sure cleaning up dir as the last step, we need to make
68 + * sure .release handler of kobject is run with holding the
69 + * global lock
70 + */
71 static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
72 {
73 /* see if we live in a "glue" directory */
74 - if (!glue_dir || !dev->class ||
75 - glue_dir->kset != &dev->class->p->glue_dirs)
76 + if (!live_in_glue_dir(glue_dir, dev))
77 return;
78
79 mutex_lock(&gdp_mutex);
80 @@ -848,11 +866,6 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
81 mutex_unlock(&gdp_mutex);
82 }
83
84 -static void cleanup_device_parent(struct device *dev)
85 -{
86 - cleanup_glue_dir(dev, dev->kobj.parent);
87 -}
88 -
89 static int device_add_class_symlinks(struct device *dev)
90 {
91 struct device_node *of_node = dev_of_node(dev);
92 @@ -1028,6 +1041,7 @@ int device_add(struct device *dev)
93 struct kobject *kobj;
94 struct class_interface *class_intf;
95 int error = -EINVAL;
96 + struct kobject *glue_dir = NULL;
97
98 dev = get_device(dev);
99 if (!dev)
100 @@ -1072,8 +1086,10 @@ int device_add(struct device *dev)
101 /* first, register with generic layer. */
102 /* we require the name to be set before, and pass NULL */
103 error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
104 - if (error)
105 + if (error) {
106 + glue_dir = get_glue_dir(dev);
107 goto Error;
108 + }
109
110 /* notify platform of device entry */
111 if (platform_notify)
112 @@ -1154,9 +1170,10 @@ done:
113 device_remove_file(dev, &dev_attr_uevent);
114 attrError:
115 kobject_uevent(&dev->kobj, KOBJ_REMOVE);
116 + glue_dir = get_glue_dir(dev);
117 kobject_del(&dev->kobj);
118 Error:
119 - cleanup_device_parent(dev);
120 + cleanup_glue_dir(dev, glue_dir);
121 put_device(parent);
122 name_error:
123 kfree(dev->p);
124 @@ -1232,6 +1249,7 @@ EXPORT_SYMBOL_GPL(put_device);
125 void device_del(struct device *dev)
126 {
127 struct device *parent = dev->parent;
128 + struct kobject *glue_dir = NULL;
129 struct class_interface *class_intf;
130
131 /* Notify clients of device removal. This call must come
132 @@ -1276,8 +1294,9 @@ void device_del(struct device *dev)
133 blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
134 BUS_NOTIFY_REMOVED_DEVICE, dev);
135 kobject_uevent(&dev->kobj, KOBJ_REMOVE);
136 - cleanup_device_parent(dev);
137 + glue_dir = get_glue_dir(dev);
138 kobject_del(&dev->kobj);
139 + cleanup_glue_dir(dev, glue_dir);
140 put_device(parent);
141 }
142 EXPORT_SYMBOL_GPL(device_del);
143 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
144 index 80cf8add46ff..ab0b2dd3f629 100644
145 --- a/drivers/block/loop.c
146 +++ b/drivers/block/loop.c
147 @@ -1657,7 +1657,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
148 blk_mq_start_request(bd->rq);
149
150 if (lo->lo_state != Lo_bound)
151 - return -EIO;
152 + return BLK_MQ_RQ_QUEUE_ERROR;
153
154 if (lo->use_dio && !(cmd->rq->cmd_flags & (REQ_FLUSH |
155 REQ_DISCARD)))
156 diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
157 index 3111f2778079..849f2e29c243 100644
158 --- a/drivers/char/tpm/xen-tpmfront.c
159 +++ b/drivers/char/tpm/xen-tpmfront.c
160 @@ -305,7 +305,6 @@ static int tpmfront_probe(struct xenbus_device *dev,
161 rv = setup_ring(dev, priv);
162 if (rv) {
163 chip = dev_get_drvdata(&dev->dev);
164 - tpm_chip_unregister(chip);
165 ring_free(priv);
166 return rv;
167 }
168 diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
169 index 8831e1a05367..11d8aa3ec186 100644
170 --- a/drivers/clk/ti/clk-3xxx.c
171 +++ b/drivers/clk/ti/clk-3xxx.c
172 @@ -22,13 +22,6 @@
173
174 #include "clock.h"
175
176 -/*
177 - * DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
178 - * that are sourced by DPLL5, and both of these require this clock
179 - * to be at 120 MHz for proper operation.
180 - */
181 -#define DPLL5_FREQ_FOR_USBHOST 120000000
182 -
183 #define OMAP3430ES2_ST_DSS_IDLE_SHIFT 1
184 #define OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT 5
185 #define OMAP3430ES2_ST_SSI_IDLE_SHIFT 8
186 @@ -546,14 +539,21 @@ void __init omap3_clk_lock_dpll5(void)
187 struct clk *dpll5_clk;
188 struct clk *dpll5_m2_clk;
189
190 + /*
191 + * Errata sprz319f advisory 2.1 documents a USB host clock drift issue
192 + * that can be worked around using specially crafted dpll5 settings
193 + * with a dpll5_m2 divider set to 8. Set the dpll5 rate to 8x the USB
194 + * host clock rate, its .set_rate handler() will detect that frequency
195 + * and use the errata settings.
196 + */
197 dpll5_clk = clk_get(NULL, "dpll5_ck");
198 - clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST);
199 + clk_set_rate(dpll5_clk, OMAP3_DPLL5_FREQ_FOR_USBHOST * 8);
200 clk_prepare_enable(dpll5_clk);
201
202 - /* Program dpll5_m2_clk divider for no division */
203 + /* Program dpll5_m2_clk divider */
204 dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck");
205 clk_prepare_enable(dpll5_m2_clk);
206 - clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST);
207 + clk_set_rate(dpll5_m2_clk, OMAP3_DPLL5_FREQ_FOR_USBHOST);
208
209 clk_disable_unprepare(dpll5_m2_clk);
210 clk_disable_unprepare(dpll5_clk);
211 diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
212 index 90f3f472ae1c..13c37f48d9d6 100644
213 --- a/drivers/clk/ti/clock.h
214 +++ b/drivers/clk/ti/clock.h
215 @@ -257,11 +257,20 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
216 unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
217 unsigned long parent_rate);
218
219 +/*
220 + * OMAP3_DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
221 + * that are sourced by DPLL5, and both of these require this clock
222 + * to be at 120 MHz for proper operation.
223 + */
224 +#define OMAP3_DPLL5_FREQ_FOR_USBHOST 120000000
225 +
226 unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
227 int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
228 unsigned long parent_rate);
229 int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
230 unsigned long parent_rate, u8 index);
231 +int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
232 + unsigned long parent_rate);
233 void omap3_clk_lock_dpll5(void);
234
235 unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
236 diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
237 index 5519b386edc0..f9a5089ddc79 100644
238 --- a/drivers/clk/ti/dpll.c
239 +++ b/drivers/clk/ti/dpll.c
240 @@ -114,6 +114,18 @@ static const struct clk_ops omap3_dpll_ck_ops = {
241 .round_rate = &omap2_dpll_round_rate,
242 };
243
244 +static const struct clk_ops omap3_dpll5_ck_ops = {
245 + .enable = &omap3_noncore_dpll_enable,
246 + .disable = &omap3_noncore_dpll_disable,
247 + .get_parent = &omap2_init_dpll_parent,
248 + .recalc_rate = &omap3_dpll_recalc,
249 + .set_rate = &omap3_dpll5_set_rate,
250 + .set_parent = &omap3_noncore_dpll_set_parent,
251 + .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
252 + .determine_rate = &omap3_noncore_dpll_determine_rate,
253 + .round_rate = &omap2_dpll_round_rate,
254 +};
255 +
256 static const struct clk_ops omap3_dpll_per_ck_ops = {
257 .enable = &omap3_noncore_dpll_enable,
258 .disable = &omap3_noncore_dpll_disable,
259 @@ -461,7 +473,12 @@ static void __init of_ti_omap3_dpll_setup(struct device_node *node)
260 .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
261 };
262
263 - of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
264 + if ((of_machine_is_compatible("ti,omap3630") ||
265 + of_machine_is_compatible("ti,omap36xx")) &&
266 + !strcmp(node->name, "dpll5_ck"))
267 + of_ti_dpll_setup(node, &omap3_dpll5_ck_ops, &dd);
268 + else
269 + of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
270 }
271 CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
272 of_ti_omap3_dpll_setup);
273 diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
274 index f4dec00fb684..0e9119fae760 100644
275 --- a/drivers/clk/ti/dpll3xxx.c
276 +++ b/drivers/clk/ti/dpll3xxx.c
277 @@ -815,3 +815,70 @@ int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
278 return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate,
279 index);
280 }
281 +
282 +/* Apply DM3730 errata sprz319 advisory 2.1. */
283 +static bool omap3_dpll5_apply_errata(struct clk_hw *hw,
284 + unsigned long parent_rate)
285 +{
286 + struct omap3_dpll5_settings {
287 + unsigned int rate, m, n;
288 + };
289 +
290 + static const struct omap3_dpll5_settings precomputed[] = {
291 + /*
292 + * From DM3730 errata advisory 2.1, table 35 and 36.
293 + * The N value is increased by 1 compared to the tables as the
294 + * errata lists register values while last_rounded_field is the
295 + * real divider value.
296 + */
297 + { 12000000, 80, 0 + 1 },
298 + { 13000000, 443, 5 + 1 },
299 + { 19200000, 50, 0 + 1 },
300 + { 26000000, 443, 11 + 1 },
301 + { 38400000, 25, 0 + 1 }
302 + };
303 +
304 + const struct omap3_dpll5_settings *d;
305 + struct clk_hw_omap *clk = to_clk_hw_omap(hw);
306 + struct dpll_data *dd;
307 + unsigned int i;
308 +
309 + for (i = 0; i < ARRAY_SIZE(precomputed); ++i) {
310 + if (parent_rate == precomputed[i].rate)
311 + break;
312 + }
313 +
314 + if (i == ARRAY_SIZE(precomputed))
315 + return false;
316 +
317 + d = &precomputed[i];
318 +
319 + /* Update the M, N and rounded rate values and program the DPLL. */
320 + dd = clk->dpll_data;
321 + dd->last_rounded_m = d->m;
322 + dd->last_rounded_n = d->n;
323 + dd->last_rounded_rate = div_u64((u64)parent_rate * d->m, d->n);
324 + omap3_noncore_dpll_program(clk, 0);
325 +
326 + return true;
327 +}
328 +
329 +/**
330 + * omap3_dpll5_set_rate - set rate for omap3 dpll5
331 + * @hw: clock to change
332 + * @rate: target rate for clock
333 + * @parent_rate: rate of the parent clock
334 + *
335 + * Set rate for the DPLL5 clock. Apply the sprz319 advisory 2.1 on OMAP36xx if
336 + * the DPLL is used for USB host (detected through the requested rate).
337 + */
338 +int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
339 + unsigned long parent_rate)
340 +{
341 + if (rate == OMAP3_DPLL5_FREQ_FOR_USBHOST * 8) {
342 + if (omap3_dpll5_apply_errata(hw, parent_rate))
343 + return 0;
344 + }
345 +
346 + return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
347 +}
348 diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
349 index 2cde3796cb82..f3307fc38e79 100644
350 --- a/drivers/crypto/caam/caamalg.c
351 +++ b/drivers/crypto/caam/caamalg.c
352 @@ -702,7 +702,9 @@ copy_iv:
353
354 /* Will read cryptlen */
355 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
356 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
357 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
358 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
359 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
360
361 /* Write ICV */
362 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
363 diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
364 index 5cac11d7a876..de628883ee3d 100644
365 --- a/drivers/md/dm-crypt.c
366 +++ b/drivers/md/dm-crypt.c
367 @@ -1500,12 +1500,15 @@ static int crypt_set_key(struct crypt_config *cc, char *key)
368 if (!cc->key_size && strcmp(key, "-"))
369 goto out;
370
371 + /* clear the flag since following operations may invalidate previously valid key */
372 + clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
373 +
374 if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
375 goto out;
376
377 - set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
378 -
379 r = crypt_setkey_allcpus(cc);
380 + if (!r)
381 + set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
382
383 out:
384 /* Hex key string not needed after here, so wipe it. */
385 diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
386 index 8e9e928dafba..78f403b45ab3 100644
387 --- a/drivers/md/dm-flakey.c
388 +++ b/drivers/md/dm-flakey.c
389 @@ -200,11 +200,13 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
390
391 if (!(fc->up_interval + fc->down_interval)) {
392 ti->error = "Total (up + down) interval is zero";
393 + r = -EINVAL;
394 goto bad;
395 }
396
397 if (fc->up_interval + fc->down_interval < fc->up_interval) {
398 ti->error = "Interval overflow";
399 + r = -EINVAL;
400 goto bad;
401 }
402
403 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
404 index 7e44005595c1..20557e2c60c6 100644
405 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
406 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
407 @@ -775,17 +775,15 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
408 memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
409
410 r = sm_ll_new_metadata(&smm->ll, tm);
411 + if (!r) {
412 + if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
413 + nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
414 + r = sm_ll_extend(&smm->ll, nr_blocks);
415 + }
416 + memcpy(&smm->sm, &ops, sizeof(smm->sm));
417 if (r)
418 return r;
419
420 - if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
421 - nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
422 - r = sm_ll_extend(&smm->ll, nr_blocks);
423 - if (r)
424 - return r;
425 -
426 - memcpy(&smm->sm, &ops, sizeof(smm->sm));
427 -
428 /*
429 * Now we need to update the newly created data structures with the
430 * allocated blocks that they were built from.
431 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
432 index 174e06ec7c2f..e5bb870b5461 100644
433 --- a/drivers/net/ppp/ppp_generic.c
434 +++ b/drivers/net/ppp/ppp_generic.c
435 @@ -2390,8 +2390,6 @@ ppp_unregister_channel(struct ppp_channel *chan)
436 spin_lock_bh(&pn->all_channels_lock);
437 list_del(&pch->list);
438 spin_unlock_bh(&pn->all_channels_lock);
439 - put_net(pch->chan_net);
440 - pch->chan_net = NULL;
441
442 pch->file.dead = 1;
443 wake_up_interruptible(&pch->file.rwait);
444 @@ -2984,6 +2982,9 @@ ppp_disconnect_channel(struct channel *pch)
445 */
446 static void ppp_destroy_channel(struct channel *pch)
447 {
448 + put_net(pch->chan_net);
449 + pch->chan_net = NULL;
450 +
451 atomic_dec(&channel_count);
452
453 if (!pch->file.dead) {
454 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
455 index 4d77745f439f..96849e2e7435 100644
456 --- a/drivers/usb/class/cdc-acm.c
457 +++ b/drivers/usb/class/cdc-acm.c
458 @@ -1708,6 +1708,7 @@ static const struct usb_device_id acm_ids[] = {
459 { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
460 .driver_info = QUIRK_CONTROL_LINE_STATE, },
461 { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
462 + { USB_DEVICE(0x2184, 0x0036) }, /* GW Instek AFG-125 */
463 { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
464 },
465 /* Motorola H24 HSPA module: */
466 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
467 index bcc1e1b729ad..496d6a558793 100644
468 --- a/drivers/usb/core/hub.c
469 +++ b/drivers/usb/core/hub.c
470 @@ -101,6 +101,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
471
472 static void hub_release(struct kref *kref);
473 static int usb_reset_and_verify_device(struct usb_device *udev);
474 +static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
475 + struct usb_port *port_dev);
476
477 static inline char *portspeed(struct usb_hub *hub, int portstatus)
478 {
479 @@ -883,82 +885,28 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1,
480 }
481
482 /*
483 - * If USB 3.0 ports are placed into the Disabled state, they will no longer
484 - * detect any device connects or disconnects. This is generally not what the
485 - * USB core wants, since it expects a disabled port to produce a port status
486 - * change event when a new device connects.
487 - *
488 - * Instead, set the link state to Disabled, wait for the link to settle into
489 - * that state, clear any change bits, and then put the port into the RxDetect
490 - * state.
491 + * USB-3 does not have a similar link state as USB-2 that will avoid negotiating
492 + * a connection with a plugged-in cable but will signal the host when the cable
493 + * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices
494 */
495 -static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
496 -{
497 - int ret;
498 - int total_time;
499 - u16 portchange, portstatus;
500 -
501 - if (!hub_is_superspeed(hub->hdev))
502 - return -EINVAL;
503 -
504 - ret = hub_port_status(hub, port1, &portstatus, &portchange);
505 - if (ret < 0)
506 - return ret;
507 -
508 - /*
509 - * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
510 - * Controller [1022:7814] will have spurious result making the following
511 - * usb 3.0 device hotplugging route to the 2.0 root hub and recognized
512 - * as high-speed device if we set the usb 3.0 port link state to
513 - * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we
514 - * check the state here to avoid the bug.
515 - */
516 - if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
517 - USB_SS_PORT_LS_RX_DETECT) {
518 - dev_dbg(&hub->ports[port1 - 1]->dev,
519 - "Not disabling port; link state is RxDetect\n");
520 - return ret;
521 - }
522 -
523 - ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
524 - if (ret)
525 - return ret;
526 -
527 - /* Wait for the link to enter the disabled state. */
528 - for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
529 - ret = hub_port_status(hub, port1, &portstatus, &portchange);
530 - if (ret < 0)
531 - return ret;
532 -
533 - if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
534 - USB_SS_PORT_LS_SS_DISABLED)
535 - break;
536 - if (total_time >= HUB_DEBOUNCE_TIMEOUT)
537 - break;
538 - msleep(HUB_DEBOUNCE_STEP);
539 - }
540 - if (total_time >= HUB_DEBOUNCE_TIMEOUT)
541 - dev_warn(&hub->ports[port1 - 1]->dev,
542 - "Could not disable after %d ms\n", total_time);
543 -
544 - return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
545 -}
546 -
547 static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
548 {
549 struct usb_port *port_dev = hub->ports[port1 - 1];
550 struct usb_device *hdev = hub->hdev;
551 int ret = 0;
552
553 - if (port_dev->child && set_state)
554 - usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
555 if (!hub->error) {
556 - if (hub_is_superspeed(hub->hdev))
557 - ret = hub_usb3_port_disable(hub, port1);
558 - else
559 + if (hub_is_superspeed(hub->hdev)) {
560 + hub_usb3_port_prepare_disable(hub, port_dev);
561 + ret = hub_set_port_link_state(hub, port_dev->portnum,
562 + USB_SS_PORT_LS_U3);
563 + } else {
564 ret = usb_clear_port_feature(hdev, port1,
565 USB_PORT_FEAT_ENABLE);
566 + }
567 }
568 + if (port_dev->child && set_state)
569 + usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
570 if (ret && ret != -ENODEV)
571 dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret);
572 return ret;
573 @@ -4073,6 +4021,26 @@ void usb_unlocked_enable_lpm(struct usb_device *udev)
574 }
575 EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
576
577 +/* usb3 devices use U3 for disabled, make sure remote wakeup is disabled */
578 +static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
579 + struct usb_port *port_dev)
580 +{
581 + struct usb_device *udev = port_dev->child;
582 + int ret;
583 +
584 + if (udev && udev->port_is_suspended && udev->do_remote_wakeup) {
585 + ret = hub_set_port_link_state(hub, port_dev->portnum,
586 + USB_SS_PORT_LS_U0);
587 + if (!ret) {
588 + msleep(USB_RESUME_TIMEOUT);
589 + ret = usb_disable_remote_wakeup(udev);
590 + }
591 + if (ret)
592 + dev_warn(&udev->dev,
593 + "Port disable: can't disable remote wake\n");
594 + udev->do_remote_wakeup = 0;
595 + }
596 +}
597
598 #else /* CONFIG_PM */
599
600 @@ -4080,6 +4048,9 @@ EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
601 #define hub_resume NULL
602 #define hub_reset_resume NULL
603
604 +static inline void hub_usb3_port_prepare_disable(struct usb_hub *hub,
605 + struct usb_port *port_dev) { }
606 +
607 int usb_disable_lpm(struct usb_device *udev)
608 {
609 return 0;
610 diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
611 index 8b14c2a13ac5..739b5e2d8adb 100644
612 --- a/drivers/usb/gadget/composite.c
613 +++ b/drivers/usb/gadget/composite.c
614 @@ -144,11 +144,16 @@ int config_ep_by_speed(struct usb_gadget *g,
615
616 ep_found:
617 /* commit results */
618 - _ep->maxpacket = usb_endpoint_maxp(chosen_desc);
619 + _ep->maxpacket = usb_endpoint_maxp(chosen_desc) & 0x7ff;
620 _ep->desc = chosen_desc;
621 _ep->comp_desc = NULL;
622 _ep->maxburst = 0;
623 - _ep->mult = 0;
624 + _ep->mult = 1;
625 +
626 + if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) ||
627 + usb_endpoint_xfer_int(_ep->desc)))
628 + _ep->mult = usb_endpoint_maxp(_ep->desc) & 0x7ff;
629 +
630 if (!want_comp_desc)
631 return 0;
632
633 @@ -165,7 +170,7 @@ ep_found:
634 switch (usb_endpoint_type(_ep->desc)) {
635 case USB_ENDPOINT_XFER_ISOC:
636 /* mult: bits 1:0 of bmAttributes */
637 - _ep->mult = comp_desc->bmAttributes & 0x3;
638 + _ep->mult = (comp_desc->bmAttributes & 0x3) + 1;
639 case USB_ENDPOINT_XFER_BULK:
640 case USB_ENDPOINT_XFER_INT:
641 _ep->maxburst = comp_desc->bMaxBurst + 1;
642 diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
643 index 12628dd36e55..12064d3bddf6 100644
644 --- a/drivers/usb/gadget/function/f_uac2.c
645 +++ b/drivers/usb/gadget/function/f_uac2.c
646 @@ -1079,13 +1079,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
647 agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
648 if (!agdev->out_ep) {
649 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
650 - goto err;
651 + return ret;
652 }
653
654 agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
655 if (!agdev->in_ep) {
656 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
657 - goto err;
658 + return ret;
659 }
660
661 uac2->p_prm.uac2 = uac2;
662 @@ -1102,7 +1102,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
663
664 ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL);
665 if (ret)
666 - goto err;
667 + return ret;
668
669 prm = &agdev->uac2.c_prm;
670 prm->max_psize = hs_epout_desc.wMaxPacketSize;
671 @@ -1117,19 +1117,19 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
672 prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL);
673 if (!prm->rbuf) {
674 prm->max_psize = 0;
675 - goto err_free_descs;
676 + goto err;
677 }
678
679 ret = alsa_uac2_init(agdev);
680 if (ret)
681 - goto err_free_descs;
682 + goto err;
683 return 0;
684
685 -err_free_descs:
686 - usb_free_all_descriptors(fn);
687 err:
688 kfree(agdev->uac2.p_prm.rbuf);
689 kfree(agdev->uac2.c_prm.rbuf);
690 +err_free_descs:
691 + usb_free_all_descriptors(fn);
692 return -EINVAL;
693 }
694
695 diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
696 index 3d0d5d94a62f..0f01c04d7cbd 100644
697 --- a/drivers/usb/gadget/function/uvc_video.c
698 +++ b/drivers/usb/gadget/function/uvc_video.c
699 @@ -243,7 +243,7 @@ uvc_video_alloc_requests(struct uvc_video *video)
700
701 req_size = video->ep->maxpacket
702 * max_t(unsigned int, video->ep->maxburst, 1)
703 - * (video->ep->mult + 1);
704 + * (video->ep->mult);
705
706 for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
707 video->req_buffer[i] = kmalloc(req_size, GFP_KERNEL);
708 diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
709 index 940304c33224..02260cfdedb1 100644
710 --- a/drivers/usb/host/uhci-pci.c
711 +++ b/drivers/usb/host/uhci-pci.c
712 @@ -129,6 +129,10 @@ static int uhci_pci_init(struct usb_hcd *hcd)
713 if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP)
714 uhci->wait_for_hp = 1;
715
716 + /* Intel controllers use non-PME wakeup signalling */
717 + if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL)
718 + device_set_run_wake(uhci_dev(uhci), 1);
719 +
720 /* Set up pointers to PCI-specific functions */
721 uhci->reset_hc = uhci_pci_reset_hc;
722 uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc;
723 diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
724 index e020ad28a00c..53c90131764d 100644
725 --- a/drivers/usb/serial/kl5kusb105.c
726 +++ b/drivers/usb/serial/kl5kusb105.c
727 @@ -296,7 +296,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
728 rc = usb_serial_generic_open(tty, port);
729 if (rc) {
730 retval = rc;
731 - goto exit;
732 + goto err_free_cfg;
733 }
734
735 rc = usb_control_msg(port->serial->dev,
736 @@ -315,17 +315,32 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
737 dev_dbg(&port->dev, "%s - enabled reading\n", __func__);
738
739 rc = klsi_105_get_line_state(port, &line_state);
740 - if (rc >= 0) {
741 - spin_lock_irqsave(&priv->lock, flags);
742 - priv->line_state = line_state;
743 - spin_unlock_irqrestore(&priv->lock, flags);
744 - dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, line_state);
745 - retval = 0;
746 - } else
747 + if (rc < 0) {
748 retval = rc;
749 + goto err_disable_read;
750 + }
751 +
752 + spin_lock_irqsave(&priv->lock, flags);
753 + priv->line_state = line_state;
754 + spin_unlock_irqrestore(&priv->lock, flags);
755 + dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__,
756 + line_state);
757 +
758 + return 0;
759
760 -exit:
761 +err_disable_read:
762 + usb_control_msg(port->serial->dev,
763 + usb_sndctrlpipe(port->serial->dev, 0),
764 + KL5KUSB105A_SIO_CONFIGURE,
765 + USB_TYPE_VENDOR | USB_DIR_OUT,
766 + KL5KUSB105A_SIO_CONFIGURE_READ_OFF,
767 + 0, /* index */
768 + NULL, 0,
769 + KLSI_TIMEOUT);
770 + usb_serial_generic_close(port);
771 +err_free_cfg:
772 kfree(cfg);
773 +
774 return retval;
775 }
776
777 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
778 index 9894e341c6ac..7ce31a4c7e7f 100644
779 --- a/drivers/usb/serial/option.c
780 +++ b/drivers/usb/serial/option.c
781 @@ -268,6 +268,8 @@ static void option_instat_callback(struct urb *urb);
782 #define TELIT_PRODUCT_CC864_SINGLE 0x1006
783 #define TELIT_PRODUCT_DE910_DUAL 0x1010
784 #define TELIT_PRODUCT_UE910_V2 0x1012
785 +#define TELIT_PRODUCT_LE922_USBCFG1 0x1040
786 +#define TELIT_PRODUCT_LE922_USBCFG2 0x1041
787 #define TELIT_PRODUCT_LE922_USBCFG0 0x1042
788 #define TELIT_PRODUCT_LE922_USBCFG3 0x1043
789 #define TELIT_PRODUCT_LE922_USBCFG5 0x1045
790 @@ -1210,6 +1212,10 @@ static const struct usb_device_id option_ids[] = {
791 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
792 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
793 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
794 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
795 + .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
796 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
797 + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
798 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
799 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
800 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
801 @@ -1989,6 +1995,7 @@ static const struct usb_device_id option_ids[] = {
802 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
803 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
804 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
805 + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
806 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
807 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
808 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
809 diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
810 index 1be5dd048622..308600adf6e0 100644
811 --- a/drivers/xen/gntdev.c
812 +++ b/drivers/xen/gntdev.c
813 @@ -804,7 +804,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
814
815 vma->vm_ops = &gntdev_vmops;
816
817 - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
818 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
819
820 if (use_ptemod)
821 vma->vm_flags |= VM_DONTCOPY;
822 diff --git a/fs/block_dev.c b/fs/block_dev.c
823 index 44d4a1e9244e..f10dbac851a1 100644
824 --- a/fs/block_dev.c
825 +++ b/fs/block_dev.c
826 @@ -759,7 +759,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
827 return true; /* already a holder */
828 else if (bdev->bd_holder != NULL)
829 return false; /* held by someone else */
830 - else if (bdev->bd_contains == bdev)
831 + else if (whole == bdev)
832 return true; /* is a whole device which isn't held */
833
834 else if (whole->bd_holder == bd_may_claim)
835 diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
836 index 9aba42b78253..a09264d8b853 100644
837 --- a/fs/btrfs/async-thread.c
838 +++ b/fs/btrfs/async-thread.c
839 @@ -70,6 +70,20 @@ void btrfs_##name(struct work_struct *arg) \
840 normal_work_helper(work); \
841 }
842
843 +bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq)
844 +{
845 + /*
846 + * We could compare wq->normal->pending with num_online_cpus()
847 + * to support "thresh == NO_THRESHOLD" case, but it requires
848 + * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
849 + * postpone it until someone needs the support of that case.
850 + */
851 + if (wq->normal->thresh == NO_THRESHOLD)
852 + return false;
853 +
854 + return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
855 +}
856 +
857 BTRFS_WORK_HELPER(worker_helper);
858 BTRFS_WORK_HELPER(delalloc_helper);
859 BTRFS_WORK_HELPER(flush_delalloc_helper);
860 diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
861 index ad4d0647d1a6..8e1d6576d764 100644
862 --- a/fs/btrfs/async-thread.h
863 +++ b/fs/btrfs/async-thread.h
864 @@ -80,4 +80,5 @@ void btrfs_queue_work(struct btrfs_workqueue *wq,
865 void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
866 void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
867 void btrfs_set_work_high_priority(struct btrfs_work *work);
868 +bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq);
869 #endif
870 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
871 index 1391f72c28c3..e847573c6db0 100644
872 --- a/fs/btrfs/ctree.h
873 +++ b/fs/btrfs/ctree.h
874 @@ -3070,6 +3070,8 @@ btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
875 cpu->target = le64_to_cpu(disk->target);
876 cpu->flags = le64_to_cpu(disk->flags);
877 cpu->limit = le64_to_cpu(disk->limit);
878 + cpu->stripes_min = le32_to_cpu(disk->stripes_min);
879 + cpu->stripes_max = le32_to_cpu(disk->stripes_max);
880 }
881
882 static inline void
883 @@ -3088,6 +3090,8 @@ btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk,
884 disk->target = cpu_to_le64(cpu->target);
885 disk->flags = cpu_to_le64(cpu->flags);
886 disk->limit = cpu_to_le64(cpu->limit);
887 + disk->stripes_min = cpu_to_le32(cpu->stripes_min);
888 + disk->stripes_max = cpu_to_le32(cpu->stripes_max);
889 }
890
891 /* struct btrfs_super_block */
892 diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
893 index 02b934d0ee65..09fa5af9782e 100644
894 --- a/fs/btrfs/delayed-inode.c
895 +++ b/fs/btrfs/delayed-inode.c
896 @@ -1375,7 +1375,8 @@ release_path:
897 total_done++;
898
899 btrfs_release_prepared_delayed_node(delayed_node);
900 - if (async_work->nr == 0 || total_done < async_work->nr)
901 + if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
902 + total_done < async_work->nr)
903 goto again;
904
905 free_path:
906 @@ -1391,7 +1392,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
907 {
908 struct btrfs_async_delayed_work *async_work;
909
910 - if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
911 + if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
912 + btrfs_workqueue_normal_congested(fs_info->delayed_workers))
913 return 0;
914
915 async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
916 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
917 index 47cdc6f3390b..2af08c3de775 100644
918 --- a/fs/btrfs/extent-tree.c
919 +++ b/fs/btrfs/extent-tree.c
920 @@ -8486,14 +8486,13 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
921 ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
922 &wc->refs[level - 1],
923 &wc->flags[level - 1]);
924 - if (ret < 0) {
925 - btrfs_tree_unlock(next);
926 - return ret;
927 - }
928 + if (ret < 0)
929 + goto out_unlock;
930
931 if (unlikely(wc->refs[level - 1] == 0)) {
932 btrfs_err(root->fs_info, "Missing references.");
933 - BUG();
934 + ret = -EIO;
935 + goto out_unlock;
936 }
937 *lookup_info = 0;
938
939 @@ -8545,7 +8544,12 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
940 }
941
942 level--;
943 - BUG_ON(level != btrfs_header_level(next));
944 + ASSERT(level == btrfs_header_level(next));
945 + if (level != btrfs_header_level(next)) {
946 + btrfs_err(root->fs_info, "mismatched level");
947 + ret = -EIO;
948 + goto out_unlock;
949 + }
950 path->nodes[level] = next;
951 path->slots[level] = 0;
952 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
953 @@ -8560,8 +8564,15 @@ skip:
954 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
955 parent = path->nodes[level]->start;
956 } else {
957 - BUG_ON(root->root_key.objectid !=
958 + ASSERT(root->root_key.objectid ==
959 btrfs_header_owner(path->nodes[level]));
960 + if (root->root_key.objectid !=
961 + btrfs_header_owner(path->nodes[level])) {
962 + btrfs_err(root->fs_info,
963 + "mismatched block owner");
964 + ret = -EIO;
965 + goto out_unlock;
966 + }
967 parent = 0;
968 }
969
970 @@ -8578,12 +8589,18 @@ skip:
971 }
972 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
973 root->root_key.objectid, level - 1, 0);
974 - BUG_ON(ret); /* -ENOMEM */
975 + if (ret)
976 + goto out_unlock;
977 }
978 +
979 + *lookup_info = 1;
980 + ret = 1;
981 +
982 +out_unlock:
983 btrfs_tree_unlock(next);
984 free_extent_buffer(next);
985 - *lookup_info = 1;
986 - return 1;
987 +
988 + return ret;
989 }
990
991 /*
992 @@ -9686,6 +9703,11 @@ int btrfs_read_block_groups(struct btrfs_root *root)
993 struct extent_buffer *leaf;
994 int need_clear = 0;
995 u64 cache_gen;
996 + u64 feature;
997 + int mixed;
998 +
999 + feature = btrfs_super_incompat_flags(info->super_copy);
1000 + mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
1001
1002 root = info->extent_root;
1003 key.objectid = 0;
1004 @@ -9739,6 +9761,15 @@ int btrfs_read_block_groups(struct btrfs_root *root)
1005 btrfs_item_ptr_offset(leaf, path->slots[0]),
1006 sizeof(cache->item));
1007 cache->flags = btrfs_block_group_flags(&cache->item);
1008 + if (!mixed &&
1009 + ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
1010 + (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
1011 + btrfs_err(info,
1012 +"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
1013 + cache->key.objectid);
1014 + ret = -EINVAL;
1015 + goto error;
1016 + }
1017
1018 key.objectid = found_key.objectid + found_key.offset;
1019 btrfs_release_path(path);
1020 diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
1021 index 257bbdcb5df6..e767f347f2b1 100644
1022 --- a/fs/btrfs/extent_io.c
1023 +++ b/fs/btrfs/extent_io.c
1024 @@ -5294,11 +5294,20 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
1025 lock_page(page);
1026 }
1027 locked_pages++;
1028 + }
1029 + /*
1030 + * We need to firstly lock all pages to make sure that
1031 + * the uptodate bit of our pages won't be affected by
1032 + * clear_extent_buffer_uptodate().
1033 + */
1034 + for (i = start_i; i < num_pages; i++) {
1035 + page = eb->pages[i];
1036 if (!PageUptodate(page)) {
1037 num_reads++;
1038 all_uptodate = 0;
1039 }
1040 }
1041 +
1042 if (all_uptodate) {
1043 if (start_i == 0)
1044 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1045 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
1046 index a7e18dbadf74..317b99acdf4b 100644
1047 --- a/fs/btrfs/ioctl.c
1048 +++ b/fs/btrfs/ioctl.c
1049 @@ -3825,6 +3825,11 @@ process_slot:
1050 }
1051 btrfs_release_path(path);
1052 key.offset = next_key_min_offset;
1053 +
1054 + if (fatal_signal_pending(current)) {
1055 + ret = -EINTR;
1056 + goto out;
1057 + }
1058 }
1059 ret = 0;
1060
1061 diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
1062 index bcc965ed5fa1..88d9b66e2207 100644
1063 --- a/fs/btrfs/qgroup.c
1064 +++ b/fs/btrfs/qgroup.c
1065 @@ -2283,10 +2283,6 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
1066 int err = -ENOMEM;
1067 int ret = 0;
1068
1069 - mutex_lock(&fs_info->qgroup_rescan_lock);
1070 - fs_info->qgroup_rescan_running = true;
1071 - mutex_unlock(&fs_info->qgroup_rescan_lock);
1072 -
1073 path = btrfs_alloc_path();
1074 if (!path)
1075 goto out;
1076 @@ -2397,6 +2393,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
1077 sizeof(fs_info->qgroup_rescan_progress));
1078 fs_info->qgroup_rescan_progress.objectid = progress_objectid;
1079 init_completion(&fs_info->qgroup_rescan_completion);
1080 + fs_info->qgroup_rescan_running = true;
1081
1082 spin_unlock(&fs_info->qgroup_lock);
1083 mutex_unlock(&fs_info->qgroup_rescan_lock);
1084 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
1085 index b4ca5454ef1a..8ca9aa92972d 100644
1086 --- a/fs/btrfs/relocation.c
1087 +++ b/fs/btrfs/relocation.c
1088 @@ -921,9 +921,16 @@ again:
1089 path2->slots[level]--;
1090
1091 eb = path2->nodes[level];
1092 - WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
1093 - cur->bytenr);
1094 -
1095 + if (btrfs_node_blockptr(eb, path2->slots[level]) !=
1096 + cur->bytenr) {
1097 + btrfs_err(root->fs_info,
1098 + "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
1099 + cur->bytenr, level - 1, root->objectid,
1100 + node_key->objectid, node_key->type,
1101 + node_key->offset);
1102 + err = -ENOENT;
1103 + goto out;
1104 + }
1105 lower = cur;
1106 need_check = true;
1107 for (; level < BTRFS_MAX_LEVEL; level++) {
1108 @@ -2343,6 +2350,10 @@ void free_reloc_roots(struct list_head *list)
1109 while (!list_empty(list)) {
1110 reloc_root = list_entry(list->next, struct btrfs_root,
1111 root_list);
1112 + free_extent_buffer(reloc_root->node);
1113 + free_extent_buffer(reloc_root->commit_root);
1114 + reloc_root->node = NULL;
1115 + reloc_root->commit_root = NULL;
1116 __del_reloc_root(reloc_root);
1117 }
1118 }
1119 @@ -2676,11 +2687,15 @@ static int do_relocation(struct btrfs_trans_handle *trans,
1120
1121 if (!upper->eb) {
1122 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
1123 - if (ret < 0) {
1124 - err = ret;
1125 + if (ret) {
1126 + if (ret < 0)
1127 + err = ret;
1128 + else
1129 + err = -ENOENT;
1130 +
1131 + btrfs_release_path(path);
1132 break;
1133 }
1134 - BUG_ON(ret > 0);
1135
1136 if (!upper->eb) {
1137 upper->eb = path->nodes[upper->level];
1138 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
1139 index f7441193bf35..ee7832e2d39d 100644
1140 --- a/fs/btrfs/tree-log.c
1141 +++ b/fs/btrfs/tree-log.c
1142 @@ -1923,12 +1923,11 @@ static noinline int find_dir_range(struct btrfs_root *root,
1143 next:
1144 /* check the next slot in the tree to see if it is a valid item */
1145 nritems = btrfs_header_nritems(path->nodes[0]);
1146 + path->slots[0]++;
1147 if (path->slots[0] >= nritems) {
1148 ret = btrfs_next_leaf(root, path);
1149 if (ret)
1150 goto out;
1151 - } else {
1152 - path->slots[0]++;
1153 }
1154
1155 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1156 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
1157 index c669a1471395..b76883606e4b 100644
1158 --- a/fs/cifs/cifsglob.h
1159 +++ b/fs/cifs/cifsglob.h
1160 @@ -627,6 +627,8 @@ struct TCP_Server_Info {
1161 #ifdef CONFIG_CIFS_SMB2
1162 unsigned int max_read;
1163 unsigned int max_write;
1164 + struct delayed_work reconnect; /* reconnect workqueue job */
1165 + struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
1166 #endif /* CONFIG_CIFS_SMB2 */
1167 };
1168
1169 @@ -826,6 +828,7 @@ cap_unix(struct cifs_ses *ses)
1170 struct cifs_tcon {
1171 struct list_head tcon_list;
1172 int tc_count;
1173 + struct list_head rlist; /* reconnect list */
1174 struct list_head openFileList;
1175 spinlock_t open_file_lock; /* protects list above */
1176 struct cifs_ses *ses; /* pointer to session associated with */
1177 diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
1178 index c63fd1dde25b..54590fd33df1 100644
1179 --- a/fs/cifs/cifsproto.h
1180 +++ b/fs/cifs/cifsproto.h
1181 @@ -205,6 +205,9 @@ extern void cifs_add_pending_open_locked(struct cifs_fid *fid,
1182 struct tcon_link *tlink,
1183 struct cifs_pending_open *open);
1184 extern void cifs_del_pending_open(struct cifs_pending_open *open);
1185 +extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
1186 + int from_reconnect);
1187 +extern void cifs_put_tcon(struct cifs_tcon *tcon);
1188
1189 #if IS_ENABLED(CONFIG_CIFS_DFS_UPCALL)
1190 extern void cifs_dfs_release_automount_timer(void);
1191 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
1192 index 812a8cb07c63..5d59f25521ce 100644
1193 --- a/fs/cifs/connect.c
1194 +++ b/fs/cifs/connect.c
1195 @@ -52,6 +52,9 @@
1196 #include "nterr.h"
1197 #include "rfc1002pdu.h"
1198 #include "fscache.h"
1199 +#ifdef CONFIG_CIFS_SMB2
1200 +#include "smb2proto.h"
1201 +#endif
1202
1203 #define CIFS_PORT 445
1204 #define RFC1001_PORT 139
1205 @@ -2113,8 +2116,8 @@ cifs_find_tcp_session(struct smb_vol *vol)
1206 return NULL;
1207 }
1208
1209 -static void
1210 -cifs_put_tcp_session(struct TCP_Server_Info *server)
1211 +void
1212 +cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
1213 {
1214 struct task_struct *task;
1215
1216 @@ -2131,6 +2134,19 @@ cifs_put_tcp_session(struct TCP_Server_Info *server)
1217
1218 cancel_delayed_work_sync(&server->echo);
1219
1220 +#ifdef CONFIG_CIFS_SMB2
1221 + if (from_reconnect)
1222 + /*
1223 + * Avoid deadlock here: reconnect work calls
1224 + * cifs_put_tcp_session() at its end. Need to be sure
1225 + * that reconnect work does nothing with server pointer after
1226 + * that step.
1227 + */
1228 + cancel_delayed_work(&server->reconnect);
1229 + else
1230 + cancel_delayed_work_sync(&server->reconnect);
1231 +#endif
1232 +
1233 spin_lock(&GlobalMid_Lock);
1234 server->tcpStatus = CifsExiting;
1235 spin_unlock(&GlobalMid_Lock);
1236 @@ -2195,6 +2211,10 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
1237 INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
1238 INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
1239 INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
1240 +#ifdef CONFIG_CIFS_SMB2
1241 + INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
1242 + mutex_init(&tcp_ses->reconnect_mutex);
1243 +#endif
1244 memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr,
1245 sizeof(tcp_ses->srcaddr));
1246 memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
1247 @@ -2347,7 +2367,7 @@ cifs_put_smb_ses(struct cifs_ses *ses)
1248 spin_unlock(&cifs_tcp_ses_lock);
1249
1250 sesInfoFree(ses);
1251 - cifs_put_tcp_session(server);
1252 + cifs_put_tcp_session(server, 0);
1253 }
1254
1255 #ifdef CONFIG_KEYS
1256 @@ -2521,7 +2541,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
1257 mutex_unlock(&ses->session_mutex);
1258
1259 /* existing SMB ses has a server reference already */
1260 - cifs_put_tcp_session(server);
1261 + cifs_put_tcp_session(server, 0);
1262 free_xid(xid);
1263 return ses;
1264 }
1265 @@ -2611,7 +2631,7 @@ cifs_find_tcon(struct cifs_ses *ses, const char *unc)
1266 return NULL;
1267 }
1268
1269 -static void
1270 +void
1271 cifs_put_tcon(struct cifs_tcon *tcon)
1272 {
1273 unsigned int xid;
1274 @@ -3767,7 +3787,7 @@ mount_fail_check:
1275 else if (ses)
1276 cifs_put_smb_ses(ses);
1277 else
1278 - cifs_put_tcp_session(server);
1279 + cifs_put_tcp_session(server, 0);
1280 bdi_destroy(&cifs_sb->bdi);
1281 }
1282
1283 @@ -4078,7 +4098,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
1284 ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info);
1285 if (IS_ERR(ses)) {
1286 tcon = (struct cifs_tcon *)ses;
1287 - cifs_put_tcp_session(master_tcon->ses->server);
1288 + cifs_put_tcp_session(master_tcon->ses->server, 0);
1289 goto out;
1290 }
1291
1292 diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
1293 index f9e766f464be..b2aff0c6f22c 100644
1294 --- a/fs/cifs/smb2file.c
1295 +++ b/fs/cifs/smb2file.c
1296 @@ -260,7 +260,7 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
1297 * and check it for zero before using.
1298 */
1299 max_buf = tlink_tcon(cfile->tlink)->ses->server->maxBuf;
1300 - if (!max_buf) {
1301 + if (max_buf < sizeof(struct smb2_lock_element)) {
1302 free_xid(xid);
1303 return -EINVAL;
1304 }
1305 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
1306 index 0dbbdf5e4aee..2fa754c5fd62 100644
1307 --- a/fs/cifs/smb2pdu.c
1308 +++ b/fs/cifs/smb2pdu.c
1309 @@ -278,7 +278,7 @@ out:
1310 case SMB2_CHANGE_NOTIFY:
1311 case SMB2_QUERY_INFO:
1312 case SMB2_SET_INFO:
1313 - return -EAGAIN;
1314 + rc = -EAGAIN;
1315 }
1316 unload_nls(nls_codepage);
1317 return rc;
1318 @@ -1822,6 +1822,54 @@ smb2_echo_callback(struct mid_q_entry *mid)
1319 add_credits(server, credits_received, CIFS_ECHO_OP);
1320 }
1321
1322 +void smb2_reconnect_server(struct work_struct *work)
1323 +{
1324 + struct TCP_Server_Info *server = container_of(work,
1325 + struct TCP_Server_Info, reconnect.work);
1326 + struct cifs_ses *ses;
1327 + struct cifs_tcon *tcon, *tcon2;
1328 + struct list_head tmp_list;
1329 + int tcon_exist = false;
1330 +
1331 + /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
1332 + mutex_lock(&server->reconnect_mutex);
1333 +
1334 + INIT_LIST_HEAD(&tmp_list);
1335 + cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
1336 +
1337 + spin_lock(&cifs_tcp_ses_lock);
1338 + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
1339 + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
1340 + if (tcon->need_reconnect) {
1341 + tcon->tc_count++;
1342 + list_add_tail(&tcon->rlist, &tmp_list);
1343 + tcon_exist = true;
1344 + }
1345 + }
1346 + }
1347 + /*
1348 + * Get the reference to server struct to be sure that the last call of
1349 + * cifs_put_tcon() in the loop below won't release the server pointer.
1350 + */
1351 + if (tcon_exist)
1352 + server->srv_count++;
1353 +
1354 + spin_unlock(&cifs_tcp_ses_lock);
1355 +
1356 + list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
1357 + smb2_reconnect(SMB2_ECHO, tcon);
1358 + list_del_init(&tcon->rlist);
1359 + cifs_put_tcon(tcon);
1360 + }
1361 +
1362 + cifs_dbg(FYI, "Reconnecting tcons finished\n");
1363 + mutex_unlock(&server->reconnect_mutex);
1364 +
1365 + /* now we can safely release srv struct */
1366 + if (tcon_exist)
1367 + cifs_put_tcp_session(server, 1);
1368 +}
1369 +
1370 int
1371 SMB2_echo(struct TCP_Server_Info *server)
1372 {
1373 @@ -1834,32 +1882,11 @@ SMB2_echo(struct TCP_Server_Info *server)
1374 cifs_dbg(FYI, "In echo request\n");
1375
1376 if (server->tcpStatus == CifsNeedNegotiate) {
1377 - struct list_head *tmp, *tmp2;
1378 - struct cifs_ses *ses;
1379 - struct cifs_tcon *tcon;
1380 -
1381 - cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
1382 - spin_lock(&cifs_tcp_ses_lock);
1383 - list_for_each(tmp, &server->smb_ses_list) {
1384 - ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
1385 - list_for_each(tmp2, &ses->tcon_list) {
1386 - tcon = list_entry(tmp2, struct cifs_tcon,
1387 - tcon_list);
1388 - /* add check for persistent handle reconnect */
1389 - if (tcon && tcon->need_reconnect) {
1390 - spin_unlock(&cifs_tcp_ses_lock);
1391 - rc = smb2_reconnect(SMB2_ECHO, tcon);
1392 - spin_lock(&cifs_tcp_ses_lock);
1393 - }
1394 - }
1395 - }
1396 - spin_unlock(&cifs_tcp_ses_lock);
1397 + /* No need to send echo on newly established connections */
1398 + queue_delayed_work(cifsiod_wq, &server->reconnect, 0);
1399 + return rc;
1400 }
1401
1402 - /* if no session, renegotiate failed above */
1403 - if (server->tcpStatus == CifsNeedNegotiate)
1404 - return -EIO;
1405 -
1406 rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
1407 if (rc)
1408 return rc;
1409 diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
1410 index 9bc59f9c12fb..0a406ae78129 100644
1411 --- a/fs/cifs/smb2proto.h
1412 +++ b/fs/cifs/smb2proto.h
1413 @@ -95,6 +95,7 @@ extern int smb2_open_file(const unsigned int xid,
1414 extern int smb2_unlock_range(struct cifsFileInfo *cfile,
1415 struct file_lock *flock, const unsigned int xid);
1416 extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile);
1417 +extern void smb2_reconnect_server(struct work_struct *work);
1418
1419 /*
1420 * SMB2 Worker functions - most of protocol specific implementation details
1421 diff --git a/fs/exec.c b/fs/exec.c
1422 index b06623a9347f..3a6de10d3891 100644
1423 --- a/fs/exec.c
1424 +++ b/fs/exec.c
1425 @@ -19,7 +19,7 @@
1426 * current->executable is only used by the procfs. This allows a dispatch
1427 * table to check for several different types of binary formats. We keep
1428 * trying until we recognize the file or we run out of supported binary
1429 - * formats.
1430 + * formats.
1431 */
1432
1433 #include <linux/slab.h>
1434 @@ -56,6 +56,7 @@
1435 #include <linux/pipe_fs_i.h>
1436 #include <linux/oom.h>
1437 #include <linux/compat.h>
1438 +#include <linux/user_namespace.h>
1439
1440 #include <asm/uaccess.h>
1441 #include <asm/mmu_context.h>
1442 @@ -1114,6 +1115,13 @@ int flush_old_exec(struct linux_binprm * bprm)
1443 flush_thread();
1444 current->personality &= ~bprm->per_clear;
1445
1446 + /*
1447 + * We have to apply CLOEXEC before we change whether the process is
1448 + * dumpable (in setup_new_exec) to avoid a race with a process in userspace
1449 + * trying to access the should-be-closed file descriptors of a process
1450 + * undergoing exec(2).
1451 + */
1452 + do_close_on_exec(current->files);
1453 return 0;
1454
1455 out:
1456 @@ -1123,8 +1131,22 @@ EXPORT_SYMBOL(flush_old_exec);
1457
1458 void would_dump(struct linux_binprm *bprm, struct file *file)
1459 {
1460 - if (inode_permission(file_inode(file), MAY_READ) < 0)
1461 + struct inode *inode = file_inode(file);
1462 + if (inode_permission(inode, MAY_READ) < 0) {
1463 + struct user_namespace *old, *user_ns;
1464 bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
1465 +
1466 + /* Ensure mm->user_ns contains the executable */
1467 + user_ns = old = bprm->mm->user_ns;
1468 + while ((user_ns != &init_user_ns) &&
1469 + !privileged_wrt_inode_uidgid(user_ns, inode))
1470 + user_ns = user_ns->parent;
1471 +
1472 + if (old != user_ns) {
1473 + bprm->mm->user_ns = get_user_ns(user_ns);
1474 + put_user_ns(old);
1475 + }
1476 + }
1477 }
1478 EXPORT_SYMBOL(would_dump);
1479
1480 @@ -1154,7 +1176,6 @@ void setup_new_exec(struct linux_binprm * bprm)
1481 !gid_eq(bprm->cred->gid, current_egid())) {
1482 current->pdeath_signal = 0;
1483 } else {
1484 - would_dump(bprm, bprm->file);
1485 if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
1486 set_dumpable(current->mm, suid_dumpable);
1487 }
1488 @@ -1163,7 +1184,6 @@ void setup_new_exec(struct linux_binprm * bprm)
1489 group */
1490 current->self_exec_id++;
1491 flush_signal_handlers(current, 0);
1492 - do_close_on_exec(current->files);
1493 }
1494 EXPORT_SYMBOL(setup_new_exec);
1495
1496 @@ -1254,7 +1274,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
1497 unsigned n_fs;
1498
1499 if (p->ptrace) {
1500 - if (p->ptrace & PT_PTRACE_CAP)
1501 + if (ptracer_capable(p, current_user_ns()))
1502 bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
1503 else
1504 bprm->unsafe |= LSM_UNSAFE_PTRACE;
1505 @@ -1587,6 +1607,8 @@ static int do_execveat_common(int fd, struct filename *filename,
1506 if (retval < 0)
1507 goto out;
1508
1509 + would_dump(bprm, bprm->file);
1510 +
1511 retval = exec_binprm(bprm);
1512 if (retval < 0)
1513 goto out;
1514 diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
1515 index 5f5846211095..f817ed58f5ad 100644
1516 --- a/fs/ext4/ext4_jbd2.h
1517 +++ b/fs/ext4/ext4_jbd2.h
1518 @@ -395,17 +395,19 @@ static inline int ext4_inode_journal_mode(struct inode *inode)
1519 return EXT4_INODE_WRITEBACK_DATA_MODE; /* writeback */
1520 /* We do not support data journalling with delayed allocation */
1521 if (!S_ISREG(inode->i_mode) ||
1522 - test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
1523 - return EXT4_INODE_JOURNAL_DATA_MODE; /* journal data */
1524 - if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
1525 - !test_opt(inode->i_sb, DELALLOC))
1526 + test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
1527 + (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
1528 + !test_opt(inode->i_sb, DELALLOC))) {
1529 + /* We do not support data journalling for encrypted data */
1530 + if (S_ISREG(inode->i_mode) && ext4_encrypted_inode(inode))
1531 + return EXT4_INODE_ORDERED_DATA_MODE; /* ordered */
1532 return EXT4_INODE_JOURNAL_DATA_MODE; /* journal data */
1533 + }
1534 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
1535 return EXT4_INODE_ORDERED_DATA_MODE; /* ordered */
1536 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
1537 return EXT4_INODE_WRITEBACK_DATA_MODE; /* writeback */
1538 - else
1539 - BUG();
1540 + BUG();
1541 }
1542
1543 static inline int ext4_should_journal_data(struct inode *inode)
1544 diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
1545 index d884989cc83d..8968a93e2150 100644
1546 --- a/fs/ext4/inline.c
1547 +++ b/fs/ext4/inline.c
1548 @@ -336,8 +336,10 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
1549
1550 len -= EXT4_MIN_INLINE_DATA_SIZE;
1551 value = kzalloc(len, GFP_NOFS);
1552 - if (!value)
1553 + if (!value) {
1554 + error = -ENOMEM;
1555 goto out;
1556 + }
1557
1558 error = ext4_xattr_ibody_get(inode, i.name_index, i.name,
1559 value, len);
1560 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1561 index c71d2941a45b..10690e5ba2eb 100644
1562 --- a/fs/ext4/inode.c
1563 +++ b/fs/ext4/inode.c
1564 @@ -4175,6 +4175,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1565 struct inode *inode;
1566 journal_t *journal = EXT4_SB(sb)->s_journal;
1567 long ret;
1568 + loff_t size;
1569 int block;
1570 uid_t i_uid;
1571 gid_t i_gid;
1572 @@ -4266,6 +4267,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1573 ei->i_file_acl |=
1574 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
1575 inode->i_size = ext4_isize(raw_inode);
1576 + if ((size = i_size_read(inode)) < 0) {
1577 + EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
1578 + ret = -EFSCORRUPTED;
1579 + goto bad_inode;
1580 + }
1581 ei->i_disksize = inode->i_size;
1582 #ifdef CONFIG_QUOTA
1583 ei->i_reserved_quota = 0;
1584 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
1585 index 3c7f0c44cfb3..b7a3957a9dca 100644
1586 --- a/fs/ext4/mballoc.c
1587 +++ b/fs/ext4/mballoc.c
1588 @@ -669,7 +669,7 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
1589 ext4_grpblk_t min;
1590 ext4_grpblk_t max;
1591 ext4_grpblk_t chunk;
1592 - unsigned short border;
1593 + unsigned int border;
1594
1595 BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
1596
1597 @@ -2287,7 +2287,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
1598 struct ext4_group_info *grinfo;
1599 struct sg {
1600 struct ext4_group_info info;
1601 - ext4_grpblk_t counters[16];
1602 + ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
1603 } sg;
1604
1605 group--;
1606 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1607 index 127155b82e6e..68640e6f95c5 100644
1608 --- a/fs/ext4/super.c
1609 +++ b/fs/ext4/super.c
1610 @@ -3037,10 +3037,15 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
1611 ext4_set_bit(s++, buf);
1612 count++;
1613 }
1614 - for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
1615 - ext4_set_bit(EXT4_B2C(sbi, s++), buf);
1616 - count++;
1617 + j = ext4_bg_num_gdb(sb, grp);
1618 + if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
1619 + ext4_error(sb, "Invalid number of block group "
1620 + "descriptor blocks: %d", j);
1621 + j = EXT4_BLOCKS_PER_GROUP(sb) - s;
1622 }
1623 + count += j;
1624 + for (; j > 0; j--)
1625 + ext4_set_bit(EXT4_B2C(sbi, s++), buf);
1626 }
1627 if (!count)
1628 return 0;
1629 @@ -3130,7 +3135,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1630 char *orig_data = kstrdup(data, GFP_KERNEL);
1631 struct buffer_head *bh;
1632 struct ext4_super_block *es = NULL;
1633 - struct ext4_sb_info *sbi;
1634 + struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
1635 ext4_fsblk_t block;
1636 ext4_fsblk_t sb_block = get_sb_block(&data);
1637 ext4_fsblk_t logical_sb_block;
1638 @@ -3149,16 +3154,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1639 unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
1640 ext4_group_t first_not_zeroed;
1641
1642 - sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
1643 - if (!sbi)
1644 - goto out_free_orig;
1645 + if ((data && !orig_data) || !sbi)
1646 + goto out_free_base;
1647
1648 sbi->s_blockgroup_lock =
1649 kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
1650 - if (!sbi->s_blockgroup_lock) {
1651 - kfree(sbi);
1652 - goto out_free_orig;
1653 - }
1654 + if (!sbi->s_blockgroup_lock)
1655 + goto out_free_base;
1656 +
1657 sb->s_fs_info = sbi;
1658 sbi->s_sb = sb;
1659 sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
1660 @@ -3304,11 +3307,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1661 */
1662 sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
1663
1664 - if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
1665 - &journal_devnum, &journal_ioprio, 0)) {
1666 - ext4_msg(sb, KERN_WARNING,
1667 - "failed to parse options in superblock: %s",
1668 - sbi->s_es->s_mount_opts);
1669 + if (sbi->s_es->s_mount_opts[0]) {
1670 + char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
1671 + sizeof(sbi->s_es->s_mount_opts),
1672 + GFP_KERNEL);
1673 + if (!s_mount_opts)
1674 + goto failed_mount;
1675 + if (!parse_options(s_mount_opts, sb, &journal_devnum,
1676 + &journal_ioprio, 0)) {
1677 + ext4_msg(sb, KERN_WARNING,
1678 + "failed to parse options in superblock: %s",
1679 + s_mount_opts);
1680 + }
1681 + kfree(s_mount_opts);
1682 }
1683 sbi->s_def_mount_opt = sbi->s_mount_opt;
1684 if (!parse_options((char *) data, sb, &journal_devnum,
1685 @@ -3334,6 +3345,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1686 "both data=journal and dax");
1687 goto failed_mount;
1688 }
1689 + if (ext4_has_feature_encrypt(sb)) {
1690 + ext4_msg(sb, KERN_WARNING,
1691 + "encrypted files will use data=ordered "
1692 + "instead of data journaling mode");
1693 + }
1694 if (test_opt(sb, DELALLOC))
1695 clear_opt(sb, DELALLOC);
1696 } else {
1697 @@ -3496,12 +3512,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1698
1699 sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
1700 sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
1701 - if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
1702 - goto cantfind_ext4;
1703
1704 sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
1705 if (sbi->s_inodes_per_block == 0)
1706 goto cantfind_ext4;
1707 + if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
1708 + sbi->s_inodes_per_group > blocksize * 8) {
1709 + ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
1710 + sbi->s_blocks_per_group);
1711 + goto failed_mount;
1712 + }
1713 sbi->s_itb_per_group = sbi->s_inodes_per_group /
1714 sbi->s_inodes_per_block;
1715 sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
1716 @@ -3584,13 +3604,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1717 }
1718 sbi->s_cluster_ratio = clustersize / blocksize;
1719
1720 - if (sbi->s_inodes_per_group > blocksize * 8) {
1721 - ext4_msg(sb, KERN_ERR,
1722 - "#inodes per group too big: %lu",
1723 - sbi->s_inodes_per_group);
1724 - goto failed_mount;
1725 - }
1726 -
1727 /* Do we have standard group size of clustersize * 8 blocks ? */
1728 if (sbi->s_blocks_per_group == clustersize << 3)
1729 set_opt2(sb, STD_GROUP_SIZE);
1730 @@ -3994,7 +4007,9 @@ no_journal:
1731
1732 if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
1733 ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
1734 - "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
1735 + "Opts: %.*s%s%s", descr,
1736 + (int) sizeof(sbi->s_es->s_mount_opts),
1737 + sbi->s_es->s_mount_opts,
1738 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
1739
1740 if (es->s_error_count)
1741 @@ -4064,8 +4079,8 @@ failed_mount:
1742 out_fail:
1743 sb->s_fs_info = NULL;
1744 kfree(sbi->s_blockgroup_lock);
1745 +out_free_base:
1746 kfree(sbi);
1747 -out_free_orig:
1748 kfree(orig_data);
1749 return err ? err : ret;
1750 }
1751 diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
1752 index 478e5d54154f..24d6a51b48d1 100644
1753 --- a/fs/f2fs/debug.c
1754 +++ b/fs/f2fs/debug.c
1755 @@ -352,6 +352,7 @@ static int stat_open(struct inode *inode, struct file *file)
1756 }
1757
1758 static const struct file_operations stat_fops = {
1759 + .owner = THIS_MODULE,
1760 .open = stat_open,
1761 .read = seq_read,
1762 .llseek = seq_lseek,
1763 diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
1764 index 5991cdcb9040..8cab78eeb0c2 100644
1765 --- a/fs/xfs/xfs_log_recover.c
1766 +++ b/fs/xfs/xfs_log_recover.c
1767 @@ -3980,6 +3980,7 @@ xlog_recover_clear_agi_bucket(
1768 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
1769 offset = offsetof(xfs_agi_t, agi_unlinked) +
1770 (sizeof(xfs_agino_t) * bucket);
1771 + xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
1772 xfs_trans_log_buf(tp, agibp, offset,
1773 (offset + sizeof(xfs_agino_t) - 1));
1774
1775 diff --git a/include/linux/capability.h b/include/linux/capability.h
1776 index 5f8249d378a2..2654f75a4c46 100644
1777 --- a/include/linux/capability.h
1778 +++ b/include/linux/capability.h
1779 @@ -247,8 +247,10 @@ static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap)
1780 return true;
1781 }
1782 #endif /* CONFIG_MULTIUSER */
1783 +extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode);
1784 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
1785 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
1786 +extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns);
1787
1788 /* audit system wants to get cap info from files as well */
1789 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
1790 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
1791 index f8d1492a114f..2ccccbfcd532 100644
1792 --- a/include/linux/mm_types.h
1793 +++ b/include/linux/mm_types.h
1794 @@ -469,6 +469,7 @@ struct mm_struct {
1795 */
1796 struct task_struct __rcu *owner;
1797 #endif
1798 + struct user_namespace *user_ns;
1799
1800 /* store ref to file /proc/<pid>/exe symlink points to */
1801 struct file __rcu *exe_file;
1802 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
1803 index 504c98a278d4..e13bfdf7f314 100644
1804 --- a/include/linux/ptrace.h
1805 +++ b/include/linux/ptrace.h
1806 @@ -19,7 +19,6 @@
1807 #define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */
1808 #define PT_PTRACED 0x00000001
1809 #define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
1810 -#define PT_PTRACE_CAP 0x00000004 /* ptracer can follow suid-exec */
1811
1812 #define PT_OPT_FLAG_SHIFT 3
1813 /* PT_TRACE_* event enable flags */
1814 diff --git a/include/linux/sched.h b/include/linux/sched.h
1815 index 1c0193baea2a..ce0f61dcd887 100644
1816 --- a/include/linux/sched.h
1817 +++ b/include/linux/sched.h
1818 @@ -1540,6 +1540,7 @@ struct task_struct {
1819 struct list_head cpu_timers[3];
1820
1821 /* process credentials */
1822 + const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
1823 const struct cred __rcu *real_cred; /* objective and real subjective task
1824 * credentials (COW) */
1825 const struct cred __rcu *cred; /* effective (overridable) subjective task
1826 diff --git a/kernel/capability.c b/kernel/capability.c
1827 index 00411c82dac5..4984e1f552eb 100644
1828 --- a/kernel/capability.c
1829 +++ b/kernel/capability.c
1830 @@ -457,6 +457,19 @@ bool file_ns_capable(const struct file *file, struct user_namespace *ns,
1831 EXPORT_SYMBOL(file_ns_capable);
1832
1833 /**
1834 + * privileged_wrt_inode_uidgid - Do capabilities in the namespace work over the inode?
1835 + * @ns: The user namespace in question
1836 + * @inode: The inode in question
1837 + *
1838 + * Return true if the inode uid and gid are within the namespace.
1839 + */
1840 +bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode)
1841 +{
1842 + return kuid_has_mapping(ns, inode->i_uid) &&
1843 + kgid_has_mapping(ns, inode->i_gid);
1844 +}
1845 +
1846 +/**
1847 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
1848 * @inode: The inode in question
1849 * @cap: The capability in question
1850 @@ -469,7 +482,26 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
1851 {
1852 struct user_namespace *ns = current_user_ns();
1853
1854 - return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
1855 - kgid_has_mapping(ns, inode->i_gid);
1856 + return ns_capable(ns, cap) && privileged_wrt_inode_uidgid(ns, inode);
1857 }
1858 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
1859 +
1860 +/**
1861 + * ptracer_capable - Determine if the ptracer holds CAP_SYS_PTRACE in the namespace
1862 + * @tsk: The task that may be ptraced
1863 + * @ns: The user namespace to search for CAP_SYS_PTRACE in
1864 + *
1865 + * Return true if the task that is ptracing the current task had CAP_SYS_PTRACE
1866 + * in the specified user namespace.
1867 + */
1868 +bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns)
1869 +{
1870 + int ret = 0; /* An absent tracer adds no restrictions */
1871 + const struct cred *cred;
1872 + rcu_read_lock();
1873 + cred = rcu_dereference(tsk->ptracer_cred);
1874 + if (cred)
1875 + ret = security_capable_noaudit(cred, ns, CAP_SYS_PTRACE);
1876 + rcu_read_unlock();
1877 + return (ret == 0);
1878 +}
1879 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
1880 index 0874e2edd275..79517e5549f1 100644
1881 --- a/kernel/debug/debug_core.c
1882 +++ b/kernel/debug/debug_core.c
1883 @@ -598,11 +598,11 @@ return_normal:
1884 /*
1885 * Wait for the other CPUs to be notified and be waiting for us:
1886 */
1887 - time_left = loops_per_jiffy * HZ;
1888 + time_left = MSEC_PER_SEC;
1889 while (kgdb_do_roundup && --time_left &&
1890 (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
1891 online_cpus)
1892 - cpu_relax();
1893 + udelay(1000);
1894 if (!time_left)
1895 pr_crit("Timed out waiting for secondary CPUs.\n");
1896
1897 diff --git a/kernel/fork.c b/kernel/fork.c
1898 index 7161ebe67cbb..2e55b53399de 100644
1899 --- a/kernel/fork.c
1900 +++ b/kernel/fork.c
1901 @@ -585,7 +585,8 @@ static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
1902 #endif
1903 }
1904
1905 -static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
1906 +static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
1907 + struct user_namespace *user_ns)
1908 {
1909 mm->mmap = NULL;
1910 mm->mm_rb = RB_ROOT;
1911 @@ -625,6 +626,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
1912 if (init_new_context(p, mm))
1913 goto fail_nocontext;
1914
1915 + mm->user_ns = get_user_ns(user_ns);
1916 return mm;
1917
1918 fail_nocontext:
1919 @@ -670,7 +672,7 @@ struct mm_struct *mm_alloc(void)
1920 return NULL;
1921
1922 memset(mm, 0, sizeof(*mm));
1923 - return mm_init(mm, current);
1924 + return mm_init(mm, current, current_user_ns());
1925 }
1926
1927 /*
1928 @@ -685,6 +687,7 @@ void __mmdrop(struct mm_struct *mm)
1929 destroy_context(mm);
1930 mmu_notifier_mm_destroy(mm);
1931 check_mm(mm);
1932 + put_user_ns(mm->user_ns);
1933 free_mm(mm);
1934 }
1935 EXPORT_SYMBOL_GPL(__mmdrop);
1936 @@ -942,7 +945,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
1937
1938 memcpy(mm, oldmm, sizeof(*mm));
1939
1940 - if (!mm_init(mm, tsk))
1941 + if (!mm_init(mm, tsk, mm->user_ns))
1942 goto fail_nomem;
1943
1944 err = dup_mmap(mm, oldmm);
1945 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
1946 index 3189e51db7e8..a46c40bfb5f6 100644
1947 --- a/kernel/ptrace.c
1948 +++ b/kernel/ptrace.c
1949 @@ -39,6 +39,9 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
1950 BUG_ON(!list_empty(&child->ptrace_entry));
1951 list_add(&child->ptrace_entry, &new_parent->ptraced);
1952 child->parent = new_parent;
1953 + rcu_read_lock();
1954 + child->ptracer_cred = get_cred(__task_cred(new_parent));
1955 + rcu_read_unlock();
1956 }
1957
1958 /**
1959 @@ -71,11 +74,15 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
1960 */
1961 void __ptrace_unlink(struct task_struct *child)
1962 {
1963 + const struct cred *old_cred;
1964 BUG_ON(!child->ptrace);
1965
1966 child->ptrace = 0;
1967 child->parent = child->real_parent;
1968 list_del_init(&child->ptrace_entry);
1969 + old_cred = child->ptracer_cred;
1970 + child->ptracer_cred = NULL;
1971 + put_cred(old_cred);
1972
1973 spin_lock(&child->sighand->siglock);
1974
1975 @@ -219,7 +226,7 @@ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
1976 static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
1977 {
1978 const struct cred *cred = current_cred(), *tcred;
1979 - int dumpable = 0;
1980 + struct mm_struct *mm;
1981 kuid_t caller_uid;
1982 kgid_t caller_gid;
1983
1984 @@ -270,16 +277,11 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
1985 return -EPERM;
1986 ok:
1987 rcu_read_unlock();
1988 - smp_rmb();
1989 - if (task->mm)
1990 - dumpable = get_dumpable(task->mm);
1991 - rcu_read_lock();
1992 - if (dumpable != SUID_DUMP_USER &&
1993 - !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
1994 - rcu_read_unlock();
1995 - return -EPERM;
1996 - }
1997 - rcu_read_unlock();
1998 + mm = task->mm;
1999 + if (mm &&
2000 + ((get_dumpable(mm) != SUID_DUMP_USER) &&
2001 + !ptrace_has_cap(mm->user_ns, mode)))
2002 + return -EPERM;
2003
2004 return security_ptrace_access_check(task, mode);
2005 }
2006 @@ -343,10 +345,6 @@ static int ptrace_attach(struct task_struct *task, long request,
2007
2008 if (seize)
2009 flags |= PT_SEIZED;
2010 - rcu_read_lock();
2011 - if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
2012 - flags |= PT_PTRACE_CAP;
2013 - rcu_read_unlock();
2014 task->ptrace = flags;
2015
2016 __ptrace_link(task, current);
2017 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
2018 index 198137b1cadc..c1e0b5f429b6 100644
2019 --- a/kernel/watchdog.c
2020 +++ b/kernel/watchdog.c
2021 @@ -328,7 +328,6 @@ static void watchdog_overflow_callback(struct perf_event *event,
2022 */
2023 if (is_hardlockup()) {
2024 int this_cpu = smp_processor_id();
2025 - struct pt_regs *regs = get_irq_regs();
2026
2027 /* only print hardlockups once */
2028 if (__this_cpu_read(hard_watchdog_warn) == true)
2029 diff --git a/mm/init-mm.c b/mm/init-mm.c
2030 index a56a851908d2..975e49f00f34 100644
2031 --- a/mm/init-mm.c
2032 +++ b/mm/init-mm.c
2033 @@ -6,6 +6,7 @@
2034 #include <linux/cpumask.h>
2035
2036 #include <linux/atomic.h>
2037 +#include <linux/user_namespace.h>
2038 #include <asm/pgtable.h>
2039 #include <asm/mmu.h>
2040
2041 @@ -21,5 +22,6 @@ struct mm_struct init_mm = {
2042 .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),
2043 .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
2044 .mmlist = LIST_HEAD_INIT(init_mm.mmlist),
2045 + .user_ns = &init_user_ns,
2046 INIT_MM_CONTEXT(init_mm)
2047 };
2048 diff --git a/mm/vmscan.c b/mm/vmscan.c
2049 index de1c59d8daa3..bfc5050cbd01 100644
2050 --- a/mm/vmscan.c
2051 +++ b/mm/vmscan.c
2052 @@ -277,6 +277,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
2053 int nid = shrinkctl->nid;
2054 long batch_size = shrinker->batch ? shrinker->batch
2055 : SHRINK_BATCH;
2056 + long scanned = 0, next_deferred;
2057
2058 freeable = shrinker->count_objects(shrinker, shrinkctl);
2059 if (freeable == 0)
2060 @@ -298,7 +299,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
2061 pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
2062 shrinker->scan_objects, total_scan);
2063 total_scan = freeable;
2064 - }
2065 + next_deferred = nr;
2066 + } else
2067 + next_deferred = total_scan;
2068
2069 /*
2070 * We need to avoid excessive windup on filesystem shrinkers
2071 @@ -355,17 +358,22 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
2072
2073 count_vm_events(SLABS_SCANNED, nr_to_scan);
2074 total_scan -= nr_to_scan;
2075 + scanned += nr_to_scan;
2076
2077 cond_resched();
2078 }
2079
2080 + if (next_deferred >= scanned)
2081 + next_deferred -= scanned;
2082 + else
2083 + next_deferred = 0;
2084 /*
2085 * move the unused scan count back into the shrinker in a
2086 * manner that handles concurrent updates. If we exhausted the
2087 * scan, there is no need to do an update.
2088 */
2089 - if (total_scan > 0)
2090 - new_nr = atomic_long_add_return(total_scan,
2091 + if (next_deferred > 0)
2092 + new_nr = atomic_long_add_return(next_deferred,
2093 &shrinker->nr_deferred[nid]);
2094 else
2095 new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
2096 diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
2097 index 7f57a145a47e..a03cf68d0bcd 100644
2098 --- a/sound/pci/hda/hda_auto_parser.c
2099 +++ b/sound/pci/hda/hda_auto_parser.c
2100 @@ -884,6 +884,8 @@ void snd_hda_apply_fixup(struct hda_codec *codec, int action)
2101 }
2102 EXPORT_SYMBOL_GPL(snd_hda_apply_fixup);
2103
2104 +#define IGNORE_SEQ_ASSOC (~(AC_DEFCFG_SEQUENCE | AC_DEFCFG_DEF_ASSOC))
2105 +
2106 static bool pin_config_match(struct hda_codec *codec,
2107 const struct hda_pintbl *pins)
2108 {
2109 @@ -901,7 +903,7 @@ static bool pin_config_match(struct hda_codec *codec,
2110 for (; t_pins->nid; t_pins++) {
2111 if (t_pins->nid == nid) {
2112 found = 1;
2113 - if (t_pins->val == cfg)
2114 + if ((t_pins->val & IGNORE_SEQ_ASSOC) == (cfg & IGNORE_SEQ_ASSOC))
2115 break;
2116 else if ((cfg & 0xf0000000) == 0x40000000 && (t_pins->val & 0xf0000000) == 0x40000000)
2117 break;
2118 diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
2119 index 9ceb2bc36e68..c146d0de53d8 100644
2120 --- a/sound/pci/hda/patch_ca0132.c
2121 +++ b/sound/pci/hda/patch_ca0132.c
2122 @@ -780,6 +780,7 @@ static const struct hda_pintbl alienware_pincfgs[] = {
2123 static const struct snd_pci_quirk ca0132_quirks[] = {
2124 SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE),
2125 SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE),
2126 + SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
2127 {}
2128 };
2129
2130 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
2131 index 36cd715986bc..46f7b023f69c 100644
2132 --- a/sound/pci/hda/patch_conexant.c
2133 +++ b/sound/pci/hda/patch_conexant.c
2134 @@ -262,6 +262,7 @@ enum {
2135 CXT_FIXUP_CAP_MIX_AMP_5047,
2136 CXT_FIXUP_MUTE_LED_EAPD,
2137 CXT_FIXUP_HP_SPECTRE,
2138 + CXT_FIXUP_HP_GATE_MIC,
2139 };
2140
2141 /* for hda_fixup_thinkpad_acpi() */
2142 @@ -633,6 +634,17 @@ static void cxt_fixup_cap_mix_amp_5047(struct hda_codec *codec,
2143 (1 << AC_AMPCAP_MUTE_SHIFT));
2144 }
2145
2146 +static void cxt_fixup_hp_gate_mic_jack(struct hda_codec *codec,
2147 + const struct hda_fixup *fix,
2148 + int action)
2149 +{
2150 + /* the mic pin (0x19) doesn't give an unsolicited event;
2151 + * probe the mic pin together with the headphone pin (0x16)
2152 + */
2153 + if (action == HDA_FIXUP_ACT_PROBE)
2154 + snd_hda_jack_set_gating_jack(codec, 0x19, 0x16);
2155 +}
2156 +
2157 /* ThinkPad X200 & co with cxt5051 */
2158 static const struct hda_pintbl cxt_pincfg_lenovo_x200[] = {
2159 { 0x16, 0x042140ff }, /* HP (seq# overridden) */
2160 @@ -774,6 +786,10 @@ static const struct hda_fixup cxt_fixups[] = {
2161 { }
2162 }
2163 },
2164 + [CXT_FIXUP_HP_GATE_MIC] = {
2165 + .type = HDA_FIXUP_FUNC,
2166 + .v.func = cxt_fixup_hp_gate_mic_jack,
2167 + },
2168 };
2169
2170 static const struct snd_pci_quirk cxt5045_fixups[] = {
2171 @@ -824,6 +840,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
2172 SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
2173 SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
2174 SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
2175 + SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
2176 SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
2177 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
2178 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
2179 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2180 index f0986cac82f1..3b2687889cd5 100644
2181 --- a/sound/pci/hda/patch_realtek.c
2182 +++ b/sound/pci/hda/patch_realtek.c
2183 @@ -5899,6 +5899,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
2184 {0x12, 0x90a60180},
2185 {0x14, 0x90170120},
2186 {0x21, 0x02211030}),
2187 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
2188 + {0x1b, 0x01011020},
2189 + {0x21, 0x02211010}),
2190 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
2191 {0x12, 0x90a60160},
2192 {0x14, 0x90170120},
2193 diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
2194 index 0487cfaac538..2b96b11fbe71 100644
2195 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
2196 +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
2197 @@ -762,6 +762,9 @@ static int sst_soc_prepare(struct device *dev)
2198 struct sst_data *drv = dev_get_drvdata(dev);
2199 int i;
2200
2201 + if (!drv->soc_card)
2202 + return 0;
2203 +
2204 /* suspend all pcms first */
2205 snd_soc_suspend(drv->soc_card->dev);
2206 snd_soc_poweroff(drv->soc_card->dev);
2207 @@ -784,6 +787,9 @@ static void sst_soc_complete(struct device *dev)
2208 struct sst_data *drv = dev_get_drvdata(dev);
2209 int i;
2210
2211 + if (!drv->soc_card)
2212 + return;
2213 +
2214 /* restart SSPs */
2215 for (i = 0; i < drv->soc_card->num_rtd; i++) {
2216 struct snd_soc_dai *dai = drv->soc_card->rtd[i].cpu_dai;
2217 diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
2218 index 2c44139b4041..33db205dd12b 100644
2219 --- a/sound/usb/hiface/pcm.c
2220 +++ b/sound/usb/hiface/pcm.c
2221 @@ -445,6 +445,8 @@ static int hiface_pcm_prepare(struct snd_pcm_substream *alsa_sub)
2222
2223 mutex_lock(&rt->stream_mutex);
2224
2225 + hiface_pcm_stream_stop(rt);
2226 +
2227 sub->dma_off = 0;
2228 sub->period_off = 0;
2229
2230 diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
2231 index 4f85757009b3..499b03c8281d 100644
2232 --- a/sound/usb/mixer.c
2233 +++ b/sound/usb/mixer.c
2234 @@ -931,9 +931,10 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
2235 case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
2236 case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
2237 case USB_ID(0x046d, 0x0991):
2238 + case USB_ID(0x046d, 0x09a2): /* QuickCam Communicate Deluxe/S7500 */
2239 /* Most audio usb devices lie about volume resolution.
2240 * Most Logitech webcams have res = 384.
2241 - * Proboly there is some logitech magic behind this number --fishor
2242 + * Probably there is some logitech magic behind this number --fishor
2243 */
2244 if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
2245 usb_audio_info(chip,