Magellan Linux

Contents of /trunk/kernel26-alx/patches-3.10/0144-3.10.45-all-fixes.patch

Revision 2672
Tue Jul 21 16:46:35 2015 UTC by niro
File size: 85525 bytes
-3.10.84-alx-r1
1 diff --git a/Makefile b/Makefile
2 index e55476c4aef0..8a63ca1db77a 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 10
8 -SUBLEVEL = 44
9 +SUBLEVEL = 45
10 EXTRAVERSION =
11 NAME = TOSSUG Baby Fish
12
13 diff --git a/arch/arm/mach-at91/sysirq_mask.c b/arch/arm/mach-at91/sysirq_mask.c
14 index 2ba694f9626b..f8bc3511a8c8 100644
15 --- a/arch/arm/mach-at91/sysirq_mask.c
16 +++ b/arch/arm/mach-at91/sysirq_mask.c
17 @@ -25,24 +25,28 @@
18
19 #include "generic.h"
20
21 -#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */
22 -#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */
23 +#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */
24 +#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */
25 +#define AT91_RTC_IRQ_MASK 0x1f /* Available IRQs mask */
26
27 void __init at91_sysirq_mask_rtc(u32 rtc_base)
28 {
29 void __iomem *base;
30 - u32 mask;
31
32 base = ioremap(rtc_base, 64);
33 if (!base)
34 return;
35
36 - mask = readl_relaxed(base + AT91_RTC_IMR);
37 - if (mask) {
38 - pr_info("AT91: Disabling rtc irq\n");
39 - writel_relaxed(mask, base + AT91_RTC_IDR);
40 - (void)readl_relaxed(base + AT91_RTC_IMR); /* flush */
41 - }
42 + /*
43 + * sam9x5 SoCs have the following errata:
44 + * "RTC: Interrupt Mask Register cannot be used
45 + * Interrupt Mask Register read always returns 0."
46 + *
47 + * Hence we're not relying on IMR values to disable
48 + * interrupts.
49 + */
50 + writel_relaxed(AT91_RTC_IRQ_MASK, base + AT91_RTC_IDR);
51 + (void)readl_relaxed(base + AT91_RTC_IMR); /* flush */
52
53 iounmap(base);
54 }
55 diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
56 index dd203e59e6fd..426345ac6f6e 100644
57 --- a/arch/mips/kvm/kvm_mips.c
58 +++ b/arch/mips/kvm/kvm_mips.c
59 @@ -299,7 +299,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
60 if (cpu_has_veic || cpu_has_vint) {
61 size = 0x200 + VECTORSPACING * 64;
62 } else {
63 - size = 0x200;
64 + size = 0x4000;
65 }
66
67 /* Save Linux EBASE */
68 diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
69 index fd95862c65aa..224fc0c71b8a 100644
70 --- a/arch/sparc/net/bpf_jit_comp.c
71 +++ b/arch/sparc/net/bpf_jit_comp.c
72 @@ -83,9 +83,9 @@ static void bpf_flush_icache(void *start_, void *end_)
73 #define BNE (F2(0, 2) | CONDNE)
74
75 #ifdef CONFIG_SPARC64
76 -#define BNE_PTR (F2(0, 1) | CONDNE | (2 << 20))
77 +#define BE_PTR (F2(0, 1) | CONDE | (2 << 20))
78 #else
79 -#define BNE_PTR BNE
80 +#define BE_PTR BE
81 #endif
82
83 #define SETHI(K, REG) \
84 @@ -600,7 +600,7 @@ void bpf_jit_compile(struct sk_filter *fp)
85 case BPF_S_ANC_IFINDEX:
86 emit_skb_loadptr(dev, r_A);
87 emit_cmpi(r_A, 0);
88 - emit_branch(BNE_PTR, cleanup_addr + 4);
89 + emit_branch(BE_PTR, cleanup_addr + 4);
90 emit_nop();
91 emit_load32(r_A, struct net_device, ifindex, r_A);
92 break;
93 @@ -613,7 +613,7 @@ void bpf_jit_compile(struct sk_filter *fp)
94 case BPF_S_ANC_HATYPE:
95 emit_skb_loadptr(dev, r_A);
96 emit_cmpi(r_A, 0);
97 - emit_branch(BNE_PTR, cleanup_addr + 4);
98 + emit_branch(BE_PTR, cleanup_addr + 4);
99 emit_nop();
100 emit_load16(r_A, struct net_device, type, r_A);
101 break;
102 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
103 index 61d9fed5eb31..279d093524b4 100644
104 --- a/arch/x86/kvm/lapic.c
105 +++ b/arch/x86/kvm/lapic.c
106 @@ -370,6 +370,8 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
107
108 static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
109 {
110 + /* Note that we never get here with APIC virtualization enabled. */
111 +
112 if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
113 ++apic->isr_count;
114 BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
115 @@ -381,12 +383,48 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
116 apic->highest_isr_cache = vec;
117 }
118
119 +static inline int apic_find_highest_isr(struct kvm_lapic *apic)
120 +{
121 + int result;
122 +
123 + /*
124 + * Note that isr_count is always 1, and highest_isr_cache
125 + * is always -1, with APIC virtualization enabled.
126 + */
127 + if (!apic->isr_count)
128 + return -1;
129 + if (likely(apic->highest_isr_cache != -1))
130 + return apic->highest_isr_cache;
131 +
132 + result = find_highest_vector(apic->regs + APIC_ISR);
133 + ASSERT(result == -1 || result >= 16);
134 +
135 + return result;
136 +}
137 +
138 static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
139 {
140 - if (__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
141 + struct kvm_vcpu *vcpu;
142 + if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
143 + return;
144 +
145 + vcpu = apic->vcpu;
146 +
147 + /*
148 + * We do get here for APIC virtualization enabled if the guest
149 + * uses the Hyper-V APIC enlightenment. In this case we may need
150 + * to trigger a new interrupt delivery by writing the SVI field;
151 + * on the other hand isr_count and highest_isr_cache are unused
152 + * and must be left alone.
153 + */
154 + if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
155 + kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
156 + apic_find_highest_isr(apic));
157 + else {
158 --apic->isr_count;
159 - BUG_ON(apic->isr_count < 0);
160 - apic->highest_isr_cache = -1;
161 + BUG_ON(apic->isr_count < 0);
162 + apic->highest_isr_cache = -1;
163 + }
164 }
165
166 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
167 @@ -466,22 +504,6 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
168 __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
169 }
170
171 -static inline int apic_find_highest_isr(struct kvm_lapic *apic)
172 -{
173 - int result;
174 -
175 - /* Note that isr_count is always 1 with vid enabled */
176 - if (!apic->isr_count)
177 - return -1;
178 - if (likely(apic->highest_isr_cache != -1))
179 - return apic->highest_isr_cache;
180 -
181 - result = find_highest_vector(apic->regs + APIC_ISR);
182 - ASSERT(result == -1 || result >= 16);
183 -
184 - return result;
185 -}
186 -
187 void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr)
188 {
189 struct kvm_lapic *apic = vcpu->arch.apic;
190 @@ -1619,6 +1641,8 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
191 int vector = kvm_apic_has_interrupt(vcpu);
192 struct kvm_lapic *apic = vcpu->arch.apic;
193
194 + /* Note that we never get here with APIC virtualization enabled. */
195 +
196 if (vector == -1)
197 return -1;
198
199 diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
200 index 1512e41cd93d..43665d0d0905 100644
201 --- a/crypto/crypto_user.c
202 +++ b/crypto/crypto_user.c
203 @@ -466,7 +466,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
204 type -= CRYPTO_MSG_BASE;
205 link = &crypto_dispatch[type];
206
207 - if (!capable(CAP_NET_ADMIN))
208 + if (!netlink_capable(skb, CAP_NET_ADMIN))
209 return -EPERM;
210
211 if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
212 diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
213 index 18c5b9b16645..3165811e2407 100644
214 --- a/drivers/connector/cn_proc.c
215 +++ b/drivers/connector/cn_proc.c
216 @@ -369,7 +369,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
217 return;
218
219 /* Can only change if privileged. */
220 - if (!capable(CAP_NET_ADMIN)) {
221 + if (!__netlink_ns_capable(nsp, &init_user_ns, CAP_NET_ADMIN)) {
222 err = EPERM;
223 goto out;
224 }
225 diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
226 index deb5c25305af..694173f662d1 100644
227 --- a/drivers/hv/hv_balloon.c
228 +++ b/drivers/hv/hv_balloon.c
229 @@ -19,6 +19,7 @@
230 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
231
232 #include <linux/kernel.h>
233 +#include <linux/jiffies.h>
234 #include <linux/mman.h>
235 #include <linux/delay.h>
236 #include <linux/init.h>
237 @@ -459,6 +460,11 @@ static bool do_hot_add;
238 */
239 static uint pressure_report_delay = 45;
240
241 +/*
242 + * The last time we posted a pressure report to host.
243 + */
244 +static unsigned long last_post_time;
245 +
246 module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
247 MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
248
249 @@ -542,6 +548,7 @@ struct hv_dynmem_device {
250
251 static struct hv_dynmem_device dm_device;
252
253 +static void post_status(struct hv_dynmem_device *dm);
254 #ifdef CONFIG_MEMORY_HOTPLUG
255
256 static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
257 @@ -612,7 +619,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
258 * have not been "onlined" within the allowed time.
259 */
260 wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
261 -
262 + post_status(&dm_device);
263 }
264
265 return;
266 @@ -951,11 +958,17 @@ static void post_status(struct hv_dynmem_device *dm)
267 {
268 struct dm_status status;
269 struct sysinfo val;
270 + unsigned long now = jiffies;
271 + unsigned long last_post = last_post_time;
272
273 if (pressure_report_delay > 0) {
274 --pressure_report_delay;
275 return;
276 }
277 +
278 + if (!time_after(now, (last_post_time + HZ)))
279 + return;
280 +
281 si_meminfo(&val);
282 memset(&status, 0, sizeof(struct dm_status));
283 status.hdr.type = DM_STATUS_REPORT;
284 @@ -983,6 +996,14 @@ static void post_status(struct hv_dynmem_device *dm)
285 if (status.hdr.trans_id != atomic_read(&trans_id))
286 return;
287
288 + /*
289 + * If the last post time that we sampled has changed,
290 + * we have raced, don't post the status.
291 + */
292 + if (last_post != last_post_time)
293 + return;
294 +
295 + last_post_time = jiffies;
296 vmbus_sendpacket(dm->dev->channel, &status,
297 sizeof(struct dm_status),
298 (unsigned long)NULL,
299 @@ -1117,7 +1138,7 @@ static void balloon_up(struct work_struct *dummy)
300
301 if (ret == -EAGAIN)
302 msleep(20);
303 -
304 + post_status(&dm_device);
305 } while (ret == -EAGAIN);
306
307 if (ret) {
308 @@ -1144,8 +1165,10 @@ static void balloon_down(struct hv_dynmem_device *dm,
309 struct dm_unballoon_response resp;
310 int i;
311
312 - for (i = 0; i < range_count; i++)
313 + for (i = 0; i < range_count; i++) {
314 free_balloon_pages(dm, &range_array[i]);
315 + post_status(&dm_device);
316 + }
317
318 if (req->more_pages == 1)
319 return;
320 diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
321 index e5b88d5d3b59..14fdaf0f9d23 100644
322 --- a/drivers/iio/adc/at91_adc.c
323 +++ b/drivers/iio/adc/at91_adc.c
324 @@ -161,12 +161,11 @@ static int at91_adc_channel_init(struct iio_dev *idev)
325 return idev->num_channels;
326 }
327
328 -static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
329 +static int at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
330 struct at91_adc_trigger *triggers,
331 const char *trigger_name)
332 {
333 struct at91_adc_state *st = iio_priv(idev);
334 - u8 value = 0;
335 int i;
336
337 for (i = 0; i < st->trigger_number; i++) {
338 @@ -179,15 +178,16 @@ static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
339 return -ENOMEM;
340
341 if (strcmp(trigger_name, name) == 0) {
342 - value = triggers[i].value;
343 kfree(name);
344 - break;
345 + if (triggers[i].value == 0)
346 + return -EINVAL;
347 + return triggers[i].value;
348 }
349
350 kfree(name);
351 }
352
353 - return value;
354 + return -EINVAL;
355 }
356
357 static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
358 @@ -197,14 +197,14 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
359 struct iio_buffer *buffer = idev->buffer;
360 struct at91_adc_reg_desc *reg = st->registers;
361 u32 status = at91_adc_readl(st, reg->trigger_register);
362 - u8 value;
363 + int value;
364 u8 bit;
365
366 value = at91_adc_get_trigger_value_by_name(idev,
367 st->trigger_list,
368 idev->trig->name);
369 - if (value == 0)
370 - return -EINVAL;
371 + if (value < 0)
372 + return value;
373
374 if (state) {
375 st->buffer = kmalloc(idev->scan_bytes, GFP_KERNEL);
376 diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
377 index 9e6da72ad823..b2b5dcbf7122 100644
378 --- a/drivers/iio/adc/max1363.c
379 +++ b/drivers/iio/adc/max1363.c
380 @@ -1214,8 +1214,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
381 .num_modes = ARRAY_SIZE(max1238_mode_list),
382 .default_mode = s0to11,
383 .info = &max1238_info,
384 - .channels = max1238_channels,
385 - .num_channels = ARRAY_SIZE(max1238_channels),
386 + .channels = max1038_channels,
387 + .num_channels = ARRAY_SIZE(max1038_channels),
388 },
389 [max11605] = {
390 .bits = 8,
391 @@ -1224,8 +1224,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
392 .num_modes = ARRAY_SIZE(max1238_mode_list),
393 .default_mode = s0to11,
394 .info = &max1238_info,
395 - .channels = max1238_channels,
396 - .num_channels = ARRAY_SIZE(max1238_channels),
397 + .channels = max1038_channels,
398 + .num_channels = ARRAY_SIZE(max1038_channels),
399 },
400 [max11606] = {
401 .bits = 10,
402 @@ -1274,8 +1274,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
403 .num_modes = ARRAY_SIZE(max1238_mode_list),
404 .default_mode = s0to11,
405 .info = &max1238_info,
406 - .channels = max1238_channels,
407 - .num_channels = ARRAY_SIZE(max1238_channels),
408 + .channels = max1138_channels,
409 + .num_channels = ARRAY_SIZE(max1138_channels),
410 },
411 [max11611] = {
412 .bits = 10,
413 @@ -1284,8 +1284,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
414 .num_modes = ARRAY_SIZE(max1238_mode_list),
415 .default_mode = s0to11,
416 .info = &max1238_info,
417 - .channels = max1238_channels,
418 - .num_channels = ARRAY_SIZE(max1238_channels),
419 + .channels = max1138_channels,
420 + .num_channels = ARRAY_SIZE(max1138_channels),
421 },
422 [max11612] = {
423 .bits = 12,
424 diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
425 index af6c320a534e..53f829004a03 100644
426 --- a/drivers/iio/magnetometer/ak8975.c
427 +++ b/drivers/iio/magnetometer/ak8975.c
428 @@ -276,8 +276,6 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
429 {
430 struct ak8975_data *data = iio_priv(indio_dev);
431 struct i2c_client *client = data->client;
432 - u16 meas_reg;
433 - s16 raw;
434 int ret;
435
436 mutex_lock(&data->lock);
437 @@ -322,16 +320,11 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
438 dev_err(&client->dev, "Read axis data fails\n");
439 goto exit;
440 }
441 - meas_reg = ret;
442
443 mutex_unlock(&data->lock);
444
445 - /* Endian conversion of the measured values. */
446 - raw = (s16) (le16_to_cpu(meas_reg));
447 -
448 /* Clamp to valid range. */
449 - raw = clamp_t(s16, raw, -4096, 4095);
450 - *val = raw;
451 + *val = clamp_t(s16, ret, -4096, 4095);
452 return IIO_VAL_INT;
453
454 exit:
455 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
456 index 1b195fc7f411..3fb2643d05b4 100644
457 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
458 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
459 @@ -2129,13 +2129,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
460 /* Allow large DMA segments, up to the firmware limit of 1 GB */
461 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
462
463 - priv = kzalloc(sizeof(*priv), GFP_KERNEL);
464 - if (!priv) {
465 - err = -ENOMEM;
466 - goto err_release_regions;
467 - }
468 -
469 - dev = &priv->dev;
470 + dev = pci_get_drvdata(pdev);
471 + priv = mlx4_priv(dev);
472 dev->pdev = pdev;
473 INIT_LIST_HEAD(&priv->ctx_list);
474 spin_lock_init(&priv->ctx_lock);
475 @@ -2300,8 +2295,7 @@ slave_start:
476 mlx4_sense_init(dev);
477 mlx4_start_sense(dev);
478
479 - priv->pci_dev_data = pci_dev_data;
480 - pci_set_drvdata(pdev, dev);
481 + priv->removed = 0;
482
483 return 0;
484
485 @@ -2367,84 +2361,110 @@ err_disable_pdev:
486
487 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
488 {
489 + struct mlx4_priv *priv;
490 + struct mlx4_dev *dev;
491 +
492 printk_once(KERN_INFO "%s", mlx4_version);
493
494 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
495 + if (!priv)
496 + return -ENOMEM;
497 +
498 + dev = &priv->dev;
499 + pci_set_drvdata(pdev, dev);
500 + priv->pci_dev_data = id->driver_data;
501 +
502 return __mlx4_init_one(pdev, id->driver_data);
503 }
504
505 -static void mlx4_remove_one(struct pci_dev *pdev)
506 +static void __mlx4_remove_one(struct pci_dev *pdev)
507 {
508 struct mlx4_dev *dev = pci_get_drvdata(pdev);
509 struct mlx4_priv *priv = mlx4_priv(dev);
510 + int pci_dev_data;
511 int p;
512
513 - if (dev) {
514 - /* in SRIOV it is not allowed to unload the pf's
515 - * driver while there are alive vf's */
516 - if (mlx4_is_master(dev)) {
517 - if (mlx4_how_many_lives_vf(dev))
518 - printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
519 - }
520 - mlx4_stop_sense(dev);
521 - mlx4_unregister_device(dev);
522 + if (priv->removed)
523 + return;
524
525 - for (p = 1; p <= dev->caps.num_ports; p++) {
526 - mlx4_cleanup_port_info(&priv->port[p]);
527 - mlx4_CLOSE_PORT(dev, p);
528 - }
529 + pci_dev_data = priv->pci_dev_data;
530
531 - if (mlx4_is_master(dev))
532 - mlx4_free_resource_tracker(dev,
533 - RES_TR_FREE_SLAVES_ONLY);
534 -
535 - mlx4_cleanup_counters_table(dev);
536 - mlx4_cleanup_mcg_table(dev);
537 - mlx4_cleanup_qp_table(dev);
538 - mlx4_cleanup_srq_table(dev);
539 - mlx4_cleanup_cq_table(dev);
540 - mlx4_cmd_use_polling(dev);
541 - mlx4_cleanup_eq_table(dev);
542 - mlx4_cleanup_mr_table(dev);
543 - mlx4_cleanup_xrcd_table(dev);
544 - mlx4_cleanup_pd_table(dev);
545 + /* in SRIOV it is not allowed to unload the pf's
546 + * driver while there are alive vf's */
547 + if (mlx4_is_master(dev)) {
548 + if (mlx4_how_many_lives_vf(dev))
549 + printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
550 + }
551 + mlx4_stop_sense(dev);
552 + mlx4_unregister_device(dev);
553
554 - if (mlx4_is_master(dev))
555 - mlx4_free_resource_tracker(dev,
556 - RES_TR_FREE_STRUCTS_ONLY);
557 -
558 - iounmap(priv->kar);
559 - mlx4_uar_free(dev, &priv->driver_uar);
560 - mlx4_cleanup_uar_table(dev);
561 - if (!mlx4_is_slave(dev))
562 - mlx4_clear_steering(dev);
563 - mlx4_free_eq_table(dev);
564 - if (mlx4_is_master(dev))
565 - mlx4_multi_func_cleanup(dev);
566 - mlx4_close_hca(dev);
567 - if (mlx4_is_slave(dev))
568 - mlx4_multi_func_cleanup(dev);
569 - mlx4_cmd_cleanup(dev);
570 -
571 - if (dev->flags & MLX4_FLAG_MSI_X)
572 - pci_disable_msix(pdev);
573 - if (dev->flags & MLX4_FLAG_SRIOV) {
574 - mlx4_warn(dev, "Disabling SR-IOV\n");
575 - pci_disable_sriov(pdev);
576 - }
577 + for (p = 1; p <= dev->caps.num_ports; p++) {
578 + mlx4_cleanup_port_info(&priv->port[p]);
579 + mlx4_CLOSE_PORT(dev, p);
580 + }
581 +
582 + if (mlx4_is_master(dev))
583 + mlx4_free_resource_tracker(dev,
584 + RES_TR_FREE_SLAVES_ONLY);
585 +
586 + mlx4_cleanup_counters_table(dev);
587 + mlx4_cleanup_qp_table(dev);
588 + mlx4_cleanup_srq_table(dev);
589 + mlx4_cleanup_cq_table(dev);
590 + mlx4_cmd_use_polling(dev);
591 + mlx4_cleanup_eq_table(dev);
592 + mlx4_cleanup_mcg_table(dev);
593 + mlx4_cleanup_mr_table(dev);
594 + mlx4_cleanup_xrcd_table(dev);
595 + mlx4_cleanup_pd_table(dev);
596
597 - if (!mlx4_is_slave(dev))
598 - mlx4_free_ownership(dev);
599 + if (mlx4_is_master(dev))
600 + mlx4_free_resource_tracker(dev,
601 + RES_TR_FREE_STRUCTS_ONLY);
602
603 - kfree(dev->caps.qp0_tunnel);
604 - kfree(dev->caps.qp0_proxy);
605 - kfree(dev->caps.qp1_tunnel);
606 - kfree(dev->caps.qp1_proxy);
607 + iounmap(priv->kar);
608 + mlx4_uar_free(dev, &priv->driver_uar);
609 + mlx4_cleanup_uar_table(dev);
610 + if (!mlx4_is_slave(dev))
611 + mlx4_clear_steering(dev);
612 + mlx4_free_eq_table(dev);
613 + if (mlx4_is_master(dev))
614 + mlx4_multi_func_cleanup(dev);
615 + mlx4_close_hca(dev);
616 + if (mlx4_is_slave(dev))
617 + mlx4_multi_func_cleanup(dev);
618 + mlx4_cmd_cleanup(dev);
619
620 - kfree(priv);
621 - pci_release_regions(pdev);
622 - pci_disable_device(pdev);
623 - pci_set_drvdata(pdev, NULL);
624 + if (dev->flags & MLX4_FLAG_MSI_X)
625 + pci_disable_msix(pdev);
626 + if (dev->flags & MLX4_FLAG_SRIOV) {
627 + mlx4_warn(dev, "Disabling SR-IOV\n");
628 + pci_disable_sriov(pdev);
629 }
630 +
631 + if (!mlx4_is_slave(dev))
632 + mlx4_free_ownership(dev);
633 +
634 + kfree(dev->caps.qp0_tunnel);
635 + kfree(dev->caps.qp0_proxy);
636 + kfree(dev->caps.qp1_tunnel);
637 + kfree(dev->caps.qp1_proxy);
638 +
639 + pci_release_regions(pdev);
640 + pci_disable_device(pdev);
641 + memset(priv, 0, sizeof(*priv));
642 + priv->pci_dev_data = pci_dev_data;
643 + priv->removed = 1;
644 +}
645 +
646 +static void mlx4_remove_one(struct pci_dev *pdev)
647 +{
648 + struct mlx4_dev *dev = pci_get_drvdata(pdev);
649 + struct mlx4_priv *priv = mlx4_priv(dev);
650 +
651 + __mlx4_remove_one(pdev);
652 + kfree(priv);
653 + pci_set_drvdata(pdev, NULL);
654 }
655
656 int mlx4_restart_one(struct pci_dev *pdev)
657 @@ -2454,7 +2474,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
658 int pci_dev_data;
659
660 pci_dev_data = priv->pci_dev_data;
661 - mlx4_remove_one(pdev);
662 + __mlx4_remove_one(pdev);
663 return __mlx4_init_one(pdev, pci_dev_data);
664 }
665
666 @@ -2509,7 +2529,7 @@ MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
667 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
668 pci_channel_state_t state)
669 {
670 - mlx4_remove_one(pdev);
671 + __mlx4_remove_one(pdev);
672
673 return state == pci_channel_io_perm_failure ?
674 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
675 @@ -2517,7 +2537,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
676
677 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
678 {
679 - int ret = __mlx4_init_one(pdev, 0);
680 + struct mlx4_dev *dev = pci_get_drvdata(pdev);
681 + struct mlx4_priv *priv = mlx4_priv(dev);
682 + int ret;
683 +
684 + ret = __mlx4_init_one(pdev, priv->pci_dev_data);
685
686 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
687 }
688 diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
689 index df15bb6631cc..da4f0002fd27 100644
690 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
691 +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
692 @@ -743,6 +743,7 @@ struct mlx4_priv {
693 spinlock_t ctx_lock;
694
695 int pci_dev_data;
696 + int removed;
697
698 struct list_head pgdir_list;
699 struct mutex pgdir_mutex;
700 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
701 index c12aeaee22fa..155ef4bbde91 100644
702 --- a/drivers/net/macvlan.c
703 +++ b/drivers/net/macvlan.c
704 @@ -961,7 +961,6 @@ static int macvlan_device_event(struct notifier_block *unused,
705 list_for_each_entry_safe(vlan, next, &port->vlans, list)
706 vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
707 unregister_netdevice_many(&list_kill);
708 - list_del(&list_kill);
709 break;
710 case NETDEV_PRE_TYPE_CHANGE:
711 /* Forbid underlaying device to change its type. */
712 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
713 index fe3fd77821bf..12222290c802 100644
714 --- a/drivers/net/team/team.c
715 +++ b/drivers/net/team/team.c
716 @@ -1542,6 +1542,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
717 * to traverse list in reverse under rcu_read_lock
718 */
719 mutex_lock(&team->lock);
720 + team->port_mtu_change_allowed = true;
721 list_for_each_entry(port, &team->port_list, list) {
722 err = dev_set_mtu(port->dev, new_mtu);
723 if (err) {
724 @@ -1550,6 +1551,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
725 goto unwind;
726 }
727 }
728 + team->port_mtu_change_allowed = false;
729 mutex_unlock(&team->lock);
730
731 dev->mtu = new_mtu;
732 @@ -1559,6 +1561,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
733 unwind:
734 list_for_each_entry_continue_reverse(port, &team->port_list, list)
735 dev_set_mtu(port->dev, dev->mtu);
736 + team->port_mtu_change_allowed = false;
737 mutex_unlock(&team->lock);
738
739 return err;
740 @@ -2678,7 +2681,9 @@ static int team_device_event(struct notifier_block *unused,
741 break;
742 case NETDEV_CHANGEMTU:
743 /* Forbid to change mtu of underlaying device */
744 - return NOTIFY_BAD;
745 + if (!port->team->port_mtu_change_allowed)
746 + return NOTIFY_BAD;
747 + break;
748 case NETDEV_PRE_TYPE_CHANGE:
749 /* Forbid to change type of underlaying device */
750 return NOTIFY_BAD;
751 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
752 index 7be4860ccfd7..6fb0082b3308 100644
753 --- a/drivers/net/usb/qmi_wwan.c
754 +++ b/drivers/net/usb/qmi_wwan.c
755 @@ -739,7 +739,12 @@ static const struct usb_device_id products[] = {
756 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
757 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
758 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
759 - {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
760 + {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
761 + {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
762 + {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */
763 + {QMI_FIXED_INTF(0x0b3c, 0xc004, 6)}, /* Olivetti Olicard 155 */
764 + {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
765 + {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */
766 {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
767 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
768 {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
769 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
770 index 9673edfff451..fcbd4eee52cc 100644
771 --- a/drivers/net/vxlan.c
772 +++ b/drivers/net/vxlan.c
773 @@ -1314,7 +1314,7 @@ static void vxlan_setup(struct net_device *dev)
774
775 eth_hw_addr_random(dev);
776 ether_setup(dev);
777 - dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
778 + dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
779
780 dev->netdev_ops = &vxlan_netdev_ops;
781 dev->destructor = vxlan_free;
782 @@ -1454,7 +1454,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
783 dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
784
785 /* update header length based on lower device */
786 - dev->hard_header_len = lowerdev->hard_header_len +
787 + dev->needed_headroom = lowerdev->hard_header_len +
788 VXLAN_HEADROOM;
789 }
790
791 diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
792 index 1237c2173c6d..e51cc5fec98a 100644
793 --- a/drivers/rtc/rtc-at91rm9200.c
794 +++ b/drivers/rtc/rtc-at91rm9200.c
795 @@ -49,6 +49,7 @@ struct at91_rtc_config {
796
797 static const struct at91_rtc_config *at91_rtc_config;
798 static DECLARE_COMPLETION(at91_rtc_updated);
799 +static DECLARE_COMPLETION(at91_rtc_upd_rdy);
800 static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
801 static void __iomem *at91_rtc_regs;
802 static int irq;
803 @@ -162,6 +163,8 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
804 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
805 tm->tm_hour, tm->tm_min, tm->tm_sec);
806
807 + wait_for_completion(&at91_rtc_upd_rdy);
808 +
809 /* Stop Time/Calendar from counting */
810 cr = at91_rtc_read(AT91_RTC_CR);
811 at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
812 @@ -184,7 +187,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
813
814 /* Restart Time/Calendar */
815 cr = at91_rtc_read(AT91_RTC_CR);
816 + at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_SECEV);
817 at91_rtc_write(AT91_RTC_CR, cr & ~(AT91_RTC_UPDCAL | AT91_RTC_UPDTIM));
818 + at91_rtc_write_ier(AT91_RTC_SECEV);
819
820 return 0;
821 }
822 @@ -291,8 +296,10 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
823 if (rtsr) { /* this interrupt is shared! Is it ours? */
824 if (rtsr & AT91_RTC_ALARM)
825 events |= (RTC_AF | RTC_IRQF);
826 - if (rtsr & AT91_RTC_SECEV)
827 - events |= (RTC_UF | RTC_IRQF);
828 + if (rtsr & AT91_RTC_SECEV) {
829 + complete(&at91_rtc_upd_rdy);
830 + at91_rtc_write_idr(AT91_RTC_SECEV);
831 + }
832 if (rtsr & AT91_RTC_ACKUPD)
833 complete(&at91_rtc_updated);
834
835 @@ -415,6 +422,11 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
836 }
837 platform_set_drvdata(pdev, rtc);
838
839 + /* enable SECEV interrupt in order to initialize at91_rtc_upd_rdy
840 + * completion.
841 + */
842 + at91_rtc_write_ier(AT91_RTC_SECEV);
843 +
844 dev_info(&pdev->dev, "AT91 Real Time Clock driver.\n");
845 return 0;
846
847 diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
848 index fe30ea94ffe6..109802f776ed 100644
849 --- a/drivers/scsi/scsi_netlink.c
850 +++ b/drivers/scsi/scsi_netlink.c
851 @@ -77,7 +77,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
852 goto next_msg;
853 }
854
855 - if (!capable(CAP_SYS_ADMIN)) {
856 + if (!netlink_capable(skb, CAP_SYS_ADMIN)) {
857 err = -EPERM;
858 goto next_msg;
859 }
860 diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
861 index c99f890cc6c6..64c73adfa3b0 100644
862 --- a/drivers/staging/iio/light/tsl2x7x_core.c
863 +++ b/drivers/staging/iio/light/tsl2x7x_core.c
864 @@ -672,9 +672,13 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
865 chip->tsl2x7x_config[TSL2X7X_PRX_COUNT] =
866 chip->tsl2x7x_settings.prox_pulse_count;
867 chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHLO] =
868 - chip->tsl2x7x_settings.prox_thres_low;
869 + (chip->tsl2x7x_settings.prox_thres_low) & 0xFF;
870 + chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHHI] =
871 + (chip->tsl2x7x_settings.prox_thres_low >> 8) & 0xFF;
872 chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHLO] =
873 - chip->tsl2x7x_settings.prox_thres_high;
874 + (chip->tsl2x7x_settings.prox_thres_high) & 0xFF;
875 + chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHHI] =
876 + (chip->tsl2x7x_settings.prox_thres_high >> 8) & 0xFF;
877
878 /* and make sure we're not already on */
879 if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
880 diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
881 index 130a1e4f96a1..3c9a8dfd1c2e 100644
882 --- a/drivers/target/iscsi/iscsi_target_auth.c
883 +++ b/drivers/target/iscsi/iscsi_target_auth.c
884 @@ -316,6 +316,16 @@ static int chap_server_compute_md5(
885 goto out;
886 }
887 /*
888 + * During mutual authentication, the CHAP_C generated by the
889 + * initiator must not match the original CHAP_C generated by
890 + * the target.
891 + */
892 + if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
893 + pr_err("initiator CHAP_C matches target CHAP_C, failing"
894 + " login attempt\n");
895 + goto out;
896 + }
897 + /*
898 * Generate CHAP_N and CHAP_R for mutual authentication.
899 */
900 tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
901 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
902 index 89cce1a32059..fbf3f11aed2c 100644
903 --- a/drivers/usb/class/cdc-acm.c
904 +++ b/drivers/usb/class/cdc-acm.c
905 @@ -122,13 +122,23 @@ static void acm_release_minor(struct acm *acm)
906 static int acm_ctrl_msg(struct acm *acm, int request, int value,
907 void *buf, int len)
908 {
909 - int retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
910 + int retval;
911 +
912 + retval = usb_autopm_get_interface(acm->control);
913 + if (retval)
914 + return retval;
915 +
916 + retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
917 request, USB_RT_ACM, value,
918 acm->control->altsetting[0].desc.bInterfaceNumber,
919 buf, len, 5000);
920 +
921 dev_dbg(&acm->control->dev,
922 "%s - rq 0x%02x, val %#x, len %#x, result %d\n",
923 __func__, request, value, len, retval);
924 +
925 + usb_autopm_put_interface(acm->control);
926 +
927 return retval < 0 ? retval : 0;
928 }
929
930 @@ -233,12 +243,9 @@ static int acm_write_start(struct acm *acm, int wbn)
931 acm->susp_count);
932 usb_autopm_get_interface_async(acm->control);
933 if (acm->susp_count) {
934 - if (!acm->delayed_wb)
935 - acm->delayed_wb = wb;
936 - else
937 - usb_autopm_put_interface_async(acm->control);
938 + usb_anchor_urb(wb->urb, &acm->delayed);
939 spin_unlock_irqrestore(&acm->write_lock, flags);
940 - return 0; /* A white lie */
941 + return 0;
942 }
943 usb_mark_last_busy(acm->dev);
944
945 @@ -516,6 +523,7 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
946 {
947 struct acm *acm = container_of(port, struct acm, port);
948 int retval = -ENODEV;
949 + int i;
950
951 dev_dbg(&acm->control->dev, "%s\n", __func__);
952
953 @@ -564,6 +572,8 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
954 return 0;
955
956 error_submit_read_urbs:
957 + for (i = 0; i < acm->rx_buflimit; i++)
958 + usb_kill_urb(acm->read_urbs[i]);
959 acm->ctrlout = 0;
960 acm_set_control(acm, acm->ctrlout);
961 error_set_control:
962 @@ -591,21 +601,35 @@ static void acm_port_destruct(struct tty_port *port)
963 static void acm_port_shutdown(struct tty_port *port)
964 {
965 struct acm *acm = container_of(port, struct acm, port);
966 + struct urb *urb;
967 + struct acm_wb *wb;
968 int i;
969 + int pm_err;
970
971 dev_dbg(&acm->control->dev, "%s\n", __func__);
972
973 mutex_lock(&acm->mutex);
974 if (!acm->disconnected) {
975 - usb_autopm_get_interface(acm->control);
976 + pm_err = usb_autopm_get_interface(acm->control);
977 acm_set_control(acm, acm->ctrlout = 0);
978 +
979 + for (;;) {
980 + urb = usb_get_from_anchor(&acm->delayed);
981 + if (!urb)
982 + break;
983 + wb = urb->context;
984 + wb->use = 0;
985 + usb_autopm_put_interface_async(acm->control);
986 + }
987 +
988 usb_kill_urb(acm->ctrlurb);
989 for (i = 0; i < ACM_NW; i++)
990 usb_kill_urb(acm->wb[i].urb);
991 for (i = 0; i < acm->rx_buflimit; i++)
992 usb_kill_urb(acm->read_urbs[i]);
993 acm->control->needs_remote_wakeup = 0;
994 - usb_autopm_put_interface(acm->control);
995 + if (!pm_err)
996 + usb_autopm_put_interface(acm->control);
997 }
998 mutex_unlock(&acm->mutex);
999 }
1000 @@ -1190,6 +1214,7 @@ made_compressed_probe:
1001 acm->bInterval = epread->bInterval;
1002 tty_port_init(&acm->port);
1003 acm->port.ops = &acm_port_ops;
1004 + init_usb_anchor(&acm->delayed);
1005
1006 buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
1007 if (!buf) {
1008 @@ -1434,18 +1459,15 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
1009 struct acm *acm = usb_get_intfdata(intf);
1010 int cnt;
1011
1012 + spin_lock_irq(&acm->read_lock);
1013 + spin_lock(&acm->write_lock);
1014 if (PMSG_IS_AUTO(message)) {
1015 - int b;
1016 -
1017 - spin_lock_irq(&acm->write_lock);
1018 - b = acm->transmitting;
1019 - spin_unlock_irq(&acm->write_lock);
1020 - if (b)
1021 + if (acm->transmitting) {
1022 + spin_unlock(&acm->write_lock);
1023 + spin_unlock_irq(&acm->read_lock);
1024 return -EBUSY;
1025 + }
1026 }
1027 -
1028 - spin_lock_irq(&acm->read_lock);
1029 - spin_lock(&acm->write_lock);
1030 cnt = acm->susp_count++;
1031 spin_unlock(&acm->write_lock);
1032 spin_unlock_irq(&acm->read_lock);
1033 @@ -1453,8 +1475,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
1034 if (cnt)
1035 return 0;
1036
1037 - if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags))
1038 - stop_data_traffic(acm);
1039 + stop_data_traffic(acm);
1040
1041 return 0;
1042 }
1043 @@ -1462,29 +1483,24 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
1044 static int acm_resume(struct usb_interface *intf)
1045 {
1046 struct acm *acm = usb_get_intfdata(intf);
1047 - struct acm_wb *wb;
1048 + struct urb *urb;
1049 int rv = 0;
1050 - int cnt;
1051
1052 spin_lock_irq(&acm->read_lock);
1053 - acm->susp_count -= 1;
1054 - cnt = acm->susp_count;
1055 - spin_unlock_irq(&acm->read_lock);
1056 + spin_lock(&acm->write_lock);
1057
1058 - if (cnt)
1059 - return 0;
1060 + if (--acm->susp_count)
1061 + goto out;
1062
1063 if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags)) {
1064 - rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
1065 -
1066 - spin_lock_irq(&acm->write_lock);
1067 - if (acm->delayed_wb) {
1068 - wb = acm->delayed_wb;
1069 - acm->delayed_wb = NULL;
1070 - spin_unlock_irq(&acm->write_lock);
1071 - acm_start_wb(acm, wb);
1072 - } else {
1073 - spin_unlock_irq(&acm->write_lock);
1074 + rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);
1075 +
1076 + for (;;) {
1077 + urb = usb_get_from_anchor(&acm->delayed);
1078 + if (!urb)
1079 + break;
1080 +
1081 + acm_start_wb(acm, urb->context);
1082 }
1083
1084 /*
1085 @@ -1492,12 +1508,14 @@ static int acm_resume(struct usb_interface *intf)
1086 * do the write path at all cost
1087 */
1088 if (rv < 0)
1089 - goto err_out;
1090 + goto out;
1091
1092 - rv = acm_submit_read_urbs(acm, GFP_NOIO);
1093 + rv = acm_submit_read_urbs(acm, GFP_ATOMIC);
1094 }
1095 +out:
1096 + spin_unlock(&acm->write_lock);
1097 + spin_unlock_irq(&acm->read_lock);
1098
1099 -err_out:
1100 return rv;
1101 }
1102
1103 diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
1104 index 0f76e4af600e..1683ac161cf6 100644
1105 --- a/drivers/usb/class/cdc-acm.h
1106 +++ b/drivers/usb/class/cdc-acm.h
1107 @@ -117,7 +117,7 @@ struct acm {
1108 unsigned int throttled:1; /* actually throttled */
1109 unsigned int throttle_req:1; /* throttle requested */
1110 u8 bInterval;
1111 - struct acm_wb *delayed_wb; /* write queued for a device about to be woken */
1112 + struct usb_anchor delayed; /* writes queued for a device about to be woken */
1113 };
1114
1115 #define CDC_DATA_INTERFACE_TYPE 0x0a
1116 diff --git a/include/linux/if_team.h b/include/linux/if_team.h
1117 index 16fae6436d0e..25b8b15197b0 100644
1118 --- a/include/linux/if_team.h
1119 +++ b/include/linux/if_team.h
1120 @@ -193,6 +193,7 @@ struct team {
1121 bool user_carrier_enabled;
1122 bool queue_override_enabled;
1123 struct list_head *qom_lists; /* array of queue override mapping lists */
1124 + bool port_mtu_change_allowed;
1125 long mode_priv[TEAM_MODE_PRIV_LONGS];
1126 };
1127
1128 diff --git a/include/linux/netlink.h b/include/linux/netlink.h
1129 index 6358da5eeee8..9516dad45109 100644
1130 --- a/include/linux/netlink.h
1131 +++ b/include/linux/netlink.h
1132 @@ -16,9 +16,10 @@ static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
1133 }
1134
1135 enum netlink_skb_flags {
1136 - NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */
1137 - NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */
1138 - NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */
1139 + NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */
1140 + NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */
1141 + NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */
1142 + NETLINK_SKB_DST = 0x8, /* Dst set in sendto or sendmsg */
1143 };
1144
1145 struct netlink_skb_parms {
1146 @@ -144,4 +145,11 @@ static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1147 return __netlink_dump_start(ssk, skb, nlh, control);
1148 }
1149
1150 +bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
1151 + struct user_namespace *ns, int cap);
1152 +bool netlink_ns_capable(const struct sk_buff *skb,
1153 + struct user_namespace *ns, int cap);
1154 +bool netlink_capable(const struct sk_buff *skb, int cap);
1155 +bool netlink_net_capable(const struct sk_buff *skb, int cap);
1156 +
1157 #endif /* __LINUX_NETLINK_H */
1158 diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
1159 index 302ab805b0bb..46cca4c06848 100644
1160 --- a/include/linux/sock_diag.h
1161 +++ b/include/linux/sock_diag.h
1162 @@ -23,7 +23,7 @@ int sock_diag_check_cookie(void *sk, __u32 *cookie);
1163 void sock_diag_save_cookie(void *sk, __u32 *cookie);
1164
1165 int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
1166 -int sock_diag_put_filterinfo(struct sock *sk,
1167 +int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
1168 struct sk_buff *skb, int attrtype);
1169
1170 #endif
1171 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
1172 index 53f464d7cddc..6ca347a0717e 100644
1173 --- a/include/net/inetpeer.h
1174 +++ b/include/net/inetpeer.h
1175 @@ -178,16 +178,9 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
1176 /* can be called with or without local BH being disabled */
1177 static inline int inet_getid(struct inet_peer *p, int more)
1178 {
1179 - int old, new;
1180 more++;
1181 inet_peer_refcheck(p);
1182 - do {
1183 - old = atomic_read(&p->ip_id_count);
1184 - new = old + more;
1185 - if (!new)
1186 - new = 1;
1187 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
1188 - return new;
1189 + return atomic_add_return(more, &p->ip_id_count) - more;
1190 }
1191
1192 #endif /* _NET_INETPEER_H */
1193 diff --git a/include/net/sock.h b/include/net/sock.h
1194 index 8f32b779bc83..72f710d2f75a 100644
1195 --- a/include/net/sock.h
1196 +++ b/include/net/sock.h
1197 @@ -2248,6 +2248,11 @@ extern void sock_enable_timestamp(struct sock *sk, int flag);
1198 extern int sock_get_timestamp(struct sock *, struct timeval __user *);
1199 extern int sock_get_timestampns(struct sock *, struct timespec __user *);
1200
1201 +bool sk_ns_capable(const struct sock *sk,
1202 + struct user_namespace *user_ns, int cap);
1203 +bool sk_capable(const struct sock *sk, int cap);
1204 +bool sk_net_capable(const struct sock *sk, int cap);
1205 +
1206 /*
1207 * Enable debug/info messages
1208 */
1209 diff --git a/include/sound/core.h b/include/sound/core.h
1210 index 5bfe5136441c..97cd9c3592f7 100644
1211 --- a/include/sound/core.h
1212 +++ b/include/sound/core.h
1213 @@ -120,6 +120,8 @@ struct snd_card {
1214 int user_ctl_count; /* count of all user controls */
1215 struct list_head controls; /* all controls for this card */
1216 struct list_head ctl_files; /* active control files */
1217 + struct mutex user_ctl_lock; /* protects user controls against
1218 + concurrent access */
1219
1220 struct snd_info_entry *proc_root; /* root for soundcard specific files */
1221 struct snd_info_entry *proc_id; /* the card id */
1222 diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
1223 index 5759810e1c1b..21eed488783f 100644
1224 --- a/include/uapi/sound/compress_offload.h
1225 +++ b/include/uapi/sound/compress_offload.h
1226 @@ -80,7 +80,7 @@ struct snd_compr_tstamp {
1227 struct snd_compr_avail {
1228 __u64 avail;
1229 struct snd_compr_tstamp tstamp;
1230 -};
1231 +} __attribute__((packed));
1232
1233 enum snd_compr_direction {
1234 SND_COMPRESS_PLAYBACK = 0,
1235 diff --git a/kernel/audit.c b/kernel/audit.c
1236 index 6def25f1b351..a6c632757e57 100644
1237 --- a/kernel/audit.c
1238 +++ b/kernel/audit.c
1239 @@ -593,13 +593,13 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
1240 case AUDIT_TTY_SET:
1241 case AUDIT_TRIM:
1242 case AUDIT_MAKE_EQUIV:
1243 - if (!capable(CAP_AUDIT_CONTROL))
1244 + if (!netlink_capable(skb, CAP_AUDIT_CONTROL))
1245 err = -EPERM;
1246 break;
1247 case AUDIT_USER:
1248 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
1249 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
1250 - if (!capable(CAP_AUDIT_WRITE))
1251 + if (!netlink_capable(skb, CAP_AUDIT_WRITE))
1252 err = -EPERM;
1253 break;
1254 default: /* bad msg */
1255 diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
1256 index 4251374578bc..67f7a2d2efbc 100644
1257 --- a/kernel/time/tick-sched.c
1258 +++ b/kernel/time/tick-sched.c
1259 @@ -720,8 +720,10 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
1260 return false;
1261 }
1262
1263 - if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
1264 + if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
1265 + ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
1266 return false;
1267 + }
1268
1269 if (need_resched())
1270 return false;
1271 diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
1272 index 569985d522d5..8563081e8da3 100644
1273 --- a/lib/lzo/lzo1x_decompress_safe.c
1274 +++ b/lib/lzo/lzo1x_decompress_safe.c
1275 @@ -19,11 +19,31 @@
1276 #include <linux/lzo.h>
1277 #include "lzodefs.h"
1278
1279 -#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
1280 -#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
1281 -#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
1282 -#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
1283 -#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
1284 +#define HAVE_IP(t, x) \
1285 + (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
1286 + (((t + x) >= t) && ((t + x) >= x)))
1287 +
1288 +#define HAVE_OP(t, x) \
1289 + (((size_t)(op_end - op) >= (size_t)(t + x)) && \
1290 + (((t + x) >= t) && ((t + x) >= x)))
1291 +
1292 +#define NEED_IP(t, x) \
1293 + do { \
1294 + if (!HAVE_IP(t, x)) \
1295 + goto input_overrun; \
1296 + } while (0)
1297 +
1298 +#define NEED_OP(t, x) \
1299 + do { \
1300 + if (!HAVE_OP(t, x)) \
1301 + goto output_overrun; \
1302 + } while (0)
1303 +
1304 +#define TEST_LB(m_pos) \
1305 + do { \
1306 + if ((m_pos) < out) \
1307 + goto lookbehind_overrun; \
1308 + } while (0)
1309
1310 int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
1311 unsigned char *out, size_t *out_len)
1312 @@ -58,14 +78,14 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
1313 while (unlikely(*ip == 0)) {
1314 t += 255;
1315 ip++;
1316 - NEED_IP(1);
1317 + NEED_IP(1, 0);
1318 }
1319 t += 15 + *ip++;
1320 }
1321 t += 3;
1322 copy_literal_run:
1323 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
1324 - if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
1325 + if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
1326 const unsigned char *ie = ip + t;
1327 unsigned char *oe = op + t;
1328 do {
1329 @@ -81,8 +101,8 @@ copy_literal_run:
1330 } else
1331 #endif
1332 {
1333 - NEED_OP(t);
1334 - NEED_IP(t + 3);
1335 + NEED_OP(t, 0);
1336 + NEED_IP(t, 3);
1337 do {
1338 *op++ = *ip++;
1339 } while (--t > 0);
1340 @@ -95,7 +115,7 @@ copy_literal_run:
1341 m_pos -= t >> 2;
1342 m_pos -= *ip++ << 2;
1343 TEST_LB(m_pos);
1344 - NEED_OP(2);
1345 + NEED_OP(2, 0);
1346 op[0] = m_pos[0];
1347 op[1] = m_pos[1];
1348 op += 2;
1349 @@ -119,10 +139,10 @@ copy_literal_run:
1350 while (unlikely(*ip == 0)) {
1351 t += 255;
1352 ip++;
1353 - NEED_IP(1);
1354 + NEED_IP(1, 0);
1355 }
1356 t += 31 + *ip++;
1357 - NEED_IP(2);
1358 + NEED_IP(2, 0);
1359 }
1360 m_pos = op - 1;
1361 next = get_unaligned_le16(ip);
1362 @@ -137,10 +157,10 @@ copy_literal_run:
1363 while (unlikely(*ip == 0)) {
1364 t += 255;
1365 ip++;
1366 - NEED_IP(1);
1367 + NEED_IP(1, 0);
1368 }
1369 t += 7 + *ip++;
1370 - NEED_IP(2);
1371 + NEED_IP(2, 0);
1372 }
1373 next = get_unaligned_le16(ip);
1374 ip += 2;
1375 @@ -154,7 +174,7 @@ copy_literal_run:
1376 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
1377 if (op - m_pos >= 8) {
1378 unsigned char *oe = op + t;
1379 - if (likely(HAVE_OP(t + 15))) {
1380 + if (likely(HAVE_OP(t, 15))) {
1381 do {
1382 COPY8(op, m_pos);
1383 op += 8;
1384 @@ -164,7 +184,7 @@ copy_literal_run:
1385 m_pos += 8;
1386 } while (op < oe);
1387 op = oe;
1388 - if (HAVE_IP(6)) {
1389 + if (HAVE_IP(6, 0)) {
1390 state = next;
1391 COPY4(op, ip);
1392 op += next;
1393 @@ -172,7 +192,7 @@ copy_literal_run:
1394 continue;
1395 }
1396 } else {
1397 - NEED_OP(t);
1398 + NEED_OP(t, 0);
1399 do {
1400 *op++ = *m_pos++;
1401 } while (op < oe);
1402 @@ -181,7 +201,7 @@ copy_literal_run:
1403 #endif
1404 {
1405 unsigned char *oe = op + t;
1406 - NEED_OP(t);
1407 + NEED_OP(t, 0);
1408 op[0] = m_pos[0];
1409 op[1] = m_pos[1];
1410 op += 2;
1411 @@ -194,15 +214,15 @@ match_next:
1412 state = next;
1413 t = next;
1414 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
1415 - if (likely(HAVE_IP(6) && HAVE_OP(4))) {
1416 + if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
1417 COPY4(op, ip);
1418 op += t;
1419 ip += t;
1420 } else
1421 #endif
1422 {
1423 - NEED_IP(t + 3);
1424 - NEED_OP(t);
1425 + NEED_IP(t, 3);
1426 + NEED_OP(t, 0);
1427 while (t > 0) {
1428 *op++ = *ip++;
1429 t--;
1430 diff --git a/lib/nlattr.c b/lib/nlattr.c
1431 index fc6754720ced..10ad042d01be 100644
1432 --- a/lib/nlattr.c
1433 +++ b/lib/nlattr.c
1434 @@ -201,8 +201,8 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
1435 }
1436
1437 if (unlikely(rem > 0))
1438 - printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
1439 - "attributes.\n", rem);
1440 + pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
1441 + rem, current->comm);
1442
1443 err = 0;
1444 errout:
1445 diff --git a/net/can/gw.c b/net/can/gw.c
1446 index 3ee690e8c7d3..de25455b4e3e 100644
1447 --- a/net/can/gw.c
1448 +++ b/net/can/gw.c
1449 @@ -784,7 +784,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh)
1450 struct cgw_job *gwj;
1451 int err = 0;
1452
1453 - if (!capable(CAP_NET_ADMIN))
1454 + if (!netlink_capable(skb, CAP_NET_ADMIN))
1455 return -EPERM;
1456
1457 if (nlmsg_len(nlh) < sizeof(*r))
1458 @@ -876,7 +876,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
1459 struct can_can_gw ccgw;
1460 int err = 0;
1461
1462 - if (!capable(CAP_NET_ADMIN))
1463 + if (!netlink_capable(skb, CAP_NET_ADMIN))
1464 return -EPERM;
1465
1466 if (nlmsg_len(nlh) < sizeof(*r))
1467 diff --git a/net/core/dev.c b/net/core/dev.c
1468 index 56383a3e5d71..cca7ae0ba915 100644
1469 --- a/net/core/dev.c
1470 +++ b/net/core/dev.c
1471 @@ -5827,6 +5827,9 @@ EXPORT_SYMBOL(unregister_netdevice_queue);
1472 /**
1473 * unregister_netdevice_many - unregister many devices
1474 * @head: list of devices
1475 + *
1476 + * Note: As most callers use a stack allocated list_head,
1477 + * we force a list_del() to make sure stack wont be corrupted later.
1478 */
1479 void unregister_netdevice_many(struct list_head *head)
1480 {
1481 @@ -5836,6 +5839,7 @@ void unregister_netdevice_many(struct list_head *head)
1482 rollback_registered_many(head);
1483 list_for_each_entry(dev, head, unreg_list)
1484 net_set_todo(dev);
1485 + list_del(head);
1486 }
1487 }
1488 EXPORT_SYMBOL(unregister_netdevice_many);
1489 @@ -6252,7 +6256,6 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
1490 }
1491 }
1492 unregister_netdevice_many(&dev_kill_list);
1493 - list_del(&dev_kill_list);
1494 rtnl_unlock();
1495 }
1496
1497 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
1498 index 87ec574ffac8..ae43dd807bb2 100644
1499 --- a/net/core/rtnetlink.c
1500 +++ b/net/core/rtnetlink.c
1501 @@ -1044,6 +1044,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1502 struct nlattr *tb[IFLA_MAX+1];
1503 u32 ext_filter_mask = 0;
1504 int err;
1505 + int hdrlen;
1506
1507 s_h = cb->args[0];
1508 s_idx = cb->args[1];
1509 @@ -1051,8 +1052,17 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1510 rcu_read_lock();
1511 cb->seq = net->dev_base_seq;
1512
1513 - if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
1514 - ifla_policy) >= 0) {
1515 + /* A hack to preserve kernel<->userspace interface.
1516 + * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
1517 + * However, before Linux v3.9 the code here assumed rtgenmsg and that's
1518 + * what iproute2 < v3.9.0 used.
1519 + * We can detect the old iproute2. Even including the IFLA_EXT_MASK
1520 + * attribute, its netlink message is shorter than struct ifinfomsg.
1521 + */
1522 + hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
1523 + sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
1524 +
1525 + if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
1526
1527 if (tb[IFLA_EXT_MASK])
1528 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
1529 @@ -1294,7 +1304,8 @@ static int do_set_master(struct net_device *dev, int ifindex)
1530 return 0;
1531 }
1532
1533 -static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
1534 +static int do_setlink(const struct sk_buff *skb,
1535 + struct net_device *dev, struct ifinfomsg *ifm,
1536 struct nlattr **tb, char *ifname, int modified)
1537 {
1538 const struct net_device_ops *ops = dev->netdev_ops;
1539 @@ -1306,7 +1317,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
1540 err = PTR_ERR(net);
1541 goto errout;
1542 }
1543 - if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) {
1544 + if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
1545 err = -EPERM;
1546 goto errout;
1547 }
1548 @@ -1560,7 +1571,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
1549 if (err < 0)
1550 goto errout;
1551
1552 - err = do_setlink(dev, ifm, tb, ifname, 0);
1553 + err = do_setlink(skb, dev, ifm, tb, ifname, 0);
1554 errout:
1555 return err;
1556 }
1557 @@ -1600,7 +1611,6 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
1558
1559 ops->dellink(dev, &list_kill);
1560 unregister_netdevice_many(&list_kill);
1561 - list_del(&list_kill);
1562 return 0;
1563 }
1564
1565 @@ -1678,7 +1688,8 @@ err:
1566 }
1567 EXPORT_SYMBOL(rtnl_create_link);
1568
1569 -static int rtnl_group_changelink(struct net *net, int group,
1570 +static int rtnl_group_changelink(const struct sk_buff *skb,
1571 + struct net *net, int group,
1572 struct ifinfomsg *ifm,
1573 struct nlattr **tb)
1574 {
1575 @@ -1687,7 +1698,7 @@ static int rtnl_group_changelink(struct net *net, int group,
1576
1577 for_each_netdev(net, dev) {
1578 if (dev->group == group) {
1579 - err = do_setlink(dev, ifm, tb, NULL, 0);
1580 + err = do_setlink(skb, dev, ifm, tb, NULL, 0);
1581 if (err < 0)
1582 return err;
1583 }
1584 @@ -1789,12 +1800,12 @@ replay:
1585 modified = 1;
1586 }
1587
1588 - return do_setlink(dev, ifm, tb, ifname, modified);
1589 + return do_setlink(skb, dev, ifm, tb, ifname, modified);
1590 }
1591
1592 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1593 if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
1594 - return rtnl_group_changelink(net,
1595 + return rtnl_group_changelink(skb, net,
1596 nla_get_u32(tb[IFLA_GROUP]),
1597 ifm, tb);
1598 return -ENODEV;
1599 @@ -1906,9 +1917,13 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
1600 struct nlattr *tb[IFLA_MAX+1];
1601 u32 ext_filter_mask = 0;
1602 u16 min_ifinfo_dump_size = 0;
1603 + int hdrlen;
1604 +
1605 + /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
1606 + hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
1607 + sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
1608
1609 - if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
1610 - ifla_policy) >= 0) {
1611 + if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
1612 if (tb[IFLA_EXT_MASK])
1613 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
1614 }
1615 @@ -2179,7 +2194,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
1616 int err = -EINVAL;
1617 __u8 *addr;
1618
1619 - if (!capable(CAP_NET_ADMIN))
1620 + if (!netlink_capable(skb, CAP_NET_ADMIN))
1621 return -EPERM;
1622
1623 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1624 @@ -2635,7 +2650,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1625 sz_idx = type>>2;
1626 kind = type&3;
1627
1628 - if (kind != 2 && !ns_capable(net->user_ns, CAP_NET_ADMIN))
1629 + if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
1630 return -EPERM;
1631
1632 if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
1633 diff --git a/net/core/sock.c b/net/core/sock.c
1634 index d743099250f4..af65d17517b8 100644
1635 --- a/net/core/sock.c
1636 +++ b/net/core/sock.c
1637 @@ -142,6 +142,55 @@
1638 static DEFINE_MUTEX(proto_list_mutex);
1639 static LIST_HEAD(proto_list);
1640
1641 +/**
1642 + * sk_ns_capable - General socket capability test
1643 + * @sk: Socket to use a capability on or through
1644 + * @user_ns: The user namespace of the capability to use
1645 + * @cap: The capability to use
1646 + *
1647 + * Test to see if the opener of the socket had when the socket was
1648 + * created and the current process has the capability @cap in the user
1649 + * namespace @user_ns.
1650 + */
1651 +bool sk_ns_capable(const struct sock *sk,
1652 + struct user_namespace *user_ns, int cap)
1653 +{
1654 + return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
1655 + ns_capable(user_ns, cap);
1656 +}
1657 +EXPORT_SYMBOL(sk_ns_capable);
1658 +
1659 +/**
1660 + * sk_capable - Socket global capability test
1661 + * @sk: Socket to use a capability on or through
1662 + * @cap: The global capbility to use
1663 + *
1664 + * Test to see if the opener of the socket had when the socket was
1665 + * created and the current process has the capability @cap in all user
1666 + * namespaces.
1667 + */
1668 +bool sk_capable(const struct sock *sk, int cap)
1669 +{
1670 + return sk_ns_capable(sk, &init_user_ns, cap);
1671 +}
1672 +EXPORT_SYMBOL(sk_capable);
1673 +
1674 +/**
1675 + * sk_net_capable - Network namespace socket capability test
1676 + * @sk: Socket to use a capability on or through
1677 + * @cap: The capability to use
1678 + *
1679 + * Test to see if the opener of the socket had the capability @cap when the
1680 + * socket was created and the current process has it over the network namespace
1681 + * the socket is a member of.
1682 + */
1683 +bool sk_net_capable(const struct sock *sk, int cap)
1684 +{
1685 + return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
1686 +}
1687 +EXPORT_SYMBOL(sk_net_capable);
1688 +
1689 +
1690 #ifdef CONFIG_MEMCG_KMEM
1691 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
1692 {
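
The three helpers added to net/core/sock.c layer on each other: sk_ns_capable() requires both that the process which opened the socket had @cap (checked against the socket file's credentials via file_ns_capable()) and that the current process has it (ns_capable()); sk_capable() and sk_net_capable() merely pin the namespace to init_user_ns or to the socket's own network namespace. A kernel-style sketch of what the composition expands to, for orientation only (not buildable outside the tree; names are taken from the hunk above):

/* sk_net_capable(sk, CAP_NET_ADMIN) expands to roughly this: */
static bool example_check(const struct sock *sk)
{
	struct user_namespace *ns = sock_net(sk)->user_ns;

	/* who opened the socket...                  ...and who is asking now */
	return file_ns_capable(sk->sk_socket->file, ns, CAP_NET_ADMIN) &&
	       ns_capable(ns, CAP_NET_ADMIN);
}
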
1693 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
1694 index 6a7fae228634..c38e7a2b5a8e 100644
1695 --- a/net/core/sock_diag.c
1696 +++ b/net/core/sock_diag.c
1697 @@ -49,7 +49,7 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
1698 }
1699 EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
1700
1701 -int sock_diag_put_filterinfo(struct sock *sk,
1702 +int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
1703 struct sk_buff *skb, int attrtype)
1704 {
1705 struct nlattr *attr;
1706 @@ -57,7 +57,7 @@ int sock_diag_put_filterinfo(struct sock *sk,
1707 unsigned int len;
1708 int err = 0;
1709
1710 - if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1711 + if (!may_report_filterinfo) {
1712 nla_reserve(skb, attrtype, 0);
1713 return 0;
1714 }
1715 diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
1716 index 40d5829ed36a..1074ffb6d533 100644
1717 --- a/net/dcb/dcbnl.c
1718 +++ b/net/dcb/dcbnl.c
1719 @@ -1670,7 +1670,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
1720 struct nlmsghdr *reply_nlh = NULL;
1721 const struct reply_func *fn;
1722
1723 - if ((nlh->nlmsg_type == RTM_SETDCB) && !capable(CAP_NET_ADMIN))
1724 + if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
1725 return -EPERM;
1726
1727 ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
1728 diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
1729 index 7d9197063ebb..b5e52100a89a 100644
1730 --- a/net/decnet/dn_dev.c
1731 +++ b/net/decnet/dn_dev.c
1732 @@ -573,7 +573,7 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
1733 struct dn_ifaddr __rcu **ifap;
1734 int err = -EINVAL;
1735
1736 - if (!capable(CAP_NET_ADMIN))
1737 + if (!netlink_capable(skb, CAP_NET_ADMIN))
1738 return -EPERM;
1739
1740 if (!net_eq(net, &init_net))
1741 @@ -617,7 +617,7 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
1742 struct dn_ifaddr *ifa;
1743 int err;
1744
1745 - if (!capable(CAP_NET_ADMIN))
1746 + if (!netlink_capable(skb, CAP_NET_ADMIN))
1747 return -EPERM;
1748
1749 if (!net_eq(net, &init_net))
1750 diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
1751 index 57dc159245ec..d332aefb0846 100644
1752 --- a/net/decnet/dn_fib.c
1753 +++ b/net/decnet/dn_fib.c
1754 @@ -505,7 +505,7 @@ static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
1755 struct nlattr *attrs[RTA_MAX+1];
1756 int err;
1757
1758 - if (!capable(CAP_NET_ADMIN))
1759 + if (!netlink_capable(skb, CAP_NET_ADMIN))
1760 return -EPERM;
1761
1762 if (!net_eq(net, &init_net))
1763 @@ -530,7 +530,7 @@ static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
1764 struct nlattr *attrs[RTA_MAX+1];
1765 int err;
1766
1767 - if (!capable(CAP_NET_ADMIN))
1768 + if (!netlink_capable(skb, CAP_NET_ADMIN))
1769 return -EPERM;
1770
1771 if (!net_eq(net, &init_net))
1772 diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
1773 index 2a7efe388344..f3dc69a41d63 100644
1774 --- a/net/decnet/netfilter/dn_rtmsg.c
1775 +++ b/net/decnet/netfilter/dn_rtmsg.c
1776 @@ -107,7 +107,7 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
1777 if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
1778 return;
1779
1780 - if (!capable(CAP_NET_ADMIN))
1781 + if (!netlink_capable(skb, CAP_NET_ADMIN))
1782 RCV_SKB_FAIL(-EPERM);
1783
1784 /* Eventually we might send routing messages too */
1785 diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
1786 index 19e36376d2a0..5f3dc1df04bf 100644
1787 --- a/net/ipv4/datagram.c
1788 +++ b/net/ipv4/datagram.c
1789 @@ -86,18 +86,26 @@ out:
1790 }
1791 EXPORT_SYMBOL(ip4_datagram_connect);
1792
1793 +/* Because UDP xmit path can manipulate sk_dst_cache without holding
1794 + * socket lock, we need to use sk_dst_set() here,
1795 + * even if we own the socket lock.
1796 + */
1797 void ip4_datagram_release_cb(struct sock *sk)
1798 {
1799 const struct inet_sock *inet = inet_sk(sk);
1800 const struct ip_options_rcu *inet_opt;
1801 __be32 daddr = inet->inet_daddr;
1802 + struct dst_entry *dst;
1803 struct flowi4 fl4;
1804 struct rtable *rt;
1805
1806 - if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0))
1807 - return;
1808 -
1809 rcu_read_lock();
1810 +
1811 + dst = __sk_dst_get(sk);
1812 + if (!dst || !dst->obsolete || dst->ops->check(dst, 0)) {
1813 + rcu_read_unlock();
1814 + return;
1815 + }
1816 inet_opt = rcu_dereference(inet->inet_opt);
1817 if (inet_opt && inet_opt->opt.srr)
1818 daddr = inet_opt->opt.faddr;
1819 @@ -105,8 +113,10 @@ void ip4_datagram_release_cb(struct sock *sk)
1820 inet->inet_saddr, inet->inet_dport,
1821 inet->inet_sport, sk->sk_protocol,
1822 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
1823 - if (!IS_ERR(rt))
1824 - __sk_dst_set(sk, &rt->dst);
1825 +
1826 + dst = !IS_ERR(rt) ? &rt->dst : NULL;
1827 + sk_dst_set(sk, dst);
1828 +
1829 rcu_read_unlock();
1830 }
1831 EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);
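
The new comment in ip4_datagram_release_cb() explains the key point: the UDP transmit path can replace sk_dst_cache without taking the socket lock, so even the lock holder must publish a rebuilt route with sk_dst_set() rather than __sk_dst_set(), and must re-validate the cached dst (dst->obsolete / dst->ops->check()) under rcu_read_lock() before deciding to rebuild it. Below is a standalone analogy in plain C11 atomics — not kernel code, and not necessarily how sk_dst_set() is implemented in this tree — of why a setter that can race with lockless updaters swaps the pointer atomically and releases only what it actually displaced:

#include <stdatomic.h>
#include <stdlib.h>

static _Atomic(int *) cache;		/* stand-in for sk->sk_dst_cache */

static void cache_set(int *newp)	/* cf. sk_dst_set()              */
{
	/* Swap first, then release exactly what we displaced.  A plain
	 * "old = cache; cache = newp; free(old);" sequence can free a value
	 * another thread installed in between, or leak our own. */
	int *old = atomic_exchange(&cache, newp);
	free(old);			/* cf. dst_release()             */
}

int main(void)
{
	cache_set(malloc(sizeof(int)));
	cache_set(malloc(sizeof(int)));	/* first entry freed exactly once */
	free(atomic_exchange(&cache, NULL));
	return 0;
}
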
1832 diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
1833 index f5cc7b331511..897b784e9c05 100644
1834 --- a/net/ipv4/ipip.c
1835 +++ b/net/ipv4/ipip.c
1836 @@ -149,13 +149,13 @@ static int ipip_err(struct sk_buff *skb, u32 info)
1837
1838 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
1839 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
1840 - t->dev->ifindex, 0, IPPROTO_IPIP, 0);
1841 + t->parms.link, 0, IPPROTO_IPIP, 0);
1842 err = 0;
1843 goto out;
1844 }
1845
1846 if (type == ICMP_REDIRECT) {
1847 - ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
1848 + ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
1849 IPPROTO_IPIP, 0);
1850 err = 0;
1851 goto out;
1852 @@ -483,4 +483,5 @@ static void __exit ipip_fini(void)
1853 module_init(ipip_init);
1854 module_exit(ipip_fini);
1855 MODULE_LICENSE("GPL");
1856 +MODULE_ALIAS_RTNL_LINK("ipip");
1857 MODULE_ALIAS_NETDEV("tunl0");
1858 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
1859 index e15d330919af..ba7d2b7ad9f9 100644
1860 --- a/net/ipv4/tcp_input.c
1861 +++ b/net/ipv4/tcp_input.c
1862 @@ -2720,13 +2720,12 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
1863 bool recovered = !before(tp->snd_una, tp->high_seq);
1864
1865 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
1866 - if (flag & FLAG_ORIG_SACK_ACKED) {
1867 - /* Step 3.b. A timeout is spurious if not all data are
1868 - * lost, i.e., never-retransmitted data are (s)acked.
1869 - */
1870 - tcp_try_undo_loss(sk, true);
1871 + /* Step 3.b. A timeout is spurious if not all data are
1872 + * lost, i.e., never-retransmitted data are (s)acked.
1873 + */
1874 + if (tcp_try_undo_loss(sk, flag & FLAG_ORIG_SACK_ACKED))
1875 return;
1876 - }
1877 +
1878 if (after(tp->snd_nxt, tp->high_seq) &&
1879 (flag & FLAG_DATA_SACKED || is_dupack)) {
1880 tp->frto = 0; /* Loss was real: 2nd part of step 3.a */
1881 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
1882 index 73d7f68da557..a0ecdf596f2f 100644
1883 --- a/net/ipv6/ip6_tunnel.c
1884 +++ b/net/ipv6/ip6_tunnel.c
1885 @@ -61,6 +61,7 @@
1886 MODULE_AUTHOR("Ville Nuorvala");
1887 MODULE_DESCRIPTION("IPv6 tunneling device");
1888 MODULE_LICENSE("GPL");
1889 +MODULE_ALIAS_RTNL_LINK("ip6tnl");
1890 MODULE_ALIAS_NETDEV("ip6tnl0");
1891
1892 #ifdef IP6_TNL_DEBUG
1893 diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
1894 index c2e73e647e44..3d2c81a66d6a 100644
1895 --- a/net/ipv6/output_core.c
1896 +++ b/net/ipv6/output_core.c
1897 @@ -9,7 +9,7 @@
1898 void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
1899 {
1900 static atomic_t ipv6_fragmentation_id;
1901 - int old, new;
1902 + int ident;
1903
1904 #if IS_ENABLED(CONFIG_IPV6)
1905 if (rt && !(rt->dst.flags & DST_NOPEER)) {
1906 @@ -25,13 +25,8 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
1907 }
1908 }
1909 #endif
1910 - do {
1911 - old = atomic_read(&ipv6_fragmentation_id);
1912 - new = old + 1;
1913 - if (!new)
1914 - new = 1;
1915 - } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
1916 - fhdr->identification = htonl(new);
1917 + ident = atomic_inc_return(&ipv6_fragmentation_id);
1918 + fhdr->identification = htonl(ident);
1919 }
1920 EXPORT_SYMBOL(ipv6_select_ident);
1921
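
The ipv6_select_ident() fallback drops the compare-and-swap retry loop, whose only extra effect was skipping the value 0, in favour of a single atomic_inc_return(). A standalone C11 illustration (not kernel code) of the two generators side by side:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int frag_id;

static int next_id_old(void)		/* cmpxchg loop, skips 0 */
{
	int old, new;
	do {
		old = atomic_load(&frag_id);
		new = old + 1;
		if (!new)
			new = 1;
	} while (!atomic_compare_exchange_strong(&frag_id, &old, new));
	return new;
}

static int next_id_new(void)		/* cf. atomic_inc_return() */
{
	return atomic_fetch_add(&frag_id, 1) + 1;
}

int main(void)
{
	printf("%d %d %d\n", next_id_old(), next_id_new(), next_id_new());
	return 0;
}
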
1922 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
1923 index 620d326e8fdd..540d58921007 100644
1924 --- a/net/ipv6/sit.c
1925 +++ b/net/ipv6/sit.c
1926 @@ -530,12 +530,12 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
1927
1928 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
1929 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
1930 - t->dev->ifindex, 0, IPPROTO_IPV6, 0);
1931 + t->parms.link, 0, IPPROTO_IPV6, 0);
1932 err = 0;
1933 goto out;
1934 }
1935 if (type == ICMP_REDIRECT) {
1936 - ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
1937 + ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
1938 IPPROTO_IPV6, 0);
1939 err = 0;
1940 goto out;
1941 @@ -1654,4 +1654,5 @@ xfrm_tunnel_failed:
1942 module_init(sit_init);
1943 module_exit(sit_cleanup);
1944 MODULE_LICENSE("GPL");
1945 +MODULE_ALIAS_RTNL_LINK("sit");
1946 MODULE_ALIAS_NETDEV("sit0");
1947 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
1948 index 514e90f470bf..2c64ab27b515 100644
1949 --- a/net/mac80211/iface.c
1950 +++ b/net/mac80211/iface.c
1951 @@ -1746,7 +1746,6 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
1952 }
1953 mutex_unlock(&local->iflist_mtx);
1954 unregister_netdevice_many(&unreg_list);
1955 - list_del(&unreg_list);
1956
1957 list_for_each_entry_safe(sdata, tmp, &wdev_list, list) {
1958 list_del(&sdata->list);
1959 diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
1960 index 572d87dc116f..0a03662bfbef 100644
1961 --- a/net/netfilter/nfnetlink.c
1962 +++ b/net/netfilter/nfnetlink.c
1963 @@ -147,7 +147,7 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1964 const struct nfnetlink_subsystem *ss;
1965 int type, err;
1966
1967 - if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1968 + if (!netlink_net_capable(skb, CAP_NET_ADMIN))
1969 return -EPERM;
1970
1971 /* All the messages must at least contain nfgenmsg */
1972 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
1973 index c9c2a8441d32..be34adde692f 100644
1974 --- a/net/netlink/af_netlink.c
1975 +++ b/net/netlink/af_netlink.c
1976 @@ -1219,7 +1219,74 @@ retry:
1977 return err;
1978 }
1979
1980 -static inline int netlink_capable(const struct socket *sock, unsigned int flag)
1981 +/**
1982 + * __netlink_ns_capable - General netlink message capability test
1983 + * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
1984 + * @user_ns: The user namespace of the capability to use
1985 + * @cap: The capability to use
1986 + *
1987 + * Test to see if the opener of the socket we received the message
1988 + * from had the capability @cap when the netlink socket was created and
1989 + * the sender of the message has it in the user namespace @user_ns.
1990 + */
1991 +bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
1992 + struct user_namespace *user_ns, int cap)
1993 +{
1994 + return ((nsp->flags & NETLINK_SKB_DST) ||
1995 + file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
1996 + ns_capable(user_ns, cap);
1997 +}
1998 +EXPORT_SYMBOL(__netlink_ns_capable);
1999 +
2000 +/**
2001 + * netlink_ns_capable - General netlink message capability test
2002 + * @skb: socket buffer holding a netlink command from userspace
2003 + * @user_ns: The user namespace of the capability to use
2004 + * @cap: The capability to use
2005 + *
2006 + * Test to see if the opener of the socket we received the message
2007 + * from had the capability @cap when the netlink socket was created and
2008 + * the sender of the message has it in the user namespace @user_ns.
2009 + */
2010 +bool netlink_ns_capable(const struct sk_buff *skb,
2011 + struct user_namespace *user_ns, int cap)
2012 +{
2013 + return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
2014 +}
2015 +EXPORT_SYMBOL(netlink_ns_capable);
2016 +
2017 +/**
2018 + * netlink_capable - Netlink global message capability test
2019 + * @skb: socket buffer holding a netlink command from userspace
2020 + * @cap: The capability to use
2021 + *
2022 + * Test to see if the opener of the socket we received the message
2023 + * from had the capability @cap when the netlink socket was created and
2024 + * the sender of the message has it in all user namespaces.
2025 + */
2026 +bool netlink_capable(const struct sk_buff *skb, int cap)
2027 +{
2028 + return netlink_ns_capable(skb, &init_user_ns, cap);
2029 +}
2030 +EXPORT_SYMBOL(netlink_capable);
2031 +
2032 +/**
2033 + * netlink_net_capable - Netlink network namespace message capability test
2034 + * @skb: socket buffer holding a netlink command from userspace
2035 + * @cap: The capability to use
2036 + *
2037 + * Test to see if the opener of the socket we received the message
2038 + * from had the capability @cap when the netlink socket was created and
2039 + * the sender of the message has it over the network namespace of
2040 + * the socket we received the message from.
2041 + */
2042 +bool netlink_net_capable(const struct sk_buff *skb, int cap)
2043 +{
2044 + return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
2045 +}
2046 +EXPORT_SYMBOL(netlink_net_capable);
2047 +
2048 +static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
2049 {
2050 return (nl_table[sock->sk->sk_protocol].flags & flag) ||
2051 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
2052 @@ -1287,7 +1354,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
2053
2054 /* Only superuser is allowed to listen multicasts */
2055 if (nladdr->nl_groups) {
2056 - if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
2057 + if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
2058 return -EPERM;
2059 err = netlink_realloc_groups(sk);
2060 if (err)
2061 @@ -1349,7 +1416,7 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
2062 return -EINVAL;
2063
2064 /* Only superuser is allowed to send multicasts */
2065 - if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
2066 + if (nladdr->nl_groups && !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
2067 return -EPERM;
2068
2069 if (!nlk->portid)
2070 @@ -1921,7 +1988,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
2071 break;
2072 case NETLINK_ADD_MEMBERSHIP:
2073 case NETLINK_DROP_MEMBERSHIP: {
2074 - if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
2075 + if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
2076 return -EPERM;
2077 err = netlink_realloc_groups(sk);
2078 if (err)
2079 @@ -2053,6 +2120,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
2080 struct sk_buff *skb;
2081 int err;
2082 struct scm_cookie scm;
2083 + u32 netlink_skb_flags = 0;
2084
2085 if (msg->msg_flags&MSG_OOB)
2086 return -EOPNOTSUPP;
2087 @@ -2072,8 +2140,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
2088 dst_group = ffs(addr->nl_groups);
2089 err = -EPERM;
2090 if ((dst_group || dst_portid) &&
2091 - !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
2092 + !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
2093 goto out;
2094 + netlink_skb_flags |= NETLINK_SKB_DST;
2095 } else {
2096 dst_portid = nlk->dst_portid;
2097 dst_group = nlk->dst_group;
2098 @@ -2103,6 +2172,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
2099 NETLINK_CB(skb).portid = nlk->portid;
2100 NETLINK_CB(skb).dst_group = dst_group;
2101 NETLINK_CB(skb).creds = siocb->scm->creds;
2102 + NETLINK_CB(skb).flags = netlink_skb_flags;
2103
2104 err = -EFAULT;
2105 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
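
This af_netlink.c hunk is the core of the series: the old netlink_capable() (a per-protocol flags check) is renamed netlink_allowed(), and a new netlink_capable()/netlink_net_capable()/netlink_ns_capable() family is introduced that checks the request skb rather than only current. The problem being closed is that a netlink command may be written by a privileged process into a socket that an unprivileged user opened (for example a setuid program whose output descriptor is a netlink socket), so checking only current's credentials is not enough; the NETLINK_SKB_DST flag additionally records whether the sender explicitly addressed the message. This is the hardening that went upstream for the netlink capability-bypass report commonly tracked as CVE-2014-0181. Every capable()/ns_capable() one-line conversion in the rest of this patch follows the pattern sketched below (hypothetical handler name, illustration only, not buildable standalone):

/* Kernel-style sketch: how a typical netlink "doit" permission check changes. */
static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* Old form: checks only the credentials of the process delivering
	 * the message.  A privileged program tricked into write()-ing to a
	 * netlink socket opened by an unprivileged user passes this check.
	 *
	 *	if (!capable(CAP_NET_ADMIN))
	 *		return -EPERM;
	 */

	/* New form: also requires that the socket's opener had CAP_NET_ADMIN,
	 * as recorded via the socket file's credentials in the skb. */
	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	return 0;
}
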
2106 diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
2107 index 393f17eea1a2..ade434b8abd8 100644
2108 --- a/net/netlink/genetlink.c
2109 +++ b/net/netlink/genetlink.c
2110 @@ -592,7 +592,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
2111 return -EOPNOTSUPP;
2112
2113 if ((ops->flags & GENL_ADMIN_PERM) &&
2114 - !capable(CAP_NET_ADMIN))
2115 + !netlink_capable(skb, CAP_NET_ADMIN))
2116 return -EPERM;
2117
2118 if (nlh->nlmsg_flags & NLM_F_DUMP) {
2119 diff --git a/net/packet/diag.c b/net/packet/diag.c
2120 index ec8b6e8a80b1..674b0a65df6c 100644
2121 --- a/net/packet/diag.c
2122 +++ b/net/packet/diag.c
2123 @@ -127,6 +127,7 @@ static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
2124
2125 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
2126 struct packet_diag_req *req,
2127 + bool may_report_filterinfo,
2128 struct user_namespace *user_ns,
2129 u32 portid, u32 seq, u32 flags, int sk_ino)
2130 {
2131 @@ -171,7 +172,8 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
2132 goto out_nlmsg_trim;
2133
2134 if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
2135 - sock_diag_put_filterinfo(sk, skb, PACKET_DIAG_FILTER))
2136 + sock_diag_put_filterinfo(may_report_filterinfo, sk, skb,
2137 + PACKET_DIAG_FILTER))
2138 goto out_nlmsg_trim;
2139
2140 return nlmsg_end(skb, nlh);
2141 @@ -187,9 +189,11 @@ static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
2142 struct packet_diag_req *req;
2143 struct net *net;
2144 struct sock *sk;
2145 + bool may_report_filterinfo;
2146
2147 net = sock_net(skb->sk);
2148 req = nlmsg_data(cb->nlh);
2149 + may_report_filterinfo = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
2150
2151 mutex_lock(&net->packet.sklist_lock);
2152 sk_for_each(sk, &net->packet.sklist) {
2153 @@ -199,6 +203,7 @@ static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
2154 goto next;
2155
2156 if (sk_diag_fill(sk, skb, req,
2157 + may_report_filterinfo,
2158 sk_user_ns(NETLINK_CB(cb->skb).sk),
2159 NETLINK_CB(cb->skb).portid,
2160 cb->nlh->nlmsg_seq, NLM_F_MULTI,
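
Together with the earlier sock_diag.c hunk, this moves the "may we expose the attached BPF filter?" decision out of sock_diag_put_filterinfo() and up to the dump loop: the capability is evaluated once against the socket that issued the dump request (cb->skb, via netlink_net_capable()) and a boolean is threaded down, instead of re-checking ns_capable() against whatever task happens to run the fill function. A rough sketch of the resulting shape (fill_one_socket() is hypothetical; the real signatures are in the hunks above):

static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* Decide once, based on who asked for the dump... */
	bool may_report_filterinfo =
		netlink_net_capable(cb->skb, CAP_NET_ADMIN);

	/* ...and hand the decision to the per-socket fill function instead
	 * of letting it call ns_capable() against the current task. */
	return fill_one_socket(skb, may_report_filterinfo);
}
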
2161 diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
2162 index dc15f4300808..b64151ade6b3 100644
2163 --- a/net/phonet/pn_netlink.c
2164 +++ b/net/phonet/pn_netlink.c
2165 @@ -70,10 +70,10 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
2166 int err;
2167 u8 pnaddr;
2168
2169 - if (!capable(CAP_NET_ADMIN))
2170 + if (!netlink_capable(skb, CAP_NET_ADMIN))
2171 return -EPERM;
2172
2173 - if (!capable(CAP_SYS_ADMIN))
2174 + if (!netlink_capable(skb, CAP_SYS_ADMIN))
2175 return -EPERM;
2176
2177 ASSERT_RTNL();
2178 @@ -233,10 +233,10 @@ static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
2179 int err;
2180 u8 dst;
2181
2182 - if (!capable(CAP_NET_ADMIN))
2183 + if (!netlink_capable(skb, CAP_NET_ADMIN))
2184 return -EPERM;
2185
2186 - if (!capable(CAP_SYS_ADMIN))
2187 + if (!netlink_capable(skb, CAP_SYS_ADMIN))
2188 return -EPERM;
2189
2190 ASSERT_RTNL();
2191 diff --git a/net/sched/act_api.c b/net/sched/act_api.c
2192 index fd7072827a40..15d46b9166de 100644
2193 --- a/net/sched/act_api.c
2194 +++ b/net/sched/act_api.c
2195 @@ -989,7 +989,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
2196 u32 portid = skb ? NETLINK_CB(skb).portid : 0;
2197 int ret = 0, ovr = 0;
2198
2199 - if ((n->nlmsg_type != RTM_GETACTION) && !capable(CAP_NET_ADMIN))
2200 + if ((n->nlmsg_type != RTM_GETACTION) && !netlink_capable(skb, CAP_NET_ADMIN))
2201 return -EPERM;
2202
2203 ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
2204 diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
2205 index 8e118af90973..2ea40d1877a6 100644
2206 --- a/net/sched/cls_api.c
2207 +++ b/net/sched/cls_api.c
2208 @@ -138,7 +138,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
2209 int err;
2210 int tp_created = 0;
2211
2212 - if ((n->nlmsg_type != RTM_GETTFILTER) && !capable(CAP_NET_ADMIN))
2213 + if ((n->nlmsg_type != RTM_GETTFILTER) && !netlink_capable(skb, CAP_NET_ADMIN))
2214 return -EPERM;
2215
2216 replay:
2217 diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
2218 index 51b968d3febb..2d2f07945c85 100644
2219 --- a/net/sched/sch_api.c
2220 +++ b/net/sched/sch_api.c
2221 @@ -1024,7 +1024,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
2222 struct Qdisc *p = NULL;
2223 int err;
2224
2225 - if ((n->nlmsg_type != RTM_GETQDISC) && !capable(CAP_NET_ADMIN))
2226 + if ((n->nlmsg_type != RTM_GETQDISC) && !netlink_capable(skb, CAP_NET_ADMIN))
2227 return -EPERM;
2228
2229 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
2230 @@ -1091,7 +1091,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
2231 struct Qdisc *q, *p;
2232 int err;
2233
2234 - if (!capable(CAP_NET_ADMIN))
2235 + if (!netlink_capable(skb, CAP_NET_ADMIN))
2236 return -EPERM;
2237
2238 replay:
2239 @@ -1431,7 +1431,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
2240 u32 qid;
2241 int err;
2242
2243 - if ((n->nlmsg_type != RTM_GETTCLASS) && !capable(CAP_NET_ADMIN))
2244 + if ((n->nlmsg_type != RTM_GETTCLASS) && !netlink_capable(skb, CAP_NET_ADMIN))
2245 return -EPERM;
2246
2247 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
2248 diff --git a/net/sctp/associola.c b/net/sctp/associola.c
2249 index 91cfd8f94a19..229b3c3fb6c9 100644
2250 --- a/net/sctp/associola.c
2251 +++ b/net/sctp/associola.c
2252 @@ -387,7 +387,7 @@ void sctp_association_free(struct sctp_association *asoc)
2253 /* Only real associations count against the endpoint, so
2254 * don't bother for if this is a temporary association.
2255 */
2256 - if (!asoc->temp) {
2257 + if (!list_empty(&asoc->asocs)) {
2258 list_del(&asoc->asocs);
2259
2260 /* Decrement the backlog value for a TCP-style listening
2261 diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
2262 index 8bcd4985d0fb..1e6081fb6078 100644
2263 --- a/net/tipc/netlink.c
2264 +++ b/net/tipc/netlink.c
2265 @@ -47,7 +47,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
2266 int hdr_space = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
2267 u16 cmd;
2268
2269 - if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
2270 + if ((req_userhdr->cmd & 0xC000) && (!netlink_capable(skb, CAP_NET_ADMIN)))
2271 cmd = TIPC_CMD_NOT_NET_ADMIN;
2272 else
2273 cmd = req_userhdr->cmd;
2274 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
2275 index 3f565e495ac6..7a70a5a5671a 100644
2276 --- a/net/xfrm/xfrm_user.c
2277 +++ b/net/xfrm/xfrm_user.c
2278 @@ -2362,7 +2362,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2279 link = &xfrm_dispatch[type];
2280
2281 /* All operations require privileges, even GET */
2282 - if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2283 + if (!netlink_net_capable(skb, CAP_NET_ADMIN))
2284 return -EPERM;
2285
2286 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
2287 diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
2288 index cdbde1762189..b9b2bebeb350 100644
2289 --- a/security/integrity/evm/evm_main.c
2290 +++ b/security/integrity/evm/evm_main.c
2291 @@ -275,12 +275,20 @@ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
2292 * @xattr_value: pointer to the new extended attribute value
2293 * @xattr_value_len: pointer to the new extended attribute value length
2294 *
2295 - * Updating 'security.evm' requires CAP_SYS_ADMIN privileges and that
2296 - * the current value is valid.
2297 + * Before allowing the 'security.evm' protected xattr to be updated,
2298 + * verify the existing value is valid. As only the kernel should have
2299 + * access to the EVM encrypted key needed to calculate the HMAC, prevent
2300 + * userspace from writing the HMAC value. Writing 'security.evm'
2301 + * requires CAP_SYS_ADMIN privileges.
2302 */
2303 int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
2304 const void *xattr_value, size_t xattr_value_len)
2305 {
2306 + const struct evm_ima_xattr_data *xattr_data = xattr_value;
2307 +
2308 + if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0)
2309 + && (xattr_data->type == EVM_XATTR_HMAC))
2310 + return -EPERM;
2311 return evm_protect_xattr(dentry, xattr_name, xattr_value,
2312 xattr_value_len);
2313 }
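
The new check rejects any attempt to store an HMAC-type value in security.evm from userspace, since only the kernel holds the EVM key needed to compute a valid HMAC; other value types still go through the usual CAP_SYS_ADMIN-gated path. A small userspace probe, not part of the patch — the numeric type byte for EVM_XATTR_HMAC comes from the kernel's integrity headers and is assumed here purely for illustration:

#include <stdio.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	unsigned char value[21] = { 0x02 /* assumed EVM_XATTR_HMAC type tag */ };

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	if (setxattr(argv[1], "security.evm", value, sizeof(value), 0) < 0)
		perror("setxattr(security.evm)");  /* EPERM expected on patched kernels */
	return 0;
}
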
2314 diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
2315 index a02e0791cf15..9da974c0f958 100644
2316 --- a/security/integrity/ima/ima_crypto.c
2317 +++ b/security/integrity/ima/ima_crypto.c
2318 @@ -24,6 +24,36 @@
2319
2320 static struct crypto_shash *ima_shash_tfm;
2321
2322 +/**
2323 + * ima_kernel_read - read file content
2324 + *
2325 + * This is a function for reading file content instead of kernel_read().
2326 + * It does not perform locking checks to ensure it cannot be blocked.
2327 + * It does not perform security checks because it is irrelevant for IMA.
2328 + *
2329 + */
2330 +static int ima_kernel_read(struct file *file, loff_t offset,
2331 + char *addr, unsigned long count)
2332 +{
2333 + mm_segment_t old_fs;
2334 + char __user *buf = addr;
2335 + ssize_t ret;
2336 +
2337 + if (!(file->f_mode & FMODE_READ))
2338 + return -EBADF;
2339 + if (!file->f_op->read && !file->f_op->aio_read)
2340 + return -EINVAL;
2341 +
2342 + old_fs = get_fs();
2343 + set_fs(get_ds());
2344 + if (file->f_op->read)
2345 + ret = file->f_op->read(file, buf, count, &offset);
2346 + else
2347 + ret = do_sync_read(file, buf, count, &offset);
2348 + set_fs(old_fs);
2349 + return ret;
2350 +}
2351 +
2352 int ima_init_crypto(void)
2353 {
2354 long rc;
2355 @@ -70,7 +100,7 @@ int ima_calc_file_hash(struct file *file, char *digest)
2356 while (offset < i_size) {
2357 int rbuf_len;
2358
2359 - rbuf_len = kernel_read(file, offset, rbuf, PAGE_SIZE);
2360 + rbuf_len = ima_kernel_read(file, offset, rbuf, PAGE_SIZE);
2361 if (rbuf_len < 0) {
2362 rc = rbuf_len;
2363 break;
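
ima_kernel_read() above is a trimmed variant of kernel_read() that calls the file's ->read()/->aio_read() directly, skipping the locking checks (so the read cannot block on them) and the security checks that are irrelevant when IMA itself is doing the measurement. The set_fs(get_ds()) dance is what allows a kernel buffer to be passed through an interface typed as __user; the core of the hunk, with explanatory comments added here, reads roughly as follows:

	mm_segment_t old_fs = get_fs();	/* remember the current user/kernel
					   address-space limit                */
	set_fs(get_ds());		/* temporarily widen it so the kernel
					   buffer passes the __user access
					   checks inside ->read()             */
	if (file->f_op->read)
		ret = file->f_op->read(file, buf, count, &offset);
	else
		ret = do_sync_read(file, buf, count, &offset);
	set_fs(old_fs);			/* always restore before returning    */
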
2364 diff --git a/sound/core/control.c b/sound/core/control.c
2365 index d8aa206e8bde..98a29b26c5f4 100644
2366 --- a/sound/core/control.c
2367 +++ b/sound/core/control.c
2368 @@ -289,6 +289,10 @@ static bool snd_ctl_remove_numid_conflict(struct snd_card *card,
2369 {
2370 struct snd_kcontrol *kctl;
2371
2372 + /* Make sure that the ids assigned to the control do not wrap around */
2373 + if (card->last_numid >= UINT_MAX - count)
2374 + card->last_numid = 0;
2375 +
2376 list_for_each_entry(kctl, &card->controls, list) {
2377 if (kctl->id.numid < card->last_numid + 1 + count &&
2378 kctl->id.numid + kctl->count > card->last_numid + 1) {
2379 @@ -331,6 +335,7 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
2380 {
2381 struct snd_ctl_elem_id id;
2382 unsigned int idx;
2383 + unsigned int count;
2384 int err = -EINVAL;
2385
2386 if (! kcontrol)
2387 @@ -338,6 +343,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
2388 if (snd_BUG_ON(!card || !kcontrol->info))
2389 goto error;
2390 id = kcontrol->id;
2391 + if (id.index > UINT_MAX - kcontrol->count)
2392 + goto error;
2393 +
2394 down_write(&card->controls_rwsem);
2395 if (snd_ctl_find_id(card, &id)) {
2396 up_write(&card->controls_rwsem);
2397 @@ -359,8 +367,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
2398 card->controls_count += kcontrol->count;
2399 kcontrol->id.numid = card->last_numid + 1;
2400 card->last_numid += kcontrol->count;
2401 + count = kcontrol->count;
2402 up_write(&card->controls_rwsem);
2403 - for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
2404 + for (idx = 0; idx < count; idx++, id.index++, id.numid++)
2405 snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
2406 return 0;
2407
2408 @@ -389,6 +398,7 @@ int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol,
2409 bool add_on_replace)
2410 {
2411 struct snd_ctl_elem_id id;
2412 + unsigned int count;
2413 unsigned int idx;
2414 struct snd_kcontrol *old;
2415 int ret;
2416 @@ -424,8 +434,9 @@ add:
2417 card->controls_count += kcontrol->count;
2418 kcontrol->id.numid = card->last_numid + 1;
2419 card->last_numid += kcontrol->count;
2420 + count = kcontrol->count;
2421 up_write(&card->controls_rwsem);
2422 - for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
2423 + for (idx = 0; idx < count; idx++, id.index++, id.numid++)
2424 snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
2425 return 0;
2426
2427 @@ -898,9 +909,9 @@ static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file,
2428 result = kctl->put(kctl, control);
2429 }
2430 if (result > 0) {
2431 + struct snd_ctl_elem_id id = control->id;
2432 up_read(&card->controls_rwsem);
2433 - snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
2434 - &control->id);
2435 + snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &id);
2436 return 0;
2437 }
2438 }
2439 @@ -992,6 +1003,7 @@ static int snd_ctl_elem_unlock(struct snd_ctl_file *file,
2440
2441 struct user_element {
2442 struct snd_ctl_elem_info info;
2443 + struct snd_card *card;
2444 void *elem_data; /* element data */
2445 unsigned long elem_data_size; /* size of element data in bytes */
2446 void *tlv_data; /* TLV data */
2447 @@ -1035,7 +1047,9 @@ static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
2448 {
2449 struct user_element *ue = kcontrol->private_data;
2450
2451 + mutex_lock(&ue->card->user_ctl_lock);
2452 memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size);
2453 + mutex_unlock(&ue->card->user_ctl_lock);
2454 return 0;
2455 }
2456
2457 @@ -1044,10 +1058,12 @@ static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
2458 {
2459 int change;
2460 struct user_element *ue = kcontrol->private_data;
2461 -
2462 +
2463 + mutex_lock(&ue->card->user_ctl_lock);
2464 change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0;
2465 if (change)
2466 memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size);
2467 + mutex_unlock(&ue->card->user_ctl_lock);
2468 return change;
2469 }
2470
2471 @@ -1067,19 +1083,32 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
2472 new_data = memdup_user(tlv, size);
2473 if (IS_ERR(new_data))
2474 return PTR_ERR(new_data);
2475 + mutex_lock(&ue->card->user_ctl_lock);
2476 change = ue->tlv_data_size != size;
2477 if (!change)
2478 change = memcmp(ue->tlv_data, new_data, size);
2479 kfree(ue->tlv_data);
2480 ue->tlv_data = new_data;
2481 ue->tlv_data_size = size;
2482 + mutex_unlock(&ue->card->user_ctl_lock);
2483 } else {
2484 - if (! ue->tlv_data_size || ! ue->tlv_data)
2485 - return -ENXIO;
2486 - if (size < ue->tlv_data_size)
2487 - return -ENOSPC;
2488 + int ret = 0;
2489 +
2490 + mutex_lock(&ue->card->user_ctl_lock);
2491 + if (!ue->tlv_data_size || !ue->tlv_data) {
2492 + ret = -ENXIO;
2493 + goto err_unlock;
2494 + }
2495 + if (size < ue->tlv_data_size) {
2496 + ret = -ENOSPC;
2497 + goto err_unlock;
2498 + }
2499 if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
2500 - return -EFAULT;
2501 + ret = -EFAULT;
2502 +err_unlock:
2503 + mutex_unlock(&ue->card->user_ctl_lock);
2504 + if (ret)
2505 + return ret;
2506 }
2507 return change;
2508 }
2509 @@ -1137,8 +1166,6 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
2510 struct user_element *ue;
2511 int idx, err;
2512
2513 - if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS)
2514 - return -ENOMEM;
2515 if (info->count < 1)
2516 return -EINVAL;
2517 access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
2518 @@ -1147,21 +1174,16 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
2519 SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
2520 info->id.numid = 0;
2521 memset(&kctl, 0, sizeof(kctl));
2522 - down_write(&card->controls_rwsem);
2523 - _kctl = snd_ctl_find_id(card, &info->id);
2524 - err = 0;
2525 - if (_kctl) {
2526 - if (replace)
2527 - err = snd_ctl_remove(card, _kctl);
2528 - else
2529 - err = -EBUSY;
2530 - } else {
2531 - if (replace)
2532 - err = -ENOENT;
2533 +
2534 + if (replace) {
2535 + err = snd_ctl_remove_user_ctl(file, &info->id);
2536 + if (err)
2537 + return err;
2538 }
2539 - up_write(&card->controls_rwsem);
2540 - if (err < 0)
2541 - return err;
2542 +
2543 + if (card->user_ctl_count >= MAX_USER_CONTROLS)
2544 + return -ENOMEM;
2545 +
2546 memcpy(&kctl.id, &info->id, sizeof(info->id));
2547 kctl.count = info->owner ? info->owner : 1;
2548 access |= SNDRV_CTL_ELEM_ACCESS_USER;
2549 @@ -1211,6 +1233,7 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
2550 ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
2551 if (ue == NULL)
2552 return -ENOMEM;
2553 + ue->card = card;
2554 ue->info = *info;
2555 ue->info.access = 0;
2556 ue->elem_data = (char *)ue + sizeof(*ue);
2557 @@ -1322,8 +1345,9 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
2558 }
2559 err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv);
2560 if (err > 0) {
2561 + struct snd_ctl_elem_id id = kctl->id;
2562 up_read(&card->controls_rwsem);
2563 - snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &kctl->id);
2564 + snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &id);
2565 return 0;
2566 }
2567 } else {
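
The sound/core/control.c changes close several small races: numid allocation is prevented from wrapping past UINT_MAX, the element count and ids are copied before controls_rwsem is dropped (after which the kcontrol may be freed by a concurrent removal), user-defined elements get their data and TLV accesses serialized by the new card->user_ctl_lock, and "replace" requests reuse snd_ctl_remove_user_ctl() instead of open-coding the lookup. The wraparound guard is easiest to see with plain unsigned arithmetic; a standalone illustration (not kernel code):

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned int last_numid = UINT_MAX - 1;	/* nearly exhausted id space */
	unsigned int count = 4;			/* ids needed by one control */

	/* Without the guard the range [last_numid + 1, last_numid + count]
	 * wraps through 0 and can collide with numids already handed out. */
	printf("unguarded range end: %u\n", last_numid + count);  /* wraps to 2 */

	if (last_numid >= UINT_MAX - count)	/* cf. the check added above */
		last_numid = 0;
	printf("guarded next id:     %u\n", last_numid + 1);
	return 0;
}
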
2568 diff --git a/sound/core/init.c b/sound/core/init.c
2569 index 6ef06400dfc8..27791a58e448 100644
2570 --- a/sound/core/init.c
2571 +++ b/sound/core/init.c
2572 @@ -208,6 +208,7 @@ int snd_card_create(int idx, const char *xid,
2573 INIT_LIST_HEAD(&card->devices);
2574 init_rwsem(&card->controls_rwsem);
2575 rwlock_init(&card->ctl_files_rwlock);
2576 + mutex_init(&card->user_ctl_lock);
2577 INIT_LIST_HEAD(&card->controls);
2578 INIT_LIST_HEAD(&card->ctl_files);
2579 spin_lock_init(&card->files_lock);
2580 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2581 index 0923f09df503..0b85e857f1c7 100644
2582 --- a/sound/pci/hda/patch_realtek.c
2583 +++ b/sound/pci/hda/patch_realtek.c
2584 @@ -3356,6 +3356,7 @@ enum {
2585 ALC269_FIXUP_STEREO_DMIC,
2586 ALC269_FIXUP_QUANTA_MUTE,
2587 ALC269_FIXUP_LIFEBOOK,
2588 + ALC269_FIXUP_LIFEBOOK_EXTMIC,
2589 ALC269_FIXUP_AMIC,
2590 ALC269_FIXUP_DMIC,
2591 ALC269VB_FIXUP_AMIC,
2592 @@ -3463,6 +3464,13 @@ static const struct hda_fixup alc269_fixups[] = {
2593 .chained = true,
2594 .chain_id = ALC269_FIXUP_QUANTA_MUTE
2595 },
2596 + [ALC269_FIXUP_LIFEBOOK_EXTMIC] = {
2597 + .type = HDA_FIXUP_PINS,
2598 + .v.pins = (const struct hda_pintbl[]) {
2599 + { 0x19, 0x01a1903c }, /* headset mic, with jack detect */
2600 + { }
2601 + },
2602 + },
2603 [ALC269_FIXUP_AMIC] = {
2604 .type = HDA_FIXUP_PINS,
2605 .v.pins = (const struct hda_pintbl[]) {
2606 @@ -3713,6 +3721,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2607 SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
2608 SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
2609 SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
2610 + SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
2611 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
2612 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
2613 SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
2614 @@ -4664,6 +4673,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
2615 { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 },
2616 { .id = 0x10ec0671, .name = "ALC671", .patch = patch_alc662 },
2617 { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 },
2618 + { .id = 0x10ec0867, .name = "ALC891", .patch = patch_alc882 },
2619 { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },
2620 { .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 },
2621 { .id = 0x10ec0883, .name = "ALC883", .patch = patch_alc882 },
2622 diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
2623 index 4fdcc1cefc25..9b7746c9546f 100644
2624 --- a/sound/soc/codecs/max98090.c
2625 +++ b/sound/soc/codecs/max98090.c
2626 @@ -255,6 +255,7 @@ static struct reg_default max98090_reg[] = {
2627 static bool max98090_volatile_register(struct device *dev, unsigned int reg)
2628 {
2629 switch (reg) {
2630 + case M98090_REG_SOFTWARE_RESET:
2631 case M98090_REG_DEVICE_STATUS:
2632 case M98090_REG_JACK_STATUS:
2633 case M98090_REG_REVISION_ID:
2634 @@ -2343,6 +2344,8 @@ static int max98090_runtime_resume(struct device *dev)
2635
2636 regcache_cache_only(max98090->regmap, false);
2637
2638 + max98090_reset(max98090);
2639 +
2640 regcache_sync(max98090->regmap);
2641
2642 return 0;