Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.15/0101-3.15.2-all-fixes.patch



Revision 2445
Fri Jun 27 08:39:52 2014 UTC by niro
File size: 81015 bytes
-linux-3.15.2
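
For reference, a minimal sketch of how a stable-series patch like this is typically applied (assumptions: a clean linux-3.15.1 source tree and the patch file saved next to it; the a/ and b/ path prefixes in the diff imply -p1):

    cd linux-3.15.1
    patch -p1 < ../0101-3.15.2-all-fixes.patch   # bumps SUBLEVEL from 1 to 2 (see the Makefile hunk below)
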
1 diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
2 index f1c5cc9d17a8..4c3efe434806 100644
3 --- a/Documentation/ABI/testing/ima_policy
4 +++ b/Documentation/ABI/testing/ima_policy
5 @@ -23,7 +23,7 @@ Description:
6 [fowner]]
7 lsm: [[subj_user=] [subj_role=] [subj_type=]
8 [obj_user=] [obj_role=] [obj_type=]]
9 - option: [[appraise_type=]]
10 + option: [[appraise_type=]] [permit_directio]
11
12 base: func:= [BPRM_CHECK][MMAP_CHECK][FILE_CHECK][MODULE_CHECK]
13 mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
14 diff --git a/Makefile b/Makefile
15 index e2846acd2841..475e0853a2f4 100644
16 --- a/Makefile
17 +++ b/Makefile
18 @@ -1,6 +1,6 @@
19 VERSION = 3
20 PATCHLEVEL = 15
21 -SUBLEVEL = 1
22 +SUBLEVEL = 2
23 EXTRAVERSION =
24 NAME = Shuffling Zombie Juror
25
26 diff --git a/arch/arm/mach-at91/sysirq_mask.c b/arch/arm/mach-at91/sysirq_mask.c
27 index 2ba694f9626b..f8bc3511a8c8 100644
28 --- a/arch/arm/mach-at91/sysirq_mask.c
29 +++ b/arch/arm/mach-at91/sysirq_mask.c
30 @@ -25,24 +25,28 @@
31
32 #include "generic.h"
33
34 -#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */
35 -#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */
36 +#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */
37 +#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */
38 +#define AT91_RTC_IRQ_MASK 0x1f /* Available IRQs mask */
39
40 void __init at91_sysirq_mask_rtc(u32 rtc_base)
41 {
42 void __iomem *base;
43 - u32 mask;
44
45 base = ioremap(rtc_base, 64);
46 if (!base)
47 return;
48
49 - mask = readl_relaxed(base + AT91_RTC_IMR);
50 - if (mask) {
51 - pr_info("AT91: Disabling rtc irq\n");
52 - writel_relaxed(mask, base + AT91_RTC_IDR);
53 - (void)readl_relaxed(base + AT91_RTC_IMR); /* flush */
54 - }
55 + /*
56 + * sam9x5 SoCs have the following errata:
57 + * "RTC: Interrupt Mask Register cannot be used
58 + * Interrupt Mask Register read always returns 0."
59 + *
60 + * Hence we're not relying on IMR values to disable
61 + * interrupts.
62 + */
63 + writel_relaxed(AT91_RTC_IRQ_MASK, base + AT91_RTC_IDR);
64 + (void)readl_relaxed(base + AT91_RTC_IMR); /* flush */
65
66 iounmap(base);
67 }
68 diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
69 index da5186fbd77a..5efce56f0df0 100644
70 --- a/arch/mips/kvm/kvm_mips.c
71 +++ b/arch/mips/kvm/kvm_mips.c
72 @@ -304,7 +304,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
73 if (cpu_has_veic || cpu_has_vint) {
74 size = 0x200 + VECTORSPACING * 64;
75 } else {
76 - size = 0x200;
77 + size = 0x4000;
78 }
79
80 /* Save Linux EBASE */
81 diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
82 index 200a8f9390b6..0c734baea2d4 100644
83 --- a/arch/s390/kvm/interrupt.c
84 +++ b/arch/s390/kvm/interrupt.c
85 @@ -900,7 +900,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
86 return 0;
87 }
88
89 -static void clear_floating_interrupts(struct kvm *kvm)
90 +void kvm_s390_clear_float_irqs(struct kvm *kvm)
91 {
92 struct kvm_s390_float_interrupt *fi;
93 struct kvm_s390_interrupt_info *n, *inti = NULL;
94 @@ -1246,7 +1246,7 @@ static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
95 break;
96 case KVM_DEV_FLIC_CLEAR_IRQS:
97 r = 0;
98 - clear_floating_interrupts(dev->kvm);
99 + kvm_s390_clear_float_irqs(dev->kvm);
100 break;
101 case KVM_DEV_FLIC_APF_ENABLE:
102 dev->kvm->arch.gmap->pfault_enabled = 1;
103 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
104 index 9ae6664ff08c..6c3699ec998e 100644
105 --- a/arch/s390/kvm/kvm-s390.c
106 +++ b/arch/s390/kvm/kvm-s390.c
107 @@ -322,6 +322,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
108 {
109 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
110 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
111 + kvm_s390_clear_local_irqs(vcpu);
112 kvm_clear_async_pf_completion_queue(vcpu);
113 if (!kvm_is_ucontrol(vcpu->kvm)) {
114 clear_bit(63 - vcpu->vcpu_id,
115 @@ -372,6 +373,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
116 if (!kvm_is_ucontrol(kvm))
117 gmap_free(kvm->arch.gmap);
118 kvm_s390_destroy_adapters(kvm);
119 + kvm_s390_clear_float_irqs(kvm);
120 }
121
122 /* Section: vcpu related */
123 diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
124 index 3c1e2274d9ea..604872125309 100644
125 --- a/arch/s390/kvm/kvm-s390.h
126 +++ b/arch/s390/kvm/kvm-s390.h
127 @@ -130,6 +130,7 @@ void kvm_s390_tasklet(unsigned long parm);
128 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
129 void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
130 void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
131 +void kvm_s390_clear_float_irqs(struct kvm *kvm);
132 int __must_check kvm_s390_inject_vm(struct kvm *kvm,
133 struct kvm_s390_interrupt *s390int);
134 int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
135 diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
136 index a82c6b2a9780..49cee4af16f4 100644
137 --- a/arch/sparc/net/bpf_jit_comp.c
138 +++ b/arch/sparc/net/bpf_jit_comp.c
139 @@ -83,9 +83,9 @@ static void bpf_flush_icache(void *start_, void *end_)
140 #define BNE (F2(0, 2) | CONDNE)
141
142 #ifdef CONFIG_SPARC64
143 -#define BNE_PTR (F2(0, 1) | CONDNE | (2 << 20))
144 +#define BE_PTR (F2(0, 1) | CONDE | (2 << 20))
145 #else
146 -#define BNE_PTR BNE
147 +#define BE_PTR BE
148 #endif
149
150 #define SETHI(K, REG) \
151 @@ -600,7 +600,7 @@ void bpf_jit_compile(struct sk_filter *fp)
152 case BPF_S_ANC_IFINDEX:
153 emit_skb_loadptr(dev, r_A);
154 emit_cmpi(r_A, 0);
155 - emit_branch(BNE_PTR, cleanup_addr + 4);
156 + emit_branch(BE_PTR, cleanup_addr + 4);
157 emit_nop();
158 emit_load32(r_A, struct net_device, ifindex, r_A);
159 break;
160 @@ -613,7 +613,7 @@ void bpf_jit_compile(struct sk_filter *fp)
161 case BPF_S_ANC_HATYPE:
162 emit_skb_loadptr(dev, r_A);
163 emit_cmpi(r_A, 0);
164 - emit_branch(BNE_PTR, cleanup_addr + 4);
165 + emit_branch(BE_PTR, cleanup_addr + 4);
166 emit_nop();
167 emit_load16(r_A, struct net_device, type, r_A);
168 break;
169 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
170 index 9736529ade08..006911858174 100644
171 --- a/arch/x86/kvm/lapic.c
172 +++ b/arch/x86/kvm/lapic.c
173 @@ -360,6 +360,8 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
174
175 static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
176 {
177 + /* Note that we never get here with APIC virtualization enabled. */
178 +
179 if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
180 ++apic->isr_count;
181 BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
182 @@ -371,12 +373,48 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
183 apic->highest_isr_cache = vec;
184 }
185
186 +static inline int apic_find_highest_isr(struct kvm_lapic *apic)
187 +{
188 + int result;
189 +
190 + /*
191 + * Note that isr_count is always 1, and highest_isr_cache
192 + * is always -1, with APIC virtualization enabled.
193 + */
194 + if (!apic->isr_count)
195 + return -1;
196 + if (likely(apic->highest_isr_cache != -1))
197 + return apic->highest_isr_cache;
198 +
199 + result = find_highest_vector(apic->regs + APIC_ISR);
200 + ASSERT(result == -1 || result >= 16);
201 +
202 + return result;
203 +}
204 +
205 static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
206 {
207 - if (__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
208 + struct kvm_vcpu *vcpu;
209 + if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
210 + return;
211 +
212 + vcpu = apic->vcpu;
213 +
214 + /*
215 + * We do get here for APIC virtualization enabled if the guest
216 + * uses the Hyper-V APIC enlightenment. In this case we may need
217 + * to trigger a new interrupt delivery by writing the SVI field;
218 + * on the other hand isr_count and highest_isr_cache are unused
219 + * and must be left alone.
220 + */
221 + if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
222 + kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
223 + apic_find_highest_isr(apic));
224 + else {
225 --apic->isr_count;
226 - BUG_ON(apic->isr_count < 0);
227 - apic->highest_isr_cache = -1;
228 + BUG_ON(apic->isr_count < 0);
229 + apic->highest_isr_cache = -1;
230 + }
231 }
232
233 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
234 @@ -456,22 +494,6 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
235 __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
236 }
237
238 -static inline int apic_find_highest_isr(struct kvm_lapic *apic)
239 -{
240 - int result;
241 -
242 - /* Note that isr_count is always 1 with vid enabled */
243 - if (!apic->isr_count)
244 - return -1;
245 - if (likely(apic->highest_isr_cache != -1))
246 - return apic->highest_isr_cache;
247 -
248 - result = find_highest_vector(apic->regs + APIC_ISR);
249 - ASSERT(result == -1 || result >= 16);
250 -
251 - return result;
252 -}
253 -
254 void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr)
255 {
256 struct kvm_lapic *apic = vcpu->arch.apic;
257 @@ -1605,6 +1627,8 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
258 int vector = kvm_apic_has_interrupt(vcpu);
259 struct kvm_lapic *apic = vcpu->arch.apic;
260
261 + /* Note that we never get here with APIC virtualization enabled. */
262 +
263 if (vector == -1)
264 return -1;
265
266 diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
267 index 2e7801af466e..05827eccc53a 100644
268 --- a/drivers/hv/connection.c
269 +++ b/drivers/hv/connection.c
270 @@ -224,8 +224,8 @@ cleanup:
271 vmbus_connection.int_page = NULL;
272 }
273
274 - free_pages((unsigned long)vmbus_connection.monitor_pages[0], 1);
275 - free_pages((unsigned long)vmbus_connection.monitor_pages[1], 1);
276 + free_pages((unsigned long)vmbus_connection.monitor_pages[0], 0);
277 + free_pages((unsigned long)vmbus_connection.monitor_pages[1], 0);
278 vmbus_connection.monitor_pages[0] = NULL;
279 vmbus_connection.monitor_pages[1] = NULL;
280
281 diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
282 index 7e6d78dc9437..5e90c5d771a7 100644
283 --- a/drivers/hv/hv_balloon.c
284 +++ b/drivers/hv/hv_balloon.c
285 @@ -19,6 +19,7 @@
286 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
287
288 #include <linux/kernel.h>
289 +#include <linux/jiffies.h>
290 #include <linux/mman.h>
291 #include <linux/delay.h>
292 #include <linux/init.h>
293 @@ -459,6 +460,11 @@ static bool do_hot_add;
294 */
295 static uint pressure_report_delay = 45;
296
297 +/*
298 + * The last time we posted a pressure report to host.
299 + */
300 +static unsigned long last_post_time;
301 +
302 module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
303 MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
304
305 @@ -542,6 +548,7 @@ struct hv_dynmem_device {
306
307 static struct hv_dynmem_device dm_device;
308
309 +static void post_status(struct hv_dynmem_device *dm);
310 #ifdef CONFIG_MEMORY_HOTPLUG
311
312 static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
313 @@ -612,7 +619,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
314 * have not been "onlined" within the allowed time.
315 */
316 wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
317 -
318 + post_status(&dm_device);
319 }
320
321 return;
322 @@ -951,11 +958,17 @@ static void post_status(struct hv_dynmem_device *dm)
323 {
324 struct dm_status status;
325 struct sysinfo val;
326 + unsigned long now = jiffies;
327 + unsigned long last_post = last_post_time;
328
329 if (pressure_report_delay > 0) {
330 --pressure_report_delay;
331 return;
332 }
333 +
334 + if (!time_after(now, (last_post_time + HZ)))
335 + return;
336 +
337 si_meminfo(&val);
338 memset(&status, 0, sizeof(struct dm_status));
339 status.hdr.type = DM_STATUS_REPORT;
340 @@ -983,6 +996,14 @@ static void post_status(struct hv_dynmem_device *dm)
341 if (status.hdr.trans_id != atomic_read(&trans_id))
342 return;
343
344 + /*
345 + * If the last post time that we sampled has changed,
346 + * we have raced, don't post the status.
347 + */
348 + if (last_post != last_post_time)
349 + return;
350 +
351 + last_post_time = jiffies;
352 vmbus_sendpacket(dm->dev->channel, &status,
353 sizeof(struct dm_status),
354 (unsigned long)NULL,
355 @@ -1117,7 +1138,7 @@ static void balloon_up(struct work_struct *dummy)
356
357 if (ret == -EAGAIN)
358 msleep(20);
359 -
360 + post_status(&dm_device);
361 } while (ret == -EAGAIN);
362
363 if (ret) {
364 @@ -1144,8 +1165,10 @@ static void balloon_down(struct hv_dynmem_device *dm,
365 struct dm_unballoon_response resp;
366 int i;
367
368 - for (i = 0; i < range_count; i++)
369 + for (i = 0; i < range_count; i++) {
370 free_balloon_pages(dm, &range_array[i]);
371 + post_status(&dm_device);
372 + }
373
374 if (req->more_pages == 1)
375 return;
376 diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
377 index 89777ed9abd8..be0b2accf895 100644
378 --- a/drivers/iio/adc/at91_adc.c
379 +++ b/drivers/iio/adc/at91_adc.c
380 @@ -322,12 +322,11 @@ static int at91_adc_channel_init(struct iio_dev *idev)
381 return idev->num_channels;
382 }
383
384 -static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
385 +static int at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
386 struct at91_adc_trigger *triggers,
387 const char *trigger_name)
388 {
389 struct at91_adc_state *st = iio_priv(idev);
390 - u8 value = 0;
391 int i;
392
393 for (i = 0; i < st->trigger_number; i++) {
394 @@ -340,15 +339,16 @@ static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
395 return -ENOMEM;
396
397 if (strcmp(trigger_name, name) == 0) {
398 - value = triggers[i].value;
399 kfree(name);
400 - break;
401 + if (triggers[i].value == 0)
402 + return -EINVAL;
403 + return triggers[i].value;
404 }
405
406 kfree(name);
407 }
408
409 - return value;
410 + return -EINVAL;
411 }
412
413 static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
414 @@ -358,14 +358,14 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
415 struct iio_buffer *buffer = idev->buffer;
416 struct at91_adc_reg_desc *reg = st->registers;
417 u32 status = at91_adc_readl(st, reg->trigger_register);
418 - u8 value;
419 + int value;
420 u8 bit;
421
422 value = at91_adc_get_trigger_value_by_name(idev,
423 st->trigger_list,
424 idev->trig->name);
425 - if (value == 0)
426 - return -EINVAL;
427 + if (value < 0)
428 + return value;
429
430 if (state) {
431 st->buffer = kmalloc(idev->scan_bytes, GFP_KERNEL);
432 diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
433 index 9cf3229a7272..1b3b74be5c20 100644
434 --- a/drivers/iio/adc/max1363.c
435 +++ b/drivers/iio/adc/max1363.c
436 @@ -1252,8 +1252,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
437 .num_modes = ARRAY_SIZE(max1238_mode_list),
438 .default_mode = s0to11,
439 .info = &max1238_info,
440 - .channels = max1238_channels,
441 - .num_channels = ARRAY_SIZE(max1238_channels),
442 + .channels = max1038_channels,
443 + .num_channels = ARRAY_SIZE(max1038_channels),
444 },
445 [max11605] = {
446 .bits = 8,
447 @@ -1262,8 +1262,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
448 .num_modes = ARRAY_SIZE(max1238_mode_list),
449 .default_mode = s0to11,
450 .info = &max1238_info,
451 - .channels = max1238_channels,
452 - .num_channels = ARRAY_SIZE(max1238_channels),
453 + .channels = max1038_channels,
454 + .num_channels = ARRAY_SIZE(max1038_channels),
455 },
456 [max11606] = {
457 .bits = 10,
458 @@ -1312,8 +1312,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
459 .num_modes = ARRAY_SIZE(max1238_mode_list),
460 .default_mode = s0to11,
461 .info = &max1238_info,
462 - .channels = max1238_channels,
463 - .num_channels = ARRAY_SIZE(max1238_channels),
464 + .channels = max1138_channels,
465 + .num_channels = ARRAY_SIZE(max1138_channels),
466 },
467 [max11611] = {
468 .bits = 10,
469 @@ -1322,8 +1322,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
470 .num_modes = ARRAY_SIZE(max1238_mode_list),
471 .default_mode = s0to11,
472 .info = &max1238_info,
473 - .channels = max1238_channels,
474 - .num_channels = ARRAY_SIZE(max1238_channels),
475 + .channels = max1138_channels,
476 + .num_channels = ARRAY_SIZE(max1138_channels),
477 },
478 [max11612] = {
479 .bits = 12,
480 diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
481 index 6989c16aec2b..b58d6302521f 100644
482 --- a/drivers/iio/adc/men_z188_adc.c
483 +++ b/drivers/iio/adc/men_z188_adc.c
484 @@ -121,8 +121,8 @@ static int men_z188_probe(struct mcb_device *dev,
485 indio_dev->num_channels = ARRAY_SIZE(z188_adc_iio_channels);
486
487 mem = mcb_request_mem(dev, "z188-adc");
488 - if (!mem)
489 - return -ENOMEM;
490 + if (IS_ERR(mem))
491 + return PTR_ERR(mem);
492
493 adc->base = ioremap(mem->start, resource_size(mem));
494 if (adc->base == NULL)
495 diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
496 index 74866d1efd1b..2a524acabec8 100644
497 --- a/drivers/iio/magnetometer/ak8975.c
498 +++ b/drivers/iio/magnetometer/ak8975.c
499 @@ -352,8 +352,6 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
500 {
501 struct ak8975_data *data = iio_priv(indio_dev);
502 struct i2c_client *client = data->client;
503 - u16 meas_reg;
504 - s16 raw;
505 int ret;
506
507 mutex_lock(&data->lock);
508 @@ -401,16 +399,11 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
509 dev_err(&client->dev, "Read axis data fails\n");
510 goto exit;
511 }
512 - meas_reg = ret;
513
514 mutex_unlock(&data->lock);
515
516 - /* Endian conversion of the measured values. */
517 - raw = (s16) (le16_to_cpu(meas_reg));
518 -
519 /* Clamp to valid range. */
520 - raw = clamp_t(s16, raw, -4096, 4095);
521 - *val = raw;
522 + *val = clamp_t(s16, ret, -4096, 4095);
523 return IIO_VAL_INT;
524
525 exit:
526 diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
527 index ba6d0c520e63..01b2e0b18878 100644
528 --- a/drivers/iio/pressure/mpl3115.c
529 +++ b/drivers/iio/pressure/mpl3115.c
530 @@ -98,7 +98,7 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
531 mutex_unlock(&data->lock);
532 if (ret < 0)
533 return ret;
534 - *val = sign_extend32(be32_to_cpu(tmp) >> 12, 23);
535 + *val = be32_to_cpu(tmp) >> 12;
536 return IIO_VAL_INT;
537 case IIO_TEMP: /* in 0.0625 celsius / LSB */
538 mutex_lock(&data->lock);
539 @@ -112,7 +112,7 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
540 mutex_unlock(&data->lock);
541 if (ret < 0)
542 return ret;
543 - *val = sign_extend32(be32_to_cpu(tmp) >> 20, 15);
544 + *val = sign_extend32(be32_to_cpu(tmp) >> 20, 11);
545 return IIO_VAL_INT;
546 default:
547 return -EINVAL;
548 @@ -185,7 +185,7 @@ static const struct iio_chan_spec mpl3115_channels[] = {
549 BIT(IIO_CHAN_INFO_SCALE),
550 .scan_index = 0,
551 .scan_type = {
552 - .sign = 's',
553 + .sign = 'u',
554 .realbits = 20,
555 .storagebits = 32,
556 .shift = 12,
557 diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
558 index 6a9509ccd33b..08ed9a30c3a7 100644
559 --- a/drivers/net/ethernet/renesas/sh_eth.c
560 +++ b/drivers/net/ethernet/renesas/sh_eth.c
561 @@ -307,6 +307,27 @@ static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
562 };
563
564 static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
565 + [EDMR] = 0x0000,
566 + [EDTRR] = 0x0004,
567 + [EDRRR] = 0x0008,
568 + [TDLAR] = 0x000c,
569 + [RDLAR] = 0x0010,
570 + [EESR] = 0x0014,
571 + [EESIPR] = 0x0018,
572 + [TRSCER] = 0x001c,
573 + [RMFCR] = 0x0020,
574 + [TFTR] = 0x0024,
575 + [FDR] = 0x0028,
576 + [RMCR] = 0x002c,
577 + [EDOCR] = 0x0030,
578 + [FCFTR] = 0x0034,
579 + [RPADIR] = 0x0038,
580 + [TRIMD] = 0x003c,
581 + [RBWAR] = 0x0040,
582 + [RDFAR] = 0x0044,
583 + [TBRAR] = 0x004c,
584 + [TDFAR] = 0x0050,
585 +
586 [ECMR] = 0x0160,
587 [ECSR] = 0x0164,
588 [ECSIPR] = 0x0168,
589 @@ -546,7 +567,6 @@ static struct sh_eth_cpu_data sh7757_data = {
590 .register_type = SH_ETH_REG_FAST_SH4,
591
592 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
593 - .rmcr_value = RMCR_RNC,
594
595 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
596 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
597 @@ -624,7 +644,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
598 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
599 EESR_TDE | EESR_ECI,
600 .fdr_value = 0x0000072f,
601 - .rmcr_value = RMCR_RNC,
602
603 .irq_flags = IRQF_SHARED,
604 .apr = 1,
605 @@ -752,7 +771,6 @@ static struct sh_eth_cpu_data r8a7740_data = {
606 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
607 EESR_TDE | EESR_ECI,
608 .fdr_value = 0x0000070f,
609 - .rmcr_value = RMCR_RNC,
610
611 .apr = 1,
612 .mpr = 1,
613 @@ -784,7 +802,6 @@ static struct sh_eth_cpu_data r7s72100_data = {
614 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
615 EESR_TDE | EESR_ECI,
616 .fdr_value = 0x0000070f,
617 - .rmcr_value = RMCR_RNC,
618
619 .no_psr = 1,
620 .apr = 1,
621 @@ -833,9 +850,6 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
622 if (!cd->fdr_value)
623 cd->fdr_value = DEFAULT_FDR_INIT;
624
625 - if (!cd->rmcr_value)
626 - cd->rmcr_value = DEFAULT_RMCR_VALUE;
627 -
628 if (!cd->tx_check)
629 cd->tx_check = DEFAULT_TX_CHECK;
630
631 @@ -1287,8 +1301,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
632 sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
633 sh_eth_write(ndev, 0, TFTR);
634
635 - /* Frame recv control */
636 - sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
637 + /* Frame recv control (enable multiple-packets per rx irq) */
638 + sh_eth_write(ndev, RMCR_RNC, RMCR);
639
640 sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
641
642 diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
643 index d55e37cd5fec..b37c427144ee 100644
644 --- a/drivers/net/ethernet/renesas/sh_eth.h
645 +++ b/drivers/net/ethernet/renesas/sh_eth.h
646 @@ -319,7 +319,6 @@ enum TD_STS_BIT {
647 enum RMCR_BIT {
648 RMCR_RNC = 0x00000001,
649 };
650 -#define DEFAULT_RMCR_VALUE 0x00000000
651
652 /* ECMR */
653 enum FELIC_MODE_BIT {
654 @@ -466,7 +465,6 @@ struct sh_eth_cpu_data {
655 unsigned long fdr_value;
656 unsigned long fcftr_value;
657 unsigned long rpadir_value;
658 - unsigned long rmcr_value;
659
660 /* interrupt checking mask */
661 unsigned long tx_check;
662 diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
663 index 4d3f119b67b3..afb94aa2c15e 100644
664 --- a/drivers/net/ethernet/sfc/io.h
665 +++ b/drivers/net/ethernet/sfc/io.h
666 @@ -66,10 +66,17 @@
667 #define EFX_USE_QWORD_IO 1
668 #endif
669
670 +/* Hardware issue requires that only 64-bit naturally aligned writes
671 + * are seen by hardware. Its not strictly necessary to restrict to
672 + * x86_64 arch, but done for safety since unusual write combining behaviour
673 + * can break PIO.
674 + */
675 +#ifdef CONFIG_X86_64
676 /* PIO is a win only if write-combining is possible */
677 #ifdef ARCH_HAS_IOREMAP_WC
678 #define EFX_USE_PIO 1
679 #endif
680 +#endif
681
682 #ifdef EFX_USE_QWORD_IO
683 static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
684 diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
685 index fa9475300411..ede8dcca0ff3 100644
686 --- a/drivers/net/ethernet/sfc/tx.c
687 +++ b/drivers/net/ethernet/sfc/tx.c
688 @@ -189,6 +189,18 @@ struct efx_short_copy_buffer {
689 u8 buf[L1_CACHE_BYTES];
690 };
691
692 +/* Copy in explicit 64-bit writes. */
693 +static void efx_memcpy_64(void __iomem *dest, void *src, size_t len)
694 +{
695 + u64 *src64 = src;
696 + u64 __iomem *dest64 = dest;
697 + size_t l64 = len / 8;
698 + size_t i;
699 +
700 + for (i = 0; i < l64; i++)
701 + writeq(src64[i], &dest64[i]);
702 +}
703 +
704 /* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
705 * Advances piobuf pointer. Leaves additional data in the copy buffer.
706 */
707 @@ -198,7 +210,7 @@ static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
708 {
709 int block_len = len & ~(sizeof(copy_buf->buf) - 1);
710
711 - memcpy_toio(*piobuf, data, block_len);
712 + efx_memcpy_64(*piobuf, data, block_len);
713 *piobuf += block_len;
714 len -= block_len;
715
716 @@ -230,7 +242,7 @@ static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
717 if (copy_buf->used < sizeof(copy_buf->buf))
718 return;
719
720 - memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
721 + efx_memcpy_64(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
722 *piobuf += sizeof(copy_buf->buf);
723 data += copy_to_buf;
724 len -= copy_to_buf;
725 @@ -245,7 +257,7 @@ static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
726 {
727 /* if there's anything in it, write the whole buffer, including junk */
728 if (copy_buf->used)
729 - memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
730 + efx_memcpy_64(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
731 }
732
733 /* Traverse skb structure and copy fragments in to PIO buffer.
734 @@ -304,8 +316,8 @@ efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
735 */
736 BUILD_BUG_ON(L1_CACHE_BYTES >
737 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
738 - memcpy_toio(tx_queue->piobuf, skb->data,
739 - ALIGN(skb->len, L1_CACHE_BYTES));
740 + efx_memcpy_64(tx_queue->piobuf, skb->data,
741 + ALIGN(skb->len, L1_CACHE_BYTES));
742 }
743
744 EFX_POPULATE_QWORD_5(buffer->option,
745 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
746 index d53e299ae1d9..7eec598c5cb6 100644
747 --- a/drivers/net/macvlan.c
748 +++ b/drivers/net/macvlan.c
749 @@ -1036,7 +1036,6 @@ static int macvlan_device_event(struct notifier_block *unused,
750 list_for_each_entry_safe(vlan, next, &port->vlans, list)
751 vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
752 unregister_netdevice_many(&list_kill);
753 - list_del(&list_kill);
754 break;
755 case NETDEV_PRE_TYPE_CHANGE:
756 /* Forbid underlaying device to change its type. */
757 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
758 index dc4bf06948c7..cf62d7e8329f 100644
759 --- a/drivers/net/usb/qmi_wwan.c
760 +++ b/drivers/net/usb/qmi_wwan.c
761 @@ -763,7 +763,12 @@ static const struct usb_device_id products[] = {
762 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
763 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
764 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
765 - {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
766 + {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
767 + {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
768 + {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */
769 + {QMI_FIXED_INTF(0x0b3c, 0xc004, 6)}, /* Olivetti Olicard 155 */
770 + {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
771 + {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */
772 {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
773 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
774 {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
775 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
776 index 4dbb2ed85b97..77dcf92ea350 100644
777 --- a/drivers/net/vxlan.c
778 +++ b/drivers/net/vxlan.c
779 @@ -2275,9 +2275,9 @@ static void vxlan_setup(struct net_device *dev)
780 eth_hw_addr_random(dev);
781 ether_setup(dev);
782 if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
783 - dev->hard_header_len = ETH_HLEN + VXLAN6_HEADROOM;
784 + dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
785 else
786 - dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
787 + dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
788
789 dev->netdev_ops = &vxlan_netdev_ops;
790 dev->destructor = free_netdev;
791 @@ -2660,8 +2660,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
792 if (!tb[IFLA_MTU])
793 dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
794
795 - /* update header length based on lower device */
796 - dev->hard_header_len = lowerdev->hard_header_len +
797 + dev->needed_headroom = lowerdev->hard_header_len +
798 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
799 } else if (use_ipv6)
800 vxlan->flags |= VXLAN_F_IPV6;
801 diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
802 index 3281c90691c3..44fe83ee9bee 100644
803 --- a/drivers/rtc/rtc-at91rm9200.c
804 +++ b/drivers/rtc/rtc-at91rm9200.c
805 @@ -48,6 +48,7 @@ struct at91_rtc_config {
806
807 static const struct at91_rtc_config *at91_rtc_config;
808 static DECLARE_COMPLETION(at91_rtc_updated);
809 +static DECLARE_COMPLETION(at91_rtc_upd_rdy);
810 static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
811 static void __iomem *at91_rtc_regs;
812 static int irq;
813 @@ -161,6 +162,8 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
814 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
815 tm->tm_hour, tm->tm_min, tm->tm_sec);
816
817 + wait_for_completion(&at91_rtc_upd_rdy);
818 +
819 /* Stop Time/Calendar from counting */
820 cr = at91_rtc_read(AT91_RTC_CR);
821 at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
822 @@ -183,7 +186,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
823
824 /* Restart Time/Calendar */
825 cr = at91_rtc_read(AT91_RTC_CR);
826 + at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_SECEV);
827 at91_rtc_write(AT91_RTC_CR, cr & ~(AT91_RTC_UPDCAL | AT91_RTC_UPDTIM));
828 + at91_rtc_write_ier(AT91_RTC_SECEV);
829
830 return 0;
831 }
832 @@ -290,8 +295,10 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
833 if (rtsr) { /* this interrupt is shared! Is it ours? */
834 if (rtsr & AT91_RTC_ALARM)
835 events |= (RTC_AF | RTC_IRQF);
836 - if (rtsr & AT91_RTC_SECEV)
837 - events |= (RTC_UF | RTC_IRQF);
838 + if (rtsr & AT91_RTC_SECEV) {
839 + complete(&at91_rtc_upd_rdy);
840 + at91_rtc_write_idr(AT91_RTC_SECEV);
841 + }
842 if (rtsr & AT91_RTC_ACKUPD)
843 complete(&at91_rtc_updated);
844
845 @@ -413,6 +420,11 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
846 return PTR_ERR(rtc);
847 platform_set_drvdata(pdev, rtc);
848
849 + /* enable SECEV interrupt in order to initialize at91_rtc_upd_rdy
850 + * completion.
851 + */
852 + at91_rtc_write_ier(AT91_RTC_SECEV);
853 +
854 dev_info(&pdev->dev, "AT91 Real Time Clock driver.\n");
855 return 0;
856 }
857 diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
858 index dae8d1a9038e..52d7517b342e 100644
859 --- a/drivers/staging/iio/adc/mxs-lradc.c
860 +++ b/drivers/staging/iio/adc/mxs-lradc.c
861 @@ -846,6 +846,14 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
862 LRADC_CTRL1);
863 mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
864
865 + /* Enable / disable the divider per requirement */
866 + if (test_bit(chan, &lradc->is_divided))
867 + mxs_lradc_reg_set(lradc, 1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
868 + LRADC_CTRL2);
869 + else
870 + mxs_lradc_reg_clear(lradc,
871 + 1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET, LRADC_CTRL2);
872 +
873 /* Clean the slot's previous content, then set new one. */
874 mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0),
875 LRADC_CTRL4);
876 @@ -961,15 +969,11 @@ static int mxs_lradc_write_raw(struct iio_dev *iio_dev,
877 if (val == scale_avail[MXS_LRADC_DIV_DISABLED].integer &&
878 val2 == scale_avail[MXS_LRADC_DIV_DISABLED].nano) {
879 /* divider by two disabled */
880 - writel(1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
881 - lradc->base + LRADC_CTRL2 + STMP_OFFSET_REG_CLR);
882 clear_bit(chan->channel, &lradc->is_divided);
883 ret = 0;
884 } else if (val == scale_avail[MXS_LRADC_DIV_ENABLED].integer &&
885 val2 == scale_avail[MXS_LRADC_DIV_ENABLED].nano) {
886 /* divider by two enabled */
887 - writel(1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
888 - lradc->base + LRADC_CTRL2 + STMP_OFFSET_REG_SET);
889 set_bit(chan->channel, &lradc->is_divided);
890 ret = 0;
891 }
892 diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
893 index 9e0f2a9c73ae..ab338e3ddd05 100644
894 --- a/drivers/staging/iio/light/tsl2x7x_core.c
895 +++ b/drivers/staging/iio/light/tsl2x7x_core.c
896 @@ -667,9 +667,13 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
897 chip->tsl2x7x_config[TSL2X7X_PRX_COUNT] =
898 chip->tsl2x7x_settings.prox_pulse_count;
899 chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHLO] =
900 - chip->tsl2x7x_settings.prox_thres_low;
901 + (chip->tsl2x7x_settings.prox_thres_low) & 0xFF;
902 + chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHHI] =
903 + (chip->tsl2x7x_settings.prox_thres_low >> 8) & 0xFF;
904 chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHLO] =
905 - chip->tsl2x7x_settings.prox_thres_high;
906 + (chip->tsl2x7x_settings.prox_thres_high) & 0xFF;
907 + chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHHI] =
908 + (chip->tsl2x7x_settings.prox_thres_high >> 8) & 0xFF;
909
910 /* and make sure we're not already on */
911 if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
912 diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
913 index de77d9aa22c6..6689de6c5591 100644
914 --- a/drivers/target/iscsi/iscsi_target_auth.c
915 +++ b/drivers/target/iscsi/iscsi_target_auth.c
916 @@ -314,6 +314,16 @@ static int chap_server_compute_md5(
917 goto out;
918 }
919 /*
920 + * During mutual authentication, the CHAP_C generated by the
921 + * initiator must not match the original CHAP_C generated by
922 + * the target.
923 + */
924 + if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
925 + pr_err("initiator CHAP_C matches target CHAP_C, failing"
926 + " login attempt\n");
927 + goto out;
928 + }
929 + /*
930 * Generate CHAP_N and CHAP_R for mutual authentication.
931 */
932 tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
933 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
934 index 789aa9eb0a1e..a51dd4efc23b 100644
935 --- a/drivers/target/target_core_transport.c
936 +++ b/drivers/target/target_core_transport.c
937 @@ -2407,6 +2407,10 @@ static void target_release_cmd_kref(struct kref *kref)
938 */
939 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
940 {
941 + if (!se_sess) {
942 + se_cmd->se_tfo->release_cmd(se_cmd);
943 + return 1;
944 + }
945 return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
946 &se_sess->sess_cmd_lock);
947 }
948 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
949 index 904efb6035b0..6bbd203f1861 100644
950 --- a/drivers/usb/class/cdc-acm.c
951 +++ b/drivers/usb/class/cdc-acm.c
952 @@ -122,13 +122,23 @@ static void acm_release_minor(struct acm *acm)
953 static int acm_ctrl_msg(struct acm *acm, int request, int value,
954 void *buf, int len)
955 {
956 - int retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
957 + int retval;
958 +
959 + retval = usb_autopm_get_interface(acm->control);
960 + if (retval)
961 + return retval;
962 +
963 + retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
964 request, USB_RT_ACM, value,
965 acm->control->altsetting[0].desc.bInterfaceNumber,
966 buf, len, 5000);
967 +
968 dev_dbg(&acm->control->dev,
969 "%s - rq 0x%02x, val %#x, len %#x, result %d\n",
970 __func__, request, value, len, retval);
971 +
972 + usb_autopm_put_interface(acm->control);
973 +
974 return retval < 0 ? retval : 0;
975 }
976
977 @@ -496,6 +506,7 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
978 {
979 struct acm *acm = container_of(port, struct acm, port);
980 int retval = -ENODEV;
981 + int i;
982
983 dev_dbg(&acm->control->dev, "%s\n", __func__);
984
985 @@ -515,21 +526,17 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
986 acm->control->needs_remote_wakeup = 1;
987
988 acm->ctrlurb->dev = acm->dev;
989 - if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
990 + retval = usb_submit_urb(acm->ctrlurb, GFP_KERNEL);
991 + if (retval) {
992 dev_err(&acm->control->dev,
993 "%s - usb_submit_urb(ctrl irq) failed\n", __func__);
994 - usb_autopm_put_interface(acm->control);
995 goto error_submit_urb;
996 }
997
998 acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS;
999 - if (acm_set_control(acm, acm->ctrlout) < 0 &&
1000 - (acm->ctrl_caps & USB_CDC_CAP_LINE)) {
1001 - usb_autopm_put_interface(acm->control);
1002 + retval = acm_set_control(acm, acm->ctrlout);
1003 + if (retval < 0 && (acm->ctrl_caps & USB_CDC_CAP_LINE))
1004 goto error_set_control;
1005 - }
1006 -
1007 - usb_autopm_put_interface(acm->control);
1008
1009 /*
1010 * Unthrottle device in case the TTY was closed while throttled.
1011 @@ -539,23 +546,30 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
1012 acm->throttle_req = 0;
1013 spin_unlock_irq(&acm->read_lock);
1014
1015 - if (acm_submit_read_urbs(acm, GFP_KERNEL))
1016 + retval = acm_submit_read_urbs(acm, GFP_KERNEL);
1017 + if (retval)
1018 goto error_submit_read_urbs;
1019
1020 + usb_autopm_put_interface(acm->control);
1021 +
1022 mutex_unlock(&acm->mutex);
1023
1024 return 0;
1025
1026 error_submit_read_urbs:
1027 + for (i = 0; i < acm->rx_buflimit; i++)
1028 + usb_kill_urb(acm->read_urbs[i]);
1029 acm->ctrlout = 0;
1030 acm_set_control(acm, acm->ctrlout);
1031 error_set_control:
1032 usb_kill_urb(acm->ctrlurb);
1033 error_submit_urb:
1034 + usb_autopm_put_interface(acm->control);
1035 error_get_interface:
1036 disconnected:
1037 mutex_unlock(&acm->mutex);
1038 - return retval;
1039 +
1040 + return usb_translate_errors(retval);
1041 }
1042
1043 static void acm_port_destruct(struct tty_port *port)
1044 @@ -573,21 +587,35 @@ static void acm_port_destruct(struct tty_port *port)
1045 static void acm_port_shutdown(struct tty_port *port)
1046 {
1047 struct acm *acm = container_of(port, struct acm, port);
1048 + struct urb *urb;
1049 + struct acm_wb *wb;
1050 int i;
1051 + int pm_err;
1052
1053 dev_dbg(&acm->control->dev, "%s\n", __func__);
1054
1055 mutex_lock(&acm->mutex);
1056 if (!acm->disconnected) {
1057 - usb_autopm_get_interface(acm->control);
1058 + pm_err = usb_autopm_get_interface(acm->control);
1059 acm_set_control(acm, acm->ctrlout = 0);
1060 +
1061 + for (;;) {
1062 + urb = usb_get_from_anchor(&acm->delayed);
1063 + if (!urb)
1064 + break;
1065 + wb = urb->context;
1066 + wb->use = 0;
1067 + usb_autopm_put_interface_async(acm->control);
1068 + }
1069 +
1070 usb_kill_urb(acm->ctrlurb);
1071 for (i = 0; i < ACM_NW; i++)
1072 usb_kill_urb(acm->wb[i].urb);
1073 for (i = 0; i < acm->rx_buflimit; i++)
1074 usb_kill_urb(acm->read_urbs[i]);
1075 acm->control->needs_remote_wakeup = 0;
1076 - usb_autopm_put_interface(acm->control);
1077 + if (!pm_err)
1078 + usb_autopm_put_interface(acm->control);
1079 }
1080 mutex_unlock(&acm->mutex);
1081 }
1082 @@ -646,14 +674,17 @@ static int acm_tty_write(struct tty_struct *tty,
1083 memcpy(wb->buf, buf, count);
1084 wb->len = count;
1085
1086 - usb_autopm_get_interface_async(acm->control);
1087 + stat = usb_autopm_get_interface_async(acm->control);
1088 + if (stat) {
1089 + wb->use = 0;
1090 + spin_unlock_irqrestore(&acm->write_lock, flags);
1091 + return stat;
1092 + }
1093 +
1094 if (acm->susp_count) {
1095 - if (!acm->delayed_wb)
1096 - acm->delayed_wb = wb;
1097 - else
1098 - usb_autopm_put_interface_async(acm->control);
1099 + usb_anchor_urb(wb->urb, &acm->delayed);
1100 spin_unlock_irqrestore(&acm->write_lock, flags);
1101 - return count; /* A white lie */
1102 + return count;
1103 }
1104 usb_mark_last_busy(acm->dev);
1105
1106 @@ -1269,6 +1300,7 @@ made_compressed_probe:
1107 acm->bInterval = epread->bInterval;
1108 tty_port_init(&acm->port);
1109 acm->port.ops = &acm_port_ops;
1110 + init_usb_anchor(&acm->delayed);
1111
1112 buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
1113 if (!buf) {
1114 @@ -1514,18 +1546,15 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
1115 struct acm *acm = usb_get_intfdata(intf);
1116 int cnt;
1117
1118 + spin_lock_irq(&acm->read_lock);
1119 + spin_lock(&acm->write_lock);
1120 if (PMSG_IS_AUTO(message)) {
1121 - int b;
1122 -
1123 - spin_lock_irq(&acm->write_lock);
1124 - b = acm->transmitting;
1125 - spin_unlock_irq(&acm->write_lock);
1126 - if (b)
1127 + if (acm->transmitting) {
1128 + spin_unlock(&acm->write_lock);
1129 + spin_unlock_irq(&acm->read_lock);
1130 return -EBUSY;
1131 + }
1132 }
1133 -
1134 - spin_lock_irq(&acm->read_lock);
1135 - spin_lock(&acm->write_lock);
1136 cnt = acm->susp_count++;
1137 spin_unlock(&acm->write_lock);
1138 spin_unlock_irq(&acm->read_lock);
1139 @@ -1533,8 +1562,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
1140 if (cnt)
1141 return 0;
1142
1143 - if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags))
1144 - stop_data_traffic(acm);
1145 + stop_data_traffic(acm);
1146
1147 return 0;
1148 }
1149 @@ -1542,29 +1570,24 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
1150 static int acm_resume(struct usb_interface *intf)
1151 {
1152 struct acm *acm = usb_get_intfdata(intf);
1153 - struct acm_wb *wb;
1154 + struct urb *urb;
1155 int rv = 0;
1156 - int cnt;
1157
1158 spin_lock_irq(&acm->read_lock);
1159 - acm->susp_count -= 1;
1160 - cnt = acm->susp_count;
1161 - spin_unlock_irq(&acm->read_lock);
1162 + spin_lock(&acm->write_lock);
1163
1164 - if (cnt)
1165 - return 0;
1166 + if (--acm->susp_count)
1167 + goto out;
1168
1169 if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags)) {
1170 - rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
1171 -
1172 - spin_lock_irq(&acm->write_lock);
1173 - if (acm->delayed_wb) {
1174 - wb = acm->delayed_wb;
1175 - acm->delayed_wb = NULL;
1176 - spin_unlock_irq(&acm->write_lock);
1177 - acm_start_wb(acm, wb);
1178 - } else {
1179 - spin_unlock_irq(&acm->write_lock);
1180 + rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);
1181 +
1182 + for (;;) {
1183 + urb = usb_get_from_anchor(&acm->delayed);
1184 + if (!urb)
1185 + break;
1186 +
1187 + acm_start_wb(acm, urb->context);
1188 }
1189
1190 /*
1191 @@ -1572,12 +1595,14 @@ static int acm_resume(struct usb_interface *intf)
1192 * do the write path at all cost
1193 */
1194 if (rv < 0)
1195 - goto err_out;
1196 + goto out;
1197
1198 - rv = acm_submit_read_urbs(acm, GFP_NOIO);
1199 + rv = acm_submit_read_urbs(acm, GFP_ATOMIC);
1200 }
1201 +out:
1202 + spin_unlock(&acm->write_lock);
1203 + spin_unlock_irq(&acm->read_lock);
1204
1205 -err_out:
1206 return rv;
1207 }
1208
1209 diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
1210 index e38dc785808f..80826f843e04 100644
1211 --- a/drivers/usb/class/cdc-acm.h
1212 +++ b/drivers/usb/class/cdc-acm.h
1213 @@ -120,7 +120,7 @@ struct acm {
1214 unsigned int throttled:1; /* actually throttled */
1215 unsigned int throttle_req:1; /* throttle requested */
1216 u8 bInterval;
1217 - struct acm_wb *delayed_wb; /* write queued for a device about to be woken */
1218 + struct usb_anchor delayed; /* writes queued for a device about to be woken */
1219 };
1220
1221 #define CDC_DATA_INTERFACE_TYPE 0x0a
1222 diff --git a/include/sound/core.h b/include/sound/core.h
1223 index d3f5f818e0b9..88c9fbb7ed90 100644
1224 --- a/include/sound/core.h
1225 +++ b/include/sound/core.h
1226 @@ -116,6 +116,8 @@ struct snd_card {
1227 int user_ctl_count; /* count of all user controls */
1228 struct list_head controls; /* all controls for this card */
1229 struct list_head ctl_files; /* active control files */
1230 + struct mutex user_ctl_lock; /* protects user controls against
1231 + concurrent access */
1232
1233 struct snd_info_entry *proc_root; /* root for soundcard specific files */
1234 struct snd_info_entry *proc_id; /* the card id */
1235 diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
1236 index 5759810e1c1b..21eed488783f 100644
1237 --- a/include/uapi/sound/compress_offload.h
1238 +++ b/include/uapi/sound/compress_offload.h
1239 @@ -80,7 +80,7 @@ struct snd_compr_tstamp {
1240 struct snd_compr_avail {
1241 __u64 avail;
1242 struct snd_compr_tstamp tstamp;
1243 -};
1244 +} __attribute__((packed));
1245
1246 enum snd_compr_direction {
1247 SND_COMPRESS_PLAYBACK = 0,
1248 diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
1249 index df6839e3ce08..99a03acb7d47 100644
1250 --- a/lib/lz4/lz4_decompress.c
1251 +++ b/lib/lz4/lz4_decompress.c
1252 @@ -72,6 +72,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
1253 len = *ip++;
1254 for (; len == 255; length += 255)
1255 len = *ip++;
1256 + if (unlikely(length > (size_t)(length + len)))
1257 + goto _output_error;
1258 length += len;
1259 }
1260
1261 diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
1262 index 569985d522d5..8563081e8da3 100644
1263 --- a/lib/lzo/lzo1x_decompress_safe.c
1264 +++ b/lib/lzo/lzo1x_decompress_safe.c
1265 @@ -19,11 +19,31 @@
1266 #include <linux/lzo.h>
1267 #include "lzodefs.h"
1268
1269 -#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
1270 -#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
1271 -#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
1272 -#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
1273 -#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
1274 +#define HAVE_IP(t, x) \
1275 + (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
1276 + (((t + x) >= t) && ((t + x) >= x)))
1277 +
1278 +#define HAVE_OP(t, x) \
1279 + (((size_t)(op_end - op) >= (size_t)(t + x)) && \
1280 + (((t + x) >= t) && ((t + x) >= x)))
1281 +
1282 +#define NEED_IP(t, x) \
1283 + do { \
1284 + if (!HAVE_IP(t, x)) \
1285 + goto input_overrun; \
1286 + } while (0)
1287 +
1288 +#define NEED_OP(t, x) \
1289 + do { \
1290 + if (!HAVE_OP(t, x)) \
1291 + goto output_overrun; \
1292 + } while (0)
1293 +
1294 +#define TEST_LB(m_pos) \
1295 + do { \
1296 + if ((m_pos) < out) \
1297 + goto lookbehind_overrun; \
1298 + } while (0)
1299
1300 int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
1301 unsigned char *out, size_t *out_len)
1302 @@ -58,14 +78,14 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
1303 while (unlikely(*ip == 0)) {
1304 t += 255;
1305 ip++;
1306 - NEED_IP(1);
1307 + NEED_IP(1, 0);
1308 }
1309 t += 15 + *ip++;
1310 }
1311 t += 3;
1312 copy_literal_run:
1313 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
1314 - if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
1315 + if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
1316 const unsigned char *ie = ip + t;
1317 unsigned char *oe = op + t;
1318 do {
1319 @@ -81,8 +101,8 @@ copy_literal_run:
1320 } else
1321 #endif
1322 {
1323 - NEED_OP(t);
1324 - NEED_IP(t + 3);
1325 + NEED_OP(t, 0);
1326 + NEED_IP(t, 3);
1327 do {
1328 *op++ = *ip++;
1329 } while (--t > 0);
1330 @@ -95,7 +115,7 @@ copy_literal_run:
1331 m_pos -= t >> 2;
1332 m_pos -= *ip++ << 2;
1333 TEST_LB(m_pos);
1334 - NEED_OP(2);
1335 + NEED_OP(2, 0);
1336 op[0] = m_pos[0];
1337 op[1] = m_pos[1];
1338 op += 2;
1339 @@ -119,10 +139,10 @@ copy_literal_run:
1340 while (unlikely(*ip == 0)) {
1341 t += 255;
1342 ip++;
1343 - NEED_IP(1);
1344 + NEED_IP(1, 0);
1345 }
1346 t += 31 + *ip++;
1347 - NEED_IP(2);
1348 + NEED_IP(2, 0);
1349 }
1350 m_pos = op - 1;
1351 next = get_unaligned_le16(ip);
1352 @@ -137,10 +157,10 @@ copy_literal_run:
1353 while (unlikely(*ip == 0)) {
1354 t += 255;
1355 ip++;
1356 - NEED_IP(1);
1357 + NEED_IP(1, 0);
1358 }
1359 t += 7 + *ip++;
1360 - NEED_IP(2);
1361 + NEED_IP(2, 0);
1362 }
1363 next = get_unaligned_le16(ip);
1364 ip += 2;
1365 @@ -154,7 +174,7 @@ copy_literal_run:
1366 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
1367 if (op - m_pos >= 8) {
1368 unsigned char *oe = op + t;
1369 - if (likely(HAVE_OP(t + 15))) {
1370 + if (likely(HAVE_OP(t, 15))) {
1371 do {
1372 COPY8(op, m_pos);
1373 op += 8;
1374 @@ -164,7 +184,7 @@ copy_literal_run:
1375 m_pos += 8;
1376 } while (op < oe);
1377 op = oe;
1378 - if (HAVE_IP(6)) {
1379 + if (HAVE_IP(6, 0)) {
1380 state = next;
1381 COPY4(op, ip);
1382 op += next;
1383 @@ -172,7 +192,7 @@ copy_literal_run:
1384 continue;
1385 }
1386 } else {
1387 - NEED_OP(t);
1388 + NEED_OP(t, 0);
1389 do {
1390 *op++ = *m_pos++;
1391 } while (op < oe);
1392 @@ -181,7 +201,7 @@ copy_literal_run:
1393 #endif
1394 {
1395 unsigned char *oe = op + t;
1396 - NEED_OP(t);
1397 + NEED_OP(t, 0);
1398 op[0] = m_pos[0];
1399 op[1] = m_pos[1];
1400 op += 2;
1401 @@ -194,15 +214,15 @@ match_next:
1402 state = next;
1403 t = next;
1404 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
1405 - if (likely(HAVE_IP(6) && HAVE_OP(4))) {
1406 + if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
1407 COPY4(op, ip);
1408 op += t;
1409 ip += t;
1410 } else
1411 #endif
1412 {
1413 - NEED_IP(t + 3);
1414 - NEED_OP(t);
1415 + NEED_IP(t, 3);
1416 + NEED_OP(t, 0);
1417 while (t > 0) {
1418 *op++ = *ip++;
1419 t--;
1420 diff --git a/mm/shmem.c b/mm/shmem.c
1421 index 9f70e02111c6..a2801ba8ae2d 100644
1422 --- a/mm/shmem.c
1423 +++ b/mm/shmem.c
1424 @@ -1728,6 +1728,9 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1425 pgoff_t start, index, end;
1426 int error;
1427
1428 + if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
1429 + return -EOPNOTSUPP;
1430 +
1431 mutex_lock(&inode->i_mutex);
1432
1433 if (mode & FALLOC_FL_PUNCH_HOLE) {
1434 diff --git a/mm/slab.c b/mm/slab.c
1435 index 19d92181ce24..9432556ab912 100644
1436 --- a/mm/slab.c
1437 +++ b/mm/slab.c
1438 @@ -386,6 +386,39 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
1439
1440 #endif
1441
1442 +#define OBJECT_FREE (0)
1443 +#define OBJECT_ACTIVE (1)
1444 +
1445 +#ifdef CONFIG_DEBUG_SLAB_LEAK
1446 +
1447 +static void set_obj_status(struct page *page, int idx, int val)
1448 +{
1449 + int freelist_size;
1450 + char *status;
1451 + struct kmem_cache *cachep = page->slab_cache;
1452 +
1453 + freelist_size = cachep->num * sizeof(freelist_idx_t);
1454 + status = (char *)page->freelist + freelist_size;
1455 + status[idx] = val;
1456 +}
1457 +
1458 +static inline unsigned int get_obj_status(struct page *page, int idx)
1459 +{
1460 + int freelist_size;
1461 + char *status;
1462 + struct kmem_cache *cachep = page->slab_cache;
1463 +
1464 + freelist_size = cachep->num * sizeof(freelist_idx_t);
1465 + status = (char *)page->freelist + freelist_size;
1466 +
1467 + return status[idx];
1468 +}
1469 +
1470 +#else
1471 +static inline void set_obj_status(struct page *page, int idx, int val) {}
1472 +
1473 +#endif
1474 +
1475 /*
1476 * Do not go above this order unless 0 objects fit into the slab or
1477 * overridden on the command line.
1478 @@ -576,12 +609,30 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
1479 return cachep->array[smp_processor_id()];
1480 }
1481
1482 +static size_t calculate_freelist_size(int nr_objs, size_t align)
1483 +{
1484 + size_t freelist_size;
1485 +
1486 + freelist_size = nr_objs * sizeof(freelist_idx_t);
1487 + if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
1488 + freelist_size += nr_objs * sizeof(char);
1489 +
1490 + if (align)
1491 + freelist_size = ALIGN(freelist_size, align);
1492 +
1493 + return freelist_size;
1494 +}
1495 +
1496 static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
1497 size_t idx_size, size_t align)
1498 {
1499 int nr_objs;
1500 + size_t remained_size;
1501 size_t freelist_size;
1502 + int extra_space = 0;
1503
1504 + if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
1505 + extra_space = sizeof(char);
1506 /*
1507 * Ignore padding for the initial guess. The padding
1508 * is at most @align-1 bytes, and @buffer_size is at
1509 @@ -590,14 +641,15 @@ static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
1510 * into the memory allocation when taking the padding
1511 * into account.
1512 */
1513 - nr_objs = slab_size / (buffer_size + idx_size);
1514 + nr_objs = slab_size / (buffer_size + idx_size + extra_space);
1515
1516 /*
1517 * This calculated number will be either the right
1518 * amount, or one greater than what we want.
1519 */
1520 - freelist_size = slab_size - nr_objs * buffer_size;
1521 - if (freelist_size < ALIGN(nr_objs * idx_size, align))
1522 + remained_size = slab_size - nr_objs * buffer_size;
1523 + freelist_size = calculate_freelist_size(nr_objs, align);
1524 + if (remained_size < freelist_size)
1525 nr_objs--;
1526
1527 return nr_objs;
1528 @@ -635,7 +687,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
1529 } else {
1530 nr_objs = calculate_nr_objs(slab_size, buffer_size,
1531 sizeof(freelist_idx_t), align);
1532 - mgmt_size = ALIGN(nr_objs * sizeof(freelist_idx_t), align);
1533 + mgmt_size = calculate_freelist_size(nr_objs, align);
1534 }
1535 *num = nr_objs;
1536 *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
1537 @@ -2032,13 +2084,16 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
1538 break;
1539
1540 if (flags & CFLGS_OFF_SLAB) {
1541 + size_t freelist_size_per_obj = sizeof(freelist_idx_t);
1542 /*
1543 * Max number of objs-per-slab for caches which
1544 * use off-slab slabs. Needed to avoid a possible
1545 * looping condition in cache_grow().
1546 */
1547 + if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
1548 + freelist_size_per_obj += sizeof(char);
1549 offslab_limit = size;
1550 - offslab_limit /= sizeof(freelist_idx_t);
1551 + offslab_limit /= freelist_size_per_obj;
1552
1553 if (num > offslab_limit)
1554 break;
1555 @@ -2285,8 +2340,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
1556 if (!cachep->num)
1557 return -E2BIG;
1558
1559 - freelist_size =
1560 - ALIGN(cachep->num * sizeof(freelist_idx_t), cachep->align);
1561 + freelist_size = calculate_freelist_size(cachep->num, cachep->align);
1562
1563 /*
1564 * If the slab has been placed off-slab, and we have enough space then
1565 @@ -2299,7 +2353,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
1566
1567 if (flags & CFLGS_OFF_SLAB) {
1568 /* really off slab. No need for manual alignment */
1569 - freelist_size = cachep->num * sizeof(freelist_idx_t);
1570 + freelist_size = calculate_freelist_size(cachep->num, 0);
1571
1572 #ifdef CONFIG_PAGE_POISONING
1573 /* If we're going to use the generic kernel_map_pages()
1574 @@ -2625,6 +2679,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
1575 if (cachep->ctor)
1576 cachep->ctor(objp);
1577 #endif
1578 + set_obj_status(page, i, OBJECT_FREE);
1579 set_free_obj(page, i, i);
1580 }
1581 }
1582 @@ -2833,6 +2888,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
1583 BUG_ON(objnr >= cachep->num);
1584 BUG_ON(objp != index_to_obj(cachep, page, objnr));
1585
1586 + set_obj_status(page, objnr, OBJECT_FREE);
1587 if (cachep->flags & SLAB_POISON) {
1588 #ifdef CONFIG_DEBUG_PAGEALLOC
1589 if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
1590 @@ -2966,6 +3022,8 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
1591 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
1592 gfp_t flags, void *objp, unsigned long caller)
1593 {
1594 + struct page *page;
1595 +
1596 if (!objp)
1597 return objp;
1598 if (cachep->flags & SLAB_POISON) {
1599 @@ -2996,6 +3054,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
1600 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
1601 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
1602 }
1603 +
1604 + page = virt_to_head_page(objp);
1605 + set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE);
1606 objp += obj_offset(cachep);
1607 if (cachep->ctor && cachep->flags & SLAB_POISON)
1608 cachep->ctor(objp);
1609 @@ -4232,21 +4293,12 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c,
1610 struct page *page)
1611 {
1612 void *p;
1613 - int i, j;
1614 + int i;
1615
1616 if (n[0] == n[1])
1617 return;
1618 for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
1619 - bool active = true;
1620 -
1621 - for (j = page->active; j < c->num; j++) {
1622 - /* Skip freed item */
1623 - if (get_free_obj(page, j) == i) {
1624 - active = false;
1625 - break;
1626 - }
1627 - }
1628 - if (!active)
1629 + if (get_obj_status(page, i) != OBJECT_ACTIVE)
1630 continue;
1631
1632 if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
1633 diff --git a/net/core/dev.c b/net/core/dev.c
1634 index fb8b0546485b..a30bef1882f5 100644
1635 --- a/net/core/dev.c
1636 +++ b/net/core/dev.c
1637 @@ -6613,6 +6613,9 @@ EXPORT_SYMBOL(unregister_netdevice_queue);
1638 /**
1639 * unregister_netdevice_many - unregister many devices
1640 * @head: list of devices
1641 + *
1642 + * Note: As most callers use a stack-allocated list_head,
1643 + * we force a list_del() to make sure the stack won't be corrupted later.
1644 */
1645 void unregister_netdevice_many(struct list_head *head)
1646 {
1647 @@ -6622,6 +6625,7 @@ void unregister_netdevice_many(struct list_head *head)
1648 rollback_registered_many(head);
1649 list_for_each_entry(dev, head, unreg_list)
1650 net_set_todo(dev);
1651 + list_del(head);
1652 }
1653 }
1654 EXPORT_SYMBOL(unregister_netdevice_many);
1655 @@ -7077,7 +7081,6 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
1656 }
1657 }
1658 unregister_netdevice_many(&dev_kill_list);
1659 - list_del(&dev_kill_list);
1660 rtnl_unlock();
1661 }
1662
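Editor's note: the net/core/dev.c change matters because callers pass a list_head that lives on their stack; if the unregistered devices stayed linked to it after the call returned, later list operations would follow pointers into a dead stack frame. The following user-space sketch of the same pattern uses a hand-rolled intrusive list, not the kernel's <linux/list.h>, and is only an illustration.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev; n->next = h;
    h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
    n->prev = n->next = n;      /* reinitialise instead of poisoning */
}

struct item { int id; struct list_head link; };

/*
 * Process every item on the caller's (possibly stack-allocated) list, then
 * unlink the head so no item keeps a pointer into the caller's stack frame
 * once this function returns; the same reason the hunk above moves the
 * final list_del() into unregister_netdevice_many() itself.
 */
static void consume_all(struct list_head *head)
{
    struct list_head *pos;

    for (pos = head->next; pos != head; pos = pos->next) {
        struct item *it = (struct item *)((char *)pos - offsetof(struct item, link));

        printf("handling item %d\n", it->id);
    }
    list_del(head);
}

int main(void)
{
    struct list_head head;                     /* stack list, like dev_kill_list */
    struct item a = { .id = 1 }, b = { .id = 2 };

    list_init(&head);
    list_add_tail(&a.link, &head);
    list_add_tail(&b.link, &head);
    consume_all(&head);
    return 0;
}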
1663 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
1664 index 2d8d8fcfa060..1999ed832267 100644
1665 --- a/net/core/rtnetlink.c
1666 +++ b/net/core/rtnetlink.c
1667 @@ -1234,6 +1234,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1668 struct nlattr *tb[IFLA_MAX+1];
1669 u32 ext_filter_mask = 0;
1670 int err;
1671 + int hdrlen;
1672
1673 s_h = cb->args[0];
1674 s_idx = cb->args[1];
1675 @@ -1241,8 +1242,17 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1676 rcu_read_lock();
1677 cb->seq = net->dev_base_seq;
1678
1679 - if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
1680 - ifla_policy) >= 0) {
1681 + /* A hack to preserve the kernel<->userspace interface.
1682 + * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
1683 + * However, before Linux v3.9 the code here assumed rtgenmsg and that's
1684 + * what iproute2 < v3.9.0 used.
1685 + * We can detect the old iproute2. Even including the IFLA_EXT_MASK
1686 + * attribute, its netlink message is shorter than struct ifinfomsg.
1687 + */
1688 + hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
1689 + sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
1690 +
1691 + if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
1692
1693 if (tb[IFLA_EXT_MASK])
1694 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
1695 @@ -1744,7 +1754,6 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
1696
1697 ops->dellink(dev, &list_kill);
1698 unregister_netdevice_many(&list_kill);
1699 - list_del(&list_kill);
1700 return 0;
1701 }
1702
1703 @@ -2095,9 +2104,13 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
1704 struct nlattr *tb[IFLA_MAX+1];
1705 u32 ext_filter_mask = 0;
1706 u16 min_ifinfo_dump_size = 0;
1707 + int hdrlen;
1708 +
1709 + /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
1710 + hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
1711 + sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
1712
1713 - if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
1714 - ifla_policy) >= 0) {
1715 + if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
1716 if (tb[IFLA_EXT_MASK])
1717 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
1718 }
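Editor's note: the rtnetlink compatibility hack reduces to one length comparison: an old iproute2 request carries only a struct rtgenmsg header and, at most, the small IFLA_EXT_MASK attribute, so its payload stays shorter than struct ifinfomsg. A hedged user-space illustration of that header selection follows; it assumes the Linux uapi headers, replaces the kernel-internal nlmsg_len() with an explicit payload length, and uses an illustrative attribute size.

#include <stddef.h>
#include <stdio.h>
#include <linux/rtnetlink.h>   /* struct rtgenmsg, struct ifinfomsg */

/*
 * Pick the header length to parse attributes after, the way the patched
 * rtnl_dump_ifinfo()/rtnl_calcit() do: payloads shorter than
 * struct ifinfomsg must come from pre-3.9 iproute2, which used
 * struct rtgenmsg.
 */
static size_t pick_hdrlen(size_t payload_len)
{
    return payload_len < sizeof(struct ifinfomsg) ?
            sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
}

int main(void)
{
    /* old tool: rtgenmsg plus an 8-byte IFLA_EXT_MASK attribute (illustrative) */
    printf("old request -> hdrlen %zu\n",
           pick_hdrlen(sizeof(struct rtgenmsg) + 8));
    /* new tool: full ifinfomsg header */
    printf("new request -> hdrlen %zu\n",
           pick_hdrlen(sizeof(struct ifinfomsg)));
    return 0;
}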
1719 diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
1720 index 8b5134c582f1..a3095fdefbed 100644
1721 --- a/net/ipv4/datagram.c
1722 +++ b/net/ipv4/datagram.c
1723 @@ -86,18 +86,26 @@ out:
1724 }
1725 EXPORT_SYMBOL(ip4_datagram_connect);
1726
1727 +/* Because the UDP xmit path can manipulate sk_dst_cache without holding
1728 + * socket lock, we need to use sk_dst_set() here,
1729 + * even if we own the socket lock.
1730 + */
1731 void ip4_datagram_release_cb(struct sock *sk)
1732 {
1733 const struct inet_sock *inet = inet_sk(sk);
1734 const struct ip_options_rcu *inet_opt;
1735 __be32 daddr = inet->inet_daddr;
1736 + struct dst_entry *dst;
1737 struct flowi4 fl4;
1738 struct rtable *rt;
1739
1740 - if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0))
1741 - return;
1742 -
1743 rcu_read_lock();
1744 +
1745 + dst = __sk_dst_get(sk);
1746 + if (!dst || !dst->obsolete || dst->ops->check(dst, 0)) {
1747 + rcu_read_unlock();
1748 + return;
1749 + }
1750 inet_opt = rcu_dereference(inet->inet_opt);
1751 if (inet_opt && inet_opt->opt.srr)
1752 daddr = inet_opt->opt.faddr;
1753 @@ -105,8 +113,10 @@ void ip4_datagram_release_cb(struct sock *sk)
1754 inet->inet_saddr, inet->inet_dport,
1755 inet->inet_sport, sk->sk_protocol,
1756 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
1757 - if (!IS_ERR(rt))
1758 - __sk_dst_set(sk, &rt->dst);
1759 +
1760 + dst = !IS_ERR(rt) ? &rt->dst : NULL;
1761 + sk_dst_set(sk, dst);
1762 +
1763 rcu_read_unlock();
1764 }
1765 EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);
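Editor's note: the switch from __sk_dst_set() to sk_dst_set() is about publishing a pointer that another path may swap concurrently without the socket lock; a plain store can lose or leak an entry, while an atomic exchange lets each side retire exactly the pointer it displaced. The C11 sketch below is only a rough analogy, not the kernel's sk_dst machinery.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Shared cache slot, loosely analogous to sk->sk_dst_cache. */
static _Atomic(int *) cache = NULL;

/*
 * Install a new entry and release whatever was there before.  Because the
 * swap is atomic, two concurrent callers each retire exactly one old
 * pointer; a plain "old = cache; cache = new;" could let both read the
 * same old value, freeing it twice or leaking one of the new entries.
 */
static void cache_set(int *new_entry)
{
    int *old = atomic_exchange(&cache, new_entry);

    free(old);
}

int main(void)
{
    int *a = malloc(sizeof *a);
    int *b = malloc(sizeof *b);

    *a = 1; *b = 2;
    cache_set(a);
    cache_set(b);                 /* retires a */
    printf("cached value: %d\n", *atomic_load(&cache));
    cache_set(NULL);              /* retires b */
    return 0;
}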
1766 diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
1767 index 812b18351462..62eaa005e146 100644
1768 --- a/net/ipv4/ipip.c
1769 +++ b/net/ipv4/ipip.c
1770 @@ -149,13 +149,13 @@ static int ipip_err(struct sk_buff *skb, u32 info)
1771
1772 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
1773 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
1774 - t->dev->ifindex, 0, IPPROTO_IPIP, 0);
1775 + t->parms.link, 0, IPPROTO_IPIP, 0);
1776 err = 0;
1777 goto out;
1778 }
1779
1780 if (type == ICMP_REDIRECT) {
1781 - ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
1782 + ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
1783 IPPROTO_IPIP, 0);
1784 err = 0;
1785 goto out;
1786 @@ -486,4 +486,5 @@ static void __exit ipip_fini(void)
1787 module_init(ipip_init);
1788 module_exit(ipip_fini);
1789 MODULE_LICENSE("GPL");
1790 +MODULE_ALIAS_RTNL_LINK("ipip");
1791 MODULE_ALIAS_NETDEV("tunl0");
1792 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
1793 index 4468e1adc094..54a5fe92de5a 100644
1794 --- a/net/ipv4/udp.c
1795 +++ b/net/ipv4/udp.c
1796 @@ -1834,6 +1834,10 @@ static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
1797 unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask);
1798 struct udp_hslot *hslot = &udp_table.hash[slot];
1799
1800 + /* Do not bother scanning an overly long list */
1801 + if (hslot->count > 10)
1802 + return NULL;
1803 +
1804 rcu_read_lock();
1805 begin:
1806 count = 0;
1807 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
1808 index f6a66bb4114d..afa082458360 100644
1809 --- a/net/ipv6/ip6_tunnel.c
1810 +++ b/net/ipv6/ip6_tunnel.c
1811 @@ -61,6 +61,7 @@
1812 MODULE_AUTHOR("Ville Nuorvala");
1813 MODULE_DESCRIPTION("IPv6 tunneling device");
1814 MODULE_LICENSE("GPL");
1815 +MODULE_ALIAS_RTNL_LINK("ip6tnl");
1816 MODULE_ALIAS_NETDEV("ip6tnl0");
1817
1818 #ifdef IP6_TNL_DEBUG
1819 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
1820 index e5a453ca302e..4f408176dc64 100644
1821 --- a/net/ipv6/sit.c
1822 +++ b/net/ipv6/sit.c
1823 @@ -560,12 +560,12 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
1824
1825 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
1826 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
1827 - t->dev->ifindex, 0, IPPROTO_IPV6, 0);
1828 + t->parms.link, 0, IPPROTO_IPV6, 0);
1829 err = 0;
1830 goto out;
1831 }
1832 if (type == ICMP_REDIRECT) {
1833 - ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
1834 + ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
1835 IPPROTO_IPV6, 0);
1836 err = 0;
1837 goto out;
1838 @@ -1828,4 +1828,5 @@ xfrm_tunnel_failed:
1839 module_init(sit_init);
1840 module_exit(sit_cleanup);
1841 MODULE_LICENSE("GPL");
1842 +MODULE_ALIAS_RTNL_LINK("sit");
1843 MODULE_ALIAS_NETDEV("sit0");
1844 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
1845 index 1e586d92260e..20b63d2ab70f 100644
1846 --- a/net/ipv6/udp.c
1847 +++ b/net/ipv6/udp.c
1848 @@ -716,15 +716,15 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
1849 if (inet->inet_dport != rmt_port)
1850 continue;
1851 }
1852 - if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
1853 - !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
1854 + if (!ipv6_addr_any(&s->sk_v6_daddr) &&
1855 + !ipv6_addr_equal(&s->sk_v6_daddr, rmt_addr))
1856 continue;
1857
1858 if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)
1859 continue;
1860
1861 - if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
1862 - if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
1863 + if (!ipv6_addr_any(&s->sk_v6_rcv_saddr)) {
1864 + if (!ipv6_addr_equal(&s->sk_v6_rcv_saddr, loc_addr))
1865 continue;
1866 }
1867 if (!inet6_mc_check(s, loc_addr, rmt_addr))
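Editor's note: the udp_v6_mcast_next() fix is a wrong-variable bug: the address filters tested sk (the fixed starting socket) instead of s (the candidate being iterated), so the per-candidate checks never varied across the loop. The compact sketch below illustrates that bug class with hypothetical names only.

#include <stdbool.h>
#include <stdio.h>

struct sock { int daddr; };

/* Count candidates whose daddr matches; 'start' plays the role of sk. */
static int count_matches(const struct sock *start, const struct sock *cands,
                         int n, int want, bool buggy)
{
    int matches = 0;

    for (int i = 0; i < n; i++) {
        const struct sock *s = &cands[i];
        /* The buggy variant tests 'start' instead of the loop candidate 's'. */
        int daddr = buggy ? start->daddr : s->daddr;

        if (daddr == want)
            matches++;
    }
    return matches;
}

int main(void)
{
    struct sock start = { 10 };
    struct sock cands[3] = { { 10 }, { 20 }, { 10 } };

    printf("correct: %d\n", count_matches(&start, cands, 3, 10, false)); /* 2 */
    printf("buggy:   %d\n", count_matches(&start, cands, 3, 10, true));  /* 3 */
    return 0;
}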
1868 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
1869 index b8d331e7d883..34799e06ee01 100644
1870 --- a/net/mac80211/iface.c
1871 +++ b/net/mac80211/iface.c
1872 @@ -1758,7 +1758,6 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
1873 }
1874 mutex_unlock(&local->iflist_mtx);
1875 unregister_netdevice_many(&unreg_list);
1876 - list_del(&unreg_list);
1877
1878 list_for_each_entry_safe(sdata, tmp, &wdev_list, list) {
1879 list_del(&sdata->list);
1880 diff --git a/net/sctp/associola.c b/net/sctp/associola.c
1881 index 39579c3e0d14..0b999987b658 100644
1882 --- a/net/sctp/associola.c
1883 +++ b/net/sctp/associola.c
1884 @@ -330,7 +330,7 @@ void sctp_association_free(struct sctp_association *asoc)
1885 /* Only real associations count against the endpoint, so
1886 * don't bother if this is a temporary association.
1887 */
1888 - if (!asoc->temp) {
1889 + if (!list_empty(&asoc->asocs)) {
1890 list_del(&asoc->asocs);
1891
1892 /* Decrement the backlog value for a TCP-style listening
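Editor's note: replacing the !asoc->temp test with !list_empty(&asoc->asocs) makes the unlink self-describing: the element is removed only if it is actually on a list, so the free path cannot corrupt the endpoint list when the association was never added or was already removed. The guard pattern is sketched below with a hand-rolled intrusive list rather than <linux/list.h>.

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_add(struct list_head *n, struct list_head *h)
{
    n->next = h->next; n->prev = h;
    h->next->prev = n; h->next = n;
}

static void list_del_init(struct list_head *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
    list_init(n);
}

struct assoc { struct list_head asocs; };

/* Unlink only when the node is really linked; a no-op for "temporary"
 * associations that were never added to the endpoint's list. */
static void assoc_free(struct assoc *a)
{
    if (!list_empty(&a->asocs))
        list_del_init(&a->asocs);
}

int main(void)
{
    struct list_head endpoint;
    struct assoc linked, temp;

    list_init(&endpoint);
    list_init(&linked.asocs);
    list_init(&temp.asocs);

    list_add(&linked.asocs, &endpoint);
    assoc_free(&linked);   /* unlinked from the endpoint list */
    assoc_free(&temp);     /* safe no-op: never linked */
    printf("endpoint empty: %d\n", list_empty(&endpoint));
    return 0;
}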
1893 diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
1894 index 6e0bd933b6a9..3b312ed51618 100644
1895 --- a/security/integrity/evm/evm_main.c
1896 +++ b/security/integrity/evm/evm_main.c
1897 @@ -287,12 +287,20 @@ out:
1898 * @xattr_value: pointer to the new extended attribute value
1899 * @xattr_value_len: pointer to the new extended attribute value length
1900 *
1901 - * Updating 'security.evm' requires CAP_SYS_ADMIN privileges and that
1902 - * the current value is valid.
1903 + * Before allowing the 'security.evm' protected xattr to be updated,
1904 + * verify the existing value is valid. As only the kernel should have
1905 + * access to the EVM encrypted key needed to calculate the HMAC, prevent
1906 + * userspace from writing the HMAC value. Writing 'security.evm'
1907 + * requires CAP_SYS_ADMIN privileges.
1908 */
1909 int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
1910 const void *xattr_value, size_t xattr_value_len)
1911 {
1912 + const struct evm_ima_xattr_data *xattr_data = xattr_value;
1913 +
1914 + if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0)
1915 + && (xattr_data->type == EVM_XATTR_HMAC))
1916 + return -EPERM;
1917 return evm_protect_xattr(dentry, xattr_name, xattr_value,
1918 xattr_value_len);
1919 }
1920 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
1921 index ba9e4d792dd5..d9cd5ce14d2b 100644
1922 --- a/security/integrity/ima/ima_api.c
1923 +++ b/security/integrity/ima/ima_api.c
1924 @@ -199,6 +199,7 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
1925 struct evm_ima_xattr_data **xattr_value,
1926 int *xattr_len)
1927 {
1928 + const char *audit_cause = "failed";
1929 struct inode *inode = file_inode(file);
1930 const char *filename = file->f_dentry->d_name.name;
1931 int result = 0;
1932 @@ -213,6 +214,12 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
1933 if (!(iint->flags & IMA_COLLECTED)) {
1934 u64 i_version = file_inode(file)->i_version;
1935
1936 + if (file->f_flags & O_DIRECT) {
1937 + audit_cause = "failed(directio)";
1938 + result = -EACCES;
1939 + goto out;
1940 + }
1941 +
1942 /* use default hash algorithm */
1943 hash.hdr.algo = ima_hash_algo;
1944
1945 @@ -233,9 +240,10 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
1946 result = -ENOMEM;
1947 }
1948 }
1949 +out:
1950 if (result)
1951 integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
1952 - filename, "collect_data", "failed",
1953 + filename, "collect_data", audit_cause,
1954 result, 0);
1955 return result;
1956 }
1957 diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
1958 index 1bde8e627766..ccd0ac8fa9a0 100644
1959 --- a/security/integrity/ima/ima_crypto.c
1960 +++ b/security/integrity/ima/ima_crypto.c
1961 @@ -27,6 +27,36 @@
1962
1963 static struct crypto_shash *ima_shash_tfm;
1964
1965 +/**
1966 + * ima_kernel_read - read file content
1967 + *
1968 + * This is used to read file content in place of kernel_read().
1969 + * It does not perform locking checks to ensure it cannot be blocked.
1970 + * It does not perform security checks because they are irrelevant for IMA.
1971 + *
1972 + */
1973 +static int ima_kernel_read(struct file *file, loff_t offset,
1974 + char *addr, unsigned long count)
1975 +{
1976 + mm_segment_t old_fs;
1977 + char __user *buf = addr;
1978 + ssize_t ret;
1979 +
1980 + if (!(file->f_mode & FMODE_READ))
1981 + return -EBADF;
1982 + if (!file->f_op->read && !file->f_op->aio_read)
1983 + return -EINVAL;
1984 +
1985 + old_fs = get_fs();
1986 + set_fs(get_ds());
1987 + if (file->f_op->read)
1988 + ret = file->f_op->read(file, buf, count, &offset);
1989 + else
1990 + ret = do_sync_read(file, buf, count, &offset);
1991 + set_fs(old_fs);
1992 + return ret;
1993 +}
1994 +
1995 int ima_init_crypto(void)
1996 {
1997 long rc;
1998 @@ -104,7 +134,7 @@ static int ima_calc_file_hash_tfm(struct file *file,
1999 while (offset < i_size) {
2000 int rbuf_len;
2001
2002 - rbuf_len = kernel_read(file, offset, rbuf, PAGE_SIZE);
2003 + rbuf_len = ima_kernel_read(file, offset, rbuf, PAGE_SIZE);
2004 if (rbuf_len < 0) {
2005 rc = rbuf_len;
2006 break;
2007 diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
2008 index 52ac6cf41f88..dcc98cf542d8 100644
2009 --- a/security/integrity/ima/ima_main.c
2010 +++ b/security/integrity/ima/ima_main.c
2011 @@ -214,8 +214,11 @@ static int process_measurement(struct file *file, const char *filename,
2012 xattr_ptr = &xattr_value;
2013
2014 rc = ima_collect_measurement(iint, file, xattr_ptr, &xattr_len);
2015 - if (rc != 0)
2016 + if (rc != 0) {
2017 + if (file->f_flags & O_DIRECT)
2018 + rc = (iint->flags & IMA_PERMIT_DIRECTIO) ? 0 : -EACCES;
2019 goto out_digsig;
2020 + }
2021
2022 pathname = filename ?: ima_d_path(&file->f_path, &pathbuf);
2023
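Editor's note: the process_measurement() hunk adds a decision point: when measurement collection failed and the file was opened O_DIRECT, the access is allowed only if the matched policy rule set permit_directio. The sketch below is a simplified stand-alone version of that verdict; the flag value and error constant are hypothetical stand-ins, and the real function also folds in appraisal state on its way out.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

/* Hypothetical stand-ins for the iint flag and errno used by IMA. */
#define PERMIT_DIRECTIO  0x04000000
#define EACCES_ERR       (-13)

/*
 * If collection failed on an O_DIRECT open, allow only when the policy
 * carried permit_directio; otherwise propagate the failure.
 */
static int directio_verdict(int collect_rc, int f_flags, unsigned int iint_flags)
{
    if (collect_rc == 0)
        return 0;                               /* collection succeeded */
    if (f_flags & O_DIRECT)
        return (iint_flags & PERMIT_DIRECTIO) ? 0 : EACCES_ERR;
    return collect_rc;                          /* some other failure */
}

int main(void)
{
    printf("%d\n", directio_verdict(-13, O_DIRECT, PERMIT_DIRECTIO)); /* 0   */
    printf("%d\n", directio_verdict(-13, O_DIRECT, 0));               /* -13 */
    printf("%d\n", directio_verdict(0, 0, 0));                        /* 0   */
    return 0;
}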
2024 diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
2025 index 93873a450ff7..40a7488f6721 100644
2026 --- a/security/integrity/ima/ima_policy.c
2027 +++ b/security/integrity/ima/ima_policy.c
2028 @@ -353,7 +353,7 @@ enum {
2029 Opt_obj_user, Opt_obj_role, Opt_obj_type,
2030 Opt_subj_user, Opt_subj_role, Opt_subj_type,
2031 Opt_func, Opt_mask, Opt_fsmagic, Opt_uid, Opt_fowner,
2032 - Opt_appraise_type, Opt_fsuuid
2033 + Opt_appraise_type, Opt_fsuuid, Opt_permit_directio
2034 };
2035
2036 static match_table_t policy_tokens = {
2037 @@ -375,6 +375,7 @@ static match_table_t policy_tokens = {
2038 {Opt_uid, "uid=%s"},
2039 {Opt_fowner, "fowner=%s"},
2040 {Opt_appraise_type, "appraise_type=%s"},
2041 + {Opt_permit_directio, "permit_directio"},
2042 {Opt_err, NULL}
2043 };
2044
2045 @@ -622,6 +623,9 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
2046 else
2047 result = -EINVAL;
2048 break;
2049 + case Opt_permit_directio:
2050 + entry->flags |= IMA_PERMIT_DIRECTIO;
2051 + break;
2052 case Opt_err:
2053 ima_log_string(ab, "UNKNOWN", p);
2054 result = -EINVAL;
2055 diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
2056 index 2fb5e53e927f..33c0a70f6b15 100644
2057 --- a/security/integrity/integrity.h
2058 +++ b/security/integrity/integrity.h
2059 @@ -30,6 +30,7 @@
2060 #define IMA_ACTION_FLAGS 0xff000000
2061 #define IMA_DIGSIG 0x01000000
2062 #define IMA_DIGSIG_REQUIRED 0x02000000
2063 +#define IMA_PERMIT_DIRECTIO 0x04000000
2064
2065 #define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
2066 IMA_APPRAISE_SUBMASK)
2067 diff --git a/sound/core/control.c b/sound/core/control.c
2068 index f038f5afafe2..f0b0e14497a5 100644
2069 --- a/sound/core/control.c
2070 +++ b/sound/core/control.c
2071 @@ -288,6 +288,10 @@ static bool snd_ctl_remove_numid_conflict(struct snd_card *card,
2072 {
2073 struct snd_kcontrol *kctl;
2074
2075 + /* Make sure that the ids assigned to the control do not wrap around */
2076 + if (card->last_numid >= UINT_MAX - count)
2077 + card->last_numid = 0;
2078 +
2079 list_for_each_entry(kctl, &card->controls, list) {
2080 if (kctl->id.numid < card->last_numid + 1 + count &&
2081 kctl->id.numid + kctl->count > card->last_numid + 1) {
2082 @@ -330,6 +334,7 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
2083 {
2084 struct snd_ctl_elem_id id;
2085 unsigned int idx;
2086 + unsigned int count;
2087 int err = -EINVAL;
2088
2089 if (! kcontrol)
2090 @@ -337,6 +342,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
2091 if (snd_BUG_ON(!card || !kcontrol->info))
2092 goto error;
2093 id = kcontrol->id;
2094 + if (id.index > UINT_MAX - kcontrol->count)
2095 + goto error;
2096 +
2097 down_write(&card->controls_rwsem);
2098 if (snd_ctl_find_id(card, &id)) {
2099 up_write(&card->controls_rwsem);
2100 @@ -358,8 +366,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
2101 card->controls_count += kcontrol->count;
2102 kcontrol->id.numid = card->last_numid + 1;
2103 card->last_numid += kcontrol->count;
2104 + count = kcontrol->count;
2105 up_write(&card->controls_rwsem);
2106 - for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
2107 + for (idx = 0; idx < count; idx++, id.index++, id.numid++)
2108 snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
2109 return 0;
2110
2111 @@ -388,6 +397,7 @@ int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol,
2112 bool add_on_replace)
2113 {
2114 struct snd_ctl_elem_id id;
2115 + unsigned int count;
2116 unsigned int idx;
2117 struct snd_kcontrol *old;
2118 int ret;
2119 @@ -423,8 +433,9 @@ add:
2120 card->controls_count += kcontrol->count;
2121 kcontrol->id.numid = card->last_numid + 1;
2122 card->last_numid += kcontrol->count;
2123 + count = kcontrol->count;
2124 up_write(&card->controls_rwsem);
2125 - for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
2126 + for (idx = 0; idx < count; idx++, id.index++, id.numid++)
2127 snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
2128 return 0;
2129
2130 @@ -897,9 +908,9 @@ static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file,
2131 result = kctl->put(kctl, control);
2132 }
2133 if (result > 0) {
2134 + struct snd_ctl_elem_id id = control->id;
2135 up_read(&card->controls_rwsem);
2136 - snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
2137 - &control->id);
2138 + snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &id);
2139 return 0;
2140 }
2141 }
2142 @@ -991,6 +1002,7 @@ static int snd_ctl_elem_unlock(struct snd_ctl_file *file,
2143
2144 struct user_element {
2145 struct snd_ctl_elem_info info;
2146 + struct snd_card *card;
2147 void *elem_data; /* element data */
2148 unsigned long elem_data_size; /* size of element data in bytes */
2149 void *tlv_data; /* TLV data */
2150 @@ -1034,7 +1046,9 @@ static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
2151 {
2152 struct user_element *ue = kcontrol->private_data;
2153
2154 + mutex_lock(&ue->card->user_ctl_lock);
2155 memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size);
2156 + mutex_unlock(&ue->card->user_ctl_lock);
2157 return 0;
2158 }
2159
2160 @@ -1043,10 +1057,12 @@ static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
2161 {
2162 int change;
2163 struct user_element *ue = kcontrol->private_data;
2164 -
2165 +
2166 + mutex_lock(&ue->card->user_ctl_lock);
2167 change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0;
2168 if (change)
2169 memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size);
2170 + mutex_unlock(&ue->card->user_ctl_lock);
2171 return change;
2172 }
2173
2174 @@ -1066,19 +1082,32 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
2175 new_data = memdup_user(tlv, size);
2176 if (IS_ERR(new_data))
2177 return PTR_ERR(new_data);
2178 + mutex_lock(&ue->card->user_ctl_lock);
2179 change = ue->tlv_data_size != size;
2180 if (!change)
2181 change = memcmp(ue->tlv_data, new_data, size);
2182 kfree(ue->tlv_data);
2183 ue->tlv_data = new_data;
2184 ue->tlv_data_size = size;
2185 + mutex_unlock(&ue->card->user_ctl_lock);
2186 } else {
2187 - if (! ue->tlv_data_size || ! ue->tlv_data)
2188 - return -ENXIO;
2189 - if (size < ue->tlv_data_size)
2190 - return -ENOSPC;
2191 + int ret = 0;
2192 +
2193 + mutex_lock(&ue->card->user_ctl_lock);
2194 + if (!ue->tlv_data_size || !ue->tlv_data) {
2195 + ret = -ENXIO;
2196 + goto err_unlock;
2197 + }
2198 + if (size < ue->tlv_data_size) {
2199 + ret = -ENOSPC;
2200 + goto err_unlock;
2201 + }
2202 if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
2203 - return -EFAULT;
2204 + ret = -EFAULT;
2205 +err_unlock:
2206 + mutex_unlock(&ue->card->user_ctl_lock);
2207 + if (ret)
2208 + return ret;
2209 }
2210 return change;
2211 }
2212 @@ -1136,8 +1165,6 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
2213 struct user_element *ue;
2214 int idx, err;
2215
2216 - if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS)
2217 - return -ENOMEM;
2218 if (info->count < 1)
2219 return -EINVAL;
2220 access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
2221 @@ -1146,21 +1173,16 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
2222 SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
2223 info->id.numid = 0;
2224 memset(&kctl, 0, sizeof(kctl));
2225 - down_write(&card->controls_rwsem);
2226 - _kctl = snd_ctl_find_id(card, &info->id);
2227 - err = 0;
2228 - if (_kctl) {
2229 - if (replace)
2230 - err = snd_ctl_remove(card, _kctl);
2231 - else
2232 - err = -EBUSY;
2233 - } else {
2234 - if (replace)
2235 - err = -ENOENT;
2236 +
2237 + if (replace) {
2238 + err = snd_ctl_remove_user_ctl(file, &info->id);
2239 + if (err)
2240 + return err;
2241 }
2242 - up_write(&card->controls_rwsem);
2243 - if (err < 0)
2244 - return err;
2245 +
2246 + if (card->user_ctl_count >= MAX_USER_CONTROLS)
2247 + return -ENOMEM;
2248 +
2249 memcpy(&kctl.id, &info->id, sizeof(info->id));
2250 kctl.count = info->owner ? info->owner : 1;
2251 access |= SNDRV_CTL_ELEM_ACCESS_USER;
2252 @@ -1210,6 +1232,7 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
2253 ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
2254 if (ue == NULL)
2255 return -ENOMEM;
2256 + ue->card = card;
2257 ue->info = *info;
2258 ue->info.access = 0;
2259 ue->elem_data = (char *)ue + sizeof(*ue);
2260 @@ -1321,8 +1344,9 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
2261 }
2262 err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv);
2263 if (err > 0) {
2264 + struct snd_ctl_elem_id id = kctl->id;
2265 up_read(&card->controls_rwsem);
2266 - snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &kctl->id);
2267 + snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &id);
2268 return 0;
2269 }
2270 } else {
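Editor's note: both new guards in sound/core/control.c, the last_numid wrap reset and the id.index bound check, use the "compare against the ceiling minus the count" form so that the check itself cannot overflow. A minimal demonstration of why that form is preferred over adding first:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Overflow-safe "would base + count wrap?" test, of the kind used for both
 * the control index and the numid allocator above.  Comparing against
 * UINT_MAX - count never overflows, whereas computing base + count first
 * silently wraps and defeats the check.
 */
static bool would_wrap(unsigned int base, unsigned int count)
{
    return base > UINT_MAX - count;
}

int main(void)
{
    printf("%d\n", would_wrap(10, 5));              /* 0: fine           */
    printf("%d\n", would_wrap(UINT_MAX - 2, 5));    /* 1: would wrap     */
    /* naive post-add check: (UINT_MAX - 2) + 5 wraps to 2, so it looks fine */
    printf("%u\n", (unsigned int)(UINT_MAX - 2) + 5u);
    return 0;
}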
2271 diff --git a/sound/core/init.c b/sound/core/init.c
2272 index 5ee83845c5de..7bdfd19e24a8 100644
2273 --- a/sound/core/init.c
2274 +++ b/sound/core/init.c
2275 @@ -232,6 +232,7 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
2276 INIT_LIST_HEAD(&card->devices);
2277 init_rwsem(&card->controls_rwsem);
2278 rwlock_init(&card->ctl_files_rwlock);
2279 + mutex_init(&card->user_ctl_lock);
2280 INIT_LIST_HEAD(&card->controls);
2281 INIT_LIST_HEAD(&card->ctl_files);
2282 spin_lock_init(&card->files_lock);
2283 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
2284 index b4218a19df22..8867ab3a71d4 100644
2285 --- a/sound/pci/hda/patch_hdmi.c
2286 +++ b/sound/pci/hda/patch_hdmi.c
2287 @@ -1598,10 +1598,18 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
2288 * Re-setup pin and infoframe. This is needed e.g. when
2289 * - sink is first plugged-in (infoframe is not set up if !monitor_present)
2290 * - transcoder can change during stream playback on Haswell
2291 + * and this can make the HW reset the converter selection on a pin.
2292 */
2293 - if (eld->eld_valid && !old_eld_valid && per_pin->setup)
2294 + if (eld->eld_valid && !old_eld_valid && per_pin->setup) {
2295 + if (is_haswell_plus(codec) || is_valleyview(codec)) {
2296 + intel_verify_pin_cvt_connect(codec, per_pin);
2297 + intel_not_share_assigned_cvt(codec, pin_nid,
2298 + per_pin->mux_idx);
2299 + }
2300 +
2301 hdmi_setup_audio_infoframe(codec, per_pin,
2302 per_pin->non_pcm);
2303 + }
2304 }
2305
2306 if (eld_changed)
2307 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2308 index d943508a7f48..10014ed541cf 100644
2309 --- a/sound/pci/hda/patch_realtek.c
2310 +++ b/sound/pci/hda/patch_realtek.c
2311 @@ -4114,6 +4114,7 @@ enum {
2312 ALC269_FIXUP_HEADSET_MIC,
2313 ALC269_FIXUP_QUANTA_MUTE,
2314 ALC269_FIXUP_LIFEBOOK,
2315 + ALC269_FIXUP_LIFEBOOK_EXTMIC,
2316 ALC269_FIXUP_AMIC,
2317 ALC269_FIXUP_DMIC,
2318 ALC269VB_FIXUP_AMIC,
2319 @@ -4243,6 +4244,13 @@ static const struct hda_fixup alc269_fixups[] = {
2320 .chained = true,
2321 .chain_id = ALC269_FIXUP_QUANTA_MUTE
2322 },
2323 + [ALC269_FIXUP_LIFEBOOK_EXTMIC] = {
2324 + .type = HDA_FIXUP_PINS,
2325 + .v.pins = (const struct hda_pintbl[]) {
2326 + { 0x19, 0x01a1903c }, /* headset mic, with jack detect */
2327 + { }
2328 + },
2329 + },
2330 [ALC269_FIXUP_AMIC] = {
2331 .type = HDA_FIXUP_PINS,
2332 .v.pins = (const struct hda_pintbl[]) {
2333 @@ -4633,14 +4641,24 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2334 SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2335 SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
2336 /* ALC282 */
2337 + SND_PCI_QUIRK(0x103c, 0x220d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2338 + SND_PCI_QUIRK(0x103c, 0x220e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2339 SND_PCI_QUIRK(0x103c, 0x220f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2340 + SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2341 + SND_PCI_QUIRK(0x103c, 0x2211, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2342 + SND_PCI_QUIRK(0x103c, 0x2212, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2343 SND_PCI_QUIRK(0x103c, 0x2213, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2344 + SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2345 SND_PCI_QUIRK(0x103c, 0x2266, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2346 SND_PCI_QUIRK(0x103c, 0x2267, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2347 SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2348 SND_PCI_QUIRK(0x103c, 0x2269, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2349 SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2350 SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2351 + SND_PCI_QUIRK(0x103c, 0x226c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2352 + SND_PCI_QUIRK(0x103c, 0x226d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2353 + SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2354 + SND_PCI_QUIRK(0x103c, 0x226f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2355 SND_PCI_QUIRK(0x103c, 0x227a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2356 SND_PCI_QUIRK(0x103c, 0x227b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2357 SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2358 @@ -4680,6 +4698,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2359 SND_PCI_QUIRK(0x103c, 0x22c8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2360 SND_PCI_QUIRK(0x103c, 0x22c3, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2361 SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2362 + SND_PCI_QUIRK(0x103c, 0x2334, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2363 + SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2364 + SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2365 + SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2366 SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED),
2367 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
2368 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
2369 @@ -4702,6 +4724,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2370 SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
2371 SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
2372 SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
2373 + SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
2374 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
2375 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
2376 SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
2377 @@ -5809,6 +5832,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
2378 { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 },
2379 { .id = 0x10ec0671, .name = "ALC671", .patch = patch_alc662 },
2380 { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 },
2381 + { .id = 0x10ec0867, .name = "ALC891", .patch = patch_alc882 },
2382 { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },
2383 { .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 },
2384 { .id = 0x10ec0883, .name = "ALC883", .patch = patch_alc882 },
2385 diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
2386 index f7b0b37aa858..0757e655bfe3 100644
2387 --- a/sound/soc/codecs/max98090.c
2388 +++ b/sound/soc/codecs/max98090.c
2389 @@ -255,6 +255,7 @@ static struct reg_default max98090_reg[] = {
2390 static bool max98090_volatile_register(struct device *dev, unsigned int reg)
2391 {
2392 switch (reg) {
2393 + case M98090_REG_SOFTWARE_RESET:
2394 case M98090_REG_DEVICE_STATUS:
2395 case M98090_REG_JACK_STATUS:
2396 case M98090_REG_REVISION_ID:
2397 @@ -2373,6 +2374,8 @@ static int max98090_runtime_resume(struct device *dev)
2398
2399 regcache_cache_only(max98090->regmap, false);
2400
2401 + max98090_reset(max98090);
2402 +
2403 regcache_sync(max98090->regmap);
2404
2405 return 0;
2406 diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
2407 index d7349bc89ad3..e12fafbb1e09 100644
2408 --- a/sound/soc/codecs/tlv320aic3x.c
2409 +++ b/sound/soc/codecs/tlv320aic3x.c
2410 @@ -169,7 +169,7 @@ static int snd_soc_dapm_put_volsw_aic3x(struct snd_kcontrol *kcontrol,
2411 mask <<= shift;
2412 val <<= shift;
2413
2414 - change = snd_soc_test_bits(codec, val, mask, reg);
2415 + change = snd_soc_test_bits(codec, reg, mask, val);
2416 if (change) {
2417 update.kcontrol = kcontrol;
2418 update.reg = reg;
2419 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
2420 index 6d6ceee447d5..ebb03a886593 100644
2421 --- a/sound/soc/soc-dapm.c
2422 +++ b/sound/soc/soc-dapm.c
2423 @@ -2857,22 +2857,19 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
2424 mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
2425
2426 change = dapm_kcontrol_set_value(kcontrol, val);
2427 -
2428 - if (reg != SND_SOC_NOPM) {
2429 - mask = mask << shift;
2430 - val = val << shift;
2431 -
2432 - change = snd_soc_test_bits(codec, reg, mask, val);
2433 - }
2434 -
2435 if (change) {
2436 if (reg != SND_SOC_NOPM) {
2437 - update.kcontrol = kcontrol;
2438 - update.reg = reg;
2439 - update.mask = mask;
2440 - update.val = val;
2441 + mask = mask << shift;
2442 + val = val << shift;
2443 +
2444 + if (snd_soc_test_bits(codec, reg, mask, val)) {
2445 + update.kcontrol = kcontrol;
2446 + update.reg = reg;
2447 + update.mask = mask;
2448 + update.val = val;
2449 + card->update = &update;
2450 + }
2451
2452 - card->update = &update;
2453 }
2454
2455 ret = soc_dapm_mixer_update_power(card, kcontrol, connect);