Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.15/0105-4.15.6-all-fixes.patch

Revision 3089
Wed Mar 21 14:52:34 2018 UTC by niro
File size: 48432 bytes
-linux-4.15.6
1 diff --git a/Makefile b/Makefile
2 index 28c537fbe328..51563c76bdf6 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 15
9 -SUBLEVEL = 5
10 +SUBLEVEL = 6
11 EXTRAVERSION =
12 NAME = Fearless Coyote
13
14 diff --git a/arch/arm/common/bL_switcher_dummy_if.c b/arch/arm/common/bL_switcher_dummy_if.c
15 index 4c10c6452678..f4dc1714a79e 100644
16 --- a/arch/arm/common/bL_switcher_dummy_if.c
17 +++ b/arch/arm/common/bL_switcher_dummy_if.c
18 @@ -57,3 +57,7 @@ static struct miscdevice bL_switcher_device = {
19 &bL_switcher_fops
20 };
21 module_misc_device(bL_switcher_device);
22 +
23 +MODULE_AUTHOR("Nicolas Pitre <nico@linaro.org>");
24 +MODULE_LICENSE("GPL v2");
25 +MODULE_DESCRIPTION("big.LITTLE switcher dummy user interface");
26 diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
27 index 26396ef53bde..ea407aff1251 100644
28 --- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
29 +++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
30 @@ -81,6 +81,7 @@
31 reg = <0x000>;
32 enable-method = "psci";
33 cpu-idle-states = <&CPU_SLEEP_0>;
34 + #cooling-cells = <2>;
35 };
36
37 cpu1: cpu@1 {
38 @@ -97,6 +98,7 @@
39 reg = <0x100>;
40 enable-method = "psci";
41 cpu-idle-states = <&CPU_SLEEP_0>;
42 + #cooling-cells = <2>;
43 };
44
45 cpu3: cpu@101 {
46 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
47 index 1c3b7ceb36d2..e7273a606a07 100644
48 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
49 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
50 @@ -55,29 +55,31 @@
51 #define RAB1bl %bl
52 #define RAB2bl %cl
53
54 +#define CD0 0x0(%rsp)
55 +#define CD1 0x8(%rsp)
56 +#define CD2 0x10(%rsp)
57 +
58 +# used only before/after all rounds
59 #define RCD0 %r8
60 #define RCD1 %r9
61 #define RCD2 %r10
62
63 -#define RCD0d %r8d
64 -#define RCD1d %r9d
65 -#define RCD2d %r10d
66 -
67 -#define RX0 %rbp
68 -#define RX1 %r11
69 -#define RX2 %r12
70 +# used only during rounds
71 +#define RX0 %r8
72 +#define RX1 %r9
73 +#define RX2 %r10
74
75 -#define RX0d %ebp
76 -#define RX1d %r11d
77 -#define RX2d %r12d
78 +#define RX0d %r8d
79 +#define RX1d %r9d
80 +#define RX2d %r10d
81
82 -#define RY0 %r13
83 -#define RY1 %r14
84 -#define RY2 %r15
85 +#define RY0 %r11
86 +#define RY1 %r12
87 +#define RY2 %r13
88
89 -#define RY0d %r13d
90 -#define RY1d %r14d
91 -#define RY2d %r15d
92 +#define RY0d %r11d
93 +#define RY1d %r12d
94 +#define RY2d %r13d
95
96 #define RT0 %rdx
97 #define RT1 %rsi
98 @@ -85,6 +87,8 @@
99 #define RT0d %edx
100 #define RT1d %esi
101
102 +#define RT1bl %sil
103 +
104 #define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \
105 movzbl ab ## bl, tmp2 ## d; \
106 movzbl ab ## bh, tmp1 ## d; \
107 @@ -92,6 +96,11 @@
108 op1##l T0(CTX, tmp2, 4), dst ## d; \
109 op2##l T1(CTX, tmp1, 4), dst ## d;
110
111 +#define swap_ab_with_cd(ab, cd, tmp) \
112 + movq cd, tmp; \
113 + movq ab, cd; \
114 + movq tmp, ab;
115 +
116 /*
117 * Combined G1 & G2 function. Reordered with help of rotates to have moves
118 * at begining.
119 @@ -110,15 +119,15 @@
120 /* G1,2 && G2,2 */ \
121 do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \
122 do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \
123 - xchgq cd ## 0, ab ## 0; \
124 + swap_ab_with_cd(ab ## 0, cd ## 0, RT0); \
125 \
126 do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \
127 do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \
128 - xchgq cd ## 1, ab ## 1; \
129 + swap_ab_with_cd(ab ## 1, cd ## 1, RT0); \
130 \
131 do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \
132 do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \
133 - xchgq cd ## 2, ab ## 2;
134 + swap_ab_with_cd(ab ## 2, cd ## 2, RT0);
135
136 #define enc_round_end(ab, x, y, n) \
137 addl y ## d, x ## d; \
138 @@ -168,6 +177,16 @@
139 decrypt_round3(ba, dc, (n*2)+1); \
140 decrypt_round3(ba, dc, (n*2));
141
142 +#define push_cd() \
143 + pushq RCD2; \
144 + pushq RCD1; \
145 + pushq RCD0;
146 +
147 +#define pop_cd() \
148 + popq RCD0; \
149 + popq RCD1; \
150 + popq RCD2;
151 +
152 #define inpack3(in, n, xy, m) \
153 movq 4*(n)(in), xy ## 0; \
154 xorq w+4*m(CTX), xy ## 0; \
155 @@ -223,11 +242,8 @@ ENTRY(__twofish_enc_blk_3way)
156 * %rdx: src, RIO
157 * %rcx: bool, if true: xor output
158 */
159 - pushq %r15;
160 - pushq %r14;
161 pushq %r13;
162 pushq %r12;
163 - pushq %rbp;
164 pushq %rbx;
165
166 pushq %rcx; /* bool xor */
167 @@ -235,40 +251,36 @@ ENTRY(__twofish_enc_blk_3way)
168
169 inpack_enc3();
170
171 - encrypt_cycle3(RAB, RCD, 0);
172 - encrypt_cycle3(RAB, RCD, 1);
173 - encrypt_cycle3(RAB, RCD, 2);
174 - encrypt_cycle3(RAB, RCD, 3);
175 - encrypt_cycle3(RAB, RCD, 4);
176 - encrypt_cycle3(RAB, RCD, 5);
177 - encrypt_cycle3(RAB, RCD, 6);
178 - encrypt_cycle3(RAB, RCD, 7);
179 + push_cd();
180 + encrypt_cycle3(RAB, CD, 0);
181 + encrypt_cycle3(RAB, CD, 1);
182 + encrypt_cycle3(RAB, CD, 2);
183 + encrypt_cycle3(RAB, CD, 3);
184 + encrypt_cycle3(RAB, CD, 4);
185 + encrypt_cycle3(RAB, CD, 5);
186 + encrypt_cycle3(RAB, CD, 6);
187 + encrypt_cycle3(RAB, CD, 7);
188 + pop_cd();
189
190 popq RIO; /* dst */
191 - popq %rbp; /* bool xor */
192 + popq RT1; /* bool xor */
193
194 - testb %bpl, %bpl;
195 + testb RT1bl, RT1bl;
196 jnz .L__enc_xor3;
197
198 outunpack_enc3(mov);
199
200 popq %rbx;
201 - popq %rbp;
202 popq %r12;
203 popq %r13;
204 - popq %r14;
205 - popq %r15;
206 ret;
207
208 .L__enc_xor3:
209 outunpack_enc3(xor);
210
211 popq %rbx;
212 - popq %rbp;
213 popq %r12;
214 popq %r13;
215 - popq %r14;
216 - popq %r15;
217 ret;
218 ENDPROC(__twofish_enc_blk_3way)
219
220 @@ -278,35 +290,31 @@ ENTRY(twofish_dec_blk_3way)
221 * %rsi: dst
222 * %rdx: src, RIO
223 */
224 - pushq %r15;
225 - pushq %r14;
226 pushq %r13;
227 pushq %r12;
228 - pushq %rbp;
229 pushq %rbx;
230
231 pushq %rsi; /* dst */
232
233 inpack_dec3();
234
235 - decrypt_cycle3(RAB, RCD, 7);
236 - decrypt_cycle3(RAB, RCD, 6);
237 - decrypt_cycle3(RAB, RCD, 5);
238 - decrypt_cycle3(RAB, RCD, 4);
239 - decrypt_cycle3(RAB, RCD, 3);
240 - decrypt_cycle3(RAB, RCD, 2);
241 - decrypt_cycle3(RAB, RCD, 1);
242 - decrypt_cycle3(RAB, RCD, 0);
243 + push_cd();
244 + decrypt_cycle3(RAB, CD, 7);
245 + decrypt_cycle3(RAB, CD, 6);
246 + decrypt_cycle3(RAB, CD, 5);
247 + decrypt_cycle3(RAB, CD, 4);
248 + decrypt_cycle3(RAB, CD, 3);
249 + decrypt_cycle3(RAB, CD, 2);
250 + decrypt_cycle3(RAB, CD, 1);
251 + decrypt_cycle3(RAB, CD, 0);
252 + pop_cd();
253
254 popq RIO; /* dst */
255
256 outunpack_dec3();
257
258 popq %rbx;
259 - popq %rbp;
260 popq %r12;
261 popq %r13;
262 - popq %r14;
263 - popq %r15;
264 ret;
265 ENDPROC(twofish_dec_blk_3way)
266 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
267 index ac381437c291..17f4eca37d22 100644
268 --- a/arch/x86/kvm/x86.c
269 +++ b/arch/x86/kvm/x86.c
270 @@ -2939,6 +2939,12 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
271 pagefault_enable();
272 kvm_x86_ops->vcpu_put(vcpu);
273 vcpu->arch.last_host_tsc = rdtsc();
274 + /*
275 + * If userspace has set any breakpoints or watchpoints, dr6 is restored
276 + * on every vmexit, but if not, we might have a stale dr6 from the
277 + * guest. do_debug expects dr6 to be cleared after it runs, do the same.
278 + */
279 + set_debugreg(0, 6);
280 }
281
282 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
283 diff --git a/block/blk-map.c b/block/blk-map.c
284 index d3a94719f03f..db9373bd31ac 100644
285 --- a/block/blk-map.c
286 +++ b/block/blk-map.c
287 @@ -119,7 +119,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
288 unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
289 struct bio *bio = NULL;
290 struct iov_iter i;
291 - int ret;
292 + int ret = -EINVAL;
293
294 if (!iter_is_iovec(iter))
295 goto fail;
296 @@ -148,7 +148,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
297 __blk_rq_unmap_user(bio);
298 fail:
299 rq->bio = NULL;
300 - return -EINVAL;
301 + return ret;
302 }
303 EXPORT_SYMBOL(blk_rq_map_user_iov);
304
305 diff --git a/drivers/android/binder.c b/drivers/android/binder.c
306 index ec0917fb7cca..255eabdca2a4 100644
307 --- a/drivers/android/binder.c
308 +++ b/drivers/android/binder.c
309 @@ -1933,8 +1933,14 @@ static void binder_send_failed_reply(struct binder_transaction *t,
310 &target_thread->todo);
311 wake_up_interruptible(&target_thread->wait);
312 } else {
313 - WARN(1, "Unexpected reply error: %u\n",
314 - target_thread->reply_error.cmd);
315 + /*
316 + * Cannot get here for normal operation, but
317 + * we can if multiple synchronous transactions
318 + * are sent without blocking for responses.
319 + * Just ignore the 2nd error in this case.
320 + */
321 + pr_warn("Unexpected reply error: %u\n",
322 + target_thread->reply_error.cmd);
323 }
324 binder_inner_proc_unlock(target_thread->proc);
325 binder_thread_dec_tmpref(target_thread);
326 @@ -2135,7 +2141,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
327 int debug_id = buffer->debug_id;
328
329 binder_debug(BINDER_DEBUG_TRANSACTION,
330 - "%d buffer release %d, size %zd-%zd, failed at %p\n",
331 + "%d buffer release %d, size %zd-%zd, failed at %pK\n",
332 proc->pid, buffer->debug_id,
333 buffer->data_size, buffer->offsets_size, failed_at);
334
335 @@ -3647,7 +3653,7 @@ static int binder_thread_write(struct binder_proc *proc,
336 }
337 }
338 binder_debug(BINDER_DEBUG_DEAD_BINDER,
339 - "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
340 + "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
341 proc->pid, thread->pid, (u64)cookie,
342 death);
343 if (death == NULL) {
344 @@ -4316,6 +4322,15 @@ static int binder_thread_release(struct binder_proc *proc,
345
346 binder_inner_proc_unlock(thread->proc);
347
348 + /*
349 + * This is needed to avoid races between wake_up_poll() above and
350 + * and ep_remove_waitqueue() called for other reasons (eg the epoll file
351 + * descriptor being closed); ep_remove_waitqueue() holds an RCU read
352 + * lock, so we can be sure it's done after calling synchronize_rcu().
353 + */
354 + if (thread->looper & BINDER_LOOPER_STATE_POLL)
355 + synchronize_rcu();
356 +
357 if (send_reply)
358 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
359 binder_release_work(proc, &thread->todo);
360 @@ -4331,6 +4346,8 @@ static unsigned int binder_poll(struct file *filp,
361 bool wait_for_proc_work;
362
363 thread = binder_get_thread(proc);
364 + if (!thread)
365 + return POLLERR;
366
367 binder_inner_proc_lock(thread->proc);
368 thread->looper |= BINDER_LOOPER_STATE_POLL;
369 @@ -4974,7 +4991,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
370 spin_lock(&t->lock);
371 to_proc = t->to_proc;
372 seq_printf(m,
373 - "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
374 + "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
375 prefix, t->debug_id, t,
376 t->from ? t->from->proc->pid : 0,
377 t->from ? t->from->pid : 0,
378 @@ -4998,7 +5015,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
379 }
380 if (buffer->target_node)
381 seq_printf(m, " node %d", buffer->target_node->debug_id);
382 - seq_printf(m, " size %zd:%zd data %p\n",
383 + seq_printf(m, " size %zd:%zd data %pK\n",
384 buffer->data_size, buffer->offsets_size,
385 buffer->data);
386 }
387 diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
388 index 142c6020cec7..5c0496d1ed41 100644
389 --- a/drivers/crypto/s5p-sss.c
390 +++ b/drivers/crypto/s5p-sss.c
391 @@ -1926,15 +1926,21 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
392 uint32_t aes_control;
393 unsigned long flags;
394 int err;
395 + u8 *iv;
396
397 aes_control = SSS_AES_KEY_CHANGE_MODE;
398 if (mode & FLAGS_AES_DECRYPT)
399 aes_control |= SSS_AES_MODE_DECRYPT;
400
401 - if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC)
402 + if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
403 aes_control |= SSS_AES_CHAIN_MODE_CBC;
404 - else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
405 + iv = req->info;
406 + } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
407 aes_control |= SSS_AES_CHAIN_MODE_CTR;
408 + iv = req->info;
409 + } else {
410 + iv = NULL; /* AES_ECB */
411 + }
412
413 if (dev->ctx->keylen == AES_KEYSIZE_192)
414 aes_control |= SSS_AES_KEY_SIZE_192;
415 @@ -1965,7 +1971,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
416 goto outdata_error;
417
418 SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
419 - s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
420 + s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
421
422 s5p_set_dma_indata(dev, dev->sg_src);
423 s5p_set_dma_outdata(dev, dev->sg_dst);
424 diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
425 index 8289ee482f49..09bd6c6c176c 100644
426 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
427 +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
428 @@ -3648,6 +3648,12 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
429 hdw);
430 hdw->ctl_write_urb->actual_length = 0;
431 hdw->ctl_write_pend_flag = !0;
432 + if (usb_urb_ep_type_check(hdw->ctl_write_urb)) {
433 + pvr2_trace(
434 + PVR2_TRACE_ERROR_LEGS,
435 + "Invalid write control endpoint");
436 + return -EINVAL;
437 + }
438 status = usb_submit_urb(hdw->ctl_write_urb,GFP_KERNEL);
439 if (status < 0) {
440 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
441 @@ -3672,6 +3678,12 @@ status);
442 hdw);
443 hdw->ctl_read_urb->actual_length = 0;
444 hdw->ctl_read_pend_flag = !0;
445 + if (usb_urb_ep_type_check(hdw->ctl_read_urb)) {
446 + pvr2_trace(
447 + PVR2_TRACE_ERROR_LEGS,
448 + "Invalid read control endpoint");
449 + return -EINVAL;
450 + }
451 status = usb_submit_urb(hdw->ctl_read_urb,GFP_KERNEL);
452 if (status < 0) {
453 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
454 diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
455 index 0ccccbaf530d..e4b10b2d1a08 100644
456 --- a/drivers/misc/mei/hw-me-regs.h
457 +++ b/drivers/misc/mei/hw-me-regs.h
458 @@ -132,6 +132,11 @@
459 #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
460 #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
461
462 +#define MEI_DEV_ID_CNP_LP 0x9DE0 /* Cannon Point LP */
463 +#define MEI_DEV_ID_CNP_LP_4 0x9DE4 /* Cannon Point LP 4 (iTouch) */
464 +#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */
465 +#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */
466 +
467 /*
468 * MEI HW Section
469 */
470 diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
471 index 4a0ccda4d04b..ea4e152270a3 100644
472 --- a/drivers/misc/mei/pci-me.c
473 +++ b/drivers/misc/mei/pci-me.c
474 @@ -98,6 +98,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
475 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
476 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
477
478 + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH8_CFG)},
479 + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)},
480 + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)},
481 + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
482 +
483 /* required last entry */
484 {0, }
485 };
486 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
487 index a8ec589d1359..e29cd5c7d39f 100644
488 --- a/drivers/net/tun.c
489 +++ b/drivers/net/tun.c
490 @@ -1317,27 +1317,23 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
491 skb->truesize += skb->data_len;
492
493 for (i = 1; i < it->nr_segs; i++) {
494 + struct page_frag *pfrag = &current->task_frag;
495 size_t fragsz = it->iov[i].iov_len;
496 - unsigned long offset;
497 - struct page *page;
498 - void *data;
499
500 if (fragsz == 0 || fragsz > PAGE_SIZE) {
501 err = -EINVAL;
502 goto free;
503 }
504
505 - local_bh_disable();
506 - data = napi_alloc_frag(fragsz);
507 - local_bh_enable();
508 - if (!data) {
509 + if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
510 err = -ENOMEM;
511 goto free;
512 }
513
514 - page = virt_to_head_page(data);
515 - offset = data - page_address(page);
516 - skb_fill_page_desc(skb, i - 1, page, offset, fragsz);
517 + skb_fill_page_desc(skb, i - 1, pfrag->page,
518 + pfrag->offset, fragsz);
519 + page_ref_inc(pfrag->page);
520 + pfrag->offset += fragsz;
521 }
522
523 return skb;
524 diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
525 index ce35ff748adf..0a43b2e8906f 100644
526 --- a/drivers/soc/qcom/rmtfs_mem.c
527 +++ b/drivers/soc/qcom/rmtfs_mem.c
528 @@ -267,3 +267,7 @@ static void qcom_rmtfs_mem_exit(void)
529 unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
530 }
531 module_exit(qcom_rmtfs_mem_exit);
532 +
533 +MODULE_AUTHOR("Linaro Ltd");
534 +MODULE_DESCRIPTION("Qualcomm Remote Filesystem memory driver");
535 +MODULE_LICENSE("GPL v2");
536 diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
537 index 372ce9913e6d..e7541dc90473 100644
538 --- a/drivers/staging/android/ashmem.c
539 +++ b/drivers/staging/android/ashmem.c
540 @@ -710,30 +710,32 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
541 size_t pgstart, pgend;
542 int ret = -EINVAL;
543
544 + mutex_lock(&ashmem_mutex);
545 +
546 if (unlikely(!asma->file))
547 - return -EINVAL;
548 + goto out_unlock;
549
550 - if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
551 - return -EFAULT;
552 + if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) {
553 + ret = -EFAULT;
554 + goto out_unlock;
555 + }
556
557 /* per custom, you can pass zero for len to mean "everything onward" */
558 if (!pin.len)
559 pin.len = PAGE_ALIGN(asma->size) - pin.offset;
560
561 if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
562 - return -EINVAL;
563 + goto out_unlock;
564
565 if (unlikely(((__u32)-1) - pin.offset < pin.len))
566 - return -EINVAL;
567 + goto out_unlock;
568
569 if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
570 - return -EINVAL;
571 + goto out_unlock;
572
573 pgstart = pin.offset / PAGE_SIZE;
574 pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
575
576 - mutex_lock(&ashmem_mutex);
577 -
578 switch (cmd) {
579 case ASHMEM_PIN:
580 ret = ashmem_pin(asma, pgstart, pgend);
581 @@ -746,6 +748,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
582 break;
583 }
584
585 +out_unlock:
586 mutex_unlock(&ashmem_mutex);
587
588 return ret;
589 diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
590 index c78989351f9c..6cfed48f376e 100644
591 --- a/drivers/staging/android/ion/ion-ioctl.c
592 +++ b/drivers/staging/android/ion/ion-ioctl.c
593 @@ -70,8 +70,10 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
594 return -EFAULT;
595
596 ret = validate_ioctl_arg(cmd, &data);
597 - if (WARN_ON_ONCE(ret))
598 + if (ret) {
599 + pr_warn_once("%s: ioctl validate failed\n", __func__);
600 return ret;
601 + }
602
603 if (!(dir & _IOC_WRITE))
604 memset(&data, 0, sizeof(data));
605 diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
606 index 4dc5d7a589c2..b6ece18e6a88 100644
607 --- a/drivers/staging/android/ion/ion_system_heap.c
608 +++ b/drivers/staging/android/ion/ion_system_heap.c
609 @@ -371,7 +371,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
610 unsigned long i;
611 int ret;
612
613 - page = alloc_pages(low_order_gfp_flags, order);
614 + page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
615 if (!page)
616 return -ENOMEM;
617
618 diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig
619 index 504c987447f2..eee1c1b277fa 100644
620 --- a/drivers/staging/fsl-mc/bus/Kconfig
621 +++ b/drivers/staging/fsl-mc/bus/Kconfig
622 @@ -8,7 +8,7 @@
623
624 config FSL_MC_BUS
625 bool "QorIQ DPAA2 fsl-mc bus driver"
626 - depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86 || PPC)))
627 + depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86_LOCAL_APIC || PPC)))
628 select GENERIC_MSI_IRQ_DOMAIN
629 help
630 Driver to enable the bus infrastructure for the QorIQ DPAA2
631 diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
632 index cadfb96734ed..d4da2807eb55 100644
633 --- a/drivers/staging/iio/adc/ad7192.c
634 +++ b/drivers/staging/iio/adc/ad7192.c
635 @@ -141,6 +141,8 @@
636 #define AD7192_GPOCON_P1DAT BIT(1) /* P1 state */
637 #define AD7192_GPOCON_P0DAT BIT(0) /* P0 state */
638
639 +#define AD7192_EXT_FREQ_MHZ_MIN 2457600
640 +#define AD7192_EXT_FREQ_MHZ_MAX 5120000
641 #define AD7192_INT_FREQ_MHZ 4915200
642
643 /* NOTE:
644 @@ -218,6 +220,12 @@ static int ad7192_calibrate_all(struct ad7192_state *st)
645 ARRAY_SIZE(ad7192_calib_arr));
646 }
647
648 +static inline bool ad7192_valid_external_frequency(u32 freq)
649 +{
650 + return (freq >= AD7192_EXT_FREQ_MHZ_MIN &&
651 + freq <= AD7192_EXT_FREQ_MHZ_MAX);
652 +}
653 +
654 static int ad7192_setup(struct ad7192_state *st,
655 const struct ad7192_platform_data *pdata)
656 {
657 @@ -243,17 +251,20 @@ static int ad7192_setup(struct ad7192_state *st,
658 id);
659
660 switch (pdata->clock_source_sel) {
661 - case AD7192_CLK_EXT_MCLK1_2:
662 - case AD7192_CLK_EXT_MCLK2:
663 - st->mclk = AD7192_INT_FREQ_MHZ;
664 - break;
665 case AD7192_CLK_INT:
666 case AD7192_CLK_INT_CO:
667 - if (pdata->ext_clk_hz)
668 - st->mclk = pdata->ext_clk_hz;
669 - else
670 - st->mclk = AD7192_INT_FREQ_MHZ;
671 + st->mclk = AD7192_INT_FREQ_MHZ;
672 break;
673 + case AD7192_CLK_EXT_MCLK1_2:
674 + case AD7192_CLK_EXT_MCLK2:
675 + if (ad7192_valid_external_frequency(pdata->ext_clk_hz)) {
676 + st->mclk = pdata->ext_clk_hz;
677 + break;
678 + }
679 + dev_err(&st->sd.spi->dev, "Invalid frequency setting %u\n",
680 + pdata->ext_clk_hz);
681 + ret = -EINVAL;
682 + goto out;
683 default:
684 ret = -EINVAL;
685 goto out;
686 diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
687 index 2b28fb9c0048..3bcf49466361 100644
688 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c
689 +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
690 @@ -648,8 +648,6 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
691 /* Ring buffer functions - here trigger setup related */
692 indio_dev->setup_ops = &ad5933_ring_setup_ops;
693
694 - indio_dev->modes |= INDIO_BUFFER_HARDWARE;
695 -
696 return 0;
697 }
698
699 @@ -762,7 +760,7 @@ static int ad5933_probe(struct i2c_client *client,
700 indio_dev->dev.parent = &client->dev;
701 indio_dev->info = &ad5933_info;
702 indio_dev->name = id->name;
703 - indio_dev->modes = INDIO_DIRECT_MODE;
704 + indio_dev->modes = (INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE);
705 indio_dev->channels = ad5933_channels;
706 indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
707
708 diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
709 index e26e685d8a57..5851052d4668 100644
710 --- a/drivers/usb/host/xhci-debugfs.c
711 +++ b/drivers/usb/host/xhci-debugfs.c
712 @@ -211,7 +211,7 @@ static void xhci_ring_dump_segment(struct seq_file *s,
713 static int xhci_ring_trb_show(struct seq_file *s, void *unused)
714 {
715 int i;
716 - struct xhci_ring *ring = s->private;
717 + struct xhci_ring *ring = *(struct xhci_ring **)s->private;
718 struct xhci_segment *seg = ring->first_seg;
719
720 for (i = 0; i < ring->num_segs; i++) {
721 @@ -387,7 +387,7 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
722
723 snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index);
724 epriv->root = xhci_debugfs_create_ring_dir(xhci,
725 - &dev->eps[ep_index].new_ring,
726 + &dev->eps[ep_index].ring,
727 epriv->name,
728 spriv->root);
729 spriv->eps[ep_index] = epriv;
730 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
731 index da6dbe3ebd8b..5c1326154e66 100644
732 --- a/drivers/usb/host/xhci.c
733 +++ b/drivers/usb/host/xhci.c
734 @@ -652,8 +652,6 @@ static void xhci_stop(struct usb_hcd *hcd)
735 return;
736 }
737
738 - xhci_debugfs_exit(xhci);
739 -
740 spin_lock_irq(&xhci->lock);
741 xhci->xhc_state |= XHCI_STATE_HALTED;
742 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
743 @@ -685,6 +683,7 @@ static void xhci_stop(struct usb_hcd *hcd)
744
745 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
746 xhci_mem_cleanup(xhci);
747 + xhci_debugfs_exit(xhci);
748 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
749 "xhci_stop completed - status = %x",
750 readl(&xhci->op_regs->status));
751 @@ -1018,6 +1017,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
752
753 xhci_dbg(xhci, "cleaning up memory\n");
754 xhci_mem_cleanup(xhci);
755 + xhci_debugfs_exit(xhci);
756 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
757 readl(&xhci->op_regs->status));
758
759 @@ -3551,12 +3551,10 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
760 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
761 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
762 }
763 -
764 + xhci_debugfs_remove_slot(xhci, udev->slot_id);
765 ret = xhci_disable_slot(xhci, udev->slot_id);
766 - if (ret) {
767 - xhci_debugfs_remove_slot(xhci, udev->slot_id);
768 + if (ret)
769 xhci_free_virt_device(xhci, udev->slot_id);
770 - }
771 }
772
773 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
774 diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
775 index e31a6f204397..86037e5b1101 100644
776 --- a/drivers/usb/usbip/stub_dev.c
777 +++ b/drivers/usb/usbip/stub_dev.c
778 @@ -73,6 +73,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
779 goto err;
780
781 sdev->ud.tcp_socket = socket;
782 + sdev->ud.sockfd = sockfd;
783
784 spin_unlock_irq(&sdev->ud.lock);
785
786 @@ -172,6 +173,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
787 if (ud->tcp_socket) {
788 sockfd_put(ud->tcp_socket);
789 ud->tcp_socket = NULL;
790 + ud->sockfd = -1;
791 }
792
793 /* 3. free used data */
794 @@ -266,6 +268,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev)
795 sdev->ud.status = SDEV_ST_AVAILABLE;
796 spin_lock_init(&sdev->ud.lock);
797 sdev->ud.tcp_socket = NULL;
798 + sdev->ud.sockfd = -1;
799
800 INIT_LIST_HEAD(&sdev->priv_init);
801 INIT_LIST_HEAD(&sdev->priv_tx);
802 diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
803 index c3e1008aa491..20e3d4609583 100644
804 --- a/drivers/usb/usbip/vhci_hcd.c
805 +++ b/drivers/usb/usbip/vhci_hcd.c
806 @@ -984,6 +984,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
807 if (vdev->ud.tcp_socket) {
808 sockfd_put(vdev->ud.tcp_socket);
809 vdev->ud.tcp_socket = NULL;
810 + vdev->ud.sockfd = -1;
811 }
812 pr_info("release socket\n");
813
814 @@ -1030,6 +1031,7 @@ static void vhci_device_reset(struct usbip_device *ud)
815 if (ud->tcp_socket) {
816 sockfd_put(ud->tcp_socket);
817 ud->tcp_socket = NULL;
818 + ud->sockfd = -1;
819 }
820 ud->status = VDEV_ST_NULL;
821
822 diff --git a/drivers/video/fbdev/mmp/core.c b/drivers/video/fbdev/mmp/core.c
823 index a0f496049db7..3a6bb6561ba0 100644
824 --- a/drivers/video/fbdev/mmp/core.c
825 +++ b/drivers/video/fbdev/mmp/core.c
826 @@ -23,6 +23,7 @@
827 #include <linux/slab.h>
828 #include <linux/dma-mapping.h>
829 #include <linux/export.h>
830 +#include <linux/module.h>
831 #include <video/mmp_disp.h>
832
833 static struct mmp_overlay *path_get_overlay(struct mmp_path *path,
834 @@ -249,3 +250,7 @@ void mmp_unregister_path(struct mmp_path *path)
835 mutex_unlock(&disp_lock);
836 }
837 EXPORT_SYMBOL_GPL(mmp_unregister_path);
838 +
839 +MODULE_AUTHOR("Zhou Zhu <zzhu3@marvell.com>");
840 +MODULE_DESCRIPTION("Marvell MMP display framework");
841 +MODULE_LICENSE("GPL");
842 diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
843 index d72b2e7dd500..59c77c1388ae 100644
844 --- a/include/linux/ptr_ring.h
845 +++ b/include/linux/ptr_ring.h
846 @@ -451,9 +451,14 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
847 __PTR_RING_PEEK_CALL_v; \
848 })
849
850 +/* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See
851 + * documentation for vmalloc for which of them are legal.
852 + */
853 static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
854 {
855 - return kcalloc(size, sizeof(void *), gfp);
856 + if (size * sizeof(void *) > KMALLOC_MAX_SIZE)
857 + return NULL;
858 + return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO);
859 }
860
861 static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
862 @@ -586,7 +591,7 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
863 spin_unlock(&(r)->producer_lock);
864 spin_unlock_irqrestore(&(r)->consumer_lock, flags);
865
866 - kfree(old);
867 + kvfree(old);
868
869 return 0;
870 }
871 @@ -626,7 +631,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
872 }
873
874 for (i = 0; i < nrings; ++i)
875 - kfree(queues[i]);
876 + kvfree(queues[i]);
877
878 kfree(queues);
879
880 @@ -634,7 +639,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
881
882 nomem:
883 while (--i >= 0)
884 - kfree(queues[i]);
885 + kvfree(queues[i]);
886
887 kfree(queues);
888
889 @@ -649,7 +654,7 @@ static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
890 if (destroy)
891 while ((ptr = ptr_ring_consume(r)))
892 destroy(ptr);
893 - kfree(r->queue);
894 + kvfree(r->queue);
895 }
896
897 #endif /* _LINUX_PTR_RING_H */
898 diff --git a/kernel/kcov.c b/kernel/kcov.c
899 index 7594c033d98a..2c16f1ab5e10 100644
900 --- a/kernel/kcov.c
901 +++ b/kernel/kcov.c
902 @@ -358,7 +358,8 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
903 */
904 if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
905 return -EINVAL;
906 - if (kcov->t != NULL)
907 + t = current;
908 + if (kcov->t != NULL || t->kcov != NULL)
909 return -EBUSY;
910 if (arg == KCOV_TRACE_PC)
911 kcov->mode = KCOV_MODE_TRACE_PC;
912 @@ -370,7 +371,6 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
913 #endif
914 else
915 return -EINVAL;
916 - t = current;
917 /* Cache in task struct for performance. */
918 t->kcov_size = kcov->size;
919 t->kcov_area = kcov->area;
920 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
921 index 673942094328..ebff729cc956 100644
922 --- a/mm/vmalloc.c
923 +++ b/mm/vmalloc.c
924 @@ -1943,11 +1943,15 @@ void *vmalloc_exec(unsigned long size)
925 }
926
927 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
928 -#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
929 +#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
930 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
931 -#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
932 +#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
933 #else
934 -#define GFP_VMALLOC32 GFP_KERNEL
935 +/*
936 + * 64b systems should always have either DMA or DMA32 zones. For others
937 + * GFP_DMA32 should do the right thing and use the normal zone.
938 + */
939 +#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
940 #endif
941
942 /**
943 diff --git a/net/core/dev.c b/net/core/dev.c
944 index 613fb4066be7..c8c102a3467f 100644
945 --- a/net/core/dev.c
946 +++ b/net/core/dev.c
947 @@ -2815,7 +2815,7 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
948
949 segs = skb_mac_gso_segment(skb, features);
950
951 - if (unlikely(skb_needs_check(skb, tx_path)))
952 + if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
953 skb_warn_bad_offload(skb);
954
955 return segs;
956 diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
957 index 9834cfa21b21..0a3f88f08727 100644
958 --- a/net/core/gen_estimator.c
959 +++ b/net/core/gen_estimator.c
960 @@ -159,7 +159,11 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
961 est->intvl_log = intvl_log;
962 est->cpu_bstats = cpu_bstats;
963
964 + if (stats_lock)
965 + local_bh_disable();
966 est_fetch_counters(est, &b);
967 + if (stats_lock)
968 + local_bh_enable();
969 est->last_bytes = b.bytes;
970 est->last_packets = b.packets;
971 old = rcu_dereference_protected(*rate_est, 1);
972 diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
973 index 518cea17b811..ea9b55309483 100644
974 --- a/net/decnet/af_decnet.c
975 +++ b/net/decnet/af_decnet.c
976 @@ -1338,6 +1338,12 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use
977 lock_sock(sk);
978 err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
979 release_sock(sk);
980 +#ifdef CONFIG_NETFILTER
981 + /* we need to exclude all possible ENOPROTOOPTs except default case */
982 + if (err == -ENOPROTOOPT && optname != DSO_LINKINFO &&
983 + optname != DSO_STREAM && optname != DSO_SEQPACKET)
984 + err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
985 +#endif
986
987 return err;
988 }
989 @@ -1445,15 +1451,6 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
990 dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
991 break;
992
993 - default:
994 -#ifdef CONFIG_NETFILTER
995 - return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
996 -#endif
997 - case DSO_LINKINFO:
998 - case DSO_STREAM:
999 - case DSO_SEQPACKET:
1000 - return -ENOPROTOOPT;
1001 -
1002 case DSO_MAXWINDOW:
1003 if (optlen != sizeof(unsigned long))
1004 return -EINVAL;
1005 @@ -1501,6 +1498,12 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
1006 return -EINVAL;
1007 scp->info_loc = u.info;
1008 break;
1009 +
1010 + case DSO_LINKINFO:
1011 + case DSO_STREAM:
1012 + case DSO_SEQPACKET:
1013 + default:
1014 + return -ENOPROTOOPT;
1015 }
1016
1017 return 0;
1018 @@ -1514,6 +1517,20 @@ static int dn_getsockopt(struct socket *sock, int level, int optname, char __use
1019 lock_sock(sk);
1020 err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
1021 release_sock(sk);
1022 +#ifdef CONFIG_NETFILTER
1023 + if (err == -ENOPROTOOPT && optname != DSO_STREAM &&
1024 + optname != DSO_SEQPACKET && optname != DSO_CONACCEPT &&
1025 + optname != DSO_CONREJECT) {
1026 + int len;
1027 +
1028 + if (get_user(len, optlen))
1029 + return -EFAULT;
1030 +
1031 + err = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
1032 + if (err >= 0)
1033 + err = put_user(len, optlen);
1034 + }
1035 +#endif
1036
1037 return err;
1038 }
1039 @@ -1579,26 +1596,6 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
1040 r_data = &link;
1041 break;
1042
1043 - default:
1044 -#ifdef CONFIG_NETFILTER
1045 - {
1046 - int ret, len;
1047 -
1048 - if (get_user(len, optlen))
1049 - return -EFAULT;
1050 -
1051 - ret = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
1052 - if (ret >= 0)
1053 - ret = put_user(len, optlen);
1054 - return ret;
1055 - }
1056 -#endif
1057 - case DSO_STREAM:
1058 - case DSO_SEQPACKET:
1059 - case DSO_CONACCEPT:
1060 - case DSO_CONREJECT:
1061 - return -ENOPROTOOPT;
1062 -
1063 case DSO_MAXWINDOW:
1064 if (r_len > sizeof(unsigned long))
1065 r_len = sizeof(unsigned long);
1066 @@ -1630,6 +1627,13 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
1067 r_len = sizeof(unsigned char);
1068 r_data = &scp->info_rem;
1069 break;
1070 +
1071 + case DSO_STREAM:
1072 + case DSO_SEQPACKET:
1073 + case DSO_CONACCEPT:
1074 + case DSO_CONREJECT:
1075 + default:
1076 + return -ENOPROTOOPT;
1077 }
1078
1079 if (r_data) {
1080 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
1081 index 60fb1eb7d7d8..c7df4969f80a 100644
1082 --- a/net/ipv4/ip_sockglue.c
1083 +++ b/net/ipv4/ip_sockglue.c
1084 @@ -1251,11 +1251,8 @@ int ip_setsockopt(struct sock *sk, int level,
1085 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1086 optname != IP_IPSEC_POLICY &&
1087 optname != IP_XFRM_POLICY &&
1088 - !ip_mroute_opt(optname)) {
1089 - lock_sock(sk);
1090 + !ip_mroute_opt(optname))
1091 err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
1092 - release_sock(sk);
1093 - }
1094 #endif
1095 return err;
1096 }
1097 @@ -1280,12 +1277,9 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname,
1098 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1099 optname != IP_IPSEC_POLICY &&
1100 optname != IP_XFRM_POLICY &&
1101 - !ip_mroute_opt(optname)) {
1102 - lock_sock(sk);
1103 - err = compat_nf_setsockopt(sk, PF_INET, optname,
1104 - optval, optlen);
1105 - release_sock(sk);
1106 - }
1107 + !ip_mroute_opt(optname))
1108 + err = compat_nf_setsockopt(sk, PF_INET, optname, optval,
1109 + optlen);
1110 #endif
1111 return err;
1112 }
1113 diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
1114 index 69060e3abe85..1e4a7209a3d2 100644
1115 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
1116 +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
1117 @@ -431,7 +431,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
1118 struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
1119 const struct ipt_entry *e = par->entryinfo;
1120 struct clusterip_config *config;
1121 - int ret;
1122 + int ret, i;
1123
1124 if (par->nft_compat) {
1125 pr_err("cannot use CLUSTERIP target from nftables compat\n");
1126 @@ -450,8 +450,18 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
1127 pr_info("Please specify destination IP\n");
1128 return -EINVAL;
1129 }
1130 -
1131 - /* FIXME: further sanity checks */
1132 + if (cipinfo->num_local_nodes > ARRAY_SIZE(cipinfo->local_nodes)) {
1133 + pr_info("bad num_local_nodes %u\n", cipinfo->num_local_nodes);
1134 + return -EINVAL;
1135 + }
1136 + for (i = 0; i < cipinfo->num_local_nodes; i++) {
1137 + if (cipinfo->local_nodes[i] - 1 >=
1138 + sizeof(config->local_nodes) * 8) {
1139 + pr_info("bad local_nodes[%d] %u\n",
1140 + i, cipinfo->local_nodes[i]);
1141 + return -EINVAL;
1142 + }
1143 + }
1144
1145 config = clusterip_config_find_get(par->net, e->ip.dst.s_addr, 1);
1146 if (!config) {
1147 diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
1148 index 89af9d88ca21..a5727036a8a8 100644
1149 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
1150 +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
1151 @@ -218,15 +218,19 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
1152 struct nf_conntrack_tuple tuple;
1153
1154 memset(&tuple, 0, sizeof(tuple));
1155 +
1156 + lock_sock(sk);
1157 tuple.src.u3.ip = inet->inet_rcv_saddr;
1158 tuple.src.u.tcp.port = inet->inet_sport;
1159 tuple.dst.u3.ip = inet->inet_daddr;
1160 tuple.dst.u.tcp.port = inet->inet_dport;
1161 tuple.src.l3num = PF_INET;
1162 tuple.dst.protonum = sk->sk_protocol;
1163 + release_sock(sk);
1164
1165 /* We only do TCP and SCTP at the moment: is there a better way? */
1166 - if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP) {
1167 + if (tuple.dst.protonum != IPPROTO_TCP &&
1168 + tuple.dst.protonum != IPPROTO_SCTP) {
1169 pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n");
1170 return -ENOPROTOOPT;
1171 }
1172 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
1173 index e8ffb5b5d84e..d78d41fc4b1a 100644
1174 --- a/net/ipv6/ipv6_sockglue.c
1175 +++ b/net/ipv6/ipv6_sockglue.c
1176 @@ -923,12 +923,8 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname,
1177 #ifdef CONFIG_NETFILTER
1178 /* we need to exclude all possible ENOPROTOOPTs except default case */
1179 if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
1180 - optname != IPV6_XFRM_POLICY) {
1181 - lock_sock(sk);
1182 - err = nf_setsockopt(sk, PF_INET6, optname, optval,
1183 - optlen);
1184 - release_sock(sk);
1185 - }
1186 + optname != IPV6_XFRM_POLICY)
1187 + err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen);
1188 #endif
1189 return err;
1190 }
1191 @@ -958,12 +954,9 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
1192 #ifdef CONFIG_NETFILTER
1193 /* we need to exclude all possible ENOPROTOOPTs except default case */
1194 if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
1195 - optname != IPV6_XFRM_POLICY) {
1196 - lock_sock(sk);
1197 - err = compat_nf_setsockopt(sk, PF_INET6, optname,
1198 - optval, optlen);
1199 - release_sock(sk);
1200 - }
1201 + optname != IPV6_XFRM_POLICY)
1202 + err = compat_nf_setsockopt(sk, PF_INET6, optname, optval,
1203 + optlen);
1204 #endif
1205 return err;
1206 }
1207 diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
1208 index 3b80a38f62b8..5863579800c1 100644
1209 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
1210 +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
1211 @@ -226,20 +226,27 @@ static const struct nf_hook_ops ipv6_conntrack_ops[] = {
1212 static int
1213 ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
1214 {
1215 - const struct inet_sock *inet = inet_sk(sk);
1216 + struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
1217 const struct ipv6_pinfo *inet6 = inet6_sk(sk);
1218 + const struct inet_sock *inet = inet_sk(sk);
1219 const struct nf_conntrack_tuple_hash *h;
1220 struct sockaddr_in6 sin6;
1221 - struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
1222 struct nf_conn *ct;
1223 + __be32 flow_label;
1224 + int bound_dev_if;
1225
1226 + lock_sock(sk);
1227 tuple.src.u3.in6 = sk->sk_v6_rcv_saddr;
1228 tuple.src.u.tcp.port = inet->inet_sport;
1229 tuple.dst.u3.in6 = sk->sk_v6_daddr;
1230 tuple.dst.u.tcp.port = inet->inet_dport;
1231 tuple.dst.protonum = sk->sk_protocol;
1232 + bound_dev_if = sk->sk_bound_dev_if;
1233 + flow_label = inet6->flow_label;
1234 + release_sock(sk);
1235
1236 - if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP)
1237 + if (tuple.dst.protonum != IPPROTO_TCP &&
1238 + tuple.dst.protonum != IPPROTO_SCTP)
1239 return -ENOPROTOOPT;
1240
1241 if (*len < 0 || (unsigned int) *len < sizeof(sin6))
1242 @@ -257,14 +264,13 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
1243
1244 sin6.sin6_family = AF_INET6;
1245 sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port;
1246 - sin6.sin6_flowinfo = inet6->flow_label & IPV6_FLOWINFO_MASK;
1247 + sin6.sin6_flowinfo = flow_label & IPV6_FLOWINFO_MASK;
1248 memcpy(&sin6.sin6_addr,
1249 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6,
1250 sizeof(sin6.sin6_addr));
1251
1252 nf_ct_put(ct);
1253 - sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr,
1254 - sk->sk_bound_dev_if);
1255 + sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr, bound_dev_if);
1256 return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0;
1257 }
1258
1259 diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
1260 index 55802e97f906..d7070d18db20 100644
1261 --- a/net/netfilter/x_tables.c
1262 +++ b/net/netfilter/x_tables.c
1263 @@ -39,7 +39,6 @@ MODULE_LICENSE("GPL");
1264 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
1265 MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
1266
1267 -#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
1268 #define XT_PCPU_BLOCK_SIZE 4096
1269
1270 struct compat_delta {
1271 @@ -210,6 +209,9 @@ xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
1272 {
1273 struct xt_match *match;
1274
1275 + if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
1276 + return ERR_PTR(-EINVAL);
1277 +
1278 match = xt_find_match(nfproto, name, revision);
1279 if (IS_ERR(match)) {
1280 request_module("%st_%s", xt_prefix[nfproto], name);
1281 @@ -252,6 +254,9 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
1282 {
1283 struct xt_target *target;
1284
1285 + if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
1286 + return ERR_PTR(-EINVAL);
1287 +
1288 target = xt_find_target(af, name, revision);
1289 if (IS_ERR(target)) {
1290 request_module("%st_%s", xt_prefix[af], name);
1291 @@ -1000,7 +1005,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
1292 return NULL;
1293
1294 /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
1295 - if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
1296 + if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
1297 return NULL;
1298
1299 info = kvmalloc(sz, GFP_KERNEL);
1300 diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
1301 index 498b54fd04d7..141c295191f6 100644
1302 --- a/net/netfilter/xt_RATEEST.c
1303 +++ b/net/netfilter/xt_RATEEST.c
1304 @@ -39,23 +39,31 @@ static void xt_rateest_hash_insert(struct xt_rateest *est)
1305 hlist_add_head(&est->list, &rateest_hash[h]);
1306 }
1307
1308 -struct xt_rateest *xt_rateest_lookup(const char *name)
1309 +static struct xt_rateest *__xt_rateest_lookup(const char *name)
1310 {
1311 struct xt_rateest *est;
1312 unsigned int h;
1313
1314 h = xt_rateest_hash(name);
1315 - mutex_lock(&xt_rateest_mutex);
1316 hlist_for_each_entry(est, &rateest_hash[h], list) {
1317 if (strcmp(est->name, name) == 0) {
1318 est->refcnt++;
1319 - mutex_unlock(&xt_rateest_mutex);
1320 return est;
1321 }
1322 }
1323 - mutex_unlock(&xt_rateest_mutex);
1324 +
1325 return NULL;
1326 }
1327 +
1328 +struct xt_rateest *xt_rateest_lookup(const char *name)
1329 +{
1330 + struct xt_rateest *est;
1331 +
1332 + mutex_lock(&xt_rateest_mutex);
1333 + est = __xt_rateest_lookup(name);
1334 + mutex_unlock(&xt_rateest_mutex);
1335 + return est;
1336 +}
1337 EXPORT_SYMBOL_GPL(xt_rateest_lookup);
1338
1339 void xt_rateest_put(struct xt_rateest *est)
1340 @@ -100,8 +108,10 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
1341
1342 net_get_random_once(&jhash_rnd, sizeof(jhash_rnd));
1343
1344 - est = xt_rateest_lookup(info->name);
1345 + mutex_lock(&xt_rateest_mutex);
1346 + est = __xt_rateest_lookup(info->name);
1347 if (est) {
1348 + mutex_unlock(&xt_rateest_mutex);
1349 /*
1350 * If estimator parameters are specified, they must match the
1351 * existing estimator.
1352 @@ -139,11 +149,13 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
1353
1354 info->est = est;
1355 xt_rateest_hash_insert(est);
1356 + mutex_unlock(&xt_rateest_mutex);
1357 return 0;
1358
1359 err2:
1360 kfree(est);
1361 err1:
1362 + mutex_unlock(&xt_rateest_mutex);
1363 return ret;
1364 }
1365
1366 diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c
1367 index 1db1ce59079f..891f4e7e8ea7 100644
1368 --- a/net/netfilter/xt_cgroup.c
1369 +++ b/net/netfilter/xt_cgroup.c
1370 @@ -52,6 +52,7 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)
1371 return -EINVAL;
1372 }
1373
1374 + info->priv = NULL;
1375 if (info->has_path) {
1376 cgrp = cgroup_get_from_path(info->path);
1377 if (IS_ERR(cgrp)) {
1378 diff --git a/net/rds/connection.c b/net/rds/connection.c
1379 index 7ee2d5d68b78..9efc82c665b5 100644
1380 --- a/net/rds/connection.c
1381 +++ b/net/rds/connection.c
1382 @@ -366,6 +366,8 @@ void rds_conn_shutdown(struct rds_conn_path *cp)
1383 * to the conn hash, so we never trigger a reconnect on this
1384 * conn - the reconnect is always triggered by the active peer. */
1385 cancel_delayed_work_sync(&cp->cp_conn_w);
1386 + if (conn->c_destroy_in_prog)
1387 + return;
1388 rcu_read_lock();
1389 if (!hlist_unhashed(&conn->c_hash_node)) {
1390 rcu_read_unlock();
1391 @@ -445,7 +447,6 @@ void rds_conn_destroy(struct rds_connection *conn)
1392 */
1393 rds_cong_remove_conn(conn);
1394
1395 - put_net(conn->c_net);
1396 kfree(conn->c_path);
1397 kmem_cache_free(rds_conn_slab, conn);
1398
1399 diff --git a/net/rds/rds.h b/net/rds/rds.h
1400 index c349c71babff..d09f6c1facb4 100644
1401 --- a/net/rds/rds.h
1402 +++ b/net/rds/rds.h
1403 @@ -150,7 +150,7 @@ struct rds_connection {
1404
1405 /* Protocol version */
1406 unsigned int c_version;
1407 - struct net *c_net;
1408 + possible_net_t c_net;
1409
1410 struct list_head c_map_item;
1411 unsigned long c_map_queued;
1412 @@ -165,13 +165,13 @@ struct rds_connection {
1413 static inline
1414 struct net *rds_conn_net(struct rds_connection *conn)
1415 {
1416 - return conn->c_net;
1417 + return read_pnet(&conn->c_net);
1418 }
1419
1420 static inline
1421 void rds_conn_net_set(struct rds_connection *conn, struct net *net)
1422 {
1423 - conn->c_net = get_net(net);
1424 + write_pnet(&conn->c_net, net);
1425 }
1426
1427 #define RDS_FLAG_CONG_BITMAP 0x01
1428 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
1429 index ab7356e0ba83..4df21e47d2ab 100644
1430 --- a/net/rds/tcp.c
1431 +++ b/net/rds/tcp.c
1432 @@ -307,7 +307,8 @@ static void rds_tcp_conn_free(void *arg)
1433 rdsdebug("freeing tc %p\n", tc);
1434
1435 spin_lock_irqsave(&rds_tcp_conn_lock, flags);
1436 - list_del(&tc->t_tcp_node);
1437 + if (!tc->t_tcp_node_detached)
1438 + list_del(&tc->t_tcp_node);
1439 spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
1440
1441 kmem_cache_free(rds_tcp_conn_slab, tc);
1442 @@ -528,12 +529,16 @@ static void rds_tcp_kill_sock(struct net *net)
1443 rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
1444 spin_lock_irq(&rds_tcp_conn_lock);
1445 list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
1446 - struct net *c_net = tc->t_cpath->cp_conn->c_net;
1447 + struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
1448
1449 if (net != c_net || !tc->t_sock)
1450 continue;
1451 - if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
1452 + if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
1453 list_move_tail(&tc->t_tcp_node, &tmp_list);
1454 + } else {
1455 + list_del(&tc->t_tcp_node);
1456 + tc->t_tcp_node_detached = true;
1457 + }
1458 }
1459 spin_unlock_irq(&rds_tcp_conn_lock);
1460 list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
1461 @@ -587,7 +592,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
1462
1463 spin_lock_irq(&rds_tcp_conn_lock);
1464 list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
1465 - struct net *c_net = tc->t_cpath->cp_conn->c_net;
1466 + struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
1467
1468 if (net != c_net || !tc->t_sock)
1469 continue;
1470 diff --git a/net/rds/tcp.h b/net/rds/tcp.h
1471 index 864ca7d8f019..c6fa080e9b6d 100644
1472 --- a/net/rds/tcp.h
1473 +++ b/net/rds/tcp.h
1474 @@ -12,6 +12,7 @@ struct rds_tcp_incoming {
1475 struct rds_tcp_connection {
1476
1477 struct list_head t_tcp_node;
1478 + bool t_tcp_node_detached;
1479 struct rds_conn_path *t_cpath;
1480 /* t_conn_path_lock synchronizes the connection establishment between
1481 * rds_tcp_accept_one and rds_tcp_conn_path_connect
1482 diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
1483 index 33cfe5d3d6cb..8900ea5cbabf 100644
1484 --- a/security/selinux/ss/services.c
1485 +++ b/security/selinux/ss/services.c
1486 @@ -867,6 +867,9 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
1487 int index;
1488 int rc;
1489
1490 + if (!ss_initialized)
1491 + return 0;
1492 +
1493 read_lock(&policy_rwlock);
1494
1495 rc = -EINVAL;
1496 @@ -1413,27 +1416,25 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
1497 if (!scontext_len)
1498 return -EINVAL;
1499
1500 + /* Copy the string to allow changes and ensure a NUL terminator */
1501 + scontext2 = kmemdup_nul(scontext, scontext_len, gfp_flags);
1502 + if (!scontext2)
1503 + return -ENOMEM;
1504 +
1505 if (!ss_initialized) {
1506 int i;
1507
1508 for (i = 1; i < SECINITSID_NUM; i++) {
1509 - if (!strcmp(initial_sid_to_string[i], scontext)) {
1510 + if (!strcmp(initial_sid_to_string[i], scontext2)) {
1511 *sid = i;
1512 - return 0;
1513 + goto out;
1514 }
1515 }
1516 *sid = SECINITSID_KERNEL;
1517 - return 0;
1518 + goto out;
1519 }
1520 *sid = SECSID_NULL;
1521
1522 - /* Copy the string so that we can modify the copy as we parse it. */
1523 - scontext2 = kmalloc(scontext_len + 1, gfp_flags);
1524 - if (!scontext2)
1525 - return -ENOMEM;
1526 - memcpy(scontext2, scontext, scontext_len);
1527 - scontext2[scontext_len] = 0;
1528 -
1529 if (force) {
1530 /* Save another copy for storing in uninterpreted form */
1531 rc = -ENOMEM;
1532 diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c
1533 index 070a6880980e..c60a57797640 100644
1534 --- a/sound/soc/ux500/mop500.c
1535 +++ b/sound/soc/ux500/mop500.c
1536 @@ -163,3 +163,7 @@ static struct platform_driver snd_soc_mop500_driver = {
1537 };
1538
1539 module_platform_driver(snd_soc_mop500_driver);
1540 +
1541 +MODULE_LICENSE("GPL v2");
1542 +MODULE_DESCRIPTION("ASoC MOP500 board driver");
1543 +MODULE_AUTHOR("Ola Lilja");
1544 diff --git a/sound/soc/ux500/ux500_pcm.c b/sound/soc/ux500/ux500_pcm.c
1545 index f12c01dddc8d..d35ba7700f46 100644
1546 --- a/sound/soc/ux500/ux500_pcm.c
1547 +++ b/sound/soc/ux500/ux500_pcm.c
1548 @@ -165,3 +165,8 @@ int ux500_pcm_unregister_platform(struct platform_device *pdev)
1549 return 0;
1550 }
1551 EXPORT_SYMBOL_GPL(ux500_pcm_unregister_platform);
1552 +
1553 +MODULE_AUTHOR("Ola Lilja");
1554 +MODULE_AUTHOR("Roger Nilsson");
1555 +MODULE_DESCRIPTION("ASoC UX500 driver");
1556 +MODULE_LICENSE("GPL v2");