Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0251-4.9.152-all-fixes.patch



Revision 3304
Tue Mar 12 10:43:11 2019 UTC by niro
File size: 50373 bytes
-linux-4.9.152
1 diff --git a/Makefile b/Makefile
2 index f1aeb98f9ace..27a9292fc0ed 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 151
9 +SUBLEVEL = 152
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
14 index 68dedca5a47e..a11c8c2915c9 100644
15 --- a/arch/arm64/include/asm/kvm_arm.h
16 +++ b/arch/arm64/include/asm/kvm_arm.h
17 @@ -23,6 +23,8 @@
18 #include <asm/types.h>
19
20 /* Hyp Configuration Register (HCR) bits */
21 +#define HCR_API (UL(1) << 41)
22 +#define HCR_APK (UL(1) << 40)
23 #define HCR_E2H (UL(1) << 34)
24 #define HCR_ID (UL(1) << 33)
25 #define HCR_CD (UL(1) << 32)
26 @@ -82,6 +84,7 @@
27 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
28 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
29 #define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
30 +#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
31 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
32
33 /* TCR_EL2 Registers bits */
34 diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
35 index fa52817d84c5..3289d1458791 100644
36 --- a/arch/arm64/kernel/head.S
37 +++ b/arch/arm64/kernel/head.S
38 @@ -517,10 +517,9 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
39 #endif
40
41 /* Hyp configuration. */
42 - mov x0, #HCR_RW // 64-bit EL1
43 + mov_q x0, HCR_HOST_NVHE_FLAGS
44 cbz x2, set_hcr
45 - orr x0, x0, #HCR_TGE // Enable Host Extensions
46 - orr x0, x0, #HCR_E2H
47 + mov_q x0, HCR_HOST_VHE_FLAGS
48 set_hcr:
49 msr hcr_el2, x0
50 isb
51 diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
52 index d7e90d97f5c4..2a21318fed1d 100644
53 --- a/arch/arm64/kernel/kaslr.c
54 +++ b/arch/arm64/kernel/kaslr.c
55 @@ -14,6 +14,7 @@
56 #include <linux/sched.h>
57 #include <linux/types.h>
58
59 +#include <asm/cacheflush.h>
60 #include <asm/fixmap.h>
61 #include <asm/kernel-pgtable.h>
62 #include <asm/memory.h>
63 @@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt)
64 return ret;
65 }
66
67 -static __init const u8 *get_cmdline(void *fdt)
68 +static __init const u8 *kaslr_get_cmdline(void *fdt)
69 {
70 static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
71
72 @@ -109,7 +110,7 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
73 * Check if 'nokaslr' appears on the command line, and
74 * return 0 if that is the case.
75 */
76 - cmdline = get_cmdline(fdt);
77 + cmdline = kaslr_get_cmdline(fdt);
78 str = strstr(cmdline, "nokaslr");
79 if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
80 return 0;
81 @@ -178,5 +179,8 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
82 module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
83 module_alloc_base &= PAGE_MASK;
84
85 + __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
86 + __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
87 +
88 return offset;
89 }
90 diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
91 index 12f9d1ecdf4c..115b0955715f 100644
92 --- a/arch/arm64/kvm/hyp/switch.c
93 +++ b/arch/arm64/kvm/hyp/switch.c
94 @@ -112,7 +112,7 @@ static void __hyp_text __deactivate_traps_vhe(void)
95
96 static void __hyp_text __deactivate_traps_nvhe(void)
97 {
98 - write_sysreg(HCR_RW, hcr_el2);
99 + write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
100 write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
101 }
102
103 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
104 index 34fbbf8fdeaa..1d987061d1a1 100644
105 --- a/arch/mips/Kconfig
106 +++ b/arch/mips/Kconfig
107 @@ -3135,6 +3135,7 @@ config MIPS32_O32
108 config MIPS32_N32
109 bool "Kernel support for n32 binaries"
110 depends on 64BIT
111 + select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
112 select COMPAT
113 select MIPS32_COMPAT
114 select SYSVIPC_COMPAT if SYSVIPC
115 diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
116 index 2a5bb849b10e..288b58b00dc8 100644
117 --- a/arch/mips/pci/msi-octeon.c
118 +++ b/arch/mips/pci/msi-octeon.c
119 @@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
120 int irq;
121 struct irq_chip *msi;
122
123 - if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
124 + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
125 + return 0;
126 + } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
127 msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
128 msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
129 msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
130 diff --git a/crypto/authenc.c b/crypto/authenc.c
131 index c3180eb6d1ee..6bfec690ca5b 100644
132 --- a/crypto/authenc.c
133 +++ b/crypto/authenc.c
134 @@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
135 return -EINVAL;
136 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
137 return -EINVAL;
138 - if (RTA_PAYLOAD(rta) < sizeof(*param))
139 +
140 + /*
141 + * RTA_OK() didn't align the rtattr's payload when validating that it
142 + * fits in the buffer. Yet, the keys should start on the next 4-byte
143 + * aligned boundary. To avoid confusion, require that the rtattr
144 + * payload be exactly the param struct, which has a 4-byte aligned size.
145 + */
146 + if (RTA_PAYLOAD(rta) != sizeof(*param))
147 return -EINVAL;
148 + BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
149
150 param = RTA_DATA(rta);
151 keys->enckeylen = be32_to_cpu(param->enckeylen);
152
153 - key += RTA_ALIGN(rta->rta_len);
154 - keylen -= RTA_ALIGN(rta->rta_len);
155 + key += rta->rta_len;
156 + keylen -= rta->rta_len;
157
158 if (keylen < keys->enckeylen)
159 return -EINVAL;
160 diff --git a/crypto/authencesn.c b/crypto/authencesn.c
161 index 49e7e85a23d5..73b12f128ae5 100644
162 --- a/crypto/authencesn.c
163 +++ b/crypto/authencesn.c
164 @@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
165 struct aead_request *req = areq->data;
166
167 err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
168 - aead_request_complete(req, err);
169 + authenc_esn_request_complete(req, err);
170 }
171
172 static int crypto_authenc_esn_decrypt(struct aead_request *req)
173 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
174 index 9f840d9fdfcb..344f34746c10 100644
175 --- a/drivers/block/loop.c
176 +++ b/drivers/block/loop.c
177 @@ -81,7 +81,7 @@
178 #include <asm/uaccess.h>
179
180 static DEFINE_IDR(loop_index_idr);
181 -static DEFINE_MUTEX(loop_index_mutex);
182 +static DEFINE_MUTEX(loop_ctl_mutex);
183
184 static int max_part;
185 static int part_shift;
186 @@ -1033,7 +1033,7 @@ static int loop_clr_fd(struct loop_device *lo)
187 */
188 if (atomic_read(&lo->lo_refcnt) > 1) {
189 lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
190 - mutex_unlock(&lo->lo_ctl_mutex);
191 + mutex_unlock(&loop_ctl_mutex);
192 return 0;
193 }
194
195 @@ -1082,12 +1082,12 @@ static int loop_clr_fd(struct loop_device *lo)
196 if (!part_shift)
197 lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
198 loop_unprepare_queue(lo);
199 - mutex_unlock(&lo->lo_ctl_mutex);
200 + mutex_unlock(&loop_ctl_mutex);
201 /*
202 - * Need not hold lo_ctl_mutex to fput backing file.
203 - * Calling fput holding lo_ctl_mutex triggers a circular
204 + * Need not hold loop_ctl_mutex to fput backing file.
205 + * Calling fput holding loop_ctl_mutex triggers a circular
206 * lock dependency possibility warning as fput can take
207 - * bd_mutex which is usually taken before lo_ctl_mutex.
208 + * bd_mutex which is usually taken before loop_ctl_mutex.
209 */
210 fput(filp);
211 return 0;
212 @@ -1350,7 +1350,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
213 struct loop_device *lo = bdev->bd_disk->private_data;
214 int err;
215
216 - mutex_lock_nested(&lo->lo_ctl_mutex, 1);
217 + mutex_lock_nested(&loop_ctl_mutex, 1);
218 switch (cmd) {
219 case LOOP_SET_FD:
220 err = loop_set_fd(lo, mode, bdev, arg);
221 @@ -1359,7 +1359,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
222 err = loop_change_fd(lo, bdev, arg);
223 break;
224 case LOOP_CLR_FD:
225 - /* loop_clr_fd would have unlocked lo_ctl_mutex on success */
226 + /* loop_clr_fd would have unlocked loop_ctl_mutex on success */
227 err = loop_clr_fd(lo);
228 if (!err)
229 goto out_unlocked;
230 @@ -1395,7 +1395,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
231 default:
232 err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
233 }
234 - mutex_unlock(&lo->lo_ctl_mutex);
235 + mutex_unlock(&loop_ctl_mutex);
236
237 out_unlocked:
238 return err;
239 @@ -1528,16 +1528,16 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
240
241 switch(cmd) {
242 case LOOP_SET_STATUS:
243 - mutex_lock(&lo->lo_ctl_mutex);
244 + mutex_lock(&loop_ctl_mutex);
245 err = loop_set_status_compat(
246 lo, (const struct compat_loop_info __user *) arg);
247 - mutex_unlock(&lo->lo_ctl_mutex);
248 + mutex_unlock(&loop_ctl_mutex);
249 break;
250 case LOOP_GET_STATUS:
251 - mutex_lock(&lo->lo_ctl_mutex);
252 + mutex_lock(&loop_ctl_mutex);
253 err = loop_get_status_compat(
254 lo, (struct compat_loop_info __user *) arg);
255 - mutex_unlock(&lo->lo_ctl_mutex);
256 + mutex_unlock(&loop_ctl_mutex);
257 break;
258 case LOOP_SET_CAPACITY:
259 case LOOP_CLR_FD:
260 @@ -1559,9 +1559,11 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
261 static int lo_open(struct block_device *bdev, fmode_t mode)
262 {
263 struct loop_device *lo;
264 - int err = 0;
265 + int err;
266
267 - mutex_lock(&loop_index_mutex);
268 + err = mutex_lock_killable(&loop_ctl_mutex);
269 + if (err)
270 + return err;
271 lo = bdev->bd_disk->private_data;
272 if (!lo) {
273 err = -ENXIO;
274 @@ -1570,18 +1572,20 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
275
276 atomic_inc(&lo->lo_refcnt);
277 out:
278 - mutex_unlock(&loop_index_mutex);
279 + mutex_unlock(&loop_ctl_mutex);
280 return err;
281 }
282
283 -static void __lo_release(struct loop_device *lo)
284 +static void lo_release(struct gendisk *disk, fmode_t mode)
285 {
286 + struct loop_device *lo;
287 int err;
288
289 + mutex_lock(&loop_ctl_mutex);
290 + lo = disk->private_data;
291 if (atomic_dec_return(&lo->lo_refcnt))
292 - return;
293 + goto out_unlock;
294
295 - mutex_lock(&lo->lo_ctl_mutex);
296 if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
297 /*
298 * In autoclear mode, stop the loop thread
299 @@ -1598,14 +1602,8 @@ static void __lo_release(struct loop_device *lo)
300 loop_flush(lo);
301 }
302
303 - mutex_unlock(&lo->lo_ctl_mutex);
304 -}
305 -
306 -static void lo_release(struct gendisk *disk, fmode_t mode)
307 -{
308 - mutex_lock(&loop_index_mutex);
309 - __lo_release(disk->private_data);
310 - mutex_unlock(&loop_index_mutex);
311 +out_unlock:
312 + mutex_unlock(&loop_ctl_mutex);
313 }
314
315 static const struct block_device_operations lo_fops = {
316 @@ -1644,10 +1642,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
317 struct loop_device *lo = ptr;
318 struct loop_func_table *xfer = data;
319
320 - mutex_lock(&lo->lo_ctl_mutex);
321 + mutex_lock(&loop_ctl_mutex);
322 if (lo->lo_encryption == xfer)
323 loop_release_xfer(lo);
324 - mutex_unlock(&lo->lo_ctl_mutex);
325 + mutex_unlock(&loop_ctl_mutex);
326 return 0;
327 }
328
329 @@ -1813,7 +1811,6 @@ static int loop_add(struct loop_device **l, int i)
330 if (!part_shift)
331 disk->flags |= GENHD_FL_NO_PART_SCAN;
332 disk->flags |= GENHD_FL_EXT_DEVT;
333 - mutex_init(&lo->lo_ctl_mutex);
334 atomic_set(&lo->lo_refcnt, 0);
335 lo->lo_number = i;
336 spin_lock_init(&lo->lo_lock);
337 @@ -1892,7 +1889,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
338 struct kobject *kobj;
339 int err;
340
341 - mutex_lock(&loop_index_mutex);
342 + mutex_lock(&loop_ctl_mutex);
343 err = loop_lookup(&lo, MINOR(dev) >> part_shift);
344 if (err < 0)
345 err = loop_add(&lo, MINOR(dev) >> part_shift);
346 @@ -1900,7 +1897,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
347 kobj = NULL;
348 else
349 kobj = get_disk(lo->lo_disk);
350 - mutex_unlock(&loop_index_mutex);
351 + mutex_unlock(&loop_ctl_mutex);
352
353 *part = 0;
354 return kobj;
355 @@ -1910,9 +1907,13 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
356 unsigned long parm)
357 {
358 struct loop_device *lo;
359 - int ret = -ENOSYS;
360 + int ret;
361 +
362 + ret = mutex_lock_killable(&loop_ctl_mutex);
363 + if (ret)
364 + return ret;
365
366 - mutex_lock(&loop_index_mutex);
367 + ret = -ENOSYS;
368 switch (cmd) {
369 case LOOP_CTL_ADD:
370 ret = loop_lookup(&lo, parm);
371 @@ -1926,19 +1927,15 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
372 ret = loop_lookup(&lo, parm);
373 if (ret < 0)
374 break;
375 - mutex_lock(&lo->lo_ctl_mutex);
376 if (lo->lo_state != Lo_unbound) {
377 ret = -EBUSY;
378 - mutex_unlock(&lo->lo_ctl_mutex);
379 break;
380 }
381 if (atomic_read(&lo->lo_refcnt) > 0) {
382 ret = -EBUSY;
383 - mutex_unlock(&lo->lo_ctl_mutex);
384 break;
385 }
386 lo->lo_disk->private_data = NULL;
387 - mutex_unlock(&lo->lo_ctl_mutex);
388 idr_remove(&loop_index_idr, lo->lo_number);
389 loop_remove(lo);
390 break;
391 @@ -1948,7 +1945,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
392 break;
393 ret = loop_add(&lo, -1);
394 }
395 - mutex_unlock(&loop_index_mutex);
396 + mutex_unlock(&loop_ctl_mutex);
397
398 return ret;
399 }
400 @@ -2031,10 +2028,10 @@ static int __init loop_init(void)
401 THIS_MODULE, loop_probe, NULL, NULL);
402
403 /* pre-create number of devices given by config or max_loop */
404 - mutex_lock(&loop_index_mutex);
405 + mutex_lock(&loop_ctl_mutex);
406 for (i = 0; i < nr; i++)
407 loop_add(&lo, i);
408 - mutex_unlock(&loop_index_mutex);
409 + mutex_unlock(&loop_ctl_mutex);
410
411 printk(KERN_INFO "loop: module loaded\n");
412 return 0;
413 diff --git a/drivers/block/loop.h b/drivers/block/loop.h
414 index 60f0fd2c0c65..a923e74495ce 100644
415 --- a/drivers/block/loop.h
416 +++ b/drivers/block/loop.h
417 @@ -55,7 +55,6 @@ struct loop_device {
418
419 spinlock_t lo_lock;
420 int lo_state;
421 - struct mutex lo_ctl_mutex;
422 struct kthread_worker worker;
423 struct task_struct *worker_task;
424 bool use_dio;
425 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
426 index 42a53956aefe..394f8ec83cf0 100644
427 --- a/drivers/block/nbd.c
428 +++ b/drivers/block/nbd.c
429 @@ -108,7 +108,7 @@ static const char *nbdcmd_to_ascii(int cmd)
430
431 static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
432 {
433 - bdev->bd_inode->i_size = 0;
434 + bd_set_size(bdev, 0);
435 set_capacity(nbd->disk, 0);
436 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
437
438 @@ -117,29 +117,21 @@ static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
439
440 static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
441 {
442 - if (!nbd_is_connected(nbd))
443 - return;
444 -
445 - bdev->bd_inode->i_size = nbd->bytesize;
446 + blk_queue_logical_block_size(nbd->disk->queue, nbd->blksize);
447 + blk_queue_physical_block_size(nbd->disk->queue, nbd->blksize);
448 + bd_set_size(bdev, nbd->bytesize);
449 + set_blocksize(bdev, nbd->blksize);
450 set_capacity(nbd->disk, nbd->bytesize >> 9);
451 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
452 }
453
454 -static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
455 +static void nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
456 loff_t blocksize, loff_t nr_blocks)
457 {
458 - int ret;
459 -
460 - ret = set_blocksize(bdev, blocksize);
461 - if (ret)
462 - return ret;
463 -
464 nbd->blksize = blocksize;
465 nbd->bytesize = blocksize * nr_blocks;
466 -
467 - nbd_size_update(nbd, bdev);
468 -
469 - return 0;
470 + if (nbd_is_connected(nbd))
471 + nbd_size_update(nbd, bdev);
472 }
473
474 static void nbd_end_request(struct nbd_cmd *cmd)
475 @@ -655,16 +647,17 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
476 case NBD_SET_BLKSIZE: {
477 loff_t bsize = div_s64(nbd->bytesize, arg);
478
479 - return nbd_size_set(nbd, bdev, arg, bsize);
480 + nbd_size_set(nbd, bdev, arg, bsize);
481 + return 0;
482 }
483
484 case NBD_SET_SIZE:
485 - return nbd_size_set(nbd, bdev, nbd->blksize,
486 - div_s64(arg, nbd->blksize));
487 -
488 + nbd_size_set(nbd, bdev, nbd->blksize,
489 + div_s64(arg, nbd->blksize));
490 + return 0;
491 case NBD_SET_SIZE_BLOCKS:
492 - return nbd_size_set(nbd, bdev, nbd->blksize, arg);
493 -
494 + nbd_size_set(nbd, bdev, nbd->blksize, arg);
495 + return 0;
496 case NBD_SET_TIMEOUT:
497 if (arg) {
498 nbd->tag_set.timeout = arg * HZ;
499 diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
500 index 631337c2e4a7..9eac4dcb9971 100644
501 --- a/drivers/crypto/caam/caamhash.c
502 +++ b/drivers/crypto/caam/caamhash.c
503 @@ -1232,13 +1232,16 @@ static int ahash_final_no_ctx(struct ahash_request *req)
504
505 desc = edesc->hw_desc;
506
507 - state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
508 - if (dma_mapping_error(jrdev, state->buf_dma)) {
509 - dev_err(jrdev, "unable to map src\n");
510 - goto unmap;
511 - }
512 + if (buflen) {
513 + state->buf_dma = dma_map_single(jrdev, buf, buflen,
514 + DMA_TO_DEVICE);
515 + if (dma_mapping_error(jrdev, state->buf_dma)) {
516 + dev_err(jrdev, "unable to map src\n");
517 + goto unmap;
518 + }
519
520 - append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
521 + append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
522 + }
523
524 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
525 digestsize);
526 diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
527 index 7c71722be395..463033b4db1d 100644
528 --- a/drivers/crypto/talitos.c
529 +++ b/drivers/crypto/talitos.c
530 @@ -1347,23 +1347,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
531 struct talitos_private *priv = dev_get_drvdata(dev);
532 bool is_sec1 = has_ftr_sec1(priv);
533 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
534 - void *err;
535
536 if (cryptlen + authsize > max_len) {
537 dev_err(dev, "length exceeds h/w max limit\n");
538 return ERR_PTR(-EINVAL);
539 }
540
541 - if (ivsize)
542 - iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
543 -
544 if (!dst || dst == src) {
545 src_len = assoclen + cryptlen + authsize;
546 src_nents = sg_nents_for_len(src, src_len);
547 if (src_nents < 0) {
548 dev_err(dev, "Invalid number of src SG.\n");
549 - err = ERR_PTR(-EINVAL);
550 - goto error_sg;
551 + return ERR_PTR(-EINVAL);
552 }
553 src_nents = (src_nents == 1) ? 0 : src_nents;
554 dst_nents = dst ? src_nents : 0;
555 @@ -1373,16 +1368,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
556 src_nents = sg_nents_for_len(src, src_len);
557 if (src_nents < 0) {
558 dev_err(dev, "Invalid number of src SG.\n");
559 - err = ERR_PTR(-EINVAL);
560 - goto error_sg;
561 + return ERR_PTR(-EINVAL);
562 }
563 src_nents = (src_nents == 1) ? 0 : src_nents;
564 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
565 dst_nents = sg_nents_for_len(dst, dst_len);
566 if (dst_nents < 0) {
567 dev_err(dev, "Invalid number of dst SG.\n");
568 - err = ERR_PTR(-EINVAL);
569 - goto error_sg;
570 + return ERR_PTR(-EINVAL);
571 }
572 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
573 }
574 @@ -1405,12 +1398,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
575 dma_len = 0;
576 alloc_len += icv_stashing ? authsize : 0;
577 }
578 + alloc_len += ivsize;
579
580 edesc = kmalloc(alloc_len, GFP_DMA | flags);
581 - if (!edesc) {
582 - dev_err(dev, "could not allocate edescriptor\n");
583 - err = ERR_PTR(-ENOMEM);
584 - goto error_sg;
585 + if (!edesc)
586 + return ERR_PTR(-ENOMEM);
587 + if (ivsize) {
588 + iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
589 + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
590 }
591
592 edesc->src_nents = src_nents;
593 @@ -1423,10 +1418,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
594 DMA_BIDIRECTIONAL);
595
596 return edesc;
597 -error_sg:
598 - if (iv_dma)
599 - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
600 - return err;
601 }
602
603 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
604 diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
605 index 6a48d6637e5c..2e85e609f125 100644
606 --- a/drivers/gpu/drm/drm_fb_helper.c
607 +++ b/drivers/gpu/drm/drm_fb_helper.c
608 @@ -1238,9 +1238,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
609 struct drm_framebuffer *fb = fb_helper->fb;
610 int depth;
611
612 - if (var->pixclock != 0 || in_dbg_master())
613 + if (in_dbg_master())
614 return -EINVAL;
615
616 + if (var->pixclock != 0) {
617 + DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
618 + var->pixclock = 0;
619 + }
620 +
621 /* Need to resize the fb object !!! */
622 if (var->bits_per_pixel > fb->bits_per_pixel ||
623 var->xres > fb->width || var->yres > fb->height ||
624 diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
625 index 6ca71aabb576..d300e5e7eadc 100644
626 --- a/drivers/media/platform/vivid/vivid-kthread-cap.c
627 +++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
628 @@ -877,8 +877,11 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
629 "%s-vid-cap", dev->v4l2_dev.name);
630
631 if (IS_ERR(dev->kthread_vid_cap)) {
632 + int err = PTR_ERR(dev->kthread_vid_cap);
633 +
634 + dev->kthread_vid_cap = NULL;
635 v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
636 - return PTR_ERR(dev->kthread_vid_cap);
637 + return err;
638 }
639 *pstreaming = true;
640 vivid_grab_controls(dev, true);
641 diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
642 index 98eed5889bc1..7c8d75852816 100644
643 --- a/drivers/media/platform/vivid/vivid-kthread-out.c
644 +++ b/drivers/media/platform/vivid/vivid-kthread-out.c
645 @@ -248,8 +248,11 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
646 "%s-vid-out", dev->v4l2_dev.name);
647
648 if (IS_ERR(dev->kthread_vid_out)) {
649 + int err = PTR_ERR(dev->kthread_vid_out);
650 +
651 + dev->kthread_vid_out = NULL;
652 v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
653 - return PTR_ERR(dev->kthread_vid_out);
654 + return err;
655 }
656 *pstreaming = true;
657 vivid_grab_controls(dev, true);
658 diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
659 index fcda3ae4e6b0..f9a810e3f521 100644
660 --- a/drivers/media/platform/vivid/vivid-vid-common.c
661 +++ b/drivers/media/platform/vivid/vivid-vid-common.c
662 @@ -33,7 +33,7 @@ const struct v4l2_dv_timings_cap vivid_dv_timings_cap = {
663 .type = V4L2_DV_BT_656_1120,
664 /* keep this initialization for compatibility with GCC < 4.4.6 */
665 .reserved = { 0 },
666 - V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000,
667 + V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000,
668 V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
669 V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
670 V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED)
671 diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
672 index 1ed7ba3dfdbe..ae959e01042a 100644
673 --- a/drivers/media/usb/em28xx/em28xx-video.c
674 +++ b/drivers/media/usb/em28xx/em28xx-video.c
675 @@ -1062,6 +1062,8 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
676
677 em28xx_videodbg("%s\n", __func__);
678
679 + dev->v4l2->field_count = 0;
680 +
681 /* Make sure streaming is not already in progress for this type
682 of filehandle (e.g. video, vbi) */
683 rc = res_get(dev, vq->type);
684 @@ -1290,8 +1292,6 @@ static void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv)
685 {
686 struct em28xx *dev = priv;
687
688 - dev->v4l2->field_count = 0;
689 -
690 /*
691 * In the case of non-AC97 volume controls, we still need
692 * to do some setups at em28xx, in order to mute/unmute
693 diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
694 index 4df4a1f402be..b1a4d4e2341b 100644
695 --- a/drivers/media/v4l2-core/videobuf2-core.c
696 +++ b/drivers/media/v4l2-core/videobuf2-core.c
697 @@ -1916,9 +1916,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
698 return -EINVAL;
699 }
700 }
701 +
702 + mutex_lock(&q->mmap_lock);
703 +
704 if (vb2_fileio_is_active(q)) {
705 dprintk(1, "mmap: file io in progress\n");
706 - return -EBUSY;
707 + ret = -EBUSY;
708 + goto unlock;
709 }
710
711 /*
712 @@ -1926,7 +1930,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
713 */
714 ret = __find_plane_by_offset(q, off, &buffer, &plane);
715 if (ret)
716 - return ret;
717 + goto unlock;
718
719 vb = q->bufs[buffer];
720
721 @@ -1939,11 +1943,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
722 if (length < (vma->vm_end - vma->vm_start)) {
723 dprintk(1,
724 "MMAP invalid, as it would overflow buffer length\n");
725 - return -EINVAL;
726 + ret = -EINVAL;
727 + goto unlock;
728 }
729
730 - mutex_lock(&q->mmap_lock);
731 ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
732 +
733 +unlock:
734 mutex_unlock(&q->mmap_lock);
735 if (ret)
736 return ret;
737 diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
738 index 5628a6b5b19b..c5c320efc7b4 100644
739 --- a/drivers/mfd/tps6586x.c
740 +++ b/drivers/mfd/tps6586x.c
741 @@ -594,6 +594,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client)
742 return 0;
743 }
744
745 +static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
746 +{
747 + struct tps6586x *tps6586x = dev_get_drvdata(dev);
748 +
749 + if (tps6586x->client->irq)
750 + disable_irq(tps6586x->client->irq);
751 +
752 + return 0;
753 +}
754 +
755 +static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
756 +{
757 + struct tps6586x *tps6586x = dev_get_drvdata(dev);
758 +
759 + if (tps6586x->client->irq)
760 + enable_irq(tps6586x->client->irq);
761 +
762 + return 0;
763 +}
764 +
765 +static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
766 + tps6586x_i2c_resume);
767 +
768 static const struct i2c_device_id tps6586x_id_table[] = {
769 { "tps6586x", 0 },
770 { },
771 @@ -604,6 +627,7 @@ static struct i2c_driver tps6586x_driver = {
772 .driver = {
773 .name = "tps6586x",
774 .of_match_table = of_match_ptr(tps6586x_of_match),
775 + .pm = &tps6586x_pm_ops,
776 },
777 .probe = tps6586x_i2c_probe,
778 .remove = tps6586x_i2c_remove,
779 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
780 index 389d1db69a32..24a3433f3944 100644
781 --- a/drivers/net/bonding/bond_main.c
782 +++ b/drivers/net/bonding/bond_main.c
783 @@ -1900,6 +1900,9 @@ static int __bond_release_one(struct net_device *bond_dev,
784 if (!bond_has_slaves(bond)) {
785 bond_set_carrier(bond);
786 eth_hw_addr_random(bond_dev);
787 + bond->nest_level = SINGLE_DEPTH_NESTING;
788 + } else {
789 + bond->nest_level = dev_get_nest_level(bond_dev) + 1;
790 }
791
792 unblock_netpoll_tx();
793 diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
794 index b44c1bb687a2..ebc193f7f7dd 100644
795 --- a/drivers/scsi/scsi_pm.c
796 +++ b/drivers/scsi/scsi_pm.c
797 @@ -79,8 +79,22 @@ static int scsi_dev_type_resume(struct device *dev,
798
799 if (err == 0) {
800 pm_runtime_disable(dev);
801 - pm_runtime_set_active(dev);
802 + err = pm_runtime_set_active(dev);
803 pm_runtime_enable(dev);
804 +
805 + /*
806 + * Forcibly set runtime PM status of request queue to "active"
807 + * to make sure we can again get requests from the queue
808 + * (see also blk_pm_peek_request()).
809 + *
810 + * The resume hook will correct runtime PM status of the disk.
811 + */
812 + if (!err && scsi_is_sdev_device(dev)) {
813 + struct scsi_device *sdev = to_scsi_device(dev);
814 +
815 + if (sdev->request_queue->dev)
816 + blk_set_runtime_active(sdev->request_queue);
817 + }
818 }
819
820 return err;
821 @@ -139,16 +153,6 @@ static int scsi_bus_resume_common(struct device *dev,
822 else
823 fn = NULL;
824
825 - /*
826 - * Forcibly set runtime PM status of request queue to "active" to
827 - * make sure we can again get requests from the queue (see also
828 - * blk_pm_peek_request()).
829 - *
830 - * The resume hook will correct runtime PM status of the disk.
831 - */
832 - if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
833 - blk_set_runtime_active(to_scsi_device(dev)->request_queue);
834 -
835 if (fn) {
836 async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
837
838 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
839 index ab999c4444b8..867ae76f93f2 100644
840 --- a/drivers/scsi/sd.c
841 +++ b/drivers/scsi/sd.c
842 @@ -208,6 +208,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
843 sp = buffer_data[0] & 0x80 ? 1 : 0;
844 buffer_data[0] &= ~0x80;
845
846 + /*
847 + * Ensure WP, DPOFUA, and RESERVED fields are cleared in
848 + * received mode parameter buffer before doing MODE SELECT.
849 + */
850 + data.device_specific = 0;
851 +
852 if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
853 SD_MAX_RETRIES, &data, &sshdr)) {
854 if (scsi_sense_valid(&sshdr))
855 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
856 index e6c4321d695c..f61f8650665f 100644
857 --- a/drivers/tty/tty_io.c
858 +++ b/drivers/tty/tty_io.c
859 @@ -1475,7 +1475,8 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
860 static int tty_reopen(struct tty_struct *tty)
861 {
862 struct tty_driver *driver = tty->driver;
863 - int retval;
864 + struct tty_ldisc *ld;
865 + int retval = 0;
866
867 if (driver->type == TTY_DRIVER_TYPE_PTY &&
868 driver->subtype == PTY_TYPE_MASTER)
869 @@ -1487,14 +1488,21 @@ static int tty_reopen(struct tty_struct *tty)
870 if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
871 return -EBUSY;
872
873 - tty->count++;
874 + ld = tty_ldisc_ref_wait(tty);
875 + if (ld) {
876 + tty_ldisc_deref(ld);
877 + } else {
878 + retval = tty_ldisc_lock(tty, 5 * HZ);
879 + if (retval)
880 + return retval;
881
882 - if (tty->ldisc)
883 - return 0;
884 + if (!tty->ldisc)
885 + retval = tty_ldisc_reinit(tty, tty->termios.c_line);
886 + tty_ldisc_unlock(tty);
887 + }
888
889 - retval = tty_ldisc_reinit(tty, tty->termios.c_line);
890 - if (retval)
891 - tty->count--;
892 + if (retval == 0)
893 + tty->count++;
894
895 return retval;
896 }
897 diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
898 index 1bf8ed13f827..dbd7ba32caac 100644
899 --- a/drivers/tty/tty_ldsem.c
900 +++ b/drivers/tty/tty_ldsem.c
901 @@ -307,6 +307,16 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
902 if (!locked)
903 ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
904 list_del(&waiter.list);
905 +
906 + /*
907 + * In case of timeout, wake up every reader who gave the right of way
908 + * to writer. Prevent separation readers into two groups:
909 + * one that helds semaphore and another that sleeps.
910 + * (in case of no contention with a writer)
911 + */
912 + if (!locked && list_empty(&sem->write_wait))
913 + __ldsem_wake_readers(sem);
914 +
915 raw_spin_unlock_irq(&sem->wait_lock);
916
917 __set_task_state(tsk, TASK_RUNNING);
918 diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
919 index a3edb20ea4c3..a846d32ee653 100644
920 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
921 +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
922 @@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
923
924 int r = 0;
925
926 + memset(&p, 0, sizeof(p));
927 +
928 switch (cmd) {
929 case OMAPFB_SYNC_GFX:
930 DBG("ioctl SYNC_GFX\n");
931 diff --git a/fs/block_dev.c b/fs/block_dev.c
932 index cb936c90ae82..8a894cd4875b 100644
933 --- a/fs/block_dev.c
934 +++ b/fs/block_dev.c
935 @@ -114,6 +114,20 @@ void invalidate_bdev(struct block_device *bdev)
936 }
937 EXPORT_SYMBOL(invalidate_bdev);
938
939 +static void set_init_blocksize(struct block_device *bdev)
940 +{
941 + unsigned bsize = bdev_logical_block_size(bdev);
942 + loff_t size = i_size_read(bdev->bd_inode);
943 +
944 + while (bsize < PAGE_SIZE) {
945 + if (size & bsize)
946 + break;
947 + bsize <<= 1;
948 + }
949 + bdev->bd_block_size = bsize;
950 + bdev->bd_inode->i_blkbits = blksize_bits(bsize);
951 +}
952 +
953 int set_blocksize(struct block_device *bdev, int size)
954 {
955 /* Size must be a power of two, and between 512 and PAGE_SIZE */
956 @@ -1209,18 +1223,9 @@ EXPORT_SYMBOL(check_disk_change);
957
958 void bd_set_size(struct block_device *bdev, loff_t size)
959 {
960 - unsigned bsize = bdev_logical_block_size(bdev);
961 -
962 inode_lock(bdev->bd_inode);
963 i_size_write(bdev->bd_inode, size);
964 inode_unlock(bdev->bd_inode);
965 - while (bsize < PAGE_SIZE) {
966 - if (size & bsize)
967 - break;
968 - bsize <<= 1;
969 - }
970 - bdev->bd_block_size = bsize;
971 - bdev->bd_inode->i_blkbits = blksize_bits(bsize);
972 }
973 EXPORT_SYMBOL(bd_set_size);
974
975 @@ -1297,8 +1302,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
976 }
977 }
978
979 - if (!ret)
980 + if (!ret) {
981 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
982 + set_init_blocksize(bdev);
983 + }
984
985 /*
986 * If the device is invalidated, rescan partition
987 @@ -1333,6 +1340,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
988 goto out_clear;
989 }
990 bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
991 + set_init_blocksize(bdev);
992 }
993 } else {
994 if (bdev->bd_contains == bdev) {
995 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
996 index 77b32415d9f2..9d3352fe8dc9 100644
997 --- a/fs/btrfs/disk-io.c
998 +++ b/fs/btrfs/disk-io.c
999 @@ -4193,6 +4193,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
1000 spin_lock(&fs_info->ordered_root_lock);
1001 }
1002 spin_unlock(&fs_info->ordered_root_lock);
1003 +
1004 + /*
1005 + * We need this here because if we've been flipped read-only we won't
1006 + * get sync() from the umount, so we need to make sure any ordered
1007 + * extents that haven't had their dirty pages IO start writeout yet
1008 + * actually get run and error out properly.
1009 + */
1010 + btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
1011 }
1012
1013 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
1014 diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
1015 index ab4cbb4be423..e59eeaf02eaa 100644
1016 --- a/fs/f2fs/recovery.c
1017 +++ b/fs/f2fs/recovery.c
1018 @@ -196,32 +196,6 @@ static void recover_inode(struct inode *inode, struct page *page)
1019 ino_of_node(page), name);
1020 }
1021
1022 -static bool is_same_inode(struct inode *inode, struct page *ipage)
1023 -{
1024 - struct f2fs_inode *ri = F2FS_INODE(ipage);
1025 - struct timespec disk;
1026 -
1027 - if (!IS_INODE(ipage))
1028 - return true;
1029 -
1030 - disk.tv_sec = le64_to_cpu(ri->i_ctime);
1031 - disk.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
1032 - if (timespec_compare(&inode->i_ctime, &disk) > 0)
1033 - return false;
1034 -
1035 - disk.tv_sec = le64_to_cpu(ri->i_atime);
1036 - disk.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
1037 - if (timespec_compare(&inode->i_atime, &disk) > 0)
1038 - return false;
1039 -
1040 - disk.tv_sec = le64_to_cpu(ri->i_mtime);
1041 - disk.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
1042 - if (timespec_compare(&inode->i_mtime, &disk) > 0)
1043 - return false;
1044 -
1045 - return true;
1046 -}
1047 -
1048 static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
1049 {
1050 struct curseg_info *curseg;
1051 @@ -248,10 +222,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
1052 goto next;
1053
1054 entry = get_fsync_inode(head, ino_of_node(page));
1055 - if (entry) {
1056 - if (!is_same_inode(entry->inode, page))
1057 - goto next;
1058 - } else {
1059 + if (!entry) {
1060 if (IS_INODE(page) && is_dent_dnode(page)) {
1061 err = recover_inode_page(sbi, page);
1062 if (err)
1063 diff --git a/fs/proc/array.c b/fs/proc/array.c
1064 index 94f83e74db24..712b44c63701 100644
1065 --- a/fs/proc/array.c
1066 +++ b/fs/proc/array.c
1067 @@ -346,8 +346,9 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
1068 {
1069 #ifdef CONFIG_SECCOMP
1070 seq_put_decimal_ull(m, "Seccomp:\t", p->seccomp.mode);
1071 + seq_putc(m, '\n');
1072 #endif
1073 - seq_printf(m, "\nSpeculation_Store_Bypass:\t");
1074 + seq_printf(m, "Speculation_Store_Bypass:\t");
1075 switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
1076 case -EINVAL:
1077 seq_printf(m, "unknown");
1078 diff --git a/mm/memory.c b/mm/memory.c
1079 index f3fef1df7402..35d8217bb046 100644
1080 --- a/mm/memory.c
1081 +++ b/mm/memory.c
1082 @@ -2823,6 +2823,28 @@ static int __do_fault(struct fault_env *fe, pgoff_t pgoff,
1083 struct vm_fault vmf;
1084 int ret;
1085
1086 + /*
1087 + * Preallocate pte before we take page_lock because this might lead to
1088 + * deadlocks for memcg reclaim which waits for pages under writeback:
1089 + * lock_page(A)
1090 + * SetPageWriteback(A)
1091 + * unlock_page(A)
1092 + * lock_page(B)
1093 + * lock_page(B)
1094 + * pte_alloc_pne
1095 + * shrink_page_list
1096 + * wait_on_page_writeback(A)
1097 + * SetPageWriteback(B)
1098 + * unlock_page(B)
1099 + * # flush A, B to clear the writeback
1100 + */
1101 + if (pmd_none(*fe->pmd) && !fe->prealloc_pte) {
1102 + fe->prealloc_pte = pte_alloc_one(vma->vm_mm, fe->address);
1103 + if (!fe->prealloc_pte)
1104 + return VM_FAULT_OOM;
1105 + smp_wmb(); /* See comment in __pte_alloc() */
1106 + }
1107 +
1108 vmf.virtual_address = (void __user *)(fe->address & PAGE_MASK);
1109 vmf.pgoff = pgoff;
1110 vmf.flags = fe->flags;
1111 diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
1112 index 82ce5713f744..7e42c0d1f55b 100644
1113 --- a/net/bridge/br_netfilter_hooks.c
1114 +++ b/net/bridge/br_netfilter_hooks.c
1115 @@ -275,7 +275,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
1116 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
1117 int ret;
1118
1119 - if (neigh->hh.hh_len) {
1120 + if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
1121 neigh_hh_bridge(&neigh->hh, skb);
1122 skb->dev = nf_bridge->physindev;
1123 ret = br_handle_frame_finish(net, sk, skb);
1124 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
1125 index 18c1f07e4f3b..c7e5aaf2eeb8 100644
1126 --- a/net/bridge/netfilter/ebtables.c
1127 +++ b/net/bridge/netfilter/ebtables.c
1128 @@ -1147,14 +1147,16 @@ static int do_replace(struct net *net, const void __user *user,
1129 tmp.name[sizeof(tmp.name) - 1] = 0;
1130
1131 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1132 - newinfo = vmalloc(sizeof(*newinfo) + countersize);
1133 + newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT,
1134 + PAGE_KERNEL);
1135 if (!newinfo)
1136 return -ENOMEM;
1137
1138 if (countersize)
1139 memset(newinfo->counters, 0, countersize);
1140
1141 - newinfo->entries = vmalloc(tmp.entries_size);
1142 + newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT,
1143 + PAGE_KERNEL);
1144 if (!newinfo->entries) {
1145 ret = -ENOMEM;
1146 goto free_newinfo;
1147 diff --git a/net/can/gw.c b/net/can/gw.c
1148 index 77c8af4047ef..81650affa3fa 100644
1149 --- a/net/can/gw.c
1150 +++ b/net/can/gw.c
1151 @@ -418,13 +418,29 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
1152 while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
1153 (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
1154
1155 - /* check for checksum updates when the CAN frame has been modified */
1156 + /* Has the CAN frame been modified? */
1157 if (modidx) {
1158 - if (gwj->mod.csumfunc.crc8)
1159 + /* get available space for the processed CAN frame type */
1160 + int max_len = nskb->len - offsetof(struct can_frame, data);
1161 +
1162 + /* dlc may have changed, make sure it fits to the CAN frame */
1163 + if (cf->can_dlc > max_len)
1164 + goto out_delete;
1165 +
1166 + /* check for checksum updates in classic CAN length only */
1167 + if (gwj->mod.csumfunc.crc8) {
1168 + if (cf->can_dlc > 8)
1169 + goto out_delete;
1170 +
1171 (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
1172 + }
1173 +
1174 + if (gwj->mod.csumfunc.xor) {
1175 + if (cf->can_dlc > 8)
1176 + goto out_delete;
1177
1178 - if (gwj->mod.csumfunc.xor)
1179 (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
1180 + }
1181 }
1182
1183 /* clear the skb timestamp if not configured the other way */
1184 @@ -436,6 +452,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
1185 gwj->dropped_frames++;
1186 else
1187 gwj->handled_frames++;
1188 +
1189 + return;
1190 +
1191 + out_delete:
1192 + /* delete frame due to misconfiguration */
1193 + gwj->deleted_frames++;
1194 + kfree_skb(nskb);
1195 + return;
1196 }
1197
1198 static inline int cgw_register_filter(struct cgw_job *gwj)
1199 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
1200 index a5851c0bc278..e39895ea1b77 100644
1201 --- a/net/ipv4/ip_sockglue.c
1202 +++ b/net/ipv4/ip_sockglue.c
1203 @@ -133,19 +133,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
1204
1205 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
1206 {
1207 + __be16 _ports[2], *ports;
1208 struct sockaddr_in sin;
1209 - __be16 *ports;
1210 - int end;
1211 -
1212 - end = skb_transport_offset(skb) + 4;
1213 - if (end > 0 && !pskb_may_pull(skb, end))
1214 - return;
1215
1216 /* All current transport protocols have the port numbers in the
1217 * first four bytes of the transport header and this function is
1218 * written with this assumption in mind.
1219 */
1220 - ports = (__be16 *)skb_transport_header(skb);
1221 + ports = skb_header_pointer(skb, skb_transport_offset(skb),
1222 + sizeof(_ports), &_ports);
1223 + if (!ports)
1224 + return;
1225
1226 sin.sin_family = AF_INET;
1227 sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
1228 diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
1229 index 2d3c8fe27583..956af11e9ba3 100644
1230 --- a/net/ipv6/datagram.c
1231 +++ b/net/ipv6/datagram.c
1232 @@ -335,6 +335,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
1233 skb_reset_network_header(skb);
1234 iph = ipv6_hdr(skb);
1235 iph->daddr = fl6->daddr;
1236 + ip6_flow_hdr(iph, 0, 0);
1237
1238 serr = SKB_EXT_ERR(skb);
1239 serr->ee.ee_errno = err;
1240 @@ -694,17 +695,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
1241 }
1242 if (np->rxopt.bits.rxorigdstaddr) {
1243 struct sockaddr_in6 sin6;
1244 - __be16 *ports;
1245 - int end;
1246 + __be16 _ports[2], *ports;
1247
1248 - end = skb_transport_offset(skb) + 4;
1249 - if (end <= 0 || pskb_may_pull(skb, end)) {
1250 + ports = skb_header_pointer(skb, skb_transport_offset(skb),
1251 + sizeof(_ports), &_ports);
1252 + if (ports) {
1253 /* All current transport protocols have the port numbers in the
1254 * first four bytes of the transport header and this function is
1255 * written with this assumption in mind.
1256 */
1257 - ports = (__be16 *)skb_transport_header(skb);
1258 -
1259 sin6.sin6_family = AF_INET6;
1260 sin6.sin6_addr = ipv6_hdr(skb)->daddr;
1261 sin6.sin6_port = ports[1];
1262 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1263 index a9d0358d4f3b..82e222cd4845 100644
1264 --- a/net/packet/af_packet.c
1265 +++ b/net/packet/af_packet.c
1266 @@ -2663,7 +2663,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
1267 addr = saddr->sll_halen ? saddr->sll_addr : NULL;
1268 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
1269 if (addr && dev && saddr->sll_halen < dev->addr_len)
1270 - goto out;
1271 + goto out_put;
1272 }
1273
1274 err = -ENXIO;
1275 @@ -2862,7 +2862,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
1276 addr = saddr->sll_halen ? saddr->sll_addr : NULL;
1277 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
1278 if (addr && dev && saddr->sll_halen < dev->addr_len)
1279 - goto out;
1280 + goto out_unlock;
1281 }
1282
1283 err = -ENXIO;
1284 diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
1285 index e7866d47934d..31f461f955ec 100644
1286 --- a/net/sctp/ipv6.c
1287 +++ b/net/sctp/ipv6.c
1288 @@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
1289
1290 switch (ev) {
1291 case NETDEV_UP:
1292 - addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
1293 + addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
1294 if (addr) {
1295 addr->a.v6.sin6_family = AF_INET6;
1296 - addr->a.v6.sin6_port = 0;
1297 - addr->a.v6.sin6_flowinfo = 0;
1298 addr->a.v6.sin6_addr = ifa->addr;
1299 addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
1300 addr->valid = 1;
1301 @@ -413,7 +411,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
1302 addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
1303 if (addr) {
1304 addr->a.v6.sin6_family = AF_INET6;
1305 - addr->a.v6.sin6_port = 0;
1306 addr->a.v6.sin6_addr = ifp->addr;
1307 addr->a.v6.sin6_scope_id = dev->ifindex;
1308 addr->valid = 1;
1309 diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
1310 index fb7b7632316a..8ea8217db960 100644
1311 --- a/net/sctp/protocol.c
1312 +++ b/net/sctp/protocol.c
1313 @@ -151,7 +151,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
1314 addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
1315 if (addr) {
1316 addr->a.v4.sin_family = AF_INET;
1317 - addr->a.v4.sin_port = 0;
1318 addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
1319 addr->valid = 1;
1320 INIT_LIST_HEAD(&addr->list);
1321 @@ -777,10 +776,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
1322
1323 switch (ev) {
1324 case NETDEV_UP:
1325 - addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
1326 + addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
1327 if (addr) {
1328 addr->a.v4.sin_family = AF_INET;
1329 - addr->a.v4.sin_port = 0;
1330 addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
1331 addr->valid = 1;
1332 spin_lock_bh(&net->sctp.local_addr_lock);
1333 diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
1334 index 5b30603596d0..eafc78e063f1 100644
1335 --- a/net/sunrpc/rpcb_clnt.c
1336 +++ b/net/sunrpc/rpcb_clnt.c
1337 @@ -770,6 +770,12 @@ void rpcb_getport_async(struct rpc_task *task)
1338 case RPCBVERS_3:
1339 map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
1340 map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
1341 + if (!map->r_addr) {
1342 + status = -ENOMEM;
1343 + dprintk("RPC: %5u %s: no memory available\n",
1344 + task->tk_pid, __func__);
1345 + goto bailout_free_args;
1346 + }
1347 map->r_owner = "";
1348 break;
1349 case RPCBVERS_2:
1350 @@ -792,6 +798,8 @@ void rpcb_getport_async(struct rpc_task *task)
1351 rpc_put_task(child);
1352 return;
1353
1354 +bailout_free_args:
1355 + kfree(map);
1356 bailout_release_client:
1357 rpc_release_client(rpcb_clnt);
1358 bailout_nofree:
1359 diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
1360 index aedc476fac02..d947b8210399 100644
1361 --- a/net/tipc/netlink_compat.c
1362 +++ b/net/tipc/netlink_compat.c
1363 @@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
1364 return limit;
1365 }
1366
1367 +static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
1368 +{
1369 + return TLV_GET_LEN(tlv) - TLV_SPACE(0);
1370 +}
1371 +
1372 static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
1373 {
1374 struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
1375 @@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
1376 return buf;
1377 }
1378
1379 +static inline bool string_is_valid(char *s, int len)
1380 +{
1381 + return memchr(s, '\0', len) ? true : false;
1382 +}
1383 +
1384 static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
1385 struct tipc_nl_compat_msg *msg,
1386 struct sk_buff *arg)
1387 @@ -370,6 +380,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
1388 struct nlattr *prop;
1389 struct nlattr *bearer;
1390 struct tipc_bearer_config *b;
1391 + int len;
1392
1393 b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
1394
1395 @@ -377,6 +388,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
1396 if (!bearer)
1397 return -EMSGSIZE;
1398
1399 + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
1400 + if (!string_is_valid(b->name, len))
1401 + return -EINVAL;
1402 +
1403 if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
1404 return -EMSGSIZE;
1405
1406 @@ -402,6 +417,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
1407 {
1408 char *name;
1409 struct nlattr *bearer;
1410 + int len;
1411
1412 name = (char *)TLV_DATA(msg->req);
1413
1414 @@ -409,6 +425,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
1415 if (!bearer)
1416 return -EMSGSIZE;
1417
1418 + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
1419 + if (!string_is_valid(name, len))
1420 + return -EINVAL;
1421 +
1422 if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
1423 return -EMSGSIZE;
1424
1425 @@ -469,6 +489,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
1426 struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
1427 struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
1428 int err;
1429 + int len;
1430
1431 if (!attrs[TIPC_NLA_LINK])
1432 return -EINVAL;
1433 @@ -495,6 +516,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
1434 return err;
1435
1436 name = (char *)TLV_DATA(msg->req);
1437 +
1438 + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
1439 + if (!string_is_valid(name, len))
1440 + return -EINVAL;
1441 +
1442 if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
1443 return 0;
1444
1445 @@ -635,6 +661,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
1446 struct nlattr *prop;
1447 struct nlattr *media;
1448 struct tipc_link_config *lc;
1449 + int len;
1450
1451 lc = (struct tipc_link_config *)TLV_DATA(msg->req);
1452
1453 @@ -642,6 +669,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
1454 if (!media)
1455 return -EMSGSIZE;
1456
1457 + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
1458 + if (!string_is_valid(lc->name, len))
1459 + return -EINVAL;
1460 +
1461 if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
1462 return -EMSGSIZE;
1463
1464 @@ -662,6 +693,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
1465 struct nlattr *prop;
1466 struct nlattr *bearer;
1467 struct tipc_link_config *lc;
1468 + int len;
1469
1470 lc = (struct tipc_link_config *)TLV_DATA(msg->req);
1471
1472 @@ -669,6 +701,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
1473 if (!bearer)
1474 return -EMSGSIZE;
1475
1476 + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
1477 + if (!string_is_valid(lc->name, len))
1478 + return -EINVAL;
1479 +
1480 if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
1481 return -EMSGSIZE;
1482
1483 @@ -717,9 +753,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
1484 struct tipc_link_config *lc;
1485 struct tipc_bearer *bearer;
1486 struct tipc_media *media;
1487 + int len;
1488
1489 lc = (struct tipc_link_config *)TLV_DATA(msg->req);
1490
1491 + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
1492 + if (!string_is_valid(lc->name, len))
1493 + return -EINVAL;
1494 +
1495 media = tipc_media_find(lc->name);
1496 if (media) {
1497 cmd->doit = &tipc_nl_media_set;
1498 @@ -741,6 +782,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
1499 {
1500 char *name;
1501 struct nlattr *link;
1502 + int len;
1503
1504 name = (char *)TLV_DATA(msg->req);
1505
1506 @@ -748,6 +790,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
1507 if (!link)
1508 return -EMSGSIZE;
1509
1510 + len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
1511 + if (!string_is_valid(name, len))
1512 + return -EINVAL;
1513 +
1514 if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
1515 return -EMSGSIZE;
1516
1517 @@ -769,6 +815,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
1518 };
1519
1520 ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
1521 + if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
1522 + return -EINVAL;
1523
1524 depth = ntohl(ntq->depth);
1525
1526 @@ -1192,7 +1240,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
1527 }
1528
1529 len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
1530 - if (len && !TLV_OK(msg.req, len)) {
1531 + if (!len || !TLV_OK(msg.req, len)) {
1532 msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
1533 err = -EOPNOTSUPP;
1534 goto send;
1535 diff --git a/security/security.c b/security/security.c
1536 index f825304f04a7..112df16be770 100644
1537 --- a/security/security.c
1538 +++ b/security/security.c
1539 @@ -904,6 +904,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
1540
1541 void security_cred_free(struct cred *cred)
1542 {
1543 + /*
1544 + * There is a failure case in prepare_creds() that
1545 + * may result in a call here with ->security being NULL.
1546 + */
1547 + if (unlikely(cred->security == NULL))
1548 + return;
1549 +
1550 call_void_hook(cred_free, cred);
1551 }
1552
1553 diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
1554 index 175e4dce58df..c483de590ba3 100644
1555 --- a/security/selinux/ss/policydb.c
1556 +++ b/security/selinux/ss/policydb.c
1557 @@ -726,7 +726,8 @@ static int sens_destroy(void *key, void *datum, void *p)
1558 kfree(key);
1559 if (datum) {
1560 levdatum = datum;
1561 - ebitmap_destroy(&levdatum->level->cat);
1562 + if (levdatum->level)
1563 + ebitmap_destroy(&levdatum->level->cat);
1564 kfree(levdatum->level);
1565 }
1566 kfree(datum);
1567 diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
1568 index 0309f2111c70..5367f854fadc 100644
1569 --- a/security/yama/yama_lsm.c
1570 +++ b/security/yama/yama_lsm.c
1571 @@ -359,7 +359,9 @@ static int yama_ptrace_access_check(struct task_struct *child,
1572 break;
1573 case YAMA_SCOPE_RELATIONAL:
1574 rcu_read_lock();
1575 - if (!task_is_descendant(current, child) &&
1576 + if (!pid_alive(child))
1577 + rc = -EPERM;
1578 + if (!rc && !task_is_descendant(current, child) &&
1579 !ptracer_exception_found(current, child) &&
1580 !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
1581 rc = -EPERM;