Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0186-5.4.87-all-fixes.patch

Parent Directory | Revision Log


Revision 3635 - (show annotations) (download)
Mon Oct 24 12:34:12 2022 UTC (19 months ago) by niro
File size: 73448 byte(s)
-sync kernel patches
1 diff --git a/Makefile b/Makefile
2 index e1a94c8d278e6..71968b4bb313d 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 86
10 +SUBLEVEL = 87
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
15 index 603aed229af78..46338f2360046 100644
16 --- a/arch/powerpc/include/asm/bitops.h
17 +++ b/arch/powerpc/include/asm/bitops.h
18 @@ -217,15 +217,34 @@ static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
19 */
20 static __inline__ int fls(unsigned int x)
21 {
22 - return 32 - __builtin_clz(x);
23 + int lz;
24 +
25 + if (__builtin_constant_p(x))
26 + return x ? 32 - __builtin_clz(x) : 0;
27 + asm("cntlzw %0,%1" : "=r" (lz) : "r" (x));
28 + return 32 - lz;
29 }
30
31 #include <asm-generic/bitops/builtin-__fls.h>
32
33 +/*
34 + * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
35 + * instruction; for 32-bit we use the generic version, which does two
36 + * 32-bit fls calls.
37 + */
38 +#ifdef CONFIG_PPC64
39 static __inline__ int fls64(__u64 x)
40 {
41 - return 64 - __builtin_clzll(x);
42 + int lz;
43 +
44 + if (__builtin_constant_p(x))
45 + return x ? 64 - __builtin_clzll(x) : 0;
46 + asm("cntlzd %0,%1" : "=r" (lz) : "r" (x));
47 + return 64 - lz;
48 }
49 +#else
50 +#include <asm-generic/bitops/fls64.h>
51 +#endif
52
53 #ifdef CONFIG_PPC64
54 unsigned int __arch_hweight8(unsigned int w);
55 diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
56 index f6b253e2be409..36ec0bdd8b63c 100644
57 --- a/arch/powerpc/sysdev/mpic_msgr.c
58 +++ b/arch/powerpc/sysdev/mpic_msgr.c
59 @@ -191,7 +191,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
60
61 /* IO map the message register block. */
62 of_address_to_resource(np, 0, &rsrc);
63 - msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
64 + msgr_block_addr = devm_ioremap(&dev->dev, rsrc.start, resource_size(&rsrc));
65 if (!msgr_block_addr) {
66 dev_err(&dev->dev, "Failed to iomap MPIC message registers");
67 return -EFAULT;
68 diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
69 index 0f5d0a699a49b..4e59ab817d3e7 100644
70 --- a/arch/um/drivers/ubd_kern.c
71 +++ b/arch/um/drivers/ubd_kern.c
72 @@ -47,18 +47,25 @@
73 /* Max request size is determined by sector mask - 32K */
74 #define UBD_MAX_REQUEST (8 * sizeof(long))
75
76 +struct io_desc {
77 + char *buffer;
78 + unsigned long length;
79 + unsigned long sector_mask;
80 + unsigned long long cow_offset;
81 + unsigned long bitmap_words[2];
82 +};
83 +
84 struct io_thread_req {
85 struct request *req;
86 int fds[2];
87 unsigned long offsets[2];
88 unsigned long long offset;
89 - unsigned long length;
90 - char *buffer;
91 int sectorsize;
92 - unsigned long sector_mask;
93 - unsigned long long cow_offset;
94 - unsigned long bitmap_words[2];
95 int error;
96 +
97 + int desc_cnt;
98 + /* io_desc has to be the last element of the struct */
99 + struct io_desc io_desc[];
100 };
101
102
103 @@ -524,12 +531,7 @@ static void ubd_handler(void)
104 blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
105 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, io_req->req->q);
106 }
107 - if ((io_req->error) || (io_req->buffer == NULL))
108 - blk_mq_end_request(io_req->req, io_req->error);
109 - else {
110 - if (!blk_update_request(io_req->req, io_req->error, io_req->length))
111 - __blk_mq_end_request(io_req->req, io_req->error);
112 - }
113 + blk_mq_end_request(io_req->req, io_req->error);
114 kfree(io_req);
115 }
116 }
117 @@ -945,6 +947,7 @@ static int ubd_add(int n, char **error_out)
118 blk_queue_write_cache(ubd_dev->queue, true, false);
119
120 blk_queue_max_segments(ubd_dev->queue, MAX_SG);
121 + blk_queue_segment_boundary(ubd_dev->queue, PAGE_SIZE - 1);
122 err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
123 if(err){
124 *error_out = "Failed to register device";
125 @@ -1288,37 +1291,74 @@ static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask,
126 *cow_offset += bitmap_offset;
127 }
128
129 -static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
130 +static void cowify_req(struct io_thread_req *req, struct io_desc *segment,
131 + unsigned long offset, unsigned long *bitmap,
132 __u64 bitmap_offset, __u64 bitmap_len)
133 {
134 - __u64 sector = req->offset >> SECTOR_SHIFT;
135 + __u64 sector = offset >> SECTOR_SHIFT;
136 int i;
137
138 - if (req->length > (sizeof(req->sector_mask) * 8) << SECTOR_SHIFT)
139 + if (segment->length > (sizeof(segment->sector_mask) * 8) << SECTOR_SHIFT)
140 panic("Operation too long");
141
142 if (req_op(req->req) == REQ_OP_READ) {
143 - for (i = 0; i < req->length >> SECTOR_SHIFT; i++) {
144 + for (i = 0; i < segment->length >> SECTOR_SHIFT; i++) {
145 if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
146 ubd_set_bit(i, (unsigned char *)
147 - &req->sector_mask);
148 + &segment->sector_mask);
149 + }
150 + } else {
151 + cowify_bitmap(offset, segment->length, &segment->sector_mask,
152 + &segment->cow_offset, bitmap, bitmap_offset,
153 + segment->bitmap_words, bitmap_len);
154 + }
155 +}
156 +
157 +static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req,
158 + struct request *req)
159 +{
160 + struct bio_vec bvec;
161 + struct req_iterator iter;
162 + int i = 0;
163 + unsigned long byte_offset = io_req->offset;
164 + int op = req_op(req);
165 +
166 + if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD) {
167 + io_req->io_desc[0].buffer = NULL;
168 + io_req->io_desc[0].length = blk_rq_bytes(req);
169 + } else {
170 + rq_for_each_segment(bvec, req, iter) {
171 + BUG_ON(i >= io_req->desc_cnt);
172 +
173 + io_req->io_desc[i].buffer =
174 + page_address(bvec.bv_page) + bvec.bv_offset;
175 + io_req->io_desc[i].length = bvec.bv_len;
176 + i++;
177 + }
178 + }
179 +
180 + if (dev->cow.file) {
181 + for (i = 0; i < io_req->desc_cnt; i++) {
182 + cowify_req(io_req, &io_req->io_desc[i], byte_offset,
183 + dev->cow.bitmap, dev->cow.bitmap_offset,
184 + dev->cow.bitmap_len);
185 + byte_offset += io_req->io_desc[i].length;
186 }
187 +
188 }
189 - else cowify_bitmap(req->offset, req->length, &req->sector_mask,
190 - &req->cow_offset, bitmap, bitmap_offset,
191 - req->bitmap_words, bitmap_len);
192 }
193
194 -static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
195 - u64 off, struct bio_vec *bvec)
196 +static struct io_thread_req *ubd_alloc_req(struct ubd *dev, struct request *req,
197 + int desc_cnt)
198 {
199 - struct ubd *dev = hctx->queue->queuedata;
200 struct io_thread_req *io_req;
201 - int ret;
202 + int i;
203
204 - io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC);
205 + io_req = kmalloc(sizeof(*io_req) +
206 + (desc_cnt * sizeof(struct io_desc)),
207 + GFP_ATOMIC);
208 if (!io_req)
209 - return -ENOMEM;
210 + return NULL;
211
212 io_req->req = req;
213 if (dev->cow.file)
214 @@ -1326,26 +1366,41 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
215 else
216 io_req->fds[0] = dev->fd;
217 io_req->error = 0;
218 -
219 - if (bvec != NULL) {
220 - io_req->buffer = page_address(bvec->bv_page) + bvec->bv_offset;
221 - io_req->length = bvec->bv_len;
222 - } else {
223 - io_req->buffer = NULL;
224 - io_req->length = blk_rq_bytes(req);
225 - }
226 -
227 io_req->sectorsize = SECTOR_SIZE;
228 io_req->fds[1] = dev->fd;
229 - io_req->cow_offset = -1;
230 - io_req->offset = off;
231 - io_req->sector_mask = 0;
232 + io_req->offset = (u64) blk_rq_pos(req) << SECTOR_SHIFT;
233 io_req->offsets[0] = 0;
234 io_req->offsets[1] = dev->cow.data_offset;
235
236 - if (dev->cow.file)
237 - cowify_req(io_req, dev->cow.bitmap,
238 - dev->cow.bitmap_offset, dev->cow.bitmap_len);
239 + for (i = 0 ; i < desc_cnt; i++) {
240 + io_req->io_desc[i].sector_mask = 0;
241 + io_req->io_desc[i].cow_offset = -1;
242 + }
243 +
244 + return io_req;
245 +}
246 +
247 +static int ubd_submit_request(struct ubd *dev, struct request *req)
248 +{
249 + int segs = 0;
250 + struct io_thread_req *io_req;
251 + int ret;
252 + int op = req_op(req);
253 +
254 + if (op == REQ_OP_FLUSH)
255 + segs = 0;
256 + else if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD)
257 + segs = 1;
258 + else
259 + segs = blk_rq_nr_phys_segments(req);
260 +
261 + io_req = ubd_alloc_req(dev, req, segs);
262 + if (!io_req)
263 + return -ENOMEM;
264 +
265 + io_req->desc_cnt = segs;
266 + if (segs)
267 + ubd_map_req(dev, io_req, req);
268
269 ret = os_write_file(thread_fd, &io_req, sizeof(io_req));
270 if (ret != sizeof(io_req)) {
271 @@ -1356,22 +1411,6 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
272 return ret;
273 }
274
275 -static int queue_rw_req(struct blk_mq_hw_ctx *hctx, struct request *req)
276 -{
277 - struct req_iterator iter;
278 - struct bio_vec bvec;
279 - int ret;
280 - u64 off = (u64)blk_rq_pos(req) << SECTOR_SHIFT;
281 -
282 - rq_for_each_segment(bvec, req, iter) {
283 - ret = ubd_queue_one_vec(hctx, req, off, &bvec);
284 - if (ret < 0)
285 - return ret;
286 - off += bvec.bv_len;
287 - }
288 - return 0;
289 -}
290 -
291 static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
292 const struct blk_mq_queue_data *bd)
293 {
294 @@ -1384,17 +1423,12 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
295 spin_lock_irq(&ubd_dev->lock);
296
297 switch (req_op(req)) {
298 - /* operations with no lentgth/offset arguments */
299 case REQ_OP_FLUSH:
300 - ret = ubd_queue_one_vec(hctx, req, 0, NULL);
301 - break;
302 case REQ_OP_READ:
303 case REQ_OP_WRITE:
304 - ret = queue_rw_req(hctx, req);
305 - break;
306 case REQ_OP_DISCARD:
307 case REQ_OP_WRITE_ZEROES:
308 - ret = ubd_queue_one_vec(hctx, req, (u64)blk_rq_pos(req) << 9, NULL);
309 + ret = ubd_submit_request(ubd_dev, req);
310 break;
311 default:
312 WARN_ON_ONCE(1);
313 @@ -1482,22 +1516,22 @@ static int map_error(int error_code)
314 * will result in unpredictable behaviour and/or crashes.
315 */
316
317 -static int update_bitmap(struct io_thread_req *req)
318 +static int update_bitmap(struct io_thread_req *req, struct io_desc *segment)
319 {
320 int n;
321
322 - if(req->cow_offset == -1)
323 + if (segment->cow_offset == -1)
324 return map_error(0);
325
326 - n = os_pwrite_file(req->fds[1], &req->bitmap_words,
327 - sizeof(req->bitmap_words), req->cow_offset);
328 - if (n != sizeof(req->bitmap_words))
329 + n = os_pwrite_file(req->fds[1], &segment->bitmap_words,
330 + sizeof(segment->bitmap_words), segment->cow_offset);
331 + if (n != sizeof(segment->bitmap_words))
332 return map_error(-n);
333
334 return map_error(0);
335 }
336
337 -static void do_io(struct io_thread_req *req)
338 +static void do_io(struct io_thread_req *req, struct io_desc *desc)
339 {
340 char *buf = NULL;
341 unsigned long len;
342 @@ -1512,21 +1546,20 @@ static void do_io(struct io_thread_req *req)
343 return;
344 }
345
346 - nsectors = req->length / req->sectorsize;
347 + nsectors = desc->length / req->sectorsize;
348 start = 0;
349 do {
350 - bit = ubd_test_bit(start, (unsigned char *) &req->sector_mask);
351 + bit = ubd_test_bit(start, (unsigned char *) &desc->sector_mask);
352 end = start;
353 while((end < nsectors) &&
354 - (ubd_test_bit(end, (unsigned char *)
355 - &req->sector_mask) == bit))
356 + (ubd_test_bit(end, (unsigned char *) &desc->sector_mask) == bit))
357 end++;
358
359 off = req->offset + req->offsets[bit] +
360 start * req->sectorsize;
361 len = (end - start) * req->sectorsize;
362 - if (req->buffer != NULL)
363 - buf = &req->buffer[start * req->sectorsize];
364 + if (desc->buffer != NULL)
365 + buf = &desc->buffer[start * req->sectorsize];
366
367 switch (req_op(req->req)) {
368 case REQ_OP_READ:
369 @@ -1566,7 +1599,8 @@ static void do_io(struct io_thread_req *req)
370 start = end;
371 } while(start < nsectors);
372
373 - req->error = update_bitmap(req);
374 + req->offset += len;
375 + req->error = update_bitmap(req, desc);
376 }
377
378 /* Changed in start_io_thread, which is serialized by being called only
379 @@ -1599,8 +1633,13 @@ int io_thread(void *arg)
380 }
381
382 for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
383 + struct io_thread_req *req = (*io_req_buffer)[count];
384 + int i;
385 +
386 io_count++;
387 - do_io((*io_req_buffer)[count]);
388 + for (i = 0; !req->error && i < req->desc_cnt; i++)
389 + do_io(req, &(req->io_desc[i]));
390 +
391 }
392
393 written = 0;
394 diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
395 index d78a61408243f..7dec43b2c4205 100644
396 --- a/arch/x86/kvm/cpuid.h
397 +++ b/arch/x86/kvm/cpuid.h
398 @@ -154,6 +154,20 @@ static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
399 return x86_stepping(best->eax);
400 }
401
402 +static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
403 +{
404 + return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
405 + guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
406 + guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
407 + guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
408 +}
409 +
410 +static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
411 +{
412 + return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
413 + guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB));
414 +}
415 +
416 static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
417 {
418 return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
419 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
420 index c79c1a07f44b9..2b506904be024 100644
421 --- a/arch/x86/kvm/svm.c
422 +++ b/arch/x86/kvm/svm.c
423 @@ -4233,8 +4233,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
424 break;
425 case MSR_IA32_SPEC_CTRL:
426 if (!msr_info->host_initiated &&
427 - !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
428 - !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
429 + !guest_has_spec_ctrl_msr(vcpu))
430 return 1;
431
432 msr_info->data = svm->spec_ctrl;
433 @@ -4318,16 +4317,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
434 break;
435 case MSR_IA32_SPEC_CTRL:
436 if (!msr->host_initiated &&
437 - !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
438 - !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
439 + !guest_has_spec_ctrl_msr(vcpu))
440 return 1;
441
442 - /* The STIBP bit doesn't fault even if it's not advertised */
443 - if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
444 + if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
445 return 1;
446
447 svm->spec_ctrl = data;
448 -
449 if (!data)
450 break;
451
452 @@ -4346,18 +4342,17 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
453 break;
454 case MSR_IA32_PRED_CMD:
455 if (!msr->host_initiated &&
456 - !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
457 + !guest_has_pred_cmd_msr(vcpu))
458 return 1;
459
460 if (data & ~PRED_CMD_IBPB)
461 return 1;
462 -
463 + if (!boot_cpu_has(X86_FEATURE_IBPB))
464 + return 1;
465 if (!data)
466 break;
467
468 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
469 - if (is_guest_mode(vcpu))
470 - break;
471 set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
472 break;
473 case MSR_AMD64_VIRT_SPEC_CTRL:
474 diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
475 index 2a1ed3aae100e..e7fd2f00edc11 100644
476 --- a/arch/x86/kvm/vmx/vmx.c
477 +++ b/arch/x86/kvm/vmx/vmx.c
478 @@ -1788,7 +1788,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
479 break;
480 case MSR_IA32_SPEC_CTRL:
481 if (!msr_info->host_initiated &&
482 - !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
483 + !guest_has_spec_ctrl_msr(vcpu))
484 return 1;
485
486 msr_info->data = to_vmx(vcpu)->spec_ctrl;
487 @@ -1971,15 +1971,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
488 break;
489 case MSR_IA32_SPEC_CTRL:
490 if (!msr_info->host_initiated &&
491 - !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
492 + !guest_has_spec_ctrl_msr(vcpu))
493 return 1;
494
495 - /* The STIBP bit doesn't fault even if it's not advertised */
496 - if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
497 + if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
498 return 1;
499
500 vmx->spec_ctrl = data;
501 -
502 if (!data)
503 break;
504
505 @@ -2001,12 +1999,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
506 break;
507 case MSR_IA32_PRED_CMD:
508 if (!msr_info->host_initiated &&
509 - !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
510 + !guest_has_pred_cmd_msr(vcpu))
511 return 1;
512
513 if (data & ~PRED_CMD_IBPB)
514 return 1;
515 -
516 + if (!boot_cpu_has(X86_FEATURE_IBPB))
517 + return 1;
518 if (!data)
519 break;
520
521 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
522 index b7f86acb8c911..72990c3c6faf7 100644
523 --- a/arch/x86/kvm/x86.c
524 +++ b/arch/x86/kvm/x86.c
525 @@ -10369,6 +10369,28 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
526 }
527 EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
528
529 +u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu)
530 +{
531 + uint64_t bits = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;
532 +
533 + /* The STIBP bit doesn't fault even if it's not advertised */
534 + if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
535 + !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
536 + bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
537 + if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
538 + !boot_cpu_has(X86_FEATURE_AMD_IBRS))
539 + bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
540 +
541 + if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL_SSBD) &&
542 + !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
543 + bits &= ~SPEC_CTRL_SSBD;
544 + if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
545 + !boot_cpu_has(X86_FEATURE_AMD_SSBD))
546 + bits &= ~SPEC_CTRL_SSBD;
547 +
548 + return bits;
549 +}
550 +EXPORT_SYMBOL_GPL(kvm_spec_ctrl_valid_bits);
551
552 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
553 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
554 diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
555 index de6b55484876a..301286d924320 100644
556 --- a/arch/x86/kvm/x86.h
557 +++ b/arch/x86/kvm/x86.h
558 @@ -368,5 +368,6 @@ static inline bool kvm_pat_valid(u64 data)
559
560 void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
561 void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
562 +u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
563
564 #endif
565 diff --git a/block/blk-pm.c b/block/blk-pm.c
566 index 1adc1cd748b40..2ccf88dbaa40e 100644
567 --- a/block/blk-pm.c
568 +++ b/block/blk-pm.c
569 @@ -67,6 +67,10 @@ int blk_pre_runtime_suspend(struct request_queue *q)
570
571 WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);
572
573 + spin_lock_irq(&q->queue_lock);
574 + q->rpm_status = RPM_SUSPENDING;
575 + spin_unlock_irq(&q->queue_lock);
576 +
577 /*
578 * Increase the pm_only counter before checking whether any
579 * non-PM blk_queue_enter() calls are in progress to avoid that any
580 @@ -89,15 +93,14 @@ int blk_pre_runtime_suspend(struct request_queue *q)
581 /* Switch q_usage_counter back to per-cpu mode. */
582 blk_mq_unfreeze_queue(q);
583
584 - spin_lock_irq(&q->queue_lock);
585 - if (ret < 0)
586 + if (ret < 0) {
587 + spin_lock_irq(&q->queue_lock);
588 + q->rpm_status = RPM_ACTIVE;
589 pm_runtime_mark_last_busy(q->dev);
590 - else
591 - q->rpm_status = RPM_SUSPENDING;
592 - spin_unlock_irq(&q->queue_lock);
593 + spin_unlock_irq(&q->queue_lock);
594
595 - if (ret)
596 blk_clear_pm_only(q);
597 + }
598
599 return ret;
600 }
601 diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
602 index 2553e05e07253..5f1376578ea32 100644
603 --- a/drivers/block/null_blk_zoned.c
604 +++ b/drivers/block/null_blk_zoned.c
605 @@ -2,8 +2,7 @@
606 #include <linux/vmalloc.h>
607 #include "null_blk.h"
608
609 -/* zone_size in MBs to sectors. */
610 -#define ZONE_SIZE_SHIFT 11
611 +#define MB_TO_SECTS(mb) (((sector_t)mb * SZ_1M) >> SECTOR_SHIFT)
612
613 static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
614 {
615 @@ -12,7 +11,7 @@ static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
616
617 int null_zone_init(struct nullb_device *dev)
618 {
619 - sector_t dev_size = (sector_t)dev->size * 1024 * 1024;
620 + sector_t dev_capacity_sects;
621 sector_t sector = 0;
622 unsigned int i;
623
624 @@ -25,9 +24,12 @@ int null_zone_init(struct nullb_device *dev)
625 return -EINVAL;
626 }
627
628 - dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
629 - dev->nr_zones = dev_size >>
630 - (SECTOR_SHIFT + ilog2(dev->zone_size_sects));
631 + dev_capacity_sects = MB_TO_SECTS(dev->size);
632 + dev->zone_size_sects = MB_TO_SECTS(dev->zone_size);
633 + dev->nr_zones = dev_capacity_sects >> ilog2(dev->zone_size_sects);
634 + if (dev_capacity_sects & (dev->zone_size_sects - 1))
635 + dev->nr_zones++;
636 +
637 dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone),
638 GFP_KERNEL | __GFP_ZERO);
639 if (!dev->zones)
640 @@ -55,7 +57,10 @@ int null_zone_init(struct nullb_device *dev)
641 struct blk_zone *zone = &dev->zones[i];
642
643 zone->start = zone->wp = sector;
644 - zone->len = dev->zone_size_sects;
645 + if (zone->start + dev->zone_size_sects > dev_capacity_sects)
646 + zone->len = dev_capacity_sects - zone->start;
647 + else
648 + zone->len = dev->zone_size_sects;
649 zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
650 zone->cond = BLK_ZONE_COND_EMPTY;
651
652 diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
653 index e11af747395dd..17b0f1b793ec8 100644
654 --- a/drivers/bluetooth/hci_h5.c
655 +++ b/drivers/bluetooth/hci_h5.c
656 @@ -250,8 +250,12 @@ static int h5_close(struct hci_uart *hu)
657 if (h5->vnd && h5->vnd->close)
658 h5->vnd->close(h5);
659
660 - if (!hu->serdev)
661 - kfree(h5);
662 + if (hu->serdev)
663 + serdev_device_close(hu->serdev);
664 +
665 + kfree_skb(h5->rx_skb);
666 + kfree(h5);
667 + h5 = NULL;
668
669 return 0;
670 }
671 diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
672 index 6cc71c90f85ea..19337aed9f235 100644
673 --- a/drivers/i3c/master.c
674 +++ b/drivers/i3c/master.c
675 @@ -2492,7 +2492,7 @@ int i3c_master_register(struct i3c_master_controller *master,
676
677 ret = i3c_master_bus_init(master);
678 if (ret)
679 - goto err_put_dev;
680 + goto err_destroy_wq;
681
682 ret = device_add(&master->dev);
683 if (ret)
684 @@ -2523,6 +2523,9 @@ err_del_dev:
685 err_cleanup_bus:
686 i3c_master_bus_cleanup(master);
687
688 +err_destroy_wq:
689 + destroy_workqueue(master->wq);
690 +
691 err_put_dev:
692 put_device(&master->dev);
693
694 diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
695 index 4fb33e7562c52..2aeb922e2365c 100644
696 --- a/drivers/md/dm-verity-target.c
697 +++ b/drivers/md/dm-verity-target.c
698 @@ -533,6 +533,15 @@ static int verity_verify_io(struct dm_verity_io *io)
699 return 0;
700 }
701
702 +/*
703 + * Skip verity work in response to I/O error when system is shutting down.
704 + */
705 +static inline bool verity_is_system_shutting_down(void)
706 +{
707 + return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
708 + || system_state == SYSTEM_RESTART;
709 +}
710 +
711 /*
712 * End one "io" structure with a given error.
713 */
714 @@ -560,7 +569,8 @@ static void verity_end_io(struct bio *bio)
715 {
716 struct dm_verity_io *io = bio->bi_private;
717
718 - if (bio->bi_status && !verity_fec_is_enabled(io->v)) {
719 + if (bio->bi_status &&
720 + (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
721 verity_finish_io(io, bio->bi_status);
722 return;
723 }
724 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
725 index ec136e44aef7f..a195a85cc366a 100644
726 --- a/drivers/md/raid10.c
727 +++ b/drivers/md/raid10.c
728 @@ -1145,7 +1145,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
729 struct md_rdev *err_rdev = NULL;
730 gfp_t gfp = GFP_NOIO;
731
732 - if (r10_bio->devs[slot].rdev) {
733 + if (slot >= 0 && r10_bio->devs[slot].rdev) {
734 /*
735 * This is an error retry, but we cannot
736 * safely dereference the rdev in the r10_bio,
737 @@ -1510,6 +1510,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
738 r10_bio->mddev = mddev;
739 r10_bio->sector = bio->bi_iter.bi_sector;
740 r10_bio->state = 0;
741 + r10_bio->read_slot = -1;
742 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies);
743
744 if (bio_data_dir(bio) == READ)
745 diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
746 index 1282f701f1857..ac8b8bf6ee1d3 100644
747 --- a/drivers/media/usb/dvb-usb/gp8psk.c
748 +++ b/drivers/media/usb/dvb-usb/gp8psk.c
749 @@ -182,7 +182,7 @@ out_rel_fw:
750
751 static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff)
752 {
753 - u8 status, buf;
754 + u8 status = 0, buf;
755 int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct);
756
757 if (onoff) {
758 diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
759 index 16695366ec926..26ff49fdf0f7d 100644
760 --- a/drivers/misc/vmw_vmci/vmci_context.c
761 +++ b/drivers/misc/vmw_vmci/vmci_context.c
762 @@ -743,7 +743,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
763 return VMCI_ERROR_MORE_DATA;
764 }
765
766 - dbells = kmalloc(data_size, GFP_ATOMIC);
767 + dbells = kzalloc(data_size, GFP_ATOMIC);
768 if (!dbells)
769 return VMCI_ERROR_NO_MEM;
770
771 diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
772 index 180caebbd3552..9566958476dfc 100644
773 --- a/drivers/rtc/rtc-pl031.c
774 +++ b/drivers/rtc/rtc-pl031.c
775 @@ -379,8 +379,10 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
776
777 device_init_wakeup(&adev->dev, true);
778 ldata->rtc = devm_rtc_allocate_device(&adev->dev);
779 - if (IS_ERR(ldata->rtc))
780 - return PTR_ERR(ldata->rtc);
781 + if (IS_ERR(ldata->rtc)) {
782 + ret = PTR_ERR(ldata->rtc);
783 + goto out;
784 + }
785
786 ldata->rtc->ops = ops;
787
788 diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
789 index fc32be687606c..c41bc8084d7cc 100644
790 --- a/drivers/rtc/rtc-sun6i.c
791 +++ b/drivers/rtc/rtc-sun6i.c
792 @@ -276,7 +276,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
793 300000000);
794 if (IS_ERR(rtc->int_osc)) {
795 pr_crit("Couldn't register the internal oscillator\n");
796 - return;
797 + goto err;
798 }
799
800 parents[0] = clk_hw_get_name(rtc->int_osc);
801 @@ -292,7 +292,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
802 rtc->losc = clk_register(NULL, &rtc->hw);
803 if (IS_ERR(rtc->losc)) {
804 pr_crit("Couldn't register the LOSC clock\n");
805 - return;
806 + goto err_register;
807 }
808
809 of_property_read_string_index(node, "clock-output-names", 1,
810 @@ -303,7 +303,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
811 &rtc->lock);
812 if (IS_ERR(rtc->ext_losc)) {
813 pr_crit("Couldn't register the LOSC external gate\n");
814 - return;
815 + goto err_register;
816 }
817
818 clk_data->num = 2;
819 @@ -316,6 +316,8 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
820 of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
821 return;
822
823 +err_register:
824 + clk_hw_unregister_fixed_rate(rtc->int_osc);
825 err:
826 kfree(clk_data);
827 }
828 diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
829 index d1f1baba3285d..d1bdd754c6a47 100644
830 --- a/drivers/scsi/cxgbi/cxgb4i/Kconfig
831 +++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
832 @@ -4,6 +4,7 @@ config SCSI_CXGB4_ISCSI
833 depends on PCI && INET && (IPV6 || IPV6=n)
834 depends on THERMAL || !THERMAL
835 depends on ETHERNET
836 + depends on TLS || TLS=n
837 select NET_VENDOR_CHELSIO
838 select CHELSIO_T4
839 select CHELSIO_LIB
840 diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
841 index c37886a267124..9d24bc05df0da 100644
842 --- a/drivers/thermal/cpu_cooling.c
843 +++ b/drivers/thermal/cpu_cooling.c
844 @@ -320,6 +320,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
845 unsigned long state)
846 {
847 struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
848 + int ret;
849
850 /* Request state should be less than max_level */
851 if (WARN_ON(state > cpufreq_cdev->max_level))
852 @@ -329,10 +330,12 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
853 if (cpufreq_cdev->cpufreq_state == state)
854 return 0;
855
856 - cpufreq_cdev->cpufreq_state = state;
857 + ret = freq_qos_update_request(&cpufreq_cdev->qos_req,
858 + cpufreq_cdev->freq_table[state].frequency);
859 + if (ret > 0)
860 + cpufreq_cdev->cpufreq_state = state;
861
862 - return freq_qos_update_request(&cpufreq_cdev->qos_req,
863 - cpufreq_cdev->freq_table[state].frequency);
864 + return ret;
865 }
866
867 /**
868 diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
869 index 632653cd70e3b..2372e161cd5e8 100644
870 --- a/drivers/vfio/pci/vfio_pci.c
871 +++ b/drivers/vfio/pci/vfio_pci.c
872 @@ -114,8 +114,6 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
873 int bar;
874 struct vfio_pci_dummy_resource *dummy_res;
875
876 - INIT_LIST_HEAD(&vdev->dummy_resources_list);
877 -
878 for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
879 res = vdev->pdev->resource + bar;
880
881 @@ -1606,6 +1604,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
882 mutex_init(&vdev->igate);
883 spin_lock_init(&vdev->irqlock);
884 mutex_init(&vdev->ioeventfds_lock);
885 + INIT_LIST_HEAD(&vdev->dummy_resources_list);
886 INIT_LIST_HEAD(&vdev->ioeventfds_list);
887 mutex_init(&vdev->vma_lock);
888 INIT_LIST_HEAD(&vdev->vma_list);
889 diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
890 index f8ce1368218b2..1a8f3c8ab32c6 100644
891 --- a/fs/bfs/inode.c
892 +++ b/fs/bfs/inode.c
893 @@ -351,7 +351,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
894
895 info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) / sizeof(struct bfs_inode) + BFS_ROOT_INO - 1;
896 if (info->si_lasti == BFS_MAX_LASTI)
897 - printf("WARNING: filesystem %s was created with 512 inodes, the real maximum is 511, mounting anyway\n", s->s_id);
898 + printf("NOTE: filesystem %s was created with 512 inodes, the real maximum is 511, mounting anyway\n", s->s_id);
899 else if (info->si_lasti > BFS_MAX_LASTI) {
900 printf("Impossible last inode number %lu > %d on %s\n", info->si_lasti, BFS_MAX_LASTI, s->s_id);
901 goto out1;
902 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
903 index f58e03d1775d8..8ed71b3b25466 100644
904 --- a/fs/btrfs/ioctl.c
905 +++ b/fs/btrfs/ioctl.c
906 @@ -1256,6 +1256,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
907 u64 page_end;
908 u64 page_cnt;
909 u64 start = (u64)start_index << PAGE_SHIFT;
910 + u64 search_start;
911 int ret;
912 int i;
913 int i_done;
914 @@ -1352,6 +1353,40 @@ again:
915
916 lock_extent_bits(&BTRFS_I(inode)->io_tree,
917 page_start, page_end - 1, &cached_state);
918 +
919 + /*
920 + * When defragmenting we skip ranges that have holes or inline extents,
921 + * (check should_defrag_range()), to avoid unnecessary IO and wasting
922 + * space. At btrfs_defrag_file(), we check if a range should be defragged
923 + * before locking the inode and then, if it should, we trigger a sync
924 + * page cache readahead - we lock the inode only after that to avoid
925 + * blocking for too long other tasks that possibly want to operate on
926 + * other file ranges. But before we were able to get the inode lock,
927 + * some other task may have punched a hole in the range, or we may have
928 + * now an inline extent, in which case we should not defrag. So check
929 + * for that here, where we have the inode and the range locked, and bail
930 + * out if that happened.
931 + */
932 + search_start = page_start;
933 + while (search_start < page_end) {
934 + struct extent_map *em;
935 +
936 + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, search_start,
937 + page_end - search_start, 0);
938 + if (IS_ERR(em)) {
939 + ret = PTR_ERR(em);
940 + goto out_unlock_range;
941 + }
942 + if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
943 + free_extent_map(em);
944 + /* Ok, 0 means we did not defrag anything */
945 + ret = 0;
946 + goto out_unlock_range;
947 + }
948 + search_start = extent_map_end(em);
949 + free_extent_map(em);
950 + }
951 +
952 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
953 page_end - 1, EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
954 EXTENT_DEFRAG, 0, 0, &cached_state);
955 @@ -1382,6 +1417,10 @@ again:
956 btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
957 extent_changeset_free(data_reserved);
958 return i_done;
959 +
960 +out_unlock_range:
961 + unlock_extent_cached(&BTRFS_I(inode)->io_tree,
962 + page_start, page_end - 1, &cached_state);
963 out:
964 for (i = 0; i < i_done; i++) {
965 unlock_page(pages[i]);
966 diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
967 index e84efc01512e4..ec73872661902 100644
968 --- a/fs/crypto/fscrypt_private.h
969 +++ b/fs/crypto/fscrypt_private.h
970 @@ -23,6 +23,9 @@
971 #define FSCRYPT_CONTEXT_V1 1
972 #define FSCRYPT_CONTEXT_V2 2
973
974 +/* Keep this in sync with include/uapi/linux/fscrypt.h */
975 +#define FSCRYPT_MODE_MAX FSCRYPT_MODE_ADIANTUM
976 +
977 struct fscrypt_context_v1 {
978 u8 version; /* FSCRYPT_CONTEXT_V1 */
979 u8 contents_encryption_mode;
980 @@ -387,7 +390,7 @@ struct fscrypt_master_key {
981 spinlock_t mk_decrypted_inodes_lock;
982
983 /* Per-mode tfms for DIRECT_KEY policies, allocated on-demand */
984 - struct crypto_skcipher *mk_mode_keys[__FSCRYPT_MODE_MAX + 1];
985 + struct crypto_skcipher *mk_mode_keys[FSCRYPT_MODE_MAX + 1];
986
987 } __randomize_layout;
988
989 diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
990 index bb3b7fcfdd48a..a5a40a76b8ed7 100644
991 --- a/fs/crypto/hooks.c
992 +++ b/fs/crypto/hooks.c
993 @@ -58,8 +58,8 @@ int __fscrypt_prepare_link(struct inode *inode, struct inode *dir,
994 if (err)
995 return err;
996
997 - /* ... in case we looked up ciphertext name before key was added */
998 - if (dentry->d_flags & DCACHE_ENCRYPTED_NAME)
999 + /* ... in case we looked up no-key name before key was added */
1000 + if (fscrypt_is_nokey_name(dentry))
1001 return -ENOKEY;
1002
1003 if (!fscrypt_has_permitted_context(dir, inode))
1004 @@ -83,9 +83,9 @@ int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry,
1005 if (err)
1006 return err;
1007
1008 - /* ... in case we looked up ciphertext name(s) before key was added */
1009 - if ((old_dentry->d_flags | new_dentry->d_flags) &
1010 - DCACHE_ENCRYPTED_NAME)
1011 + /* ... in case we looked up no-key name(s) before key was added */
1012 + if (fscrypt_is_nokey_name(old_dentry) ||
1013 + fscrypt_is_nokey_name(new_dentry))
1014 return -ENOKEY;
1015
1016 if (old_dir != new_dir) {
1017 diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
1018 index 75898340eb468..3e86f75b532a2 100644
1019 --- a/fs/crypto/keysetup.c
1020 +++ b/fs/crypto/keysetup.c
1021 @@ -55,6 +55,8 @@ static struct fscrypt_mode *
1022 select_encryption_mode(const union fscrypt_policy *policy,
1023 const struct inode *inode)
1024 {
1025 + BUILD_BUG_ON(ARRAY_SIZE(available_modes) != FSCRYPT_MODE_MAX + 1);
1026 +
1027 if (S_ISREG(inode->i_mode))
1028 return &available_modes[fscrypt_policy_contents_mode(policy)];
1029
1030 diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
1031 index 4072ba644595b..8e1b10861c104 100644
1032 --- a/fs/crypto/policy.c
1033 +++ b/fs/crypto/policy.c
1034 @@ -55,7 +55,8 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u,
1035 return false;
1036 }
1037
1038 - if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) {
1039 + if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK |
1040 + FSCRYPT_POLICY_FLAG_DIRECT_KEY)) {
1041 fscrypt_warn(inode,
1042 "Unsupported encryption flags (0x%02x)",
1043 policy->flags);
1044 @@ -76,7 +77,8 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u,
1045 return false;
1046 }
1047
1048 - if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) {
1049 + if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK |
1050 + FSCRYPT_POLICY_FLAG_DIRECT_KEY)) {
1051 fscrypt_warn(inode,
1052 "Unsupported encryption flags (0x%02x)",
1053 policy->flags);
1054 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
1055 index 36a81b57012a5..59038e361337c 100644
1056 --- a/fs/ext4/namei.c
1057 +++ b/fs/ext4/namei.c
1058 @@ -2192,6 +2192,9 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1059 if (!dentry->d_name.len)
1060 return -EINVAL;
1061
1062 + if (fscrypt_is_nokey_name(dentry))
1063 + return -ENOKEY;
1064 +
1065 #ifdef CONFIG_UNICODE
1066 if (ext4_has_strict_mode(sbi) && IS_CASEFOLDED(dir) &&
1067 sbi->s_encoding && utf8_validate(sbi->s_encoding, &dentry->d_name))
1068 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1069 index 920658ca8777d..06568467b0c27 100644
1070 --- a/fs/ext4/super.c
1071 +++ b/fs/ext4/super.c
1072 @@ -455,19 +455,17 @@ static bool system_going_down(void)
1073
1074 static void ext4_handle_error(struct super_block *sb)
1075 {
1076 + journal_t *journal = EXT4_SB(sb)->s_journal;
1077 +
1078 if (test_opt(sb, WARN_ON_ERROR))
1079 WARN_ON_ONCE(1);
1080
1081 - if (sb_rdonly(sb))
1082 + if (sb_rdonly(sb) || test_opt(sb, ERRORS_CONT))
1083 return;
1084
1085 - if (!test_opt(sb, ERRORS_CONT)) {
1086 - journal_t *journal = EXT4_SB(sb)->s_journal;
1087 -
1088 - EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
1089 - if (journal)
1090 - jbd2_journal_abort(journal, -EIO);
1091 - }
1092 + EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
1093 + if (journal)
1094 + jbd2_journal_abort(journal, -EIO);
1095 /*
1096 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
1097 * could panic during 'reboot -f' as the underlying device got already
1098 diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
1099 index c966ccc44c157..a57219c51c01a 100644
1100 --- a/fs/f2fs/checkpoint.c
1101 +++ b/fs/f2fs/checkpoint.c
1102 @@ -1596,7 +1596,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1103 goto out;
1104 }
1105
1106 - if (NM_I(sbi)->dirty_nat_cnt == 0 &&
1107 + if (NM_I(sbi)->nat_cnt[DIRTY_NAT] == 0 &&
1108 SIT_I(sbi)->dirty_sentries == 0 &&
1109 prefree_segments(sbi) == 0) {
1110 f2fs_flush_sit_entries(sbi, cpc);
1111 diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
1112 index 9b0bedd82581b..d8d64447bc947 100644
1113 --- a/fs/f2fs/debug.c
1114 +++ b/fs/f2fs/debug.c
1115 @@ -107,8 +107,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
1116 si->node_pages = NODE_MAPPING(sbi)->nrpages;
1117 if (sbi->meta_inode)
1118 si->meta_pages = META_MAPPING(sbi)->nrpages;
1119 - si->nats = NM_I(sbi)->nat_cnt;
1120 - si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
1121 + si->nats = NM_I(sbi)->nat_cnt[TOTAL_NAT];
1122 + si->dirty_nats = NM_I(sbi)->nat_cnt[DIRTY_NAT];
1123 si->sits = MAIN_SEGS(sbi);
1124 si->dirty_sits = SIT_I(sbi)->dirty_sentries;
1125 si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID];
1126 @@ -254,9 +254,10 @@ get_cache:
1127 si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID] +
1128 NM_I(sbi)->nid_cnt[PREALLOC_NID]) *
1129 sizeof(struct free_nid);
1130 - si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry);
1131 - si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
1132 - sizeof(struct nat_entry_set);
1133 + si->cache_mem += NM_I(sbi)->nat_cnt[TOTAL_NAT] *
1134 + sizeof(struct nat_entry);
1135 + si->cache_mem += NM_I(sbi)->nat_cnt[DIRTY_NAT] *
1136 + sizeof(struct nat_entry_set);
1137 si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
1138 for (i = 0; i < MAX_INO_ENTRY; i++)
1139 si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
1140 diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
1141 index 63440abe58c42..4ca3c2a0a0f5b 100644
1142 --- a/fs/f2fs/f2fs.h
1143 +++ b/fs/f2fs/f2fs.h
1144 @@ -797,6 +797,13 @@ enum nid_state {
1145 MAX_NID_STATE,
1146 };
1147
1148 +enum nat_state {
1149 + TOTAL_NAT,
1150 + DIRTY_NAT,
1151 + RECLAIMABLE_NAT,
1152 + MAX_NAT_STATE,
1153 +};
1154 +
1155 struct f2fs_nm_info {
1156 block_t nat_blkaddr; /* base disk address of NAT */
1157 nid_t max_nid; /* maximum possible node ids */
1158 @@ -812,8 +819,7 @@ struct f2fs_nm_info {
1159 struct rw_semaphore nat_tree_lock; /* protect nat_tree_lock */
1160 struct list_head nat_entries; /* cached nat entry list (clean) */
1161 spinlock_t nat_list_lock; /* protect clean nat entry list */
1162 - unsigned int nat_cnt; /* the # of cached nat entries */
1163 - unsigned int dirty_nat_cnt; /* total num of nat entries in set */
1164 + unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
1165 unsigned int nat_blocks; /* # of nat blocks */
1166
1167 /* free node ids management */
1168 @@ -2998,6 +3004,8 @@ bool f2fs_empty_dir(struct inode *dir);
1169
1170 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
1171 {
1172 + if (fscrypt_is_nokey_name(dentry))
1173 + return -ENOKEY;
1174 return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
1175 inode, inode->i_ino, inode->i_mode);
1176 }
1177 diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
1178 index 3ac2a4b32375d..7ce33698ae381 100644
1179 --- a/fs/f2fs/node.c
1180 +++ b/fs/f2fs/node.c
1181 @@ -62,8 +62,8 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
1182 sizeof(struct free_nid)) >> PAGE_SHIFT;
1183 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
1184 } else if (type == NAT_ENTRIES) {
1185 - mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
1186 - PAGE_SHIFT;
1187 + mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
1188 + sizeof(struct nat_entry)) >> PAGE_SHIFT;
1189 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
1190 if (excess_cached_nats(sbi))
1191 res = false;
1192 @@ -177,7 +177,8 @@ static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
1193 list_add_tail(&ne->list, &nm_i->nat_entries);
1194 spin_unlock(&nm_i->nat_list_lock);
1195
1196 - nm_i->nat_cnt++;
1197 + nm_i->nat_cnt[TOTAL_NAT]++;
1198 + nm_i->nat_cnt[RECLAIMABLE_NAT]++;
1199 return ne;
1200 }
1201
1202 @@ -207,7 +208,8 @@ static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
1203 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
1204 {
1205 radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
1206 - nm_i->nat_cnt--;
1207 + nm_i->nat_cnt[TOTAL_NAT]--;
1208 + nm_i->nat_cnt[RECLAIMABLE_NAT]--;
1209 __free_nat_entry(e);
1210 }
1211
1212 @@ -253,7 +255,8 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
1213 if (get_nat_flag(ne, IS_DIRTY))
1214 goto refresh_list;
1215
1216 - nm_i->dirty_nat_cnt++;
1217 + nm_i->nat_cnt[DIRTY_NAT]++;
1218 + nm_i->nat_cnt[RECLAIMABLE_NAT]--;
1219 set_nat_flag(ne, IS_DIRTY, true);
1220 refresh_list:
1221 spin_lock(&nm_i->nat_list_lock);
1222 @@ -273,7 +276,8 @@ static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
1223
1224 set_nat_flag(ne, IS_DIRTY, false);
1225 set->entry_cnt--;
1226 - nm_i->dirty_nat_cnt--;
1227 + nm_i->nat_cnt[DIRTY_NAT]--;
1228 + nm_i->nat_cnt[RECLAIMABLE_NAT]++;
1229 }
1230
1231 static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
1232 @@ -2881,14 +2885,17 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1233 LIST_HEAD(sets);
1234 int err = 0;
1235
1236 - /* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
1237 + /*
1238 + * during unmount, let's flush nat_bits before checking
1239 + * nat_cnt[DIRTY_NAT].
1240 + */
1241 if (enabled_nat_bits(sbi, cpc)) {
1242 down_write(&nm_i->nat_tree_lock);
1243 remove_nats_in_journal(sbi);
1244 up_write(&nm_i->nat_tree_lock);
1245 }
1246
1247 - if (!nm_i->dirty_nat_cnt)
1248 + if (!nm_i->nat_cnt[DIRTY_NAT])
1249 return 0;
1250
1251 down_write(&nm_i->nat_tree_lock);
1252 @@ -2899,7 +2906,8 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1253 * into nat entry set.
1254 */
1255 if (enabled_nat_bits(sbi, cpc) ||
1256 - !__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
1257 + !__has_cursum_space(journal,
1258 + nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
1259 remove_nats_in_journal(sbi);
1260
1261 while ((found = __gang_lookup_nat_set(nm_i,
1262 @@ -3023,7 +3031,6 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
1263 F2FS_RESERVED_NODE_NUM;
1264 nm_i->nid_cnt[FREE_NID] = 0;
1265 nm_i->nid_cnt[PREALLOC_NID] = 0;
1266 - nm_i->nat_cnt = 0;
1267 nm_i->ram_thresh = DEF_RAM_THRESHOLD;
1268 nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
1269 nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
1270 @@ -3160,7 +3167,7 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
1271 __del_from_nat_cache(nm_i, natvec[idx]);
1272 }
1273 }
1274 - f2fs_bug_on(sbi, nm_i->nat_cnt);
1275 + f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);
1276
1277 /* destroy nat set cache */
1278 nid = 0;
1279 diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
1280 index e05af5df56485..4a2e7eaf2b028 100644
1281 --- a/fs/f2fs/node.h
1282 +++ b/fs/f2fs/node.h
1283 @@ -123,13 +123,13 @@ static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
1284
1285 static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
1286 {
1287 - return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid *
1288 + return NM_I(sbi)->nat_cnt[DIRTY_NAT] >= NM_I(sbi)->max_nid *
1289 NM_I(sbi)->dirty_nats_ratio / 100;
1290 }
1291
1292 static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
1293 {
1294 - return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
1295 + return NM_I(sbi)->nat_cnt[TOTAL_NAT] >= DEF_NAT_CACHE_THRESHOLD;
1296 }
1297
1298 static inline bool excess_dirty_nodes(struct f2fs_sb_info *sbi)
1299 diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
1300 index a467aca29cfef..3ceebaaee3840 100644
1301 --- a/fs/f2fs/shrinker.c
1302 +++ b/fs/f2fs/shrinker.c
1303 @@ -18,9 +18,7 @@ static unsigned int shrinker_run_no;
1304
1305 static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
1306 {
1307 - long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
1308 -
1309 - return count > 0 ? count : 0;
1310 + return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT];
1311 }
1312
1313 static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
1314 diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
1315 index fa461db696e79..a9a083232bcfc 100644
1316 --- a/fs/f2fs/super.c
1317 +++ b/fs/f2fs/super.c
1318 @@ -2523,7 +2523,6 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
1319 block_t total_sections, blocks_per_seg;
1320 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
1321 (bh->b_data + F2FS_SUPER_OFFSET);
1322 - unsigned int blocksize;
1323 size_t crc_offset = 0;
1324 __u32 crc = 0;
1325
1326 @@ -2557,10 +2556,10 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
1327 }
1328
1329 /* Currently, support only 4KB block size */
1330 - blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
1331 - if (blocksize != F2FS_BLKSIZE) {
1332 - f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB",
1333 - blocksize);
1334 + if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
1335 + f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
1336 + le32_to_cpu(raw_super->log_blocksize),
1337 + F2FS_BLKSIZE_BITS);
1338 return -EFSCORRUPTED;
1339 }
1340
1341 diff --git a/fs/fcntl.c b/fs/fcntl.c
1342 index 3d40771e8e7cf..3dc90e5293e65 100644
1343 --- a/fs/fcntl.c
1344 +++ b/fs/fcntl.c
1345 @@ -779,9 +779,10 @@ void send_sigio(struct fown_struct *fown, int fd, int band)
1346 {
1347 struct task_struct *p;
1348 enum pid_type type;
1349 + unsigned long flags;
1350 struct pid *pid;
1351
1352 - read_lock(&fown->lock);
1353 + read_lock_irqsave(&fown->lock, flags);
1354
1355 type = fown->pid_type;
1356 pid = fown->pid;
1357 @@ -802,7 +803,7 @@ void send_sigio(struct fown_struct *fown, int fd, int band)
1358 read_unlock(&tasklist_lock);
1359 }
1360 out_unlock_fown:
1361 - read_unlock(&fown->lock);
1362 + read_unlock_irqrestore(&fown->lock, flags);
1363 }
1364
1365 static void send_sigurg_to_task(struct task_struct *p,
1366 @@ -817,9 +818,10 @@ int send_sigurg(struct fown_struct *fown)
1367 struct task_struct *p;
1368 enum pid_type type;
1369 struct pid *pid;
1370 + unsigned long flags;
1371 int ret = 0;
1372
1373 - read_lock(&fown->lock);
1374 + read_lock_irqsave(&fown->lock, flags);
1375
1376 type = fown->pid_type;
1377 pid = fown->pid;
1378 @@ -842,7 +844,7 @@ int send_sigurg(struct fown_struct *fown)
1379 read_unlock(&tasklist_lock);
1380 }
1381 out_unlock_fown:
1382 - read_unlock(&fown->lock);
1383 + read_unlock_irqrestore(&fown->lock, flags);
1384 return ret;
1385 }
1386
1387 diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
1388 index 778275f48a879..5a7091746f68b 100644
1389 --- a/fs/jffs2/jffs2_fs_sb.h
1390 +++ b/fs/jffs2/jffs2_fs_sb.h
1391 @@ -38,6 +38,7 @@ struct jffs2_mount_opts {
1392 * users. This is implemented simply by means of not allowing the
1393 * latter users to write to the file system if the amount if the
1394 * available space is less then 'rp_size'. */
1395 + bool set_rp_size;
1396 unsigned int rp_size;
1397 };
1398
1399 diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
1400 index 60636b2e35ea4..6839a61e8ff1e 100644
1401 --- a/fs/jffs2/super.c
1402 +++ b/fs/jffs2/super.c
1403 @@ -88,7 +88,7 @@ static int jffs2_show_options(struct seq_file *s, struct dentry *root)
1404
1405 if (opts->override_compr)
1406 seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr));
1407 - if (opts->rp_size)
1408 + if (opts->set_rp_size)
1409 seq_printf(s, ",rp_size=%u", opts->rp_size / 1024);
1410
1411 return 0;
1412 @@ -208,11 +208,8 @@ static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
1413 case Opt_rp_size:
1414 if (result.uint_32 > UINT_MAX / 1024)
1415 return invalf(fc, "jffs2: rp_size unrepresentable");
1416 - opt = result.uint_32 * 1024;
1417 - if (opt > c->mtd->size)
1418 - return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB",
1419 - c->mtd->size / 1024);
1420 - c->mount_opts.rp_size = opt;
1421 + c->mount_opts.rp_size = result.uint_32 * 1024;
1422 + c->mount_opts.set_rp_size = true;
1423 break;
1424 default:
1425 return -EINVAL;
1426 @@ -231,8 +228,10 @@ static inline void jffs2_update_mount_opts(struct fs_context *fc)
1427 c->mount_opts.override_compr = new_c->mount_opts.override_compr;
1428 c->mount_opts.compr = new_c->mount_opts.compr;
1429 }
1430 - if (new_c->mount_opts.rp_size)
1431 + if (new_c->mount_opts.set_rp_size) {
1432 + c->mount_opts.set_rp_size = new_c->mount_opts.set_rp_size;
1433 c->mount_opts.rp_size = new_c->mount_opts.rp_size;
1434 + }
1435 mutex_unlock(&c->alloc_sem);
1436 }
1437
1438 @@ -272,6 +271,10 @@ static int jffs2_fill_super(struct super_block *sb, struct fs_context *fc)
1439 c->mtd = sb->s_mtd;
1440 c->os_priv = sb;
1441
1442 + if (c->mount_opts.rp_size > c->mtd->size)
1443 + return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB",
1444 + c->mtd->size / 1024);
1445 +
1446 /* Initialize JFFS2 superblock locks, the further initialization will
1447 * be done later */
1448 mutex_init(&c->alloc_sem);
1449 diff --git a/fs/namespace.c b/fs/namespace.c
1450 index 2adfe7b166a3e..76ea92994d26d 100644
1451 --- a/fs/namespace.c
1452 +++ b/fs/namespace.c
1453 @@ -156,10 +156,10 @@ static inline void mnt_add_count(struct mount *mnt, int n)
1454 /*
1455 * vfsmount lock must be held for write
1456 */
1457 -unsigned int mnt_get_count(struct mount *mnt)
1458 +int mnt_get_count(struct mount *mnt)
1459 {
1460 #ifdef CONFIG_SMP
1461 - unsigned int count = 0;
1462 + int count = 0;
1463 int cpu;
1464
1465 for_each_possible_cpu(cpu) {
1466 @@ -1123,6 +1123,7 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
1467 static void mntput_no_expire(struct mount *mnt)
1468 {
1469 LIST_HEAD(list);
1470 + int count;
1471
1472 rcu_read_lock();
1473 if (likely(READ_ONCE(mnt->mnt_ns))) {
1474 @@ -1146,7 +1147,9 @@ static void mntput_no_expire(struct mount *mnt)
1475 */
1476 smp_mb();
1477 mnt_add_count(mnt, -1);
1478 - if (mnt_get_count(mnt)) {
1479 + count = mnt_get_count(mnt);
1480 + if (count != 0) {
1481 + WARN_ON(count < 0);
1482 rcu_read_unlock();
1483 unlock_mount_hash();
1484 return;
1485 diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
1486 index 04c57066a11af..b90642b022eb9 100644
1487 --- a/fs/nfs/nfs4super.c
1488 +++ b/fs/nfs/nfs4super.c
1489 @@ -96,7 +96,7 @@ static void nfs4_evict_inode(struct inode *inode)
1490 nfs_inode_return_delegation_noreclaim(inode);
1491 /* Note that above delegreturn would trigger pnfs return-on-close */
1492 pnfs_return_layout(inode);
1493 - pnfs_destroy_layout(NFS_I(inode));
1494 + pnfs_destroy_layout_final(NFS_I(inode));
1495 /* First call standard NFS clear_inode() code */
1496 nfs_clear_inode(inode);
1497 }
1498 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
1499 index 9c2b07ce57b27..9fd115c4d0a2f 100644
1500 --- a/fs/nfs/pnfs.c
1501 +++ b/fs/nfs/pnfs.c
1502 @@ -294,6 +294,7 @@ void
1503 pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
1504 {
1505 struct inode *inode;
1506 + unsigned long i_state;
1507
1508 if (!lo)
1509 return;
1510 @@ -304,8 +305,12 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
1511 if (!list_empty(&lo->plh_segs))
1512 WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
1513 pnfs_detach_layout_hdr(lo);
1514 + i_state = inode->i_state;
1515 spin_unlock(&inode->i_lock);
1516 pnfs_free_layout_hdr(lo);
1517 + /* Notify pnfs_destroy_layout_final() that we're done */
1518 + if (i_state & (I_FREEING | I_CLEAR))
1519 + wake_up_var(lo);
1520 }
1521 }
1522
1523 @@ -723,8 +728,7 @@ pnfs_free_lseg_list(struct list_head *free_me)
1524 }
1525 }
1526
1527 -void
1528 -pnfs_destroy_layout(struct nfs_inode *nfsi)
1529 +static struct pnfs_layout_hdr *__pnfs_destroy_layout(struct nfs_inode *nfsi)
1530 {
1531 struct pnfs_layout_hdr *lo;
1532 LIST_HEAD(tmp_list);
1533 @@ -742,9 +746,34 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
1534 pnfs_put_layout_hdr(lo);
1535 } else
1536 spin_unlock(&nfsi->vfs_inode.i_lock);
1537 + return lo;
1538 +}
1539 +
1540 +void pnfs_destroy_layout(struct nfs_inode *nfsi)
1541 +{
1542 + __pnfs_destroy_layout(nfsi);
1543 }
1544 EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
1545
1546 +static bool pnfs_layout_removed(struct nfs_inode *nfsi,
1547 + struct pnfs_layout_hdr *lo)
1548 +{
1549 + bool ret;
1550 +
1551 + spin_lock(&nfsi->vfs_inode.i_lock);
1552 + ret = nfsi->layout != lo;
1553 + spin_unlock(&nfsi->vfs_inode.i_lock);
1554 + return ret;
1555 +}
1556 +
1557 +void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
1558 +{
1559 + struct pnfs_layout_hdr *lo = __pnfs_destroy_layout(nfsi);
1560 +
1561 + if (lo)
1562 + wait_var_event(lo, pnfs_layout_removed(nfsi, lo));
1563 +}
1564 +
1565 static bool
1566 pnfs_layout_add_bulk_destroy_list(struct inode *inode,
1567 struct list_head *layout_list)
1568 diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
1569 index f8a38065c7e47..63da33a92d831 100644
1570 --- a/fs/nfs/pnfs.h
1571 +++ b/fs/nfs/pnfs.h
1572 @@ -255,6 +255,7 @@ struct pnfs_layout_segment *pnfs_layout_process(struct nfs4_layoutget *lgp);
1573 void pnfs_layoutget_free(struct nfs4_layoutget *lgp);
1574 void pnfs_free_lseg_list(struct list_head *tmp_list);
1575 void pnfs_destroy_layout(struct nfs_inode *);
1576 +void pnfs_destroy_layout_final(struct nfs_inode *);
1577 void pnfs_destroy_all_layouts(struct nfs_client *);
1578 int pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
1579 struct nfs_fsid *fsid,
1580 @@ -651,6 +652,10 @@ static inline void pnfs_destroy_layout(struct nfs_inode *nfsi)
1581 {
1582 }
1583
1584 +static inline void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
1585 +{
1586 +}
1587 +
1588 static inline struct pnfs_layout_segment *
1589 pnfs_get_lseg(struct pnfs_layout_segment *lseg)
1590 {
1591 diff --git a/fs/pnode.h b/fs/pnode.h
1592 index 49a058c73e4c7..26f74e092bd98 100644
1593 --- a/fs/pnode.h
1594 +++ b/fs/pnode.h
1595 @@ -44,7 +44,7 @@ int propagate_mount_busy(struct mount *, int);
1596 void propagate_mount_unlock(struct mount *);
1597 void mnt_release_group_id(struct mount *);
1598 int get_dominating_id(struct mount *mnt, const struct path *root);
1599 -unsigned int mnt_get_count(struct mount *mnt);
1600 +int mnt_get_count(struct mount *mnt);
1601 void mnt_set_mountpoint(struct mount *, struct mountpoint *,
1602 struct mount *);
1603 void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp,
1604 diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
1605 index a6f856f341dc7..c5562c871c8be 100644
1606 --- a/fs/quota/quota_tree.c
1607 +++ b/fs/quota/quota_tree.c
1608 @@ -62,7 +62,7 @@ static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
1609
1610 memset(buf, 0, info->dqi_usable_bs);
1611 return sb->s_op->quota_read(sb, info->dqi_type, buf,
1612 - info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
1613 + info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
1614 }
1615
1616 static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
1617 @@ -71,7 +71,7 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
1618 ssize_t ret;
1619
1620 ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
1621 - info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
1622 + info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
1623 if (ret != info->dqi_usable_bs) {
1624 quota_error(sb, "dquota write failed");
1625 if (ret >= 0)
1626 @@ -284,7 +284,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
1627 blk);
1628 goto out_buf;
1629 }
1630 - dquot->dq_off = (blk << info->dqi_blocksize_bits) +
1631 + dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
1632 sizeof(struct qt_disk_dqdbheader) +
1633 i * info->dqi_entry_size;
1634 kfree(buf);
1635 @@ -559,7 +559,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
1636 ret = -EIO;
1637 goto out_buf;
1638 } else {
1639 - ret = (blk << info->dqi_blocksize_bits) + sizeof(struct
1640 + ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct
1641 qt_disk_dqdbheader) + i * info->dqi_entry_size;
1642 }
1643 out_buf:
1644 diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
1645 index bb4973aefbb18..9e64e23014e8e 100644
1646 --- a/fs/reiserfs/stree.c
1647 +++ b/fs/reiserfs/stree.c
1648 @@ -454,6 +454,12 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
1649 "(second one): %h", ih);
1650 return 0;
1651 }
1652 + if (is_direntry_le_ih(ih) && (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE))) {
1653 + reiserfs_warning(NULL, "reiserfs-5093",
1654 + "item entry count seems wrong %h",
1655 + ih);
1656 + return 0;
1657 + }
1658 prev_location = ih_location(ih);
1659 }
1660
1661 diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
1662 index 6c0e19f7a21f4..a5e5e9b9d4e31 100644
1663 --- a/fs/ubifs/dir.c
1664 +++ b/fs/ubifs/dir.c
1665 @@ -278,6 +278,15 @@ done:
1666 return d_splice_alias(inode, dentry);
1667 }
1668
1669 +static int ubifs_prepare_create(struct inode *dir, struct dentry *dentry,
1670 + struct fscrypt_name *nm)
1671 +{
1672 + if (fscrypt_is_nokey_name(dentry))
1673 + return -ENOKEY;
1674 +
1675 + return fscrypt_setup_filename(dir, &dentry->d_name, 0, nm);
1676 +}
1677 +
1678 static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
1679 bool excl)
1680 {
1681 @@ -301,7 +310,7 @@ static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
1682 if (err)
1683 return err;
1684
1685 - err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
1686 + err = ubifs_prepare_create(dir, dentry, &nm);
1687 if (err)
1688 goto out_budg;
1689
1690 @@ -961,7 +970,7 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
1691 if (err)
1692 return err;
1693
1694 - err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
1695 + err = ubifs_prepare_create(dir, dentry, &nm);
1696 if (err)
1697 goto out_budg;
1698
1699 @@ -1046,7 +1055,7 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
1700 return err;
1701 }
1702
1703 - err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
1704 + err = ubifs_prepare_create(dir, dentry, &nm);
1705 if (err) {
1706 kfree(dev);
1707 goto out_budg;
1708 @@ -1130,7 +1139,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
1709 if (err)
1710 return err;
1711
1712 - err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
1713 + err = ubifs_prepare_create(dir, dentry, &nm);
1714 if (err)
1715 goto out_budg;
1716
1717 diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
1718 index f622f7460ed8c..032e5bcf97012 100644
1719 --- a/include/linux/fscrypt.h
1720 +++ b/include/linux/fscrypt.h
1721 @@ -100,6 +100,35 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry)
1722 dentry->d_flags &= ~DCACHE_ENCRYPTED_NAME;
1723 }
1724
1725 +/**
1726 + * fscrypt_is_nokey_name() - test whether a dentry is a no-key name
1727 + * @dentry: the dentry to check
1728 + *
1729 + * This returns true if the dentry is a no-key dentry. A no-key dentry is a
1730 + * dentry that was created in an encrypted directory that hasn't had its
1731 + * encryption key added yet. Such dentries may be either positive or negative.
1732 + *
1733 + * When a filesystem is asked to create a new filename in an encrypted directory
1734 + * and the new filename's dentry is a no-key dentry, it must fail the operation
1735 + * with ENOKEY. This includes ->create(), ->mkdir(), ->mknod(), ->symlink(),
1736 + * ->rename(), and ->link(). (However, ->rename() and ->link() are already
1737 + * handled by fscrypt_prepare_rename() and fscrypt_prepare_link().)
1738 + *
1739 + * This is necessary because creating a filename requires the directory's
1740 + * encryption key, but just checking for the key on the directory inode during
1741 + * the final filesystem operation doesn't guarantee that the key was available
1742 + * during the preceding dentry lookup. And the key must have already been
1743 + * available during the dentry lookup in order for it to have been checked
1744 + * whether the filename already exists in the directory and for the new file's
1745 + * dentry not to be invalidated due to it incorrectly having the no-key flag.
1746 + *
1747 + * Return: %true if the dentry is a no-key name
1748 + */
1749 +static inline bool fscrypt_is_nokey_name(const struct dentry *dentry)
1750 +{
1751 + return dentry->d_flags & DCACHE_ENCRYPTED_NAME;
1752 +}
1753 +
1754 /* crypto.c */
1755 extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
1756 extern struct fscrypt_ctx *fscrypt_get_ctx(gfp_t);
1757 @@ -290,6 +319,11 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry)
1758 {
1759 }
1760
1761 +static inline bool fscrypt_is_nokey_name(const struct dentry *dentry)
1762 +{
1763 + return false;
1764 +}
1765 +
1766 /* crypto.c */
1767 static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
1768 {
1769 diff --git a/include/linux/of.h b/include/linux/of.h
1770 index 844f89e1b0391..a7621e2b440ad 100644
1771 --- a/include/linux/of.h
1772 +++ b/include/linux/of.h
1773 @@ -1282,6 +1282,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
1774 #define _OF_DECLARE(table, name, compat, fn, fn_type) \
1775 static const struct of_device_id __of_table_##name \
1776 __used __section(__##table##_of_table) \
1777 + __aligned(__alignof__(struct of_device_id)) \
1778 = { .compatible = compat, \
1779 .data = (fn == (fn_type)NULL) ? fn : fn }
1780 #else
1781 diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h
1782 index 5ed721ad5b198..af2a44c08683d 100644
1783 --- a/include/uapi/linux/const.h
1784 +++ b/include/uapi/linux/const.h
1785 @@ -28,4 +28,9 @@
1786 #define _BITUL(x) (_UL(1) << (x))
1787 #define _BITULL(x) (_ULL(1) << (x))
1788
1789 +#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
1790 +#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
1791 +
1792 +#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
1793 +
1794 #endif /* _UAPI_LINUX_CONST_H */
1795 diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
1796 index 8938b76c4ee3f..7857aa4136276 100644
1797 --- a/include/uapi/linux/ethtool.h
1798 +++ b/include/uapi/linux/ethtool.h
1799 @@ -14,7 +14,7 @@
1800 #ifndef _UAPI_LINUX_ETHTOOL_H
1801 #define _UAPI_LINUX_ETHTOOL_H
1802
1803 -#include <linux/kernel.h>
1804 +#include <linux/const.h>
1805 #include <linux/types.h>
1806 #include <linux/if_ether.h>
1807
1808 diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h
1809 index 39ccfe9311c38..b14f436f4ebd3 100644
1810 --- a/include/uapi/linux/fscrypt.h
1811 +++ b/include/uapi/linux/fscrypt.h
1812 @@ -17,7 +17,6 @@
1813 #define FSCRYPT_POLICY_FLAGS_PAD_32 0x03
1814 #define FSCRYPT_POLICY_FLAGS_PAD_MASK 0x03
1815 #define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04
1816 -#define FSCRYPT_POLICY_FLAGS_VALID 0x07
1817
1818 /* Encryption algorithms */
1819 #define FSCRYPT_MODE_AES_256_XTS 1
1820 @@ -25,7 +24,7 @@
1821 #define FSCRYPT_MODE_AES_128_CBC 5
1822 #define FSCRYPT_MODE_AES_128_CTS 6
1823 #define FSCRYPT_MODE_ADIANTUM 9
1824 -#define __FSCRYPT_MODE_MAX 9
1825 +/* If adding a mode number > 9, update FSCRYPT_MODE_MAX in fscrypt_private.h */
1826
1827 /*
1828 * Legacy policy version; ad-hoc KDF and no key verification.
1829 @@ -162,7 +161,7 @@ struct fscrypt_get_key_status_arg {
1830 #define FS_POLICY_FLAGS_PAD_32 FSCRYPT_POLICY_FLAGS_PAD_32
1831 #define FS_POLICY_FLAGS_PAD_MASK FSCRYPT_POLICY_FLAGS_PAD_MASK
1832 #define FS_POLICY_FLAG_DIRECT_KEY FSCRYPT_POLICY_FLAG_DIRECT_KEY
1833 -#define FS_POLICY_FLAGS_VALID FSCRYPT_POLICY_FLAGS_VALID
1834 +#define FS_POLICY_FLAGS_VALID 0x07 /* contains old flags only */
1835 #define FS_ENCRYPTION_MODE_INVALID 0 /* never used */
1836 #define FS_ENCRYPTION_MODE_AES_256_XTS FSCRYPT_MODE_AES_256_XTS
1837 #define FS_ENCRYPTION_MODE_AES_256_GCM 2 /* never used */
1838 diff --git a/include/uapi/linux/kernel.h b/include/uapi/linux/kernel.h
1839 index 0ff8f7477847c..fadf2db71fe8a 100644
1840 --- a/include/uapi/linux/kernel.h
1841 +++ b/include/uapi/linux/kernel.h
1842 @@ -3,13 +3,6 @@
1843 #define _UAPI_LINUX_KERNEL_H
1844
1845 #include <linux/sysinfo.h>
1846 -
1847 -/*
1848 - * 'kernel.h' contains some often-used function prototypes etc
1849 - */
1850 -#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
1851 -#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
1852 -
1853 -#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
1854 +#include <linux/const.h>
1855
1856 #endif /* _UAPI_LINUX_KERNEL_H */
1857 diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
1858 index f9a1be7fc6962..ead2e72e5c88e 100644
1859 --- a/include/uapi/linux/lightnvm.h
1860 +++ b/include/uapi/linux/lightnvm.h
1861 @@ -21,7 +21,7 @@
1862 #define _UAPI_LINUX_LIGHTNVM_H
1863
1864 #ifdef __KERNEL__
1865 -#include <linux/kernel.h>
1866 +#include <linux/const.h>
1867 #include <linux/ioctl.h>
1868 #else /* __KERNEL__ */
1869 #include <stdio.h>
1870 diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h
1871 index c36177a86516e..a1fd6173e2dbe 100644
1872 --- a/include/uapi/linux/mroute6.h
1873 +++ b/include/uapi/linux/mroute6.h
1874 @@ -2,7 +2,7 @@
1875 #ifndef _UAPI__LINUX_MROUTE6_H
1876 #define _UAPI__LINUX_MROUTE6_H
1877
1878 -#include <linux/kernel.h>
1879 +#include <linux/const.h>
1880 #include <linux/types.h>
1881 #include <linux/sockios.h>
1882 #include <linux/in6.h> /* For struct sockaddr_in6. */
1883 diff --git a/include/uapi/linux/netfilter/x_tables.h b/include/uapi/linux/netfilter/x_tables.h
1884 index a8283f7dbc519..b8c6bb233ac1c 100644
1885 --- a/include/uapi/linux/netfilter/x_tables.h
1886 +++ b/include/uapi/linux/netfilter/x_tables.h
1887 @@ -1,7 +1,7 @@
1888 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
1889 #ifndef _UAPI_X_TABLES_H
1890 #define _UAPI_X_TABLES_H
1891 -#include <linux/kernel.h>
1892 +#include <linux/const.h>
1893 #include <linux/types.h>
1894
1895 #define XT_FUNCTION_MAXNAMELEN 30
1896 diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
1897 index 0a4d73317759c..622c78c821aa4 100644
1898 --- a/include/uapi/linux/netlink.h
1899 +++ b/include/uapi/linux/netlink.h
1900 @@ -2,7 +2,7 @@
1901 #ifndef _UAPI__LINUX_NETLINK_H
1902 #define _UAPI__LINUX_NETLINK_H
1903
1904 -#include <linux/kernel.h>
1905 +#include <linux/const.h>
1906 #include <linux/socket.h> /* for __kernel_sa_family_t */
1907 #include <linux/types.h>
1908
1909 diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
1910 index 87aa2a6d91256..cc453ed0e65e8 100644
1911 --- a/include/uapi/linux/sysctl.h
1912 +++ b/include/uapi/linux/sysctl.h
1913 @@ -23,7 +23,7 @@
1914 #ifndef _UAPI_LINUX_SYSCTL_H
1915 #define _UAPI_LINUX_SYSCTL_H
1916
1917 -#include <linux/kernel.h>
1918 +#include <linux/const.h>
1919 #include <linux/types.h>
1920 #include <linux/compiler.h>
1921
1922 diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
1923 index f684c82efc2ea..79682c23407c9 100644
1924 --- a/kernel/cgroup/cgroup-v1.c
1925 +++ b/kernel/cgroup/cgroup-v1.c
1926 @@ -914,6 +914,8 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
1927 opt = fs_parse(fc, &cgroup1_fs_parameters, param, &result);
1928 if (opt == -ENOPARAM) {
1929 if (strcmp(param->key, "source") == 0) {
1930 + if (fc->source)
1931 + return invalf(fc, "Multiple sources not supported");
1932 fc->source = param->string;
1933 param->string = NULL;
1934 return 0;
1935 diff --git a/kernel/module.c b/kernel/module.c
1936 index 45513909b01d5..9e9af40698ffe 100644
1937 --- a/kernel/module.c
1938 +++ b/kernel/module.c
1939 @@ -1863,7 +1863,6 @@ static int mod_sysfs_init(struct module *mod)
1940 if (err)
1941 mod_kobject_put(mod);
1942
1943 - /* delay uevent until full sysfs population */
1944 out:
1945 return err;
1946 }
1947 @@ -1900,7 +1899,6 @@ static int mod_sysfs_setup(struct module *mod,
1948 add_sect_attrs(mod, info);
1949 add_notes_attrs(mod, info);
1950
1951 - kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
1952 return 0;
1953
1954 out_unreg_modinfo_attrs:
1955 @@ -3608,6 +3606,9 @@ static noinline int do_init_module(struct module *mod)
1956 blocking_notifier_call_chain(&module_notify_list,
1957 MODULE_STATE_LIVE, mod);
1958
1959 + /* Delay uevent until module has finished its init routine */
1960 + kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
1961 +
1962 /*
1963 * We need to finish all async code before the module init sequence
1964 * is done. This has potential to deadlock. For example, a newly
1965 @@ -3953,6 +3954,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
1966 MODULE_STATE_GOING, mod);
1967 klp_module_going(mod);
1968 bug_cleanup:
1969 + mod->state = MODULE_STATE_GOING;
1970 /* module_bug_cleanup needs module_mutex protection */
1971 mutex_lock(&module_mutex);
1972 module_bug_cleanup(mod);
1973 diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
1974 index 5c9fcc72460df..4419486d7413c 100644
1975 --- a/kernel/time/tick-sched.c
1976 +++ b/kernel/time/tick-sched.c
1977 @@ -916,13 +916,6 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
1978 */
1979 if (tick_do_timer_cpu == cpu)
1980 return false;
1981 - /*
1982 - * Boot safety: make sure the timekeeping duty has been
1983 - * assigned before entering dyntick-idle mode,
1984 - * tick_do_timer_cpu is TICK_DO_TIMER_BOOT
1985 - */
1986 - if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_BOOT))
1987 - return false;
1988
1989 /* Should not happen for nohz-full */
1990 if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
1991 diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
1992 index 2b797a71e9bda..f2b1305e79d2f 100644
1993 --- a/net/sched/sch_taprio.c
1994 +++ b/net/sched/sch_taprio.c
1995 @@ -1597,6 +1597,21 @@ free_sched:
1996 return err;
1997 }
1998
1999 +static void taprio_reset(struct Qdisc *sch)
2000 +{
2001 + struct taprio_sched *q = qdisc_priv(sch);
2002 + struct net_device *dev = qdisc_dev(sch);
2003 + int i;
2004 +
2005 + hrtimer_cancel(&q->advance_timer);
2006 + if (q->qdiscs) {
2007 + for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
2008 + qdisc_reset(q->qdiscs[i]);
2009 + }
2010 + sch->qstats.backlog = 0;
2011 + sch->q.qlen = 0;
2012 +}
2013 +
2014 static void taprio_destroy(struct Qdisc *sch)
2015 {
2016 struct taprio_sched *q = qdisc_priv(sch);
2017 @@ -1607,7 +1622,6 @@ static void taprio_destroy(struct Qdisc *sch)
2018 list_del(&q->taprio_list);
2019 spin_unlock(&taprio_list_lock);
2020
2021 - hrtimer_cancel(&q->advance_timer);
2022
2023 taprio_disable_offload(dev, q, NULL);
2024
2025 @@ -1954,6 +1968,7 @@ static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
2026 .init = taprio_init,
2027 .change = taprio_change,
2028 .destroy = taprio_destroy,
2029 + .reset = taprio_reset,
2030 .peek = taprio_peek,
2031 .dequeue = taprio_dequeue,
2032 .enqueue = taprio_enqueue,
2033 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
2034 index ec501fbaabe49..0c5b7a54ca81c 100644
2035 --- a/sound/core/pcm_native.c
2036 +++ b/sound/core/pcm_native.c
2037 @@ -717,8 +717,13 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
2038 runtime->boundary *= 2;
2039
2040 /* clear the buffer for avoiding possible kernel info leaks */
2041 - if (runtime->dma_area && !substream->ops->copy_user)
2042 - memset(runtime->dma_area, 0, runtime->dma_bytes);
2043 + if (runtime->dma_area && !substream->ops->copy_user) {
2044 + size_t size = runtime->dma_bytes;
2045 +
2046 + if (runtime->info & SNDRV_PCM_INFO_MMAP)
2047 + size = PAGE_ALIGN(size);
2048 + memset(runtime->dma_area, 0, size);
2049 + }
2050
2051 snd_pcm_timer_resolution_change(substream);
2052 snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
2053 diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
2054 index 94db4683cfaff..6a3543b8455fc 100644
2055 --- a/sound/core/rawmidi.c
2056 +++ b/sound/core/rawmidi.c
2057 @@ -72,11 +72,21 @@ static inline unsigned short snd_rawmidi_file_flags(struct file *file)
2058 }
2059 }
2060
2061 -static inline int snd_rawmidi_ready(struct snd_rawmidi_substream *substream)
2062 +static inline bool __snd_rawmidi_ready(struct snd_rawmidi_runtime *runtime)
2063 +{
2064 + return runtime->avail >= runtime->avail_min;
2065 +}
2066 +
2067 +static bool snd_rawmidi_ready(struct snd_rawmidi_substream *substream)
2068 {
2069 struct snd_rawmidi_runtime *runtime = substream->runtime;
2070 + unsigned long flags;
2071 + bool ready;
2072
2073 - return runtime->avail >= runtime->avail_min;
2074 + spin_lock_irqsave(&runtime->lock, flags);
2075 + ready = __snd_rawmidi_ready(runtime);
2076 + spin_unlock_irqrestore(&runtime->lock, flags);
2077 + return ready;
2078 }
2079
2080 static inline int snd_rawmidi_ready_append(struct snd_rawmidi_substream *substream,
2081 @@ -945,7 +955,7 @@ int snd_rawmidi_receive(struct snd_rawmidi_substream *substream,
2082 if (result > 0) {
2083 if (runtime->event)
2084 schedule_work(&runtime->event_work);
2085 - else if (snd_rawmidi_ready(substream))
2086 + else if (__snd_rawmidi_ready(runtime))
2087 wake_up(&runtime->sleep);
2088 }
2089 spin_unlock_irqrestore(&runtime->lock, flags);
2090 @@ -1024,7 +1034,7 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
2091 result = 0;
2092 while (count > 0) {
2093 spin_lock_irq(&runtime->lock);
2094 - while (!snd_rawmidi_ready(substream)) {
2095 + while (!__snd_rawmidi_ready(runtime)) {
2096 wait_queue_entry_t wait;
2097
2098 if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
2099 @@ -1041,9 +1051,11 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
2100 return -ENODEV;
2101 if (signal_pending(current))
2102 return result > 0 ? result : -ERESTARTSYS;
2103 - if (!runtime->avail)
2104 - return result > 0 ? result : -EIO;
2105 spin_lock_irq(&runtime->lock);
2106 + if (!runtime->avail) {
2107 + spin_unlock_irq(&runtime->lock);
2108 + return result > 0 ? result : -EIO;
2109 + }
2110 }
2111 spin_unlock_irq(&runtime->lock);
2112 count1 = snd_rawmidi_kernel_read1(substream,
2113 @@ -1181,7 +1193,7 @@ int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int coun
2114 runtime->avail += count;
2115 substream->bytes += count;
2116 if (count > 0) {
2117 - if (runtime->drain || snd_rawmidi_ready(substream))
2118 + if (runtime->drain || __snd_rawmidi_ready(runtime))
2119 wake_up(&runtime->sleep);
2120 }
2121 return count;
2122 @@ -1370,9 +1382,11 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
2123 return -ENODEV;
2124 if (signal_pending(current))
2125 return result > 0 ? result : -ERESTARTSYS;
2126 - if (!runtime->avail && !timeout)
2127 - return result > 0 ? result : -EIO;
2128 spin_lock_irq(&runtime->lock);
2129 + if (!runtime->avail && !timeout) {
2130 + spin_unlock_irq(&runtime->lock);
2131 + return result > 0 ? result : -EIO;
2132 + }
2133 }
2134 spin_unlock_irq(&runtime->lock);
2135 count1 = snd_rawmidi_kernel_write1(substream, buf, NULL, count);
2136 @@ -1452,6 +1466,7 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
2137 struct snd_rawmidi *rmidi;
2138 struct snd_rawmidi_substream *substream;
2139 struct snd_rawmidi_runtime *runtime;
2140 + unsigned long buffer_size, avail, xruns;
2141
2142 rmidi = entry->private_data;
2143 snd_iprintf(buffer, "%s\n\n", rmidi->name);
2144 @@ -1470,13 +1485,16 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
2145 " Owner PID : %d\n",
2146 pid_vnr(substream->pid));
2147 runtime = substream->runtime;
2148 + spin_lock_irq(&runtime->lock);
2149 + buffer_size = runtime->buffer_size;
2150 + avail = runtime->avail;
2151 + spin_unlock_irq(&runtime->lock);
2152 snd_iprintf(buffer,
2153 " Mode : %s\n"
2154 " Buffer size : %lu\n"
2155 " Avail : %lu\n",
2156 runtime->oss ? "OSS compatible" : "native",
2157 - (unsigned long) runtime->buffer_size,
2158 - (unsigned long) runtime->avail);
2159 + buffer_size, avail);
2160 }
2161 }
2162 }
2163 @@ -1494,13 +1512,16 @@ static void snd_rawmidi_proc_info_read(struct snd_info_entry *entry,
2164 " Owner PID : %d\n",
2165 pid_vnr(substream->pid));
2166 runtime = substream->runtime;
2167 + spin_lock_irq(&runtime->lock);
2168 + buffer_size = runtime->buffer_size;
2169 + avail = runtime->avail;
2170 + xruns = runtime->xruns;
2171 + spin_unlock_irq(&runtime->lock);
2172 snd_iprintf(buffer,
2173 " Buffer size : %lu\n"
2174 " Avail : %lu\n"
2175 " Overruns : %lu\n",
2176 - (unsigned long) runtime->buffer_size,
2177 - (unsigned long) runtime->avail,
2178 - (unsigned long) runtime->xruns);
2179 + buffer_size, avail, xruns);
2180 }
2181 }
2182 }
2183 diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h
2184 index 9254c8dbe5e37..25d2d6b610079 100644
2185 --- a/sound/core/seq/seq_queue.h
2186 +++ b/sound/core/seq/seq_queue.h
2187 @@ -26,10 +26,10 @@ struct snd_seq_queue {
2188
2189 struct snd_seq_timer *timer; /* time keeper for this queue */
2190 int owner; /* client that 'owns' the timer */
2191 - unsigned int locked:1, /* timer is only accesibble by owner if set */
2192 - klocked:1, /* kernel lock (after START) */
2193 - check_again:1,
2194 - check_blocked:1;
2195 + bool locked; /* timer is only accesibble by owner if set */
2196 + bool klocked; /* kernel lock (after START) */
2197 + bool check_again; /* concurrent access happened during check */
2198 + bool check_blocked; /* queue being checked */
2199
2200 unsigned int flags; /* status flags */
2201 unsigned int info_flags; /* info for sync */
2202 diff --git a/tools/include/uapi/linux/const.h b/tools/include/uapi/linux/const.h
2203 index 5ed721ad5b198..af2a44c08683d 100644
2204 --- a/tools/include/uapi/linux/const.h
2205 +++ b/tools/include/uapi/linux/const.h
2206 @@ -28,4 +28,9 @@
2207 #define _BITUL(x) (_UL(1) << (x))
2208 #define _BITULL(x) (_ULL(1) << (x))
2209
2210 +#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
2211 +#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
2212 +
2213 +#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
2214 +
2215 #endif /* _UAPI_LINUX_CONST_H */