Annotation of /trunk/kernel-alx/patches-4.14/0154-4.14.55-all-fixes.patch
Parent Directory | Revision Log
Revision 3238 -
(hide annotations)
(download)
Fri Nov 9 12:14:58 2018 UTC (5 years, 10 months ago) by niro
File size: 73528 byte(s)
-added up to patches-4.14.79
1 | niro | 3238 | diff --git a/Makefile b/Makefile |
2 | index de0955d8dfa3..0700feaaa6cf 100644 | ||
3 | --- a/Makefile | ||
4 | +++ b/Makefile | ||
5 | @@ -1,7 +1,7 @@ | ||
6 | # SPDX-License-Identifier: GPL-2.0 | ||
7 | VERSION = 4 | ||
8 | PATCHLEVEL = 14 | ||
9 | -SUBLEVEL = 54 | ||
10 | +SUBLEVEL = 55 | ||
11 | EXTRAVERSION = | ||
12 | NAME = Petit Gorille | ||
13 | |||
14 | diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S | ||
15 | index be20b1f73384..e928c2af6a10 100644 | ||
16 | --- a/arch/s390/kernel/entry.S | ||
17 | +++ b/arch/s390/kernel/entry.S | ||
18 | @@ -1244,7 +1244,7 @@ cleanup_critical: | ||
19 | jl 0f | ||
20 | clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end | ||
21 | jl .Lcleanup_load_fpu_regs | ||
22 | -0: BR_EX %r14 | ||
23 | +0: BR_EX %r14,%r11 | ||
24 | |||
25 | .align 8 | ||
26 | .Lcleanup_table: | ||
27 | @@ -1280,7 +1280,7 @@ cleanup_critical: | ||
28 | ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE | ||
29 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
30 | larl %r9,sie_exit # skip forward to sie_exit | ||
31 | - BR_EX %r14 | ||
32 | + BR_EX %r14,%r11 | ||
33 | #endif | ||
34 | |||
35 | .Lcleanup_system_call: | ||
36 | diff --git a/block/blk-lib.c b/block/blk-lib.c | ||
37 | index 63fb971d6574..2bc544ce3d2e 100644 | ||
38 | --- a/block/blk-lib.c | ||
39 | +++ b/block/blk-lib.c | ||
40 | @@ -275,6 +275,40 @@ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects) | ||
41 | return min(pages, (sector_t)BIO_MAX_PAGES); | ||
42 | } | ||
43 | |||
44 | +static int __blkdev_issue_zero_pages(struct block_device *bdev, | ||
45 | + sector_t sector, sector_t nr_sects, gfp_t gfp_mask, | ||
46 | + struct bio **biop) | ||
47 | +{ | ||
48 | + struct request_queue *q = bdev_get_queue(bdev); | ||
49 | + struct bio *bio = *biop; | ||
50 | + int bi_size = 0; | ||
51 | + unsigned int sz; | ||
52 | + | ||
53 | + if (!q) | ||
54 | + return -ENXIO; | ||
55 | + | ||
56 | + while (nr_sects != 0) { | ||
57 | + bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects), | ||
58 | + gfp_mask); | ||
59 | + bio->bi_iter.bi_sector = sector; | ||
60 | + bio_set_dev(bio, bdev); | ||
61 | + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
62 | + | ||
63 | + while (nr_sects != 0) { | ||
64 | + sz = min((sector_t) PAGE_SIZE, nr_sects << 9); | ||
65 | + bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0); | ||
66 | + nr_sects -= bi_size >> 9; | ||
67 | + sector += bi_size >> 9; | ||
68 | + if (bi_size < sz) | ||
69 | + break; | ||
70 | + } | ||
71 | + cond_resched(); | ||
72 | + } | ||
73 | + | ||
74 | + *biop = bio; | ||
75 | + return 0; | ||
76 | +} | ||
77 | + | ||
78 | /** | ||
79 | * __blkdev_issue_zeroout - generate number of zero filed write bios | ||
80 | * @bdev: blockdev to issue | ||
81 | @@ -288,12 +322,6 @@ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects) | ||
82 | * Zero-fill a block range, either using hardware offload or by explicitly | ||
83 | * writing zeroes to the device. | ||
84 | * | ||
85 | - * Note that this function may fail with -EOPNOTSUPP if the driver signals | ||
86 | - * zeroing offload support, but the device fails to process the command (for | ||
87 | - * some devices there is no non-destructive way to verify whether this | ||
88 | - * operation is actually supported). In this case the caller should call | ||
89 | - * retry the call to blkdev_issue_zeroout() and the fallback path will be used. | ||
90 | - * | ||
91 | * If a device is using logical block provisioning, the underlying space will | ||
92 | * not be released if %flags contains BLKDEV_ZERO_NOUNMAP. | ||
93 | * | ||
94 | @@ -305,9 +333,6 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | ||
95 | unsigned flags) | ||
96 | { | ||
97 | int ret; | ||
98 | - int bi_size = 0; | ||
99 | - struct bio *bio = *biop; | ||
100 | - unsigned int sz; | ||
101 | sector_t bs_mask; | ||
102 | |||
103 | bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; | ||
104 | @@ -317,30 +342,10 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | ||
105 | ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask, | ||
106 | biop, flags); | ||
107 | if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK)) | ||
108 | - goto out; | ||
109 | - | ||
110 | - ret = 0; | ||
111 | - while (nr_sects != 0) { | ||
112 | - bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects), | ||
113 | - gfp_mask); | ||
114 | - bio->bi_iter.bi_sector = sector; | ||
115 | - bio_set_dev(bio, bdev); | ||
116 | - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
117 | - | ||
118 | - while (nr_sects != 0) { | ||
119 | - sz = min((sector_t) PAGE_SIZE, nr_sects << 9); | ||
120 | - bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0); | ||
121 | - nr_sects -= bi_size >> 9; | ||
122 | - sector += bi_size >> 9; | ||
123 | - if (bi_size < sz) | ||
124 | - break; | ||
125 | - } | ||
126 | - cond_resched(); | ||
127 | - } | ||
128 | + return ret; | ||
129 | |||
130 | - *biop = bio; | ||
131 | -out: | ||
132 | - return ret; | ||
133 | + return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, | ||
134 | + biop); | ||
135 | } | ||
136 | EXPORT_SYMBOL(__blkdev_issue_zeroout); | ||
137 | |||
138 | @@ -360,18 +365,49 @@ EXPORT_SYMBOL(__blkdev_issue_zeroout); | ||
139 | int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | ||
140 | sector_t nr_sects, gfp_t gfp_mask, unsigned flags) | ||
141 | { | ||
142 | - int ret; | ||
143 | - struct bio *bio = NULL; | ||
144 | + int ret = 0; | ||
145 | + sector_t bs_mask; | ||
146 | + struct bio *bio; | ||
147 | struct blk_plug plug; | ||
148 | + bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev); | ||
149 | |||
150 | + bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; | ||
151 | + if ((sector | nr_sects) & bs_mask) | ||
152 | + return -EINVAL; | ||
153 | + | ||
154 | +retry: | ||
155 | + bio = NULL; | ||
156 | blk_start_plug(&plug); | ||
157 | - ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, | ||
158 | - &bio, flags); | ||
159 | + if (try_write_zeroes) { | ||
160 | + ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, | ||
161 | + gfp_mask, &bio, flags); | ||
162 | + } else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) { | ||
163 | + ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects, | ||
164 | + gfp_mask, &bio); | ||
165 | + } else { | ||
166 | + /* No zeroing offload support */ | ||
167 | + ret = -EOPNOTSUPP; | ||
168 | + } | ||
169 | if (ret == 0 && bio) { | ||
170 | ret = submit_bio_wait(bio); | ||
171 | bio_put(bio); | ||
172 | } | ||
173 | blk_finish_plug(&plug); | ||
174 | + if (ret && try_write_zeroes) { | ||
175 | + if (!(flags & BLKDEV_ZERO_NOFALLBACK)) { | ||
176 | + try_write_zeroes = false; | ||
177 | + goto retry; | ||
178 | + } | ||
179 | + if (!bdev_write_zeroes_sectors(bdev)) { | ||
180 | + /* | ||
181 | + * Zeroing offload support was indicated, but the | ||
182 | + * device reported ILLEGAL REQUEST (for some devices | ||
183 | + * there is no non-destructive way to verify whether | ||
184 | + * WRITE ZEROES is actually supported). | ||
185 | + */ | ||
186 | + ret = -EOPNOTSUPP; | ||
187 | + } | ||
188 | + } | ||
189 | |||
190 | return ret; | ||
191 | } | ||
192 | diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c | ||
193 | index 03471b3fce86..c2042f822b03 100644 | ||
194 | --- a/drivers/block/drbd/drbd_worker.c | ||
195 | +++ b/drivers/block/drbd/drbd_worker.c | ||
196 | @@ -282,8 +282,8 @@ void drbd_request_endio(struct bio *bio) | ||
197 | what = COMPLETED_OK; | ||
198 | } | ||
199 | |||
200 | - bio_put(req->private_bio); | ||
201 | req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status)); | ||
202 | + bio_put(bio); | ||
203 | |||
204 | /* not req_mod(), we need irqsave here! */ | ||
205 | spin_lock_irqsave(&device->resource->req_lock, flags); | ||
206 | diff --git a/drivers/dax/super.c b/drivers/dax/super.c | ||
207 | index c4cd034a3820..6c179c2a9ff9 100644 | ||
208 | --- a/drivers/dax/super.c | ||
209 | +++ b/drivers/dax/super.c | ||
210 | @@ -73,42 +73,50 @@ EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev); | ||
211 | |||
212 | /** | ||
213 | * __bdev_dax_supported() - Check if the device supports dax for filesystem | ||
214 | - * @sb: The superblock of the device | ||
215 | + * @bdev: block device to check | ||
216 | * @blocksize: The block size of the device | ||
217 | * | ||
218 | * This is a library function for filesystems to check if the block device | ||
219 | * can be mounted with dax option. | ||
220 | * | ||
221 | - * Return: negative errno if unsupported, 0 if supported. | ||
222 | + * Return: true if supported, false if unsupported | ||
223 | */ | ||
224 | -int __bdev_dax_supported(struct super_block *sb, int blocksize) | ||
225 | +bool __bdev_dax_supported(struct block_device *bdev, int blocksize) | ||
226 | { | ||
227 | - struct block_device *bdev = sb->s_bdev; | ||
228 | struct dax_device *dax_dev; | ||
229 | + struct request_queue *q; | ||
230 | pgoff_t pgoff; | ||
231 | int err, id; | ||
232 | void *kaddr; | ||
233 | pfn_t pfn; | ||
234 | long len; | ||
235 | + char buf[BDEVNAME_SIZE]; | ||
236 | |||
237 | if (blocksize != PAGE_SIZE) { | ||
238 | - pr_err("VFS (%s): error: unsupported blocksize for dax\n", | ||
239 | - sb->s_id); | ||
240 | - return -EINVAL; | ||
241 | + pr_debug("%s: error: unsupported blocksize for dax\n", | ||
242 | + bdevname(bdev, buf)); | ||
243 | + return false; | ||
244 | + } | ||
245 | + | ||
246 | + q = bdev_get_queue(bdev); | ||
247 | + if (!q || !blk_queue_dax(q)) { | ||
248 | + pr_debug("%s: error: request queue doesn't support dax\n", | ||
249 | + bdevname(bdev, buf)); | ||
250 | + return false; | ||
251 | } | ||
252 | |||
253 | err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff); | ||
254 | if (err) { | ||
255 | - pr_err("VFS (%s): error: unaligned partition for dax\n", | ||
256 | - sb->s_id); | ||
257 | - return err; | ||
258 | + pr_debug("%s: error: unaligned partition for dax\n", | ||
259 | + bdevname(bdev, buf)); | ||
260 | + return false; | ||
261 | } | ||
262 | |||
263 | dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); | ||
264 | if (!dax_dev) { | ||
265 | - pr_err("VFS (%s): error: device does not support dax\n", | ||
266 | - sb->s_id); | ||
267 | - return -EOPNOTSUPP; | ||
268 | + pr_debug("%s: error: device does not support dax\n", | ||
269 | + bdevname(bdev, buf)); | ||
270 | + return false; | ||
271 | } | ||
272 | |||
273 | id = dax_read_lock(); | ||
274 | @@ -118,12 +126,12 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize) | ||
275 | put_dax(dax_dev); | ||
276 | |||
277 | if (len < 1) { | ||
278 | - pr_err("VFS (%s): error: dax access failed (%ld)", | ||
279 | - sb->s_id, len); | ||
280 | - return len < 0 ? len : -EIO; | ||
281 | + pr_debug("%s: error: dax access failed (%ld)\n", | ||
282 | + bdevname(bdev, buf), len); | ||
283 | + return false; | ||
284 | } | ||
285 | |||
286 | - return 0; | ||
287 | + return true; | ||
288 | } | ||
289 | EXPORT_SYMBOL_GPL(__bdev_dax_supported); | ||
290 | #endif | ||
291 | diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c | ||
292 | index bc5128203056..78e630771214 100644 | ||
293 | --- a/drivers/gpu/drm/drm_property.c | ||
294 | +++ b/drivers/gpu/drm/drm_property.c | ||
295 | @@ -516,7 +516,7 @@ static void drm_property_free_blob(struct kref *kref) | ||
296 | |||
297 | drm_mode_object_unregister(blob->dev, &blob->base); | ||
298 | |||
299 | - kfree(blob); | ||
300 | + kvfree(blob); | ||
301 | } | ||
302 | |||
303 | /** | ||
304 | @@ -543,7 +543,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, | ||
305 | if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob)) | ||
306 | return ERR_PTR(-EINVAL); | ||
307 | |||
308 | - blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); | ||
309 | + blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); | ||
310 | if (!blob) | ||
311 | return ERR_PTR(-ENOMEM); | ||
312 | |||
313 | @@ -559,7 +559,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, | ||
314 | ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB, | ||
315 | true, drm_property_free_blob); | ||
316 | if (ret) { | ||
317 | - kfree(blob); | ||
318 | + kvfree(blob); | ||
319 | return ERR_PTR(-EINVAL); | ||
320 | } | ||
321 | |||
322 | diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c | ||
323 | index 2ebdc6d5a76e..d5583190f3e4 100644 | ||
324 | --- a/drivers/gpu/drm/udl/udl_fb.c | ||
325 | +++ b/drivers/gpu/drm/udl/udl_fb.c | ||
326 | @@ -137,7 +137,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, | ||
327 | |||
328 | if (cmd > (char *) urb->transfer_buffer) { | ||
329 | /* Send partial buffer remaining before exiting */ | ||
330 | - int len = cmd - (char *) urb->transfer_buffer; | ||
331 | + int len; | ||
332 | + if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length) | ||
333 | + *cmd++ = 0xAF; | ||
334 | + len = cmd - (char *) urb->transfer_buffer; | ||
335 | ret = udl_submit_urb(dev, urb, len); | ||
336 | bytes_sent += len; | ||
337 | } else | ||
338 | diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c | ||
339 | index 0c87b1ac6b68..b992644c17e6 100644 | ||
340 | --- a/drivers/gpu/drm/udl/udl_transfer.c | ||
341 | +++ b/drivers/gpu/drm/udl/udl_transfer.c | ||
342 | @@ -153,11 +153,11 @@ static void udl_compress_hline16( | ||
343 | raw_pixels_count_byte = cmd++; /* we'll know this later */ | ||
344 | raw_pixel_start = pixel; | ||
345 | |||
346 | - cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1, | ||
347 | - min((int)(pixel_end - pixel) / bpp, | ||
348 | - (int)(cmd_buffer_end - cmd) / 2))) * bpp; | ||
349 | + cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL, | ||
350 | + (unsigned long)(pixel_end - pixel) / bpp, | ||
351 | + (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp; | ||
352 | |||
353 | - prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp); | ||
354 | + prefetch_range((void *) pixel, cmd_pixel_end - pixel); | ||
355 | pixel_val16 = get_pixel_val16(pixel, bpp); | ||
356 | |||
357 | while (pixel < cmd_pixel_end) { | ||
358 | @@ -193,6 +193,9 @@ static void udl_compress_hline16( | ||
359 | if (pixel > raw_pixel_start) { | ||
360 | /* finalize last RAW span */ | ||
361 | *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF; | ||
362 | + } else { | ||
363 | + /* undo unused byte */ | ||
364 | + cmd--; | ||
365 | } | ||
366 | |||
367 | *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF; | ||
368 | diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c | ||
369 | index 5271db593478..ae8c8e66a6c4 100644 | ||
370 | --- a/drivers/hid/hid-debug.c | ||
371 | +++ b/drivers/hid/hid-debug.c | ||
372 | @@ -1154,6 +1154,8 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, | ||
373 | goto out; | ||
374 | if (list->tail > list->head) { | ||
375 | len = list->tail - list->head; | ||
376 | + if (len > count) | ||
377 | + len = count; | ||
378 | |||
379 | if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) { | ||
380 | ret = -EFAULT; | ||
381 | @@ -1163,6 +1165,8 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, | ||
382 | list->head += len; | ||
383 | } else { | ||
384 | len = HID_DEBUG_BUFSIZE - list->head; | ||
385 | + if (len > count) | ||
386 | + len = count; | ||
387 | |||
388 | if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) { | ||
389 | ret = -EFAULT; | ||
390 | @@ -1170,7 +1174,9 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, | ||
391 | } | ||
392 | list->head = 0; | ||
393 | ret += len; | ||
394 | - goto copy_rest; | ||
395 | + count -= len; | ||
396 | + if (count > 0) | ||
397 | + goto copy_rest; | ||
398 | } | ||
399 | |||
400 | } | ||
401 | diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c | ||
402 | index 3535073a9a7d..d92827556389 100644 | ||
403 | --- a/drivers/hid/i2c-hid/i2c-hid.c | ||
404 | +++ b/drivers/hid/i2c-hid/i2c-hid.c | ||
405 | @@ -476,7 +476,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid) | ||
406 | return; | ||
407 | } | ||
408 | |||
409 | - if ((ret_size > size) || (ret_size <= 2)) { | ||
410 | + if ((ret_size > size) || (ret_size < 2)) { | ||
411 | dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", | ||
412 | __func__, size, ret_size); | ||
413 | return; | ||
414 | diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c | ||
415 | index 7d749b19c27c..cf307bdc3d53 100644 | ||
416 | --- a/drivers/hid/usbhid/hiddev.c | ||
417 | +++ b/drivers/hid/usbhid/hiddev.c | ||
418 | @@ -36,6 +36,7 @@ | ||
419 | #include <linux/hiddev.h> | ||
420 | #include <linux/compat.h> | ||
421 | #include <linux/vmalloc.h> | ||
422 | +#include <linux/nospec.h> | ||
423 | #include "usbhid.h" | ||
424 | |||
425 | #ifdef CONFIG_USB_DYNAMIC_MINORS | ||
426 | @@ -469,10 +470,14 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, | ||
427 | |||
428 | if (uref->field_index >= report->maxfield) | ||
429 | goto inval; | ||
430 | + uref->field_index = array_index_nospec(uref->field_index, | ||
431 | + report->maxfield); | ||
432 | |||
433 | field = report->field[uref->field_index]; | ||
434 | if (uref->usage_index >= field->maxusage) | ||
435 | goto inval; | ||
436 | + uref->usage_index = array_index_nospec(uref->usage_index, | ||
437 | + field->maxusage); | ||
438 | |||
439 | uref->usage_code = field->usage[uref->usage_index].hid; | ||
440 | |||
441 | @@ -499,6 +504,8 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, | ||
442 | |||
443 | if (uref->field_index >= report->maxfield) | ||
444 | goto inval; | ||
445 | + uref->field_index = array_index_nospec(uref->field_index, | ||
446 | + report->maxfield); | ||
447 | |||
448 | field = report->field[uref->field_index]; | ||
449 | |||
450 | @@ -753,6 +760,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
451 | |||
452 | if (finfo.field_index >= report->maxfield) | ||
453 | break; | ||
454 | + finfo.field_index = array_index_nospec(finfo.field_index, | ||
455 | + report->maxfield); | ||
456 | |||
457 | field = report->field[finfo.field_index]; | ||
458 | memset(&finfo, 0, sizeof(finfo)); | ||
459 | @@ -797,6 +806,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
460 | |||
461 | if (cinfo.index >= hid->maxcollection) | ||
462 | break; | ||
463 | + cinfo.index = array_index_nospec(cinfo.index, | ||
464 | + hid->maxcollection); | ||
465 | |||
466 | cinfo.type = hid->collection[cinfo.index].type; | ||
467 | cinfo.usage = hid->collection[cinfo.index].usage; | ||
468 | diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c | ||
469 | index 4287fc9f3527..f9cd81375f28 100644 | ||
470 | --- a/drivers/md/dm-table.c | ||
471 | +++ b/drivers/md/dm-table.c | ||
472 | @@ -883,9 +883,7 @@ EXPORT_SYMBOL_GPL(dm_table_set_type); | ||
473 | static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, | ||
474 | sector_t start, sector_t len, void *data) | ||
475 | { | ||
476 | - struct request_queue *q = bdev_get_queue(dev->bdev); | ||
477 | - | ||
478 | - return q && blk_queue_dax(q); | ||
479 | + return bdev_dax_supported(dev->bdev, PAGE_SIZE); | ||
480 | } | ||
481 | |||
482 | static bool dm_table_supports_dax(struct dm_table *t) | ||
483 | @@ -1813,6 +1811,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | ||
484 | } | ||
485 | blk_queue_write_cache(q, wc, fua); | ||
486 | |||
487 | + if (dm_table_supports_dax(t)) | ||
488 | + queue_flag_set_unlocked(QUEUE_FLAG_DAX, q); | ||
489 | + else | ||
490 | + queue_flag_clear_unlocked(QUEUE_FLAG_DAX, q); | ||
491 | + | ||
492 | if (dm_table_supports_dax_write_cache(t)) | ||
493 | dax_write_cache(t->md->dax_dev, true); | ||
494 | |||
495 | diff --git a/drivers/md/dm.c b/drivers/md/dm.c | ||
496 | index 1dfc855ac708..24ec6e039448 100644 | ||
497 | --- a/drivers/md/dm.c | ||
498 | +++ b/drivers/md/dm.c | ||
499 | @@ -961,8 +961,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, | ||
500 | if (len < 1) | ||
501 | goto out; | ||
502 | nr_pages = min(len, nr_pages); | ||
503 | - if (ti->type->direct_access) | ||
504 | - ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); | ||
505 | + ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); | ||
506 | |||
507 | out: | ||
508 | dm_put_live_table(md, srcu_idx); | ||
509 | @@ -2050,9 +2049,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) | ||
510 | */ | ||
511 | bioset_free(md->queue->bio_split); | ||
512 | md->queue->bio_split = NULL; | ||
513 | - | ||
514 | - if (type == DM_TYPE_DAX_BIO_BASED) | ||
515 | - queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue); | ||
516 | break; | ||
517 | case DM_TYPE_NONE: | ||
518 | WARN_ON_ONCE(true); | ||
519 | diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c | ||
520 | index 39f51daa7558..c5642813eff1 100644 | ||
521 | --- a/drivers/media/i2c/cx25840/cx25840-core.c | ||
522 | +++ b/drivers/media/i2c/cx25840/cx25840-core.c | ||
523 | @@ -463,8 +463,13 @@ static void cx23885_initialize(struct i2c_client *client) | ||
524 | { | ||
525 | DEFINE_WAIT(wait); | ||
526 | struct cx25840_state *state = to_state(i2c_get_clientdata(client)); | ||
527 | + u32 clk_freq = 0; | ||
528 | struct workqueue_struct *q; | ||
529 | |||
530 | + /* cx23885 sets hostdata to clk_freq pointer */ | ||
531 | + if (v4l2_get_subdev_hostdata(&state->sd)) | ||
532 | + clk_freq = *((u32 *)v4l2_get_subdev_hostdata(&state->sd)); | ||
533 | + | ||
534 | /* | ||
535 | * Come out of digital power down | ||
536 | * The CX23888, at least, needs this, otherwise registers aside from | ||
537 | @@ -500,8 +505,13 @@ static void cx23885_initialize(struct i2c_client *client) | ||
538 | * 50.0 MHz * (0xb + 0xe8ba26/0x2000000)/4 = 5 * 28.636363 MHz | ||
539 | * 572.73 MHz before post divide | ||
540 | */ | ||
541 | - /* HVR1850 or 50MHz xtal */ | ||
542 | - cx25840_write(client, 0x2, 0x71); | ||
543 | + if (clk_freq == 25000000) { | ||
544 | + /* 888/ImpactVCBe or 25Mhz xtal */ | ||
545 | + ; /* nothing to do */ | ||
546 | + } else { | ||
547 | + /* HVR1850 or 50MHz xtal */ | ||
548 | + cx25840_write(client, 0x2, 0x71); | ||
549 | + } | ||
550 | cx25840_write4(client, 0x11c, 0x01d1744c); | ||
551 | cx25840_write4(client, 0x118, 0x00000416); | ||
552 | cx25840_write4(client, 0x404, 0x0010253e); | ||
553 | @@ -544,9 +554,15 @@ static void cx23885_initialize(struct i2c_client *client) | ||
554 | /* HVR1850 */ | ||
555 | switch (state->id) { | ||
556 | case CX23888_AV: | ||
557 | - /* 888/HVR1250 specific */ | ||
558 | - cx25840_write4(client, 0x10c, 0x13333333); | ||
559 | - cx25840_write4(client, 0x108, 0x00000515); | ||
560 | + if (clk_freq == 25000000) { | ||
561 | + /* 888/ImpactVCBe or 25MHz xtal */ | ||
562 | + cx25840_write4(client, 0x10c, 0x01b6db7b); | ||
563 | + cx25840_write4(client, 0x108, 0x00000512); | ||
564 | + } else { | ||
565 | + /* 888/HVR1250 or 50MHz xtal */ | ||
566 | + cx25840_write4(client, 0x10c, 0x13333333); | ||
567 | + cx25840_write4(client, 0x108, 0x00000515); | ||
568 | + } | ||
569 | break; | ||
570 | default: | ||
571 | cx25840_write4(client, 0x10c, 0x002be2c9); | ||
572 | @@ -576,7 +592,7 @@ static void cx23885_initialize(struct i2c_client *client) | ||
573 | * 368.64 MHz before post divide | ||
574 | * 122.88 MHz / 0xa = 12.288 MHz | ||
575 | */ | ||
576 | - /* HVR1850 or 50MHz xtal */ | ||
577 | + /* HVR1850 or 50MHz xtal or 25MHz xtal */ | ||
578 | cx25840_write4(client, 0x114, 0x017dbf48); | ||
579 | cx25840_write4(client, 0x110, 0x000a030e); | ||
580 | break; | ||
581 | diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c | ||
582 | index 6d9adcaa26ba..ffbb178c6918 100644 | ||
583 | --- a/drivers/media/v4l2-core/videobuf2-core.c | ||
584 | +++ b/drivers/media/v4l2-core/videobuf2-core.c | ||
585 | @@ -1689,6 +1689,15 @@ static void __vb2_queue_cancel(struct vb2_queue *q) | ||
586 | for (i = 0; i < q->num_buffers; ++i) { | ||
587 | struct vb2_buffer *vb = q->bufs[i]; | ||
588 | |||
589 | + if (vb->state == VB2_BUF_STATE_PREPARED || | ||
590 | + vb->state == VB2_BUF_STATE_QUEUED) { | ||
591 | + unsigned int plane; | ||
592 | + | ||
593 | + for (plane = 0; plane < vb->num_planes; ++plane) | ||
594 | + call_void_memop(vb, finish, | ||
595 | + vb->planes[plane].mem_priv); | ||
596 | + } | ||
597 | + | ||
598 | if (vb->state != VB2_BUF_STATE_DEQUEUED) { | ||
599 | vb->state = VB2_BUF_STATE_PREPARED; | ||
600 | call_void_vb_qop(vb, buf_finish, vb); | ||
601 | diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c | ||
602 | index ac76c10c042f..af3d207c9cc4 100644 | ||
603 | --- a/drivers/mtd/chips/cfi_cmdset_0002.c | ||
604 | +++ b/drivers/mtd/chips/cfi_cmdset_0002.c | ||
605 | @@ -42,7 +42,7 @@ | ||
606 | #define AMD_BOOTLOC_BUG | ||
607 | #define FORCE_WORD_WRITE 0 | ||
608 | |||
609 | -#define MAX_WORD_RETRIES 3 | ||
610 | +#define MAX_RETRIES 3 | ||
611 | |||
612 | #define SST49LF004B 0x0060 | ||
613 | #define SST49LF040B 0x0050 | ||
614 | @@ -1647,7 +1647,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, | ||
615 | map_write( map, CMD(0xF0), chip->start ); | ||
616 | /* FIXME - should have reset delay before continuing */ | ||
617 | |||
618 | - if (++retry_cnt <= MAX_WORD_RETRIES) | ||
619 | + if (++retry_cnt <= MAX_RETRIES) | ||
620 | goto retry; | ||
621 | |||
622 | ret = -EIO; | ||
623 | @@ -2106,7 +2106,7 @@ static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, | ||
624 | map_write(map, CMD(0xF0), chip->start); | ||
625 | /* FIXME - should have reset delay before continuing */ | ||
626 | |||
627 | - if (++retry_cnt <= MAX_WORD_RETRIES) | ||
628 | + if (++retry_cnt <= MAX_RETRIES) | ||
629 | goto retry; | ||
630 | |||
631 | ret = -EIO; | ||
632 | @@ -2241,6 +2241,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) | ||
633 | unsigned long int adr; | ||
634 | DECLARE_WAITQUEUE(wait, current); | ||
635 | int ret = 0; | ||
636 | + int retry_cnt = 0; | ||
637 | |||
638 | adr = cfi->addr_unlock1; | ||
639 | |||
640 | @@ -2258,6 +2259,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) | ||
641 | ENABLE_VPP(map); | ||
642 | xip_disable(map, chip, adr); | ||
643 | |||
644 | + retry: | ||
645 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
646 | cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); | ||
647 | cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
648 | @@ -2294,12 +2296,13 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) | ||
649 | chip->erase_suspended = 0; | ||
650 | } | ||
651 | |||
652 | - if (chip_ready(map, adr)) | ||
653 | + if (chip_good(map, adr, map_word_ff(map))) | ||
654 | break; | ||
655 | |||
656 | if (time_after(jiffies, timeo)) { | ||
657 | printk(KERN_WARNING "MTD %s(): software timeout\n", | ||
658 | __func__ ); | ||
659 | + ret = -EIO; | ||
660 | break; | ||
661 | } | ||
662 | |||
663 | @@ -2307,12 +2310,15 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) | ||
664 | UDELAY(map, chip, adr, 1000000/HZ); | ||
665 | } | ||
666 | /* Did we succeed? */ | ||
667 | - if (!chip_good(map, adr, map_word_ff(map))) { | ||
668 | + if (ret) { | ||
669 | /* reset on all failures. */ | ||
670 | map_write( map, CMD(0xF0), chip->start ); | ||
671 | /* FIXME - should have reset delay before continuing */ | ||
672 | |||
673 | - ret = -EIO; | ||
674 | + if (++retry_cnt <= MAX_RETRIES) { | ||
675 | + ret = 0; | ||
676 | + goto retry; | ||
677 | + } | ||
678 | } | ||
679 | |||
680 | chip->state = FL_READY; | ||
681 | @@ -2331,6 +2337,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, | ||
682 | unsigned long timeo = jiffies + HZ; | ||
683 | DECLARE_WAITQUEUE(wait, current); | ||
684 | int ret = 0; | ||
685 | + int retry_cnt = 0; | ||
686 | |||
687 | adr += chip->start; | ||
688 | |||
689 | @@ -2348,6 +2355,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, | ||
690 | ENABLE_VPP(map); | ||
691 | xip_disable(map, chip, adr); | ||
692 | |||
693 | + retry: | ||
694 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
695 | cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); | ||
696 | cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
697 | @@ -2384,7 +2392,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, | ||
698 | chip->erase_suspended = 0; | ||
699 | } | ||
700 | |||
701 | - if (chip_ready(map, adr)) { | ||
702 | + if (chip_good(map, adr, map_word_ff(map))) { | ||
703 | xip_enable(map, chip, adr); | ||
704 | break; | ||
705 | } | ||
706 | @@ -2393,6 +2401,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, | ||
707 | xip_enable(map, chip, adr); | ||
708 | printk(KERN_WARNING "MTD %s(): software timeout\n", | ||
709 | __func__ ); | ||
710 | + ret = -EIO; | ||
711 | break; | ||
712 | } | ||
713 | |||
714 | @@ -2400,12 +2409,15 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, | ||
715 | UDELAY(map, chip, adr, 1000000/HZ); | ||
716 | } | ||
717 | /* Did we succeed? */ | ||
718 | - if (!chip_good(map, adr, map_word_ff(map))) { | ||
719 | + if (ret) { | ||
720 | /* reset on all failures. */ | ||
721 | map_write( map, CMD(0xF0), chip->start ); | ||
722 | /* FIXME - should have reset delay before continuing */ | ||
723 | |||
724 | - ret = -EIO; | ||
725 | + if (++retry_cnt <= MAX_RETRIES) { | ||
726 | + ret = 0; | ||
727 | + goto retry; | ||
728 | + } | ||
729 | } | ||
730 | |||
731 | chip->state = FL_READY; | ||
732 | diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c | ||
733 | index 53e5e0337c3e..fcb575d55b89 100644 | ||
734 | --- a/drivers/mtd/nand/mxc_nand.c | ||
735 | +++ b/drivers/mtd/nand/mxc_nand.c | ||
736 | @@ -48,7 +48,7 @@ | ||
737 | #define NFC_V1_V2_CONFIG (host->regs + 0x0a) | ||
738 | #define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c) | ||
739 | #define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e) | ||
740 | -#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10) | ||
741 | +#define NFC_V21_RSLTSPARE_AREA (host->regs + 0x10) | ||
742 | #define NFC_V1_V2_WRPROT (host->regs + 0x12) | ||
743 | #define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14) | ||
744 | #define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16) | ||
745 | @@ -1119,6 +1119,9 @@ static void preset_v2(struct mtd_info *mtd) | ||
746 | writew(config1, NFC_V1_V2_CONFIG1); | ||
747 | /* preset operation */ | ||
748 | |||
749 | + /* spare area size in 16-bit half-words */ | ||
750 | + writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA); | ||
751 | + | ||
752 | /* Unlock the internal RAM Buffer */ | ||
753 | writew(0x2, NFC_V1_V2_CONFIG); | ||
754 | |||
755 | diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | ||
756 | index 5b4f05805006..519a021c0a25 100644 | ||
757 | --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | ||
758 | +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | ||
759 | @@ -2863,7 +2863,7 @@ static int dpaa_remove(struct platform_device *pdev) | ||
760 | struct device *dev; | ||
761 | int err; | ||
762 | |||
763 | - dev = pdev->dev.parent; | ||
764 | + dev = &pdev->dev; | ||
765 | net_dev = dev_get_drvdata(dev); | ||
766 | |||
767 | priv = netdev_priv(net_dev); | ||
768 | diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c | ||
769 | index 17a4cc138b00..4d49fb8f2bbc 100644 | ||
770 | --- a/drivers/scsi/sg.c | ||
771 | +++ b/drivers/scsi/sg.c | ||
772 | @@ -51,6 +51,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */ | ||
773 | #include <linux/atomic.h> | ||
774 | #include <linux/ratelimit.h> | ||
775 | #include <linux/uio.h> | ||
776 | +#include <linux/cred.h> /* for sg_check_file_access() */ | ||
777 | |||
778 | #include "scsi.h" | ||
779 | #include <scsi/scsi_dbg.h> | ||
780 | @@ -210,6 +211,33 @@ static void sg_device_destroy(struct kref *kref); | ||
781 | sdev_prefix_printk(prefix, (sdp)->device, \ | ||
782 | (sdp)->disk->disk_name, fmt, ##a) | ||
783 | |||
784 | +/* | ||
785 | + * The SCSI interfaces that use read() and write() as an asynchronous variant of | ||
786 | + * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways | ||
787 | + * to trigger read() and write() calls from various contexts with elevated | ||
788 | + * privileges. This can lead to kernel memory corruption (e.g. if these | ||
789 | + * interfaces are called through splice()) and privilege escalation inside | ||
790 | + * userspace (e.g. if a process with access to such a device passes a file | ||
791 | + * descriptor to a SUID binary as stdin/stdout/stderr). | ||
792 | + * | ||
793 | + * This function provides protection for the legacy API by restricting the | ||
794 | + * calling context. | ||
795 | + */ | ||
796 | +static int sg_check_file_access(struct file *filp, const char *caller) | ||
797 | +{ | ||
798 | + if (filp->f_cred != current_real_cred()) { | ||
799 | + pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", | ||
800 | + caller, task_tgid_vnr(current), current->comm); | ||
801 | + return -EPERM; | ||
802 | + } | ||
803 | + if (uaccess_kernel()) { | ||
804 | + pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n", | ||
805 | + caller, task_tgid_vnr(current), current->comm); | ||
806 | + return -EACCES; | ||
807 | + } | ||
808 | + return 0; | ||
809 | +} | ||
810 | + | ||
811 | static int sg_allow_access(struct file *filp, unsigned char *cmd) | ||
812 | { | ||
813 | struct sg_fd *sfp = filp->private_data; | ||
814 | @@ -394,6 +422,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) | ||
815 | struct sg_header *old_hdr = NULL; | ||
816 | int retval = 0; | ||
817 | |||
818 | + /* | ||
819 | + * This could cause a response to be stranded. Close the associated | ||
820 | + * file descriptor to free up any resources being held. | ||
821 | + */ | ||
822 | + retval = sg_check_file_access(filp, __func__); | ||
823 | + if (retval) | ||
824 | + return retval; | ||
825 | + | ||
826 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) | ||
827 | return -ENXIO; | ||
828 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, | ||
829 | @@ -581,9 +617,11 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) | ||
830 | struct sg_header old_hdr; | ||
831 | sg_io_hdr_t *hp; | ||
832 | unsigned char cmnd[SG_MAX_CDB_SIZE]; | ||
833 | + int retval; | ||
834 | |||
835 | - if (unlikely(uaccess_kernel())) | ||
836 | - return -EINVAL; | ||
837 | + retval = sg_check_file_access(filp, __func__); | ||
838 | + if (retval) | ||
839 | + return retval; | ||
840 | |||
841 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) | ||
842 | return -ENXIO; | ||
843 | diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c | ||
844 | index 802f51e46405..171960568356 100644 | ||
845 | --- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c | ||
846 | +++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c | ||
847 | @@ -642,7 +642,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev, | ||
848 | /* Make sure D/A update mode is direct update */ | ||
849 | outb(0, dev->iobase + DAQP_AUX_REG); | ||
850 | |||
851 | - for (i = 0; i > insn->n; i++) { | ||
852 | + for (i = 0; i < insn->n; i++) { | ||
853 | unsigned int val = data[i]; | ||
854 | int ret; | ||
855 | |||
856 | diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c | ||
857 | index 4ba5004a069e..fd6ce9996488 100644 | ||
858 | --- a/drivers/target/target_core_pr.c | ||
859 | +++ b/drivers/target/target_core_pr.c | ||
860 | @@ -3729,11 +3729,16 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd) | ||
861 | * Check for overflow of 8byte PRI READ_KEYS payload and | ||
862 | * next reservation key list descriptor. | ||
863 | */ | ||
864 | - if ((add_len + 8) > (cmd->data_length - 8)) | ||
865 | - break; | ||
866 | - | ||
867 | - put_unaligned_be64(pr_reg->pr_res_key, &buf[off]); | ||
868 | - off += 8; | ||
869 | + if (off + 8 <= cmd->data_length) { | ||
870 | + put_unaligned_be64(pr_reg->pr_res_key, &buf[off]); | ||
871 | + off += 8; | ||
872 | + } | ||
873 | + /* | ||
874 | + * SPC5r17: 6.16.2 READ KEYS service action | ||
875 | + * The ADDITIONAL LENGTH field indicates the number of bytes in | ||
876 | + * the Reservation key list. The contents of the ADDITIONAL | ||
877 | + * LENGTH field are not altered based on the allocation length | ||
878 | + */ | ||
879 | add_len += 8; | ||
880 | } | ||
881 | spin_unlock(&dev->t10_pr.registration_lock); | ||
882 | diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c | ||
883 | index fb4e6a7ee521..d639378e36ac 100644 | ||
884 | --- a/drivers/vfio/vfio_iommu_type1.c | ||
885 | +++ b/drivers/vfio/vfio_iommu_type1.c | ||
886 | @@ -339,18 +339,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, | ||
887 | struct page *page[1]; | ||
888 | struct vm_area_struct *vma; | ||
889 | struct vm_area_struct *vmas[1]; | ||
890 | + unsigned int flags = 0; | ||
891 | int ret; | ||
892 | |||
893 | + if (prot & IOMMU_WRITE) | ||
894 | + flags |= FOLL_WRITE; | ||
895 | + | ||
896 | + down_read(&mm->mmap_sem); | ||
897 | if (mm == current->mm) { | ||
898 | - ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE), | ||
899 | - page, vmas); | ||
900 | + ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas); | ||
901 | } else { | ||
902 | - unsigned int flags = 0; | ||
903 | - | ||
904 | - if (prot & IOMMU_WRITE) | ||
905 | - flags |= FOLL_WRITE; | ||
906 | - | ||
907 | - down_read(&mm->mmap_sem); | ||
908 | ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page, | ||
909 | vmas, NULL); | ||
910 | /* | ||
911 | @@ -364,8 +362,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, | ||
912 | ret = -EOPNOTSUPP; | ||
913 | put_page(page[0]); | ||
914 | } | ||
915 | - up_read(&mm->mmap_sem); | ||
916 | } | ||
917 | + up_read(&mm->mmap_sem); | ||
918 | |||
919 | if (ret == 1) { | ||
920 | *pfn = page_to_pfn(page[0]); | ||
921 | diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h | ||
922 | index 33d6eb58ce34..f29cdb1cdeb7 100644 | ||
923 | --- a/fs/cifs/cifsglob.h | ||
924 | +++ b/fs/cifs/cifsglob.h | ||
925 | @@ -1340,6 +1340,7 @@ typedef int (mid_handle_t)(struct TCP_Server_Info *server, | ||
926 | /* one of these for every pending CIFS request to the server */ | ||
927 | struct mid_q_entry { | ||
928 | struct list_head qhead; /* mids waiting on reply from this server */ | ||
929 | + struct kref refcount; | ||
930 | struct TCP_Server_Info *server; /* server corresponding to this mid */ | ||
931 | __u64 mid; /* multiplex id */ | ||
932 | __u32 pid; /* process id */ | ||
933 | diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h | ||
934 | index 762d513a5087..ccdb42f71b2e 100644 | ||
935 | --- a/fs/cifs/cifsproto.h | ||
936 | +++ b/fs/cifs/cifsproto.h | ||
937 | @@ -76,6 +76,7 @@ extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer, | ||
938 | struct TCP_Server_Info *server); | ||
939 | extern void DeleteMidQEntry(struct mid_q_entry *midEntry); | ||
940 | extern void cifs_delete_mid(struct mid_q_entry *mid); | ||
941 | +extern void cifs_mid_q_entry_release(struct mid_q_entry *midEntry); | ||
942 | extern void cifs_wake_up_task(struct mid_q_entry *mid); | ||
943 | extern int cifs_handle_standard(struct TCP_Server_Info *server, | ||
944 | struct mid_q_entry *mid); | ||
945 | diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c | ||
946 | index 7fd39ea6e22e..b5a436583469 100644 | ||
947 | --- a/fs/cifs/cifssmb.c | ||
948 | +++ b/fs/cifs/cifssmb.c | ||
949 | @@ -150,8 +150,14 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) | ||
950 | * greater than cifs socket timeout which is 7 seconds | ||
951 | */ | ||
952 | while (server->tcpStatus == CifsNeedReconnect) { | ||
953 | - wait_event_interruptible_timeout(server->response_q, | ||
954 | - (server->tcpStatus != CifsNeedReconnect), 10 * HZ); | ||
955 | + rc = wait_event_interruptible_timeout(server->response_q, | ||
956 | + (server->tcpStatus != CifsNeedReconnect), | ||
957 | + 10 * HZ); | ||
958 | + if (rc < 0) { | ||
959 | + cifs_dbg(FYI, "%s: aborting reconnect due to a received" | ||
960 | + " signal by the process\n", __func__); | ||
961 | + return -ERESTARTSYS; | ||
962 | + } | ||
963 | |||
964 | /* are we still trying to reconnect? */ | ||
965 | if (server->tcpStatus != CifsNeedReconnect) | ||
966 | diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c | ||
967 | index f7db2fedfa8c..fd24c72bd2cd 100644 | ||
968 | --- a/fs/cifs/connect.c | ||
969 | +++ b/fs/cifs/connect.c | ||
970 | @@ -889,6 +889,7 @@ cifs_demultiplex_thread(void *p) | ||
971 | continue; | ||
972 | server->total_read += length; | ||
973 | |||
974 | + mid_entry = NULL; | ||
975 | if (server->ops->is_transform_hdr && | ||
976 | server->ops->receive_transform && | ||
977 | server->ops->is_transform_hdr(buf)) { | ||
978 | @@ -903,8 +904,11 @@ cifs_demultiplex_thread(void *p) | ||
979 | length = mid_entry->receive(server, mid_entry); | ||
980 | } | ||
981 | |||
982 | - if (length < 0) | ||
983 | + if (length < 0) { | ||
984 | + if (mid_entry) | ||
985 | + cifs_mid_q_entry_release(mid_entry); | ||
986 | continue; | ||
987 | + } | ||
988 | |||
989 | if (server->large_buf) | ||
990 | buf = server->bigbuf; | ||
991 | @@ -920,6 +924,8 @@ cifs_demultiplex_thread(void *p) | ||
992 | |||
993 | if (!mid_entry->multiRsp || mid_entry->multiEnd) | ||
994 | mid_entry->callback(mid_entry); | ||
995 | + | ||
996 | + cifs_mid_q_entry_release(mid_entry); | ||
997 | } else if (server->ops->is_oplock_break && | ||
998 | server->ops->is_oplock_break(buf, server)) { | ||
999 | cifs_dbg(FYI, "Received oplock break\n"); | ||
1000 | diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c | ||
1001 | index a723df3e0197..d8cd82001c1c 100644 | ||
1002 | --- a/fs/cifs/smb1ops.c | ||
1003 | +++ b/fs/cifs/smb1ops.c | ||
1004 | @@ -105,6 +105,7 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer) | ||
1005 | if (compare_mid(mid->mid, buf) && | ||
1006 | mid->mid_state == MID_REQUEST_SUBMITTED && | ||
1007 | le16_to_cpu(mid->command) == buf->Command) { | ||
1008 | + kref_get(&mid->refcount); | ||
1009 | spin_unlock(&GlobalMid_Lock); | ||
1010 | return mid; | ||
1011 | } | ||
1012 | diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c | ||
1013 | index 36bc9a7eb8ea..83267ac3a3f0 100644 | ||
1014 | --- a/fs/cifs/smb2ops.c | ||
1015 | +++ b/fs/cifs/smb2ops.c | ||
1016 | @@ -202,6 +202,7 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf) | ||
1017 | if ((mid->mid == wire_mid) && | ||
1018 | (mid->mid_state == MID_REQUEST_SUBMITTED) && | ||
1019 | (mid->command == shdr->Command)) { | ||
1020 | + kref_get(&mid->refcount); | ||
1021 | spin_unlock(&GlobalMid_Lock); | ||
1022 | return mid; | ||
1023 | } | ||
1024 | @@ -635,6 +636,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, | ||
1025 | |||
1026 | rc = SMB2_set_ea(xid, tcon, fid.persistent_fid, fid.volatile_fid, ea, | ||
1027 | len); | ||
1028 | + kfree(ea); | ||
1029 | + | ||
1030 | SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); | ||
1031 | |||
1032 | return rc; | ||
1033 | diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c | ||
1034 | index 5247b40e57f6..0480cd9a9e81 100644 | ||
1035 | --- a/fs/cifs/smb2pdu.c | ||
1036 | +++ b/fs/cifs/smb2pdu.c | ||
1037 | @@ -153,7 +153,7 @@ smb2_hdr_assemble(struct smb2_sync_hdr *shdr, __le16 smb2_cmd, | ||
1038 | static int | ||
1039 | smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) | ||
1040 | { | ||
1041 | - int rc = 0; | ||
1042 | + int rc; | ||
1043 | struct nls_table *nls_codepage; | ||
1044 | struct cifs_ses *ses; | ||
1045 | struct TCP_Server_Info *server; | ||
1046 | @@ -164,10 +164,10 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) | ||
1047 | * for those three - in the calling routine. | ||
1048 | */ | ||
1049 | if (tcon == NULL) | ||
1050 | - return rc; | ||
1051 | + return 0; | ||
1052 | |||
1053 | if (smb2_command == SMB2_TREE_CONNECT) | ||
1054 | - return rc; | ||
1055 | + return 0; | ||
1056 | |||
1057 | if (tcon->tidStatus == CifsExiting) { | ||
1058 | /* | ||
1059 | @@ -210,8 +210,14 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) | ||
1060 | return -EAGAIN; | ||
1061 | } | ||
1062 | |||
1063 | - wait_event_interruptible_timeout(server->response_q, | ||
1064 | - (server->tcpStatus != CifsNeedReconnect), 10 * HZ); | ||
1065 | + rc = wait_event_interruptible_timeout(server->response_q, | ||
1066 | + (server->tcpStatus != CifsNeedReconnect), | ||
1067 | + 10 * HZ); | ||
1068 | + if (rc < 0) { | ||
1069 | + cifs_dbg(FYI, "%s: aborting reconnect due to a received" | ||
1070 | + " signal by the process\n", __func__); | ||
1071 | + return -ERESTARTSYS; | ||
1072 | + } | ||
1073 | |||
1074 | /* are we still trying to reconnect? */ | ||
1075 | if (server->tcpStatus != CifsNeedReconnect) | ||
1076 | @@ -229,7 +235,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) | ||
1077 | } | ||
1078 | |||
1079 | if (!tcon->ses->need_reconnect && !tcon->need_reconnect) | ||
1080 | - return rc; | ||
1081 | + return 0; | ||
1082 | |||
1083 | nls_codepage = load_nls_default(); | ||
1084 | |||
1085 | @@ -332,7 +338,10 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, | ||
1086 | return rc; | ||
1087 | |||
1088 | /* BB eventually switch this to SMB2 specific small buf size */ | ||
1089 | - *request_buf = cifs_small_buf_get(); | ||
1090 | + if (smb2_command == SMB2_SET_INFO) | ||
1091 | + *request_buf = cifs_buf_get(); | ||
1092 | + else | ||
1093 | + *request_buf = cifs_small_buf_get(); | ||
1094 | if (*request_buf == NULL) { | ||
1095 | /* BB should we add a retry in here if not a writepage? */ | ||
1096 | return -ENOMEM; | ||
1097 | @@ -3162,7 +3171,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon, | ||
1098 | } | ||
1099 | |||
1100 | rc = SendReceive2(xid, ses, iov, num, &resp_buftype, flags, &rsp_iov); | ||
1101 | - cifs_small_buf_release(req); | ||
1102 | + cifs_buf_release(req); | ||
1103 | rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base; | ||
1104 | |||
1105 | if (rc != 0) | ||
1106 | diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c | ||
1107 | index bf49cb73b9e6..a41fc4a63a59 100644 | ||
1108 | --- a/fs/cifs/smb2transport.c | ||
1109 | +++ b/fs/cifs/smb2transport.c | ||
1110 | @@ -548,6 +548,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr, | ||
1111 | |||
1112 | temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); | ||
1113 | memset(temp, 0, sizeof(struct mid_q_entry)); | ||
1114 | + kref_init(&temp->refcount); | ||
1115 | temp->mid = le64_to_cpu(shdr->MessageId); | ||
1116 | temp->pid = current->pid; | ||
1117 | temp->command = shdr->Command; /* Always LE */ | ||
1118 | diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c | ||
1119 | index 7efbab013957..a10f51dfa7f5 100644 | ||
1120 | --- a/fs/cifs/transport.c | ||
1121 | +++ b/fs/cifs/transport.c | ||
1122 | @@ -56,6 +56,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) | ||
1123 | |||
1124 | temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); | ||
1125 | memset(temp, 0, sizeof(struct mid_q_entry)); | ||
1126 | + kref_init(&temp->refcount); | ||
1127 | temp->mid = get_mid(smb_buffer); | ||
1128 | temp->pid = current->pid; | ||
1129 | temp->command = cpu_to_le16(smb_buffer->Command); | ||
1130 | @@ -77,6 +78,21 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) | ||
1131 | return temp; | ||
1132 | } | ||
1133 | |||
1134 | +static void _cifs_mid_q_entry_release(struct kref *refcount) | ||
1135 | +{ | ||
1136 | + struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry, | ||
1137 | + refcount); | ||
1138 | + | ||
1139 | + mempool_free(mid, cifs_mid_poolp); | ||
1140 | +} | ||
1141 | + | ||
1142 | +void cifs_mid_q_entry_release(struct mid_q_entry *midEntry) | ||
1143 | +{ | ||
1144 | + spin_lock(&GlobalMid_Lock); | ||
1145 | + kref_put(&midEntry->refcount, _cifs_mid_q_entry_release); | ||
1146 | + spin_unlock(&GlobalMid_Lock); | ||
1147 | +} | ||
1148 | + | ||
1149 | void | ||
1150 | DeleteMidQEntry(struct mid_q_entry *midEntry) | ||
1151 | { | ||
1152 | @@ -105,7 +121,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry) | ||
1153 | } | ||
1154 | } | ||
1155 | #endif | ||
1156 | - mempool_free(midEntry, cifs_mid_poolp); | ||
1157 | + cifs_mid_q_entry_release(midEntry); | ||
1158 | } | ||
1159 | |||
1160 | void | ||
1161 | diff --git a/fs/ext2/super.c b/fs/ext2/super.c | ||
1162 | index 1458706bd2ec..726e680a3368 100644 | ||
1163 | --- a/fs/ext2/super.c | ||
1164 | +++ b/fs/ext2/super.c | ||
1165 | @@ -953,8 +953,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) | ||
1166 | blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); | ||
1167 | |||
1168 | if (sbi->s_mount_opt & EXT2_MOUNT_DAX) { | ||
1169 | - err = bdev_dax_supported(sb, blocksize); | ||
1170 | - if (err) | ||
1171 | + if (!bdev_dax_supported(sb->s_bdev, blocksize)) | ||
1172 | goto failed_mount; | ||
1173 | } | ||
1174 | |||
1175 | diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c | ||
1176 | index 58db8109defa..9c9eafd6bd76 100644 | ||
1177 | --- a/fs/ext4/balloc.c | ||
1178 | +++ b/fs/ext4/balloc.c | ||
1179 | @@ -184,7 +184,6 @@ static int ext4_init_block_bitmap(struct super_block *sb, | ||
1180 | unsigned int bit, bit_max; | ||
1181 | struct ext4_sb_info *sbi = EXT4_SB(sb); | ||
1182 | ext4_fsblk_t start, tmp; | ||
1183 | - int flex_bg = 0; | ||
1184 | struct ext4_group_info *grp; | ||
1185 | |||
1186 | J_ASSERT_BH(bh, buffer_locked(bh)); | ||
1187 | @@ -217,22 +216,19 @@ static int ext4_init_block_bitmap(struct super_block *sb, | ||
1188 | |||
1189 | start = ext4_group_first_block_no(sb, block_group); | ||
1190 | |||
1191 | - if (ext4_has_feature_flex_bg(sb)) | ||
1192 | - flex_bg = 1; | ||
1193 | - | ||
1194 | /* Set bits for block and inode bitmaps, and inode table */ | ||
1195 | tmp = ext4_block_bitmap(sb, gdp); | ||
1196 | - if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) | ||
1197 | + if (ext4_block_in_group(sb, tmp, block_group)) | ||
1198 | ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); | ||
1199 | |||
1200 | tmp = ext4_inode_bitmap(sb, gdp); | ||
1201 | - if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) | ||
1202 | + if (ext4_block_in_group(sb, tmp, block_group)) | ||
1203 | ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); | ||
1204 | |||
1205 | tmp = ext4_inode_table(sb, gdp); | ||
1206 | for (; tmp < ext4_inode_table(sb, gdp) + | ||
1207 | sbi->s_itb_per_group; tmp++) { | ||
1208 | - if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) | ||
1209 | + if (ext4_block_in_group(sb, tmp, block_group)) | ||
1210 | ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); | ||
1211 | } | ||
1212 | |||
1213 | @@ -455,7 +451,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) | ||
1214 | goto verify; | ||
1215 | } | ||
1216 | ext4_lock_group(sb, block_group); | ||
1217 | - if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { | ||
1218 | + if (ext4_has_group_desc_csum(sb) && | ||
1219 | + (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { | ||
1220 | + if (block_group == 0) { | ||
1221 | + ext4_unlock_group(sb, block_group); | ||
1222 | + unlock_buffer(bh); | ||
1223 | + ext4_error(sb, "Block bitmap for bg 0 marked " | ||
1224 | + "uninitialized"); | ||
1225 | + err = -EFSCORRUPTED; | ||
1226 | + goto out; | ||
1227 | + } | ||
1228 | err = ext4_init_block_bitmap(sb, bh, block_group, desc); | ||
1229 | set_bitmap_uptodate(bh); | ||
1230 | set_buffer_uptodate(bh); | ||
1231 | diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h | ||
1232 | index 58a0304566db..0abb30d19fa1 100644 | ||
1233 | --- a/fs/ext4/ext4.h | ||
1234 | +++ b/fs/ext4/ext4.h | ||
1235 | @@ -1542,11 +1542,6 @@ static inline struct ext4_inode_info *EXT4_I(struct inode *inode) | ||
1236 | static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) | ||
1237 | { | ||
1238 | return ino == EXT4_ROOT_INO || | ||
1239 | - ino == EXT4_USR_QUOTA_INO || | ||
1240 | - ino == EXT4_GRP_QUOTA_INO || | ||
1241 | - ino == EXT4_BOOT_LOADER_INO || | ||
1242 | - ino == EXT4_JOURNAL_INO || | ||
1243 | - ino == EXT4_RESIZE_INO || | ||
1244 | (ino >= EXT4_FIRST_INO(sb) && | ||
1245 | ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)); | ||
1246 | } | ||
1247 | @@ -3049,9 +3044,6 @@ extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode, | ||
1248 | extern int ext4_inline_data_fiemap(struct inode *inode, | ||
1249 | struct fiemap_extent_info *fieinfo, | ||
1250 | int *has_inline, __u64 start, __u64 len); | ||
1251 | -extern int ext4_try_to_evict_inline_data(handle_t *handle, | ||
1252 | - struct inode *inode, | ||
1253 | - int needed); | ||
1254 | extern int ext4_inline_data_truncate(struct inode *inode, int *has_inline); | ||
1255 | |||
1256 | extern int ext4_convert_inline_data(struct inode *inode); | ||
1257 | diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h | ||
1258 | index 8ecf84b8f5a1..a284fb28944b 100644 | ||
1259 | --- a/fs/ext4/ext4_extents.h | ||
1260 | +++ b/fs/ext4/ext4_extents.h | ||
1261 | @@ -103,6 +103,7 @@ struct ext4_extent_header { | ||
1262 | }; | ||
1263 | |||
1264 | #define EXT4_EXT_MAGIC cpu_to_le16(0xf30a) | ||
1265 | +#define EXT4_MAX_EXTENT_DEPTH 5 | ||
1266 | |||
1267 | #define EXT4_EXTENT_TAIL_OFFSET(hdr) \ | ||
1268 | (sizeof(struct ext4_extent_header) + \ | ||
1269 | diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c | ||
1270 | index 883e89a903d1..5592b7726241 100644 | ||
1271 | --- a/fs/ext4/extents.c | ||
1272 | +++ b/fs/ext4/extents.c | ||
1273 | @@ -881,6 +881,12 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block, | ||
1274 | |||
1275 | eh = ext_inode_hdr(inode); | ||
1276 | depth = ext_depth(inode); | ||
1277 | + if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) { | ||
1278 | + EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d", | ||
1279 | + depth); | ||
1280 | + ret = -EFSCORRUPTED; | ||
1281 | + goto err; | ||
1282 | + } | ||
1283 | |||
1284 | if (path) { | ||
1285 | ext4_ext_drop_refs(path); | ||
1286 | diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c | ||
1287 | index f420124ac035..95341bc2b3b7 100644 | ||
1288 | --- a/fs/ext4/ialloc.c | ||
1289 | +++ b/fs/ext4/ialloc.c | ||
1290 | @@ -155,7 +155,16 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) | ||
1291 | } | ||
1292 | |||
1293 | ext4_lock_group(sb, block_group); | ||
1294 | - if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { | ||
1295 | + if (ext4_has_group_desc_csum(sb) && | ||
1296 | + (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) { | ||
1297 | + if (block_group == 0) { | ||
1298 | + ext4_unlock_group(sb, block_group); | ||
1299 | + unlock_buffer(bh); | ||
1300 | + ext4_error(sb, "Inode bitmap for bg 0 marked " | ||
1301 | + "uninitialized"); | ||
1302 | + err = -EFSCORRUPTED; | ||
1303 | + goto out; | ||
1304 | + } | ||
1305 | memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8); | ||
1306 | ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), | ||
1307 | sb->s_blocksize * 8, bh->b_data); | ||
1308 | @@ -1000,7 +1009,8 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir, | ||
1309 | |||
1310 | /* recheck and clear flag under lock if we still need to */ | ||
1311 | ext4_lock_group(sb, group); | ||
1312 | - if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { | ||
1313 | + if (ext4_has_group_desc_csum(sb) && | ||
1314 | + (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { | ||
1315 | gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); | ||
1316 | ext4_free_group_clusters_set(sb, gdp, | ||
1317 | ext4_free_clusters_after_init(sb, group, gdp)); | ||
1318 | diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c | ||
1319 | index 8f5dc243effd..7d498f4a3f90 100644 | ||
1320 | --- a/fs/ext4/inline.c | ||
1321 | +++ b/fs/ext4/inline.c | ||
1322 | @@ -443,6 +443,7 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle, | ||
1323 | |||
1324 | memset((void *)ext4_raw_inode(&is.iloc)->i_block, | ||
1325 | 0, EXT4_MIN_INLINE_DATA_SIZE); | ||
1326 | + memset(ei->i_data, 0, EXT4_MIN_INLINE_DATA_SIZE); | ||
1327 | |||
1328 | if (ext4_has_feature_extents(inode->i_sb)) { | ||
1329 | if (S_ISDIR(inode->i_mode) || | ||
1330 | @@ -892,11 +893,11 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping, | ||
1331 | flags |= AOP_FLAG_NOFS; | ||
1332 | |||
1333 | if (ret == -ENOSPC) { | ||
1334 | + ext4_journal_stop(handle); | ||
1335 | ret = ext4_da_convert_inline_data_to_extent(mapping, | ||
1336 | inode, | ||
1337 | flags, | ||
1338 | fsdata); | ||
1339 | - ext4_journal_stop(handle); | ||
1340 | if (ret == -ENOSPC && | ||
1341 | ext4_should_retry_alloc(inode->i_sb, &retries)) | ||
1342 | goto retry_journal; | ||
1343 | @@ -1864,42 +1865,6 @@ int ext4_inline_data_fiemap(struct inode *inode, | ||
1344 | return (error < 0 ? error : 0); | ||
1345 | } | ||
1346 | |||
1347 | -/* | ||
1348 | - * Called during xattr set, and if we can sparse space 'needed', | ||
1349 | - * just create the extent tree evict the data to the outer block. | ||
1350 | - * | ||
1351 | - * We use jbd2 instead of page cache to move data to the 1st block | ||
1352 | - * so that the whole transaction can be committed as a whole and | ||
1353 | - * the data isn't lost because of the delayed page cache write. | ||
1354 | - */ | ||
1355 | -int ext4_try_to_evict_inline_data(handle_t *handle, | ||
1356 | - struct inode *inode, | ||
1357 | - int needed) | ||
1358 | -{ | ||
1359 | - int error; | ||
1360 | - struct ext4_xattr_entry *entry; | ||
1361 | - struct ext4_inode *raw_inode; | ||
1362 | - struct ext4_iloc iloc; | ||
1363 | - | ||
1364 | - error = ext4_get_inode_loc(inode, &iloc); | ||
1365 | - if (error) | ||
1366 | - return error; | ||
1367 | - | ||
1368 | - raw_inode = ext4_raw_inode(&iloc); | ||
1369 | - entry = (struct ext4_xattr_entry *)((void *)raw_inode + | ||
1370 | - EXT4_I(inode)->i_inline_off); | ||
1371 | - if (EXT4_XATTR_LEN(entry->e_name_len) + | ||
1372 | - EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)) < needed) { | ||
1373 | - error = -ENOSPC; | ||
1374 | - goto out; | ||
1375 | - } | ||
1376 | - | ||
1377 | - error = ext4_convert_inline_data_nolock(handle, inode, &iloc); | ||
1378 | -out: | ||
1379 | - brelse(iloc.bh); | ||
1380 | - return error; | ||
1381 | -} | ||
1382 | - | ||
1383 | int ext4_inline_data_truncate(struct inode *inode, int *has_inline) | ||
1384 | { | ||
1385 | handle_t *handle; | ||
1386 | diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c | ||
1387 | index bd6453e78992..c2efe4d2ad87 100644 | ||
1388 | --- a/fs/ext4/inode.c | ||
1389 | +++ b/fs/ext4/inode.c | ||
1390 | @@ -401,9 +401,9 @@ static int __check_block_validity(struct inode *inode, const char *func, | ||
1391 | if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, | ||
1392 | map->m_len)) { | ||
1393 | ext4_error_inode(inode, func, line, map->m_pblk, | ||
1394 | - "lblock %lu mapped to illegal pblock " | ||
1395 | + "lblock %lu mapped to illegal pblock %llu " | ||
1396 | "(length %d)", (unsigned long) map->m_lblk, | ||
1397 | - map->m_len); | ||
1398 | + map->m_pblk, map->m_len); | ||
1399 | return -EFSCORRUPTED; | ||
1400 | } | ||
1401 | return 0; | ||
1402 | @@ -4455,7 +4455,8 @@ static int __ext4_get_inode_loc(struct inode *inode, | ||
1403 | int inodes_per_block, inode_offset; | ||
1404 | |||
1405 | iloc->bh = NULL; | ||
1406 | - if (!ext4_valid_inum(sb, inode->i_ino)) | ||
1407 | + if (inode->i_ino < EXT4_ROOT_INO || | ||
1408 | + inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)) | ||
1409 | return -EFSCORRUPTED; | ||
1410 | |||
1411 | iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); | ||
1412 | diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c | ||
1413 | index 701085620cd8..048c586d9a8b 100644 | ||
1414 | --- a/fs/ext4/mballoc.c | ||
1415 | +++ b/fs/ext4/mballoc.c | ||
1416 | @@ -2456,7 +2456,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, | ||
1417 | * initialize bb_free to be able to skip | ||
1418 | * empty groups without initialization | ||
1419 | */ | ||
1420 | - if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { | ||
1421 | + if (ext4_has_group_desc_csum(sb) && | ||
1422 | + (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { | ||
1423 | meta_group_info[i]->bb_free = | ||
1424 | ext4_free_clusters_after_init(sb, group, desc); | ||
1425 | } else { | ||
1426 | @@ -3023,7 +3024,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, | ||
1427 | #endif | ||
1428 | ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, | ||
1429 | ac->ac_b_ex.fe_len); | ||
1430 | - if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { | ||
1431 | + if (ext4_has_group_desc_csum(sb) && | ||
1432 | + (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { | ||
1433 | gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); | ||
1434 | ext4_free_group_clusters_set(sb, gdp, | ||
1435 | ext4_free_clusters_after_init(sb, | ||
1436 | diff --git a/fs/ext4/super.c b/fs/ext4/super.c | ||
1437 | index ec74d06fa24a..fc32a67a7a19 100644 | ||
1438 | --- a/fs/ext4/super.c | ||
1439 | +++ b/fs/ext4/super.c | ||
1440 | @@ -2301,6 +2301,7 @@ static int ext4_check_descriptors(struct super_block *sb, | ||
1441 | struct ext4_sb_info *sbi = EXT4_SB(sb); | ||
1442 | ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); | ||
1443 | ext4_fsblk_t last_block; | ||
1444 | + ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1; | ||
1445 | ext4_fsblk_t block_bitmap; | ||
1446 | ext4_fsblk_t inode_bitmap; | ||
1447 | ext4_fsblk_t inode_table; | ||
1448 | @@ -2333,6 +2334,14 @@ static int ext4_check_descriptors(struct super_block *sb, | ||
1449 | if (!sb_rdonly(sb)) | ||
1450 | return 0; | ||
1451 | } | ||
1452 | + if (block_bitmap >= sb_block + 1 && | ||
1453 | + block_bitmap <= last_bg_block) { | ||
1454 | + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " | ||
1455 | + "Block bitmap for group %u overlaps " | ||
1456 | + "block group descriptors", i); | ||
1457 | + if (!sb_rdonly(sb)) | ||
1458 | + return 0; | ||
1459 | + } | ||
1460 | if (block_bitmap < first_block || block_bitmap > last_block) { | ||
1461 | ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " | ||
1462 | "Block bitmap for group %u not in group " | ||
1463 | @@ -2347,6 +2356,14 @@ static int ext4_check_descriptors(struct super_block *sb, | ||
1464 | if (!sb_rdonly(sb)) | ||
1465 | return 0; | ||
1466 | } | ||
1467 | + if (inode_bitmap >= sb_block + 1 && | ||
1468 | + inode_bitmap <= last_bg_block) { | ||
1469 | + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " | ||
1470 | + "Inode bitmap for group %u overlaps " | ||
1471 | + "block group descriptors", i); | ||
1472 | + if (!sb_rdonly(sb)) | ||
1473 | + return 0; | ||
1474 | + } | ||
1475 | if (inode_bitmap < first_block || inode_bitmap > last_block) { | ||
1476 | ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " | ||
1477 | "Inode bitmap for group %u not in group " | ||
1478 | @@ -2361,6 +2378,14 @@ static int ext4_check_descriptors(struct super_block *sb, | ||
1479 | if (!sb_rdonly(sb)) | ||
1480 | return 0; | ||
1481 | } | ||
1482 | + if (inode_table >= sb_block + 1 && | ||
1483 | + inode_table <= last_bg_block) { | ||
1484 | + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " | ||
1485 | + "Inode table for group %u overlaps " | ||
1486 | + "block group descriptors", i); | ||
1487 | + if (!sb_rdonly(sb)) | ||
1488 | + return 0; | ||
1489 | + } | ||
1490 | if (inode_table < first_block || | ||
1491 | inode_table + sbi->s_itb_per_group - 1 > last_block) { | ||
1492 | ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " | ||
1493 | @@ -3070,13 +3095,22 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb) | ||
1494 | ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count; | ||
1495 | struct ext4_group_desc *gdp = NULL; | ||
1496 | |||
1497 | + if (!ext4_has_group_desc_csum(sb)) | ||
1498 | + return ngroups; | ||
1499 | + | ||
1500 | for (group = 0; group < ngroups; group++) { | ||
1501 | gdp = ext4_get_group_desc(sb, group, NULL); | ||
1502 | if (!gdp) | ||
1503 | continue; | ||
1504 | |||
1505 | - if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) | ||
1506 | + if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)) | ||
1507 | + continue; | ||
1508 | + if (group != 0) | ||
1509 | break; | ||
1510 | + ext4_error(sb, "Inode table for bg 0 marked as " | ||
1511 | + "needing zeroing"); | ||
1512 | + if (sb_rdonly(sb)) | ||
1513 | + return ngroups; | ||
1514 | } | ||
1515 | |||
1516 | return group; | ||
1517 | @@ -3715,6 +3749,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | ||
1518 | le32_to_cpu(es->s_log_block_size)); | ||
1519 | goto failed_mount; | ||
1520 | } | ||
1521 | + if (le32_to_cpu(es->s_log_cluster_size) > | ||
1522 | + (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { | ||
1523 | + ext4_msg(sb, KERN_ERR, | ||
1524 | + "Invalid log cluster size: %u", | ||
1525 | + le32_to_cpu(es->s_log_cluster_size)); | ||
1526 | + goto failed_mount; | ||
1527 | + } | ||
1528 | |||
1529 | if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) { | ||
1530 | ext4_msg(sb, KERN_ERR, | ||
1531 | @@ -3729,8 +3770,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | ||
1532 | " that may contain inline data"); | ||
1533 | goto failed_mount; | ||
1534 | } | ||
1535 | - err = bdev_dax_supported(sb, blocksize); | ||
1536 | - if (err) | ||
1537 | + if (!bdev_dax_supported(sb->s_bdev, blocksize)) | ||
1538 | goto failed_mount; | ||
1539 | } | ||
1540 | |||
1541 | @@ -3777,6 +3817,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | ||
1542 | } else { | ||
1543 | sbi->s_inode_size = le16_to_cpu(es->s_inode_size); | ||
1544 | sbi->s_first_ino = le32_to_cpu(es->s_first_ino); | ||
1545 | + if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) { | ||
1546 | + ext4_msg(sb, KERN_ERR, "invalid first ino: %u", | ||
1547 | + sbi->s_first_ino); | ||
1548 | + goto failed_mount; | ||
1549 | + } | ||
1550 | if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || | ||
1551 | (!is_power_of_2(sbi->s_inode_size)) || | ||
1552 | (sbi->s_inode_size > blocksize)) { | ||
1553 | @@ -3853,13 +3898,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | ||
1554 | "block size (%d)", clustersize, blocksize); | ||
1555 | goto failed_mount; | ||
1556 | } | ||
1557 | - if (le32_to_cpu(es->s_log_cluster_size) > | ||
1558 | - (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { | ||
1559 | - ext4_msg(sb, KERN_ERR, | ||
1560 | - "Invalid log cluster size: %u", | ||
1561 | - le32_to_cpu(es->s_log_cluster_size)); | ||
1562 | - goto failed_mount; | ||
1563 | - } | ||
1564 | sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - | ||
1565 | le32_to_cpu(es->s_log_block_size); | ||
1566 | sbi->s_clusters_per_group = | ||
1567 | @@ -3880,10 +3918,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | ||
1568 | } | ||
1569 | } else { | ||
1570 | if (clustersize != blocksize) { | ||
1571 | - ext4_warning(sb, "fragment/cluster size (%d) != " | ||
1572 | - "block size (%d)", clustersize, | ||
1573 | - blocksize); | ||
1574 | - clustersize = blocksize; | ||
1575 | + ext4_msg(sb, KERN_ERR, | ||
1576 | + "fragment/cluster size (%d) != " | ||
1577 | + "block size (%d)", clustersize, blocksize); | ||
1578 | + goto failed_mount; | ||
1579 | } | ||
1580 | if (sbi->s_blocks_per_group > blocksize * 8) { | ||
1581 | ext4_msg(sb, KERN_ERR, | ||
1582 | @@ -3937,6 +3975,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | ||
1583 | ext4_blocks_count(es)); | ||
1584 | goto failed_mount; | ||
1585 | } | ||
1586 | + if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) && | ||
1587 | + (sbi->s_cluster_ratio == 1)) { | ||
1588 | + ext4_msg(sb, KERN_WARNING, "bad geometry: first data " | ||
1589 | + "block is 0 with a 1k block and cluster size"); | ||
1590 | + goto failed_mount; | ||
1591 | + } | ||
1592 | + | ||
1593 | blocks_count = (ext4_blocks_count(es) - | ||
1594 | le32_to_cpu(es->s_first_data_block) + | ||
1595 | EXT4_BLOCKS_PER_GROUP(sb) - 1); | ||
1596 | @@ -3972,6 +4017,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | ||
1597 | ret = -ENOMEM; | ||
1598 | goto failed_mount; | ||
1599 | } | ||
1600 | + if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) != | ||
1601 | + le32_to_cpu(es->s_inodes_count)) { | ||
1602 | + ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu", | ||
1603 | + le32_to_cpu(es->s_inodes_count), | ||
1604 | + ((u64)sbi->s_groups_count * sbi->s_inodes_per_group)); | ||
1605 | + ret = -EINVAL; | ||
1606 | + goto failed_mount; | ||
1607 | + } | ||
1608 | |||
1609 | bgl_lock_init(sbi->s_blockgroup_lock); | ||
1610 | |||
1611 | @@ -4700,6 +4753,14 @@ static int ext4_commit_super(struct super_block *sb, int sync) | ||
1612 | |||
1613 | if (!sbh || block_device_ejected(sb)) | ||
1614 | return error; | ||
1615 | + | ||
1616 | + /* | ||
1617 | + * The superblock bh should be mapped, but it might not be if the | ||
1618 | + * device was hot-removed. Not much we can do but fail the I/O. | ||
1619 | + */ | ||
1620 | + if (!buffer_mapped(sbh)) | ||
1621 | + return error; | ||
1622 | + | ||
1623 | /* | ||
1624 | * If the file system is mounted read-only, don't update the | ||
1625 | * superblock write time. This avoids updating the superblock | ||
1626 | diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c | ||
1627 | index ed1cf24a7831..c7c8c16ccd93 100644 | ||
1628 | --- a/fs/ext4/xattr.c | ||
1629 | +++ b/fs/ext4/xattr.c | ||
1630 | @@ -229,12 +229,12 @@ __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh, | ||
1631 | { | ||
1632 | int error = -EFSCORRUPTED; | ||
1633 | |||
1634 | - if (buffer_verified(bh)) | ||
1635 | - return 0; | ||
1636 | - | ||
1637 | if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || | ||
1638 | BHDR(bh)->h_blocks != cpu_to_le32(1)) | ||
1639 | goto errout; | ||
1640 | + if (buffer_verified(bh)) | ||
1641 | + return 0; | ||
1642 | + | ||
1643 | error = -EFSBADCRC; | ||
1644 | if (!ext4_xattr_block_csum_verify(inode, bh)) | ||
1645 | goto errout; | ||
1646 | @@ -1559,7 +1559,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, | ||
1647 | handle_t *handle, struct inode *inode, | ||
1648 | bool is_block) | ||
1649 | { | ||
1650 | - struct ext4_xattr_entry *last; | ||
1651 | + struct ext4_xattr_entry *last, *next; | ||
1652 | struct ext4_xattr_entry *here = s->here; | ||
1653 | size_t min_offs = s->end - s->base, name_len = strlen(i->name); | ||
1654 | int in_inode = i->in_inode; | ||
1655 | @@ -1594,7 +1594,13 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, | ||
1656 | |||
1657 | /* Compute min_offs and last. */ | ||
1658 | last = s->first; | ||
1659 | - for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { | ||
1660 | + for (; !IS_LAST_ENTRY(last); last = next) { | ||
1661 | + next = EXT4_XATTR_NEXT(last); | ||
1662 | + if ((void *)next >= s->end) { | ||
1663 | + EXT4_ERROR_INODE(inode, "corrupted xattr entries"); | ||
1664 | + ret = -EFSCORRUPTED; | ||
1665 | + goto out; | ||
1666 | + } | ||
1667 | if (!last->e_value_inum && last->e_value_size) { | ||
1668 | size_t offs = le16_to_cpu(last->e_value_offs); | ||
1669 | if (offs < min_offs) | ||
1670 | @@ -2205,23 +2211,8 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode, | ||
1671 | if (EXT4_I(inode)->i_extra_isize == 0) | ||
1672 | return -ENOSPC; | ||
1673 | error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */); | ||
1674 | - if (error) { | ||
1675 | - if (error == -ENOSPC && | ||
1676 | - ext4_has_inline_data(inode)) { | ||
1677 | - error = ext4_try_to_evict_inline_data(handle, inode, | ||
1678 | - EXT4_XATTR_LEN(strlen(i->name) + | ||
1679 | - EXT4_XATTR_SIZE(i->value_len))); | ||
1680 | - if (error) | ||
1681 | - return error; | ||
1682 | - error = ext4_xattr_ibody_find(inode, i, is); | ||
1683 | - if (error) | ||
1684 | - return error; | ||
1685 | - error = ext4_xattr_set_entry(i, s, handle, inode, | ||
1686 | - false /* is_block */); | ||
1687 | - } | ||
1688 | - if (error) | ||
1689 | - return error; | ||
1690 | - } | ||
1691 | + if (error) | ||
1692 | + return error; | ||
1693 | header = IHDR(inode, ext4_raw_inode(&is->iloc)); | ||
1694 | if (!IS_LAST_ENTRY(s->first)) { | ||
1695 | header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); | ||
1696 | @@ -2650,6 +2641,11 @@ static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode, | ||
1697 | last = IFIRST(header); | ||
1698 | /* Find the entry best suited to be pushed into EA block */ | ||
1699 | for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { | ||
1700 | + /* never move system.data out of the inode */ | ||
1701 | + if ((last->e_name_len == 4) && | ||
1702 | + (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) && | ||
1703 | + !memcmp(last->e_name, "data", 4)) | ||
1704 | + continue; | ||
1705 | total_size = EXT4_XATTR_LEN(last->e_name_len); | ||
1706 | if (!last->e_value_inum) | ||
1707 | total_size += EXT4_XATTR_SIZE( | ||
1708 | diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c | ||
1709 | index 29c5f799890c..72c6a9e9a9b4 100644 | ||
1710 | --- a/fs/f2fs/file.c | ||
1711 | +++ b/fs/f2fs/file.c | ||
1712 | @@ -2694,11 +2694,16 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | ||
1713 | inode_lock(inode); | ||
1714 | ret = generic_write_checks(iocb, from); | ||
1715 | if (ret > 0) { | ||
1716 | + bool preallocated = false; | ||
1717 | + size_t target_size = 0; | ||
1718 | int err; | ||
1719 | |||
1720 | if (iov_iter_fault_in_readable(from, iov_iter_count(from))) | ||
1721 | set_inode_flag(inode, FI_NO_PREALLOC); | ||
1722 | |||
1723 | + preallocated = true; | ||
1724 | + target_size = iocb->ki_pos + iov_iter_count(from); | ||
1725 | + | ||
1726 | err = f2fs_preallocate_blocks(iocb, from); | ||
1727 | if (err) { | ||
1728 | clear_inode_flag(inode, FI_NO_PREALLOC); | ||
1729 | @@ -2710,6 +2715,10 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | ||
1730 | blk_finish_plug(&plug); | ||
1731 | clear_inode_flag(inode, FI_NO_PREALLOC); | ||
1732 | |||
1733 | + /* if we couldn't write data, we should deallocate blocks. */ | ||
1734 | + if (preallocated && i_size_read(inode) < target_size) | ||
1735 | + f2fs_truncate(inode); | ||
1736 | + | ||
1737 | if (ret > 0) | ||
1738 | f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret); | ||
1739 | } | ||
1740 | diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c | ||
1741 | index 07793e25c976..e42736c1fdc8 100644 | ||
1742 | --- a/fs/jbd2/transaction.c | ||
1743 | +++ b/fs/jbd2/transaction.c | ||
1744 | @@ -1366,6 +1366,13 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) | ||
1745 | if (jh->b_transaction == transaction && | ||
1746 | jh->b_jlist != BJ_Metadata) { | ||
1747 | jbd_lock_bh_state(bh); | ||
1748 | + if (jh->b_transaction == transaction && | ||
1749 | + jh->b_jlist != BJ_Metadata) | ||
1750 | + pr_err("JBD2: assertion failure: h_type=%u " | ||
1751 | + "h_line_no=%u block_no=%llu jlist=%u\n", | ||
1752 | + handle->h_type, handle->h_line_no, | ||
1753 | + (unsigned long long) bh->b_blocknr, | ||
1754 | + jh->b_jlist); | ||
1755 | J_ASSERT_JH(jh, jh->b_transaction != transaction || | ||
1756 | jh->b_jlist == BJ_Metadata); | ||
1757 | jbd_unlock_bh_state(bh); | ||
1758 | @@ -1385,11 +1392,11 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) | ||
1759 | * of the transaction. This needs to be done | ||
1760 | * once a transaction -bzzz | ||
1761 | */ | ||
1762 | - jh->b_modified = 1; | ||
1763 | if (handle->h_buffer_credits <= 0) { | ||
1764 | ret = -ENOSPC; | ||
1765 | goto out_unlock_bh; | ||
1766 | } | ||
1767 | + jh->b_modified = 1; | ||
1768 | handle->h_buffer_credits--; | ||
1769 | } | ||
1770 | |||
1771 | diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c | ||
1772 | index 5aa392eae1c3..f6ed92524a03 100644 | ||
1773 | --- a/fs/userfaultfd.c | ||
1774 | +++ b/fs/userfaultfd.c | ||
1775 | @@ -220,24 +220,26 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, | ||
1776 | unsigned long reason) | ||
1777 | { | ||
1778 | struct mm_struct *mm = ctx->mm; | ||
1779 | - pte_t *pte; | ||
1780 | + pte_t *ptep, pte; | ||
1781 | bool ret = true; | ||
1782 | |||
1783 | VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); | ||
1784 | |||
1785 | - pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma)); | ||
1786 | - if (!pte) | ||
1787 | + ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma)); | ||
1788 | + | ||
1789 | + if (!ptep) | ||
1790 | goto out; | ||
1791 | |||
1792 | ret = false; | ||
1793 | + pte = huge_ptep_get(ptep); | ||
1794 | |||
1795 | /* | ||
1796 | * Lockless access: we're in a wait_event so it's ok if it | ||
1797 | * changes under us. | ||
1798 | */ | ||
1799 | - if (huge_pte_none(*pte)) | ||
1800 | + if (huge_pte_none(pte)) | ||
1801 | ret = true; | ||
1802 | - if (!huge_pte_write(*pte) && (reason & VM_UFFD_WP)) | ||
1803 | + if (!huge_pte_write(pte) && (reason & VM_UFFD_WP)) | ||
1804 | ret = true; | ||
1805 | out: | ||
1806 | return ret; | ||
1807 | diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c | ||
1808 | index aa75389be8cf..79a9a0def7db 100644 | ||
1809 | --- a/fs/xfs/xfs_ioctl.c | ||
1810 | +++ b/fs/xfs/xfs_ioctl.c | ||
1811 | @@ -1101,7 +1101,8 @@ xfs_ioctl_setattr_dax_invalidate( | ||
1812 | if (fa->fsx_xflags & FS_XFLAG_DAX) { | ||
1813 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) | ||
1814 | return -EINVAL; | ||
1815 | - if (bdev_dax_supported(sb, sb->s_blocksize) < 0) | ||
1816 | + if (!bdev_dax_supported(xfs_find_bdev_for_inode(VFS_I(ip)), | ||
1817 | + sb->s_blocksize)) | ||
1818 | return -EINVAL; | ||
1819 | } | ||
1820 | |||
1821 | diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c | ||
1822 | index f24e5b6cfc86..1daa965f1e08 100644 | ||
1823 | --- a/fs/xfs/xfs_iops.c | ||
1824 | +++ b/fs/xfs/xfs_iops.c | ||
1825 | @@ -1184,6 +1184,30 @@ static const struct inode_operations xfs_inline_symlink_inode_operations = { | ||
1826 | .update_time = xfs_vn_update_time, | ||
1827 | }; | ||
1828 | |||
1829 | +/* Figure out if this file actually supports DAX. */ | ||
1830 | +static bool | ||
1831 | +xfs_inode_supports_dax( | ||
1832 | + struct xfs_inode *ip) | ||
1833 | +{ | ||
1834 | + struct xfs_mount *mp = ip->i_mount; | ||
1835 | + | ||
1836 | + /* Only supported on non-reflinked files. */ | ||
1837 | + if (!S_ISREG(VFS_I(ip)->i_mode) || xfs_is_reflink_inode(ip)) | ||
1838 | + return false; | ||
1839 | + | ||
1840 | + /* DAX mount option or DAX iflag must be set. */ | ||
1841 | + if (!(mp->m_flags & XFS_MOUNT_DAX) && | ||
1842 | + !(ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)) | ||
1843 | + return false; | ||
1844 | + | ||
1845 | + /* Block size must match page size */ | ||
1846 | + if (mp->m_sb.sb_blocksize != PAGE_SIZE) | ||
1847 | + return false; | ||
1848 | + | ||
1849 | + /* Device has to support DAX too. */ | ||
1850 | + return xfs_find_daxdev_for_inode(VFS_I(ip)) != NULL; | ||
1851 | +} | ||
1852 | + | ||
1853 | STATIC void | ||
1854 | xfs_diflags_to_iflags( | ||
1855 | struct inode *inode, | ||
1856 | @@ -1202,11 +1226,7 @@ xfs_diflags_to_iflags( | ||
1857 | inode->i_flags |= S_SYNC; | ||
1858 | if (flags & XFS_DIFLAG_NOATIME) | ||
1859 | inode->i_flags |= S_NOATIME; | ||
1860 | - if (S_ISREG(inode->i_mode) && | ||
1861 | - ip->i_mount->m_sb.sb_blocksize == PAGE_SIZE && | ||
1862 | - !xfs_is_reflink_inode(ip) && | ||
1863 | - (ip->i_mount->m_flags & XFS_MOUNT_DAX || | ||
1864 | - ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)) | ||
1865 | + if (xfs_inode_supports_dax(ip)) | ||
1866 | inode->i_flags |= S_DAX; | ||
1867 | } | ||
1868 | |||
1869 | diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c | ||
1870 | index f663022353c0..0b0282d2f011 100644 | ||
1871 | --- a/fs/xfs/xfs_super.c | ||
1872 | +++ b/fs/xfs/xfs_super.c | ||
1873 | @@ -1640,11 +1640,17 @@ xfs_fs_fill_super( | ||
1874 | sb->s_flags |= SB_I_VERSION; | ||
1875 | |||
1876 | if (mp->m_flags & XFS_MOUNT_DAX) { | ||
1877 | + bool rtdev_is_dax = false, datadev_is_dax; | ||
1878 | + | ||
1879 | xfs_warn(mp, | ||
1880 | "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); | ||
1881 | |||
1882 | - error = bdev_dax_supported(sb, sb->s_blocksize); | ||
1883 | - if (error) { | ||
1884 | + datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev, | ||
1885 | + sb->s_blocksize); | ||
1886 | + if (mp->m_rtdev_targp) | ||
1887 | + rtdev_is_dax = bdev_dax_supported( | ||
1888 | + mp->m_rtdev_targp->bt_bdev, sb->s_blocksize); | ||
1889 | + if (!rtdev_is_dax && !datadev_is_dax) { | ||
1890 | xfs_alert(mp, | ||
1891 | "DAX unsupported by block device. Turning off DAX."); | ||
1892 | mp->m_flags &= ~XFS_MOUNT_DAX; | ||
1893 | diff --git a/include/linux/dax.h b/include/linux/dax.h | ||
1894 | index 895e16fcc62d..07d6bc1f90a3 100644 | ||
1895 | --- a/include/linux/dax.h | ||
1896 | +++ b/include/linux/dax.h | ||
1897 | @@ -40,10 +40,10 @@ static inline void put_dax(struct dax_device *dax_dev) | ||
1898 | |||
1899 | int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); | ||
1900 | #if IS_ENABLED(CONFIG_FS_DAX) | ||
1901 | -int __bdev_dax_supported(struct super_block *sb, int blocksize); | ||
1902 | -static inline int bdev_dax_supported(struct super_block *sb, int blocksize) | ||
1903 | +bool __bdev_dax_supported(struct block_device *bdev, int blocksize); | ||
1904 | +static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize) | ||
1905 | { | ||
1906 | - return __bdev_dax_supported(sb, blocksize); | ||
1907 | + return __bdev_dax_supported(bdev, blocksize); | ||
1908 | } | ||
1909 | |||
1910 | static inline struct dax_device *fs_dax_get_by_host(const char *host) | ||
1911 | @@ -58,9 +58,10 @@ static inline void fs_put_dax(struct dax_device *dax_dev) | ||
1912 | |||
1913 | struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev); | ||
1914 | #else | ||
1915 | -static inline int bdev_dax_supported(struct super_block *sb, int blocksize) | ||
1916 | +static inline bool bdev_dax_supported(struct block_device *bdev, | ||
1917 | + int blocksize) | ||
1918 | { | ||
1919 | - return -EOPNOTSUPP; | ||
1920 | + return false; | ||
1921 | } | ||
1922 | |||
1923 | static inline struct dax_device *fs_dax_get_by_host(const char *host) | ||
1924 | diff --git a/include/linux/mm.h b/include/linux/mm.h | ||
1925 | index f23215854c80..a26cf767407e 100644 | ||
1926 | --- a/include/linux/mm.h | ||
1927 | +++ b/include/linux/mm.h | ||
1928 | @@ -2549,6 +2549,7 @@ enum mf_action_page_type { | ||
1929 | MF_MSG_POISONED_HUGE, | ||
1930 | MF_MSG_HUGE, | ||
1931 | MF_MSG_FREE_HUGE, | ||
1932 | + MF_MSG_NON_PMD_HUGE, | ||
1933 | MF_MSG_UNMAP_FAILED, | ||
1934 | MF_MSG_DIRTY_SWAPCACHE, | ||
1935 | MF_MSG_CLEAN_SWAPCACHE, | ||
1936 | diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h | ||
1937 | index da10aa21bebc..d447f24df970 100644 | ||
1938 | --- a/include/trace/events/sched.h | ||
1939 | +++ b/include/trace/events/sched.h | ||
1940 | @@ -435,7 +435,9 @@ TRACE_EVENT(sched_pi_setprio, | ||
1941 | memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); | ||
1942 | __entry->pid = tsk->pid; | ||
1943 | __entry->oldprio = tsk->prio; | ||
1944 | - __entry->newprio = pi_task ? pi_task->prio : tsk->prio; | ||
1945 | + __entry->newprio = pi_task ? | ||
1946 | + min(tsk->normal_prio, pi_task->prio) : | ||
1947 | + tsk->normal_prio; | ||
1948 | /* XXX SCHED_DEADLINE bits missing */ | ||
1949 | ), | ||
1950 | |||
1951 | diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c | ||
1952 | index 82afb7ed369f..e97bbae947f0 100644 | ||
1953 | --- a/kernel/irq/irqdesc.c | ||
1954 | +++ b/kernel/irq/irqdesc.c | ||
1955 | @@ -27,7 +27,7 @@ static struct lock_class_key irq_desc_lock_class; | ||
1956 | #if defined(CONFIG_SMP) | ||
1957 | static int __init irq_affinity_setup(char *str) | ||
1958 | { | ||
1959 | - zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); | ||
1960 | + alloc_bootmem_cpumask_var(&irq_default_affinity); | ||
1961 | cpulist_parse(str, irq_default_affinity); | ||
1962 | /* | ||
1963 | * Set at least the boot cpu. We don't want to end up with | ||
1964 | @@ -40,10 +40,8 @@ __setup("irqaffinity=", irq_affinity_setup); | ||
1965 | |||
1966 | static void __init init_irq_default_affinity(void) | ||
1967 | { | ||
1968 | -#ifdef CONFIG_CPUMASK_OFFSTACK | ||
1969 | - if (!irq_default_affinity) | ||
1970 | + if (!cpumask_available(irq_default_affinity)) | ||
1971 | zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); | ||
1972 | -#endif | ||
1973 | if (cpumask_empty(irq_default_affinity)) | ||
1974 | cpumask_setall(irq_default_affinity); | ||
1975 | } | ||
1976 | diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c | ||
1977 | index 23c0b0cb5fb9..169b3c44ee97 100644 | ||
1978 | --- a/kernel/trace/trace_functions_graph.c | ||
1979 | +++ b/kernel/trace/trace_functions_graph.c | ||
1980 | @@ -831,6 +831,7 @@ print_graph_entry_leaf(struct trace_iterator *iter, | ||
1981 | struct ftrace_graph_ret *graph_ret; | ||
1982 | struct ftrace_graph_ent *call; | ||
1983 | unsigned long long duration; | ||
1984 | + int cpu = iter->cpu; | ||
1985 | int i; | ||
1986 | |||
1987 | graph_ret = &ret_entry->ret; | ||
1988 | @@ -839,7 +840,6 @@ print_graph_entry_leaf(struct trace_iterator *iter, | ||
1989 | |||
1990 | if (data) { | ||
1991 | struct fgraph_cpu_data *cpu_data; | ||
1992 | - int cpu = iter->cpu; | ||
1993 | |||
1994 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); | ||
1995 | |||
1996 | @@ -869,6 +869,9 @@ print_graph_entry_leaf(struct trace_iterator *iter, | ||
1997 | |||
1998 | trace_seq_printf(s, "%ps();\n", (void *)call->func); | ||
1999 | |||
2000 | + print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET, | ||
2001 | + cpu, iter->ent->pid, flags); | ||
2002 | + | ||
2003 | return trace_handle_return(s); | ||
2004 | } | ||
2005 | |||
2006 | diff --git a/mm/hugetlb.c b/mm/hugetlb.c | ||
2007 | index b1f841a9edd4..dfd2947e046e 100644 | ||
2008 | --- a/mm/hugetlb.c | ||
2009 | +++ b/mm/hugetlb.c | ||
2010 | @@ -2159,6 +2159,7 @@ static void __init gather_bootmem_prealloc(void) | ||
2011 | */ | ||
2012 | if (hstate_is_gigantic(h)) | ||
2013 | adjust_managed_page_count(page, 1 << h->order); | ||
2014 | + cond_resched(); | ||
2015 | } | ||
2016 | } | ||
2017 | |||
2018 | diff --git a/mm/memory-failure.c b/mm/memory-failure.c | ||
2019 | index 1cd3b3569af8..345e69d88b37 100644 | ||
2020 | --- a/mm/memory-failure.c | ||
2021 | +++ b/mm/memory-failure.c | ||
2022 | @@ -508,6 +508,7 @@ static const char * const action_page_types[] = { | ||
2023 | [MF_MSG_POISONED_HUGE] = "huge page already hardware poisoned", | ||
2024 | [MF_MSG_HUGE] = "huge page", | ||
2025 | [MF_MSG_FREE_HUGE] = "free huge page", | ||
2026 | + [MF_MSG_NON_PMD_HUGE] = "non-pmd-sized huge page", | ||
2027 | [MF_MSG_UNMAP_FAILED] = "unmapping failed page", | ||
2028 | [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page", | ||
2029 | [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page", | ||
2030 | @@ -1090,6 +1091,21 @@ static int memory_failure_hugetlb(unsigned long pfn, int trapno, int flags) | ||
2031 | return 0; | ||
2032 | } | ||
2033 | |||
2034 | + /* | ||
2035 | + * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so | ||
2036 | + * simply disable it. In order to make it work properly, we need | ||
2037 | + * make sure that: | ||
2038 | + * - conversion of a pud that maps an error hugetlb into hwpoison | ||
2039 | + * entry properly works, and | ||
2040 | + * - other mm code walking over page table is aware of pud-aligned | ||
2041 | + * hwpoison entries. | ||
2042 | + */ | ||
2043 | + if (huge_page_size(page_hstate(head)) > PMD_SIZE) { | ||
2044 | + action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED); | ||
2045 | + res = -EBUSY; | ||
2046 | + goto out; | ||
2047 | + } | ||
2048 | + | ||
2049 | if (!hwpoison_user_mappings(p, pfn, trapno, flags, &head)) { | ||
2050 | action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED); | ||
2051 | res = -EBUSY; | ||
2052 | diff --git a/mm/vmstat.c b/mm/vmstat.c | ||
2053 | index e085b13c572e..4bb13e72ac97 100644 | ||
2054 | --- a/mm/vmstat.c | ||
2055 | +++ b/mm/vmstat.c | ||
2056 | @@ -1770,11 +1770,9 @@ static void vmstat_update(struct work_struct *w) | ||
2057 | * to occur in the future. Keep on running the | ||
2058 | * update worker thread. | ||
2059 | */ | ||
2060 | - preempt_disable(); | ||
2061 | queue_delayed_work_on(smp_processor_id(), mm_percpu_wq, | ||
2062 | this_cpu_ptr(&vmstat_work), | ||
2063 | round_jiffies_relative(sysctl_stat_interval)); | ||
2064 | - preempt_enable(); | ||
2065 | } | ||
2066 | } | ||
2067 | |||
2068 | diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c | ||
2069 | index 8bb152a7cca4..276324abfa60 100644 | ||
2070 | --- a/net/netfilter/nf_log.c | ||
2071 | +++ b/net/netfilter/nf_log.c | ||
2072 | @@ -458,14 +458,17 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write, | ||
2073 | rcu_assign_pointer(net->nf.nf_loggers[tindex], logger); | ||
2074 | mutex_unlock(&nf_log_mutex); | ||
2075 | } else { | ||
2076 | + struct ctl_table tmp = *table; | ||
2077 | + | ||
2078 | + tmp.data = buf; | ||
2079 | mutex_lock(&nf_log_mutex); | ||
2080 | logger = nft_log_dereference(net->nf.nf_loggers[tindex]); | ||
2081 | if (!logger) | ||
2082 | - table->data = "NONE"; | ||
2083 | + strlcpy(buf, "NONE", sizeof(buf)); | ||
2084 | else | ||
2085 | - table->data = logger->name; | ||
2086 | - r = proc_dostring(table, write, buffer, lenp, ppos); | ||
2087 | + strlcpy(buf, logger->name, sizeof(buf)); | ||
2088 | mutex_unlock(&nf_log_mutex); | ||
2089 | + r = proc_dostring(&tmp, write, buffer, lenp, ppos); | ||
2090 | } | ||
2091 | |||
2092 | return r; | ||
2093 | diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include | ||
2094 | index 97769465de13..fcbbecf92395 100644 | ||
2095 | --- a/scripts/Kbuild.include | ||
2096 | +++ b/scripts/Kbuild.include | ||
2097 | @@ -8,6 +8,7 @@ squote := ' | ||
2098 | empty := | ||
2099 | space := $(empty) $(empty) | ||
2100 | space_escape := _-_SPACE_-_ | ||
2101 | +pound := \# | ||
2102 | |||
2103 | ### | ||
2104 | # Name of target with a '.' as filename prefix. foo/bar.o => foo/.bar.o | ||
2105 | @@ -251,11 +252,11 @@ endif | ||
2106 | |||
2107 | # Replace >$< with >$$< to preserve $ when reloading the .cmd file | ||
2108 | # (needed for make) | ||
2109 | -# Replace >#< with >\#< to avoid starting a comment in the .cmd file | ||
2110 | +# Replace >#< with >$(pound)< to avoid starting a comment in the .cmd file | ||
2111 | # (needed for make) | ||
2112 | # Replace >'< with >'\''< to be able to enclose the whole string in '...' | ||
2113 | # (needed for the shell) | ||
2114 | -make-cmd = $(call escsq,$(subst \#,\\\#,$(subst $$,$$$$,$(cmd_$(1))))) | ||
2115 | +make-cmd = $(call escsq,$(subst $(pound),$$(pound),$(subst $$,$$$$,$(cmd_$(1))))) | ||
2116 | |||
2117 | # Find any prerequisites that is newer than target or that does not exist. | ||
2118 | # PHONY targets skipped in both cases. | ||
2119 | diff --git a/tools/build/Build.include b/tools/build/Build.include | ||
2120 | index 418871d02ebf..a4bbb984941d 100644 | ||
2121 | --- a/tools/build/Build.include | ||
2122 | +++ b/tools/build/Build.include | ||
2123 | @@ -12,6 +12,7 @@ | ||
2124 | # Convenient variables | ||
2125 | comma := , | ||
2126 | squote := ' | ||
2127 | +pound := \# | ||
2128 | |||
2129 | ### | ||
2130 | # Name of target with a '.' as filename prefix. foo/bar.o => foo/.bar.o | ||
2131 | @@ -43,11 +44,11 @@ echo-cmd = $(if $($(quiet)cmd_$(1)),\ | ||
2132 | ### | ||
2133 | # Replace >$< with >$$< to preserve $ when reloading the .cmd file | ||
2134 | # (needed for make) | ||
2135 | -# Replace >#< with >\#< to avoid starting a comment in the .cmd file | ||
2136 | +# Replace >#< with >$(pound)< to avoid starting a comment in the .cmd file | ||
2137 | # (needed for make) | ||
2138 | # Replace >'< with >'\''< to be able to enclose the whole string in '...' | ||
2139 | # (needed for the shell) | ||
2140 | -make-cmd = $(call escsq,$(subst \#,\\\#,$(subst $$,$$$$,$(cmd_$(1))))) | ||
2141 | +make-cmd = $(call escsq,$(subst $(pound),$$(pound),$(subst $$,$$$$,$(cmd_$(1))))) | ||
2142 | |||
2143 | ### | ||
2144 | # Find any prerequisites that is newer than target or that does not exist. | ||
2145 | diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile | ||
2146 | index e6acc281dd37..8ae824dbfca3 100644 | ||
2147 | --- a/tools/objtool/Makefile | ||
2148 | +++ b/tools/objtool/Makefile | ||
2149 | @@ -35,7 +35,7 @@ CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES) | ||
2150 | LDFLAGS += -lelf $(LIBSUBCMD) | ||
2151 | |||
2152 | # Allow old libelf to be used: | ||
2153 | -elfshdr := $(shell echo '\#include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr) | ||
2154 | +elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr) | ||
2155 | CFLAGS += $(if $(elfshdr),,-DLIBELF_USE_DEPRECATED) | ||
2156 | |||
2157 | AWK = awk | ||
2158 | diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include | ||
2159 | index 654efd9768fd..5f3f1f44ed0a 100644 | ||
2160 | --- a/tools/scripts/Makefile.include | ||
2161 | +++ b/tools/scripts/Makefile.include | ||
2162 | @@ -101,3 +101,5 @@ ifneq ($(silent),1) | ||
2163 | QUIET_INSTALL = @printf ' INSTALL %s\n' $1; | ||
2164 | endif | ||
2165 | endif | ||
2166 | + | ||
2167 | +pound := \# |