Contents of /trunk/kernel-magellan/patches-4.17/0105-4.17.6-all-fixes.patch
Parent Directory | Revision Log
Revision 3154 -
(show annotations)
(download)
Thu Jul 12 09:16:58 2018 UTC (6 years, 2 months ago) by niro
File size: 78553 byte(s)
-linux-4.17.6
1 | diff --git a/Makefile b/Makefile |
2 | index e4ddbad49636..1a885c8f82ef 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 4 |
8 | PATCHLEVEL = 17 |
9 | -SUBLEVEL = 5 |
10 | +SUBLEVEL = 6 |
11 | EXTRAVERSION = |
12 | NAME = Merciless Moray |
13 | |
14 | diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi |
15 | index ca294914bbb1..4b6062b631b1 100644 |
16 | --- a/arch/arm/boot/dts/am3517.dtsi |
17 | +++ b/arch/arm/boot/dts/am3517.dtsi |
18 | @@ -39,6 +39,8 @@ |
19 | ti,davinci-ctrl-ram-size = <0x2000>; |
20 | ti,davinci-rmii-en = /bits/ 8 <1>; |
21 | local-mac-address = [ 00 00 00 00 00 00 ]; |
22 | + clocks = <&emac_ick>; |
23 | + clock-names = "ick"; |
24 | }; |
25 | |
26 | davinci_mdio: ethernet@5c030000 { |
27 | @@ -49,6 +51,8 @@ |
28 | bus_freq = <1000000>; |
29 | #address-cells = <1>; |
30 | #size-cells = <0>; |
31 | + clocks = <&emac_fck>; |
32 | + clock-names = "fck"; |
33 | }; |
34 | |
35 | uart4: serial@4809e000 { |
36 | diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi |
37 | index f4ddd86f2c77..9cace9f3dd15 100644 |
38 | --- a/arch/arm/boot/dts/dra7.dtsi |
39 | +++ b/arch/arm/boot/dts/dra7.dtsi |
40 | @@ -1582,7 +1582,6 @@ |
41 | dr_mode = "otg"; |
42 | snps,dis_u3_susphy_quirk; |
43 | snps,dis_u2_susphy_quirk; |
44 | - snps,dis_metastability_quirk; |
45 | }; |
46 | }; |
47 | |
48 | @@ -1610,6 +1609,7 @@ |
49 | dr_mode = "otg"; |
50 | snps,dis_u3_susphy_quirk; |
51 | snps,dis_u2_susphy_quirk; |
52 | + snps,dis_metastability_quirk; |
53 | }; |
54 | }; |
55 | |
56 | diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts |
57 | index 6464f2560e06..0662217751dc 100644 |
58 | --- a/arch/arm/boot/dts/imx51-zii-rdu1.dts |
59 | +++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts |
60 | @@ -768,7 +768,7 @@ |
61 | |
62 | pinctrl_ts: tsgrp { |
63 | fsl,pins = < |
64 | - MX51_PAD_CSI1_D8__GPIO3_12 0x85 |
65 | + MX51_PAD_CSI1_D8__GPIO3_12 0x04 |
66 | MX51_PAD_CSI1_D9__GPIO3_13 0x85 |
67 | >; |
68 | }; |
69 | diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S |
70 | index f03402efab4b..3891805bfcdd 100644 |
71 | --- a/arch/s390/kernel/entry.S |
72 | +++ b/arch/s390/kernel/entry.S |
73 | @@ -1265,7 +1265,7 @@ cleanup_critical: |
74 | jl 0f |
75 | clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end |
76 | jl .Lcleanup_load_fpu_regs |
77 | -0: BR_EX %r14 |
78 | +0: BR_EX %r14,%r11 |
79 | |
80 | .align 8 |
81 | .Lcleanup_table: |
82 | @@ -1301,7 +1301,7 @@ cleanup_critical: |
83 | ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE |
84 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce |
85 | larl %r9,sie_exit # skip forward to sie_exit |
86 | - BR_EX %r14 |
87 | + BR_EX %r14,%r11 |
88 | #endif |
89 | |
90 | .Lcleanup_system_call: |
91 | diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c |
92 | index 5a64ddaed8a3..e47430272692 100644 |
93 | --- a/drivers/acpi/acpica/uterror.c |
94 | +++ b/drivers/acpi/acpica/uterror.c |
95 | @@ -182,19 +182,19 @@ acpi_ut_prefixed_namespace_error(const char *module_name, |
96 | switch (lookup_status) { |
97 | case AE_ALREADY_EXISTS: |
98 | |
99 | - acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR); |
100 | + acpi_os_printf(ACPI_MSG_BIOS_ERROR); |
101 | message = "Failure creating"; |
102 | break; |
103 | |
104 | case AE_NOT_FOUND: |
105 | |
106 | - acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR); |
107 | + acpi_os_printf(ACPI_MSG_BIOS_ERROR); |
108 | message = "Could not resolve"; |
109 | break; |
110 | |
111 | default: |
112 | |
113 | - acpi_os_printf("\n" ACPI_MSG_ERROR); |
114 | + acpi_os_printf(ACPI_MSG_ERROR); |
115 | message = "Failure resolving"; |
116 | break; |
117 | } |
118 | diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c |
119 | index bdb24d636d9a..4cc7bfec76ff 100644 |
120 | --- a/drivers/acpi/battery.c |
121 | +++ b/drivers/acpi/battery.c |
122 | @@ -709,10 +709,11 @@ void battery_hook_register(struct acpi_battery_hook *hook) |
123 | */ |
124 | pr_err("extension failed to load: %s", hook->name); |
125 | __battery_hook_unregister(hook, 0); |
126 | - return; |
127 | + goto end; |
128 | } |
129 | } |
130 | pr_info("new extension: %s\n", hook->name); |
131 | +end: |
132 | mutex_unlock(&hook_mutex); |
133 | } |
134 | EXPORT_SYMBOL_GPL(battery_hook_register); |
135 | @@ -724,7 +725,7 @@ EXPORT_SYMBOL_GPL(battery_hook_register); |
136 | */ |
137 | static void battery_hook_add_battery(struct acpi_battery *battery) |
138 | { |
139 | - struct acpi_battery_hook *hook_node; |
140 | + struct acpi_battery_hook *hook_node, *tmp; |
141 | |
142 | mutex_lock(&hook_mutex); |
143 | INIT_LIST_HEAD(&battery->list); |
144 | @@ -736,15 +737,15 @@ static void battery_hook_add_battery(struct acpi_battery *battery) |
145 | * when a battery gets hotplugged or initialized |
146 | * during the battery module initialization. |
147 | */ |
148 | - list_for_each_entry(hook_node, &battery_hook_list, list) { |
149 | + list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) { |
150 | if (hook_node->add_battery(battery->bat)) { |
151 | /* |
152 | * The notification of the extensions has failed, to |
153 | * prevent further errors we will unload the extension. |
154 | */ |
155 | - __battery_hook_unregister(hook_node, 0); |
156 | pr_err("error in extension, unloading: %s", |
157 | hook_node->name); |
158 | + __battery_hook_unregister(hook_node, 0); |
159 | } |
160 | } |
161 | mutex_unlock(&hook_mutex); |
162 | diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c |
163 | index 1476cb3439f4..5e793dd7adfb 100644 |
164 | --- a/drivers/block/drbd/drbd_worker.c |
165 | +++ b/drivers/block/drbd/drbd_worker.c |
166 | @@ -282,8 +282,8 @@ void drbd_request_endio(struct bio *bio) |
167 | what = COMPLETED_OK; |
168 | } |
169 | |
170 | - bio_put(req->private_bio); |
171 | req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status)); |
172 | + bio_put(bio); |
173 | |
174 | /* not req_mod(), we need irqsave here! */ |
175 | spin_lock_irqsave(&device->resource->req_lock, flags); |
176 | diff --git a/drivers/dax/super.c b/drivers/dax/super.c |
177 | index 2b2332b605e4..1d2de641cabb 100644 |
178 | --- a/drivers/dax/super.c |
179 | +++ b/drivers/dax/super.c |
180 | @@ -74,42 +74,50 @@ EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev); |
181 | |
182 | /** |
183 | * __bdev_dax_supported() - Check if the device supports dax for filesystem |
184 | - * @sb: The superblock of the device |
185 | + * @bdev: block device to check |
186 | * @blocksize: The block size of the device |
187 | * |
188 | * This is a library function for filesystems to check if the block device |
189 | * can be mounted with dax option. |
190 | * |
191 | - * Return: negative errno if unsupported, 0 if supported. |
192 | + * Return: true if supported, false if unsupported |
193 | */ |
194 | -int __bdev_dax_supported(struct super_block *sb, int blocksize) |
195 | +bool __bdev_dax_supported(struct block_device *bdev, int blocksize) |
196 | { |
197 | - struct block_device *bdev = sb->s_bdev; |
198 | struct dax_device *dax_dev; |
199 | + struct request_queue *q; |
200 | pgoff_t pgoff; |
201 | int err, id; |
202 | void *kaddr; |
203 | pfn_t pfn; |
204 | long len; |
205 | + char buf[BDEVNAME_SIZE]; |
206 | |
207 | if (blocksize != PAGE_SIZE) { |
208 | - pr_debug("VFS (%s): error: unsupported blocksize for dax\n", |
209 | - sb->s_id); |
210 | - return -EINVAL; |
211 | + pr_debug("%s: error: unsupported blocksize for dax\n", |
212 | + bdevname(bdev, buf)); |
213 | + return false; |
214 | + } |
215 | + |
216 | + q = bdev_get_queue(bdev); |
217 | + if (!q || !blk_queue_dax(q)) { |
218 | + pr_debug("%s: error: request queue doesn't support dax\n", |
219 | + bdevname(bdev, buf)); |
220 | + return false; |
221 | } |
222 | |
223 | err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff); |
224 | if (err) { |
225 | - pr_debug("VFS (%s): error: unaligned partition for dax\n", |
226 | - sb->s_id); |
227 | - return err; |
228 | + pr_debug("%s: error: unaligned partition for dax\n", |
229 | + bdevname(bdev, buf)); |
230 | + return false; |
231 | } |
232 | |
233 | dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); |
234 | if (!dax_dev) { |
235 | - pr_debug("VFS (%s): error: device does not support dax\n", |
236 | - sb->s_id); |
237 | - return -EOPNOTSUPP; |
238 | + pr_debug("%s: error: device does not support dax\n", |
239 | + bdevname(bdev, buf)); |
240 | + return false; |
241 | } |
242 | |
243 | id = dax_read_lock(); |
244 | @@ -119,9 +127,9 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize) |
245 | put_dax(dax_dev); |
246 | |
247 | if (len < 1) { |
248 | - pr_debug("VFS (%s): error: dax access failed (%ld)\n", |
249 | - sb->s_id, len); |
250 | - return len < 0 ? len : -EIO; |
251 | + pr_debug("%s: error: dax access failed (%ld)\n", |
252 | + bdevname(bdev, buf), len); |
253 | + return false; |
254 | } |
255 | |
256 | if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) { |
257 | @@ -137,12 +145,12 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize) |
258 | } else if (pfn_t_devmap(pfn)) { |
259 | /* pass */; |
260 | } else { |
261 | - pr_debug("VFS (%s): error: dax support not enabled\n", |
262 | - sb->s_id); |
263 | - return -EOPNOTSUPP; |
264 | + pr_debug("%s: error: dax support not enabled\n", |
265 | + bdevname(bdev, buf)); |
266 | + return false; |
267 | } |
268 | |
269 | - return 0; |
270 | + return true; |
271 | } |
272 | EXPORT_SYMBOL_GPL(__bdev_dax_supported); |
273 | #endif |
274 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h |
275 | index c8b605f3dc05..06401f0cde6d 100644 |
276 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h |
277 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h |
278 | @@ -188,6 +188,7 @@ struct amdgpu_job; |
279 | struct amdgpu_irq_src; |
280 | struct amdgpu_fpriv; |
281 | struct amdgpu_bo_va_mapping; |
282 | +struct amdgpu_atif; |
283 | |
284 | enum amdgpu_cp_irq { |
285 | AMDGPU_CP_IRQ_GFX_EOP = 0, |
286 | @@ -1246,43 +1247,6 @@ struct amdgpu_vram_scratch { |
287 | /* |
288 | * ACPI |
289 | */ |
290 | -struct amdgpu_atif_notification_cfg { |
291 | - bool enabled; |
292 | - int command_code; |
293 | -}; |
294 | - |
295 | -struct amdgpu_atif_notifications { |
296 | - bool display_switch; |
297 | - bool expansion_mode_change; |
298 | - bool thermal_state; |
299 | - bool forced_power_state; |
300 | - bool system_power_state; |
301 | - bool display_conf_change; |
302 | - bool px_gfx_switch; |
303 | - bool brightness_change; |
304 | - bool dgpu_display_event; |
305 | -}; |
306 | - |
307 | -struct amdgpu_atif_functions { |
308 | - bool system_params; |
309 | - bool sbios_requests; |
310 | - bool select_active_disp; |
311 | - bool lid_state; |
312 | - bool get_tv_standard; |
313 | - bool set_tv_standard; |
314 | - bool get_panel_expansion_mode; |
315 | - bool set_panel_expansion_mode; |
316 | - bool temperature_change; |
317 | - bool graphics_device_types; |
318 | -}; |
319 | - |
320 | -struct amdgpu_atif { |
321 | - struct amdgpu_atif_notifications notifications; |
322 | - struct amdgpu_atif_functions functions; |
323 | - struct amdgpu_atif_notification_cfg notification_cfg; |
324 | - struct amdgpu_encoder *encoder_for_bl; |
325 | -}; |
326 | - |
327 | struct amdgpu_atcs_functions { |
328 | bool get_ext_state; |
329 | bool pcie_perf_req; |
330 | @@ -1430,7 +1394,7 @@ struct amdgpu_device { |
331 | #if defined(CONFIG_DEBUG_FS) |
332 | struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; |
333 | #endif |
334 | - struct amdgpu_atif atif; |
335 | + struct amdgpu_atif *atif; |
336 | struct amdgpu_atcs atcs; |
337 | struct mutex srbm_mutex; |
338 | /* GRBM index mutex. Protects concurrent access to GRBM index */ |
339 | @@ -1855,6 +1819,12 @@ static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; |
340 | static inline bool amdgpu_has_atpx(void) { return false; } |
341 | #endif |
342 | |
343 | +#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI) |
344 | +void *amdgpu_atpx_get_dhandle(void); |
345 | +#else |
346 | +static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; } |
347 | +#endif |
348 | + |
349 | /* |
350 | * KMS |
351 | */ |
352 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c |
353 | index 8fa850a070e0..0d8c3fc6eace 100644 |
354 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c |
355 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c |
356 | @@ -34,6 +34,45 @@ |
357 | #include "amd_acpi.h" |
358 | #include "atom.h" |
359 | |
360 | +struct amdgpu_atif_notification_cfg { |
361 | + bool enabled; |
362 | + int command_code; |
363 | +}; |
364 | + |
365 | +struct amdgpu_atif_notifications { |
366 | + bool display_switch; |
367 | + bool expansion_mode_change; |
368 | + bool thermal_state; |
369 | + bool forced_power_state; |
370 | + bool system_power_state; |
371 | + bool display_conf_change; |
372 | + bool px_gfx_switch; |
373 | + bool brightness_change; |
374 | + bool dgpu_display_event; |
375 | +}; |
376 | + |
377 | +struct amdgpu_atif_functions { |
378 | + bool system_params; |
379 | + bool sbios_requests; |
380 | + bool select_active_disp; |
381 | + bool lid_state; |
382 | + bool get_tv_standard; |
383 | + bool set_tv_standard; |
384 | + bool get_panel_expansion_mode; |
385 | + bool set_panel_expansion_mode; |
386 | + bool temperature_change; |
387 | + bool graphics_device_types; |
388 | +}; |
389 | + |
390 | +struct amdgpu_atif { |
391 | + acpi_handle handle; |
392 | + |
393 | + struct amdgpu_atif_notifications notifications; |
394 | + struct amdgpu_atif_functions functions; |
395 | + struct amdgpu_atif_notification_cfg notification_cfg; |
396 | + struct amdgpu_encoder *encoder_for_bl; |
397 | +}; |
398 | + |
399 | /* Call the ATIF method |
400 | */ |
401 | /** |
402 | @@ -46,8 +85,9 @@ |
403 | * Executes the requested ATIF function (all asics). |
404 | * Returns a pointer to the acpi output buffer. |
405 | */ |
406 | -static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function, |
407 | - struct acpi_buffer *params) |
408 | +static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif, |
409 | + int function, |
410 | + struct acpi_buffer *params) |
411 | { |
412 | acpi_status status; |
413 | union acpi_object atif_arg_elements[2]; |
414 | @@ -70,7 +110,8 @@ static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function, |
415 | atif_arg_elements[1].integer.value = 0; |
416 | } |
417 | |
418 | - status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer); |
419 | + status = acpi_evaluate_object(atif->handle, NULL, &atif_arg, |
420 | + &buffer); |
421 | |
422 | /* Fail only if calling the method fails and ATIF is supported */ |
423 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { |
424 | @@ -141,15 +182,14 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas |
425 | * (all asics). |
426 | * returns 0 on success, error on failure. |
427 | */ |
428 | -static int amdgpu_atif_verify_interface(acpi_handle handle, |
429 | - struct amdgpu_atif *atif) |
430 | +static int amdgpu_atif_verify_interface(struct amdgpu_atif *atif) |
431 | { |
432 | union acpi_object *info; |
433 | struct atif_verify_interface output; |
434 | size_t size; |
435 | int err = 0; |
436 | |
437 | - info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL); |
438 | + info = amdgpu_atif_call(atif, ATIF_FUNCTION_VERIFY_INTERFACE, NULL); |
439 | if (!info) |
440 | return -EIO; |
441 | |
442 | @@ -176,6 +216,35 @@ static int amdgpu_atif_verify_interface(acpi_handle handle, |
443 | return err; |
444 | } |
445 | |
446 | +static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle) |
447 | +{ |
448 | + acpi_handle handle = NULL; |
449 | + char acpi_method_name[255] = { 0 }; |
450 | + struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name }; |
451 | + acpi_status status; |
452 | + |
453 | + /* For PX/HG systems, ATIF and ATPX are in the iGPU's namespace, on dGPU only |
454 | + * systems, ATIF is in the dGPU's namespace. |
455 | + */ |
456 | + status = acpi_get_handle(dhandle, "ATIF", &handle); |
457 | + if (ACPI_SUCCESS(status)) |
458 | + goto out; |
459 | + |
460 | + if (amdgpu_has_atpx()) { |
461 | + status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF", |
462 | + &handle); |
463 | + if (ACPI_SUCCESS(status)) |
464 | + goto out; |
465 | + } |
466 | + |
467 | + DRM_DEBUG_DRIVER("No ATIF handle found\n"); |
468 | + return NULL; |
469 | +out: |
470 | + acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); |
471 | + DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name); |
472 | + return handle; |
473 | +} |
474 | + |
475 | /** |
476 | * amdgpu_atif_get_notification_params - determine notify configuration |
477 | * |
478 | @@ -188,15 +257,16 @@ static int amdgpu_atif_verify_interface(acpi_handle handle, |
479 | * where n is specified in the result if a notifier is used. |
480 | * Returns 0 on success, error on failure. |
481 | */ |
482 | -static int amdgpu_atif_get_notification_params(acpi_handle handle, |
483 | - struct amdgpu_atif_notification_cfg *n) |
484 | +static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif) |
485 | { |
486 | union acpi_object *info; |
487 | + struct amdgpu_atif_notification_cfg *n = &atif->notification_cfg; |
488 | struct atif_system_params params; |
489 | size_t size; |
490 | int err = 0; |
491 | |
492 | - info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL); |
493 | + info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, |
494 | + NULL); |
495 | if (!info) { |
496 | err = -EIO; |
497 | goto out; |
498 | @@ -250,14 +320,15 @@ static int amdgpu_atif_get_notification_params(acpi_handle handle, |
499 | * (all asics). |
500 | * Returns 0 on success, error on failure. |
501 | */ |
502 | -static int amdgpu_atif_get_sbios_requests(acpi_handle handle, |
503 | - struct atif_sbios_requests *req) |
504 | +static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif, |
505 | + struct atif_sbios_requests *req) |
506 | { |
507 | union acpi_object *info; |
508 | size_t size; |
509 | int count = 0; |
510 | |
511 | - info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL); |
512 | + info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, |
513 | + NULL); |
514 | if (!info) |
515 | return -EIO; |
516 | |
517 | @@ -290,11 +361,10 @@ static int amdgpu_atif_get_sbios_requests(acpi_handle handle, |
518 | * Returns NOTIFY code |
519 | */ |
520 | static int amdgpu_atif_handler(struct amdgpu_device *adev, |
521 | - struct acpi_bus_event *event) |
522 | + struct acpi_bus_event *event) |
523 | { |
524 | - struct amdgpu_atif *atif = &adev->atif; |
525 | + struct amdgpu_atif *atif = adev->atif; |
526 | struct atif_sbios_requests req; |
527 | - acpi_handle handle; |
528 | int count; |
529 | |
530 | DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n", |
531 | @@ -303,14 +373,14 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, |
532 | if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) |
533 | return NOTIFY_DONE; |
534 | |
535 | - if (!atif->notification_cfg.enabled || |
536 | + if (!atif || |
537 | + !atif->notification_cfg.enabled || |
538 | event->type != atif->notification_cfg.command_code) |
539 | /* Not our event */ |
540 | return NOTIFY_DONE; |
541 | |
542 | /* Check pending SBIOS requests */ |
543 | - handle = ACPI_HANDLE(&adev->pdev->dev); |
544 | - count = amdgpu_atif_get_sbios_requests(handle, &req); |
545 | + count = amdgpu_atif_get_sbios_requests(atif, &req); |
546 | |
547 | if (count <= 0) |
548 | return NOTIFY_DONE; |
549 | @@ -641,8 +711,8 @@ static int amdgpu_acpi_event(struct notifier_block *nb, |
550 | */ |
551 | int amdgpu_acpi_init(struct amdgpu_device *adev) |
552 | { |
553 | - acpi_handle handle; |
554 | - struct amdgpu_atif *atif = &adev->atif; |
555 | + acpi_handle handle, atif_handle; |
556 | + struct amdgpu_atif *atif; |
557 | struct amdgpu_atcs *atcs = &adev->atcs; |
558 | int ret; |
559 | |
560 | @@ -658,12 +728,26 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) |
561 | DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret); |
562 | } |
563 | |
564 | + /* Probe for ATIF, and initialize it if found */ |
565 | + atif_handle = amdgpu_atif_probe_handle(handle); |
566 | + if (!atif_handle) |
567 | + goto out; |
568 | + |
569 | + atif = kzalloc(sizeof(*atif), GFP_KERNEL); |
570 | + if (!atif) { |
571 | + DRM_WARN("Not enough memory to initialize ATIF\n"); |
572 | + goto out; |
573 | + } |
574 | + atif->handle = atif_handle; |
575 | + |
576 | /* Call the ATIF method */ |
577 | - ret = amdgpu_atif_verify_interface(handle, atif); |
578 | + ret = amdgpu_atif_verify_interface(atif); |
579 | if (ret) { |
580 | DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret); |
581 | + kfree(atif); |
582 | goto out; |
583 | } |
584 | + adev->atif = atif; |
585 | |
586 | if (atif->notifications.brightness_change) { |
587 | struct drm_encoder *tmp; |
588 | @@ -693,8 +777,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) |
589 | } |
590 | |
591 | if (atif->functions.system_params) { |
592 | - ret = amdgpu_atif_get_notification_params(handle, |
593 | - &atif->notification_cfg); |
594 | + ret = amdgpu_atif_get_notification_params(atif); |
595 | if (ret) { |
596 | DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n", |
597 | ret); |
598 | @@ -720,4 +803,6 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) |
599 | void amdgpu_acpi_fini(struct amdgpu_device *adev) |
600 | { |
601 | unregister_acpi_notifier(&adev->acpi_nb); |
602 | + if (adev->atif) |
603 | + kfree(adev->atif); |
604 | } |
605 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c |
606 | index 1ae5ae8c45a4..2593b106d970 100644 |
607 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c |
608 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c |
609 | @@ -90,6 +90,12 @@ bool amdgpu_atpx_dgpu_req_power_for_displays(void) { |
610 | return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays; |
611 | } |
612 | |
613 | +#if defined(CONFIG_ACPI) |
614 | +void *amdgpu_atpx_get_dhandle(void) { |
615 | + return amdgpu_atpx_priv.dhandle; |
616 | +} |
617 | +#endif |
618 | + |
619 | /** |
620 | * amdgpu_atpx_call - call an ATPX method |
621 | * |
622 | diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c |
623 | index 8f4672daac7f..52174d017fb4 100644 |
624 | --- a/drivers/gpu/drm/drm_property.c |
625 | +++ b/drivers/gpu/drm/drm_property.c |
626 | @@ -533,7 +533,7 @@ static void drm_property_free_blob(struct kref *kref) |
627 | |
628 | drm_mode_object_unregister(blob->dev, &blob->base); |
629 | |
630 | - kfree(blob); |
631 | + kvfree(blob); |
632 | } |
633 | |
634 | /** |
635 | @@ -560,7 +560,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, |
636 | if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob)) |
637 | return ERR_PTR(-EINVAL); |
638 | |
639 | - blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); |
640 | + blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); |
641 | if (!blob) |
642 | return ERR_PTR(-ENOMEM); |
643 | |
644 | @@ -577,7 +577,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, |
645 | ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB, |
646 | true, drm_property_free_blob); |
647 | if (ret) { |
648 | - kfree(blob); |
649 | + kvfree(blob); |
650 | return ERR_PTR(-EINVAL); |
651 | } |
652 | |
653 | diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c |
654 | index 2ebdc6d5a76e..d5583190f3e4 100644 |
655 | --- a/drivers/gpu/drm/udl/udl_fb.c |
656 | +++ b/drivers/gpu/drm/udl/udl_fb.c |
657 | @@ -137,7 +137,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, |
658 | |
659 | if (cmd > (char *) urb->transfer_buffer) { |
660 | /* Send partial buffer remaining before exiting */ |
661 | - int len = cmd - (char *) urb->transfer_buffer; |
662 | + int len; |
663 | + if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length) |
664 | + *cmd++ = 0xAF; |
665 | + len = cmd - (char *) urb->transfer_buffer; |
666 | ret = udl_submit_urb(dev, urb, len); |
667 | bytes_sent += len; |
668 | } else |
669 | diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c |
670 | index 0c87b1ac6b68..b992644c17e6 100644 |
671 | --- a/drivers/gpu/drm/udl/udl_transfer.c |
672 | +++ b/drivers/gpu/drm/udl/udl_transfer.c |
673 | @@ -153,11 +153,11 @@ static void udl_compress_hline16( |
674 | raw_pixels_count_byte = cmd++; /* we'll know this later */ |
675 | raw_pixel_start = pixel; |
676 | |
677 | - cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1, |
678 | - min((int)(pixel_end - pixel) / bpp, |
679 | - (int)(cmd_buffer_end - cmd) / 2))) * bpp; |
680 | + cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL, |
681 | + (unsigned long)(pixel_end - pixel) / bpp, |
682 | + (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp; |
683 | |
684 | - prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp); |
685 | + prefetch_range((void *) pixel, cmd_pixel_end - pixel); |
686 | pixel_val16 = get_pixel_val16(pixel, bpp); |
687 | |
688 | while (pixel < cmd_pixel_end) { |
689 | @@ -193,6 +193,9 @@ static void udl_compress_hline16( |
690 | if (pixel > raw_pixel_start) { |
691 | /* finalize last RAW span */ |
692 | *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF; |
693 | + } else { |
694 | + /* undo unused byte */ |
695 | + cmd--; |
696 | } |
697 | |
698 | *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF; |
699 | diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c |
700 | index 5d7cc6bbbac6..c1ce4baeeaca 100644 |
701 | --- a/drivers/hid/hid-core.c |
702 | +++ b/drivers/hid/hid-core.c |
703 | @@ -1942,6 +1942,8 @@ static int hid_device_probe(struct device *dev) |
704 | } |
705 | hdev->io_started = false; |
706 | |
707 | + clear_bit(ffs(HID_STAT_REPROBED), &hdev->status); |
708 | + |
709 | if (!hdev->driver) { |
710 | id = hid_match_device(hdev, hdrv); |
711 | if (id == NULL) { |
712 | @@ -2205,7 +2207,8 @@ static int __hid_bus_reprobe_drivers(struct device *dev, void *data) |
713 | struct hid_device *hdev = to_hid_device(dev); |
714 | |
715 | if (hdev->driver == hdrv && |
716 | - !hdrv->match(hdev, hid_ignore_special_drivers)) |
717 | + !hdrv->match(hdev, hid_ignore_special_drivers) && |
718 | + !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status)) |
719 | return device_reprobe(dev); |
720 | |
721 | return 0; |
722 | diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c |
723 | index 4f4e7a08a07b..4db8e140f709 100644 |
724 | --- a/drivers/hid/hid-debug.c |
725 | +++ b/drivers/hid/hid-debug.c |
726 | @@ -1154,6 +1154,8 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, |
727 | goto out; |
728 | if (list->tail > list->head) { |
729 | len = list->tail - list->head; |
730 | + if (len > count) |
731 | + len = count; |
732 | |
733 | if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) { |
734 | ret = -EFAULT; |
735 | @@ -1163,6 +1165,8 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, |
736 | list->head += len; |
737 | } else { |
738 | len = HID_DEBUG_BUFSIZE - list->head; |
739 | + if (len > count) |
740 | + len = count; |
741 | |
742 | if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) { |
743 | ret = -EFAULT; |
744 | @@ -1170,7 +1174,9 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, |
745 | } |
746 | list->head = 0; |
747 | ret += len; |
748 | - goto copy_rest; |
749 | + count -= len; |
750 | + if (count > 0) |
751 | + goto copy_rest; |
752 | } |
753 | |
754 | } |
755 | diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c |
756 | index cc33622253aa..a92377285034 100644 |
757 | --- a/drivers/hid/i2c-hid/i2c-hid.c |
758 | +++ b/drivers/hid/i2c-hid/i2c-hid.c |
759 | @@ -486,7 +486,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid) |
760 | return; |
761 | } |
762 | |
763 | - if ((ret_size > size) || (ret_size <= 2)) { |
764 | + if ((ret_size > size) || (ret_size < 2)) { |
765 | dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", |
766 | __func__, size, ret_size); |
767 | return; |
768 | diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c |
769 | index e3ce233f8bdc..23872d08308c 100644 |
770 | --- a/drivers/hid/usbhid/hiddev.c |
771 | +++ b/drivers/hid/usbhid/hiddev.c |
772 | @@ -36,6 +36,7 @@ |
773 | #include <linux/hiddev.h> |
774 | #include <linux/compat.h> |
775 | #include <linux/vmalloc.h> |
776 | +#include <linux/nospec.h> |
777 | #include "usbhid.h" |
778 | |
779 | #ifdef CONFIG_USB_DYNAMIC_MINORS |
780 | @@ -469,10 +470,14 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, |
781 | |
782 | if (uref->field_index >= report->maxfield) |
783 | goto inval; |
784 | + uref->field_index = array_index_nospec(uref->field_index, |
785 | + report->maxfield); |
786 | |
787 | field = report->field[uref->field_index]; |
788 | if (uref->usage_index >= field->maxusage) |
789 | goto inval; |
790 | + uref->usage_index = array_index_nospec(uref->usage_index, |
791 | + field->maxusage); |
792 | |
793 | uref->usage_code = field->usage[uref->usage_index].hid; |
794 | |
795 | @@ -499,6 +504,8 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, |
796 | |
797 | if (uref->field_index >= report->maxfield) |
798 | goto inval; |
799 | + uref->field_index = array_index_nospec(uref->field_index, |
800 | + report->maxfield); |
801 | |
802 | field = report->field[uref->field_index]; |
803 | |
804 | @@ -753,6 +760,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
805 | |
806 | if (finfo.field_index >= report->maxfield) |
807 | break; |
808 | + finfo.field_index = array_index_nospec(finfo.field_index, |
809 | + report->maxfield); |
810 | |
811 | field = report->field[finfo.field_index]; |
812 | memset(&finfo, 0, sizeof(finfo)); |
813 | @@ -797,6 +806,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
814 | |
815 | if (cinfo.index >= hid->maxcollection) |
816 | break; |
817 | + cinfo.index = array_index_nospec(cinfo.index, |
818 | + hid->maxcollection); |
819 | |
820 | cinfo.type = hid->collection[cinfo.index].type; |
821 | cinfo.usage = hid->collection[cinfo.index].usage; |
822 | diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c |
823 | index b5aec33002c3..51970bae3c4a 100644 |
824 | --- a/drivers/i2c/i2c-core-smbus.c |
825 | +++ b/drivers/i2c/i2c-core-smbus.c |
826 | @@ -465,13 +465,18 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, |
827 | |
828 | status = i2c_transfer(adapter, msg, num); |
829 | if (status < 0) |
830 | - return status; |
831 | + goto cleanup; |
832 | + if (status != num) { |
833 | + status = -EIO; |
834 | + goto cleanup; |
835 | + } |
836 | + status = 0; |
837 | |
838 | /* Check PEC if last message is a read */ |
839 | if (i && (msg[num-1].flags & I2C_M_RD)) { |
840 | status = i2c_smbus_check_pec(partial_pec, &msg[num-1]); |
841 | if (status < 0) |
842 | - return status; |
843 | + goto cleanup; |
844 | } |
845 | |
846 | if (read_write == I2C_SMBUS_READ) |
847 | @@ -497,12 +502,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, |
848 | break; |
849 | } |
850 | |
851 | +cleanup: |
852 | if (msg[0].flags & I2C_M_DMA_SAFE) |
853 | kfree(msg[0].buf); |
854 | if (msg[1].flags & I2C_M_DMA_SAFE) |
855 | kfree(msg[1].buf); |
856 | |
857 | - return 0; |
858 | + return status; |
859 | } |
860 | |
861 | /** |
862 | diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c |
863 | index 0589a4da12bb..7c8e5878446a 100644 |
864 | --- a/drivers/md/dm-table.c |
865 | +++ b/drivers/md/dm-table.c |
866 | @@ -885,9 +885,7 @@ EXPORT_SYMBOL_GPL(dm_table_set_type); |
867 | static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, |
868 | sector_t start, sector_t len, void *data) |
869 | { |
870 | - struct request_queue *q = bdev_get_queue(dev->bdev); |
871 | - |
872 | - return q && blk_queue_dax(q); |
873 | + return bdev_dax_supported(dev->bdev, PAGE_SIZE); |
874 | } |
875 | |
876 | static bool dm_table_supports_dax(struct dm_table *t) |
877 | @@ -1907,6 +1905,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, |
878 | |
879 | if (dm_table_supports_dax(t)) |
880 | blk_queue_flag_set(QUEUE_FLAG_DAX, q); |
881 | + else |
882 | + blk_queue_flag_clear(QUEUE_FLAG_DAX, q); |
883 | + |
884 | if (dm_table_supports_dax_write_cache(t)) |
885 | dax_write_cache(t->md->dax_dev, true); |
886 | |
887 | diff --git a/drivers/md/dm.c b/drivers/md/dm.c |
888 | index cabae3e280c2..78173e137176 100644 |
889 | --- a/drivers/md/dm.c |
890 | +++ b/drivers/md/dm.c |
891 | @@ -1056,8 +1056,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, |
892 | if (len < 1) |
893 | goto out; |
894 | nr_pages = min(len, nr_pages); |
895 | - if (ti->type->direct_access) |
896 | - ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); |
897 | + ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); |
898 | |
899 | out: |
900 | dm_put_live_table(md, srcu_idx); |
901 | diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c |
902 | index 3a8a88fa06aa..a863ae4e8538 100644 |
903 | --- a/drivers/mtd/chips/cfi_cmdset_0002.c |
904 | +++ b/drivers/mtd/chips/cfi_cmdset_0002.c |
905 | @@ -42,7 +42,7 @@ |
906 | #define AMD_BOOTLOC_BUG |
907 | #define FORCE_WORD_WRITE 0 |
908 | |
909 | -#define MAX_WORD_RETRIES 3 |
910 | +#define MAX_RETRIES 3 |
911 | |
912 | #define SST49LF004B 0x0060 |
913 | #define SST49LF040B 0x0050 |
914 | @@ -1647,7 +1647,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, |
915 | map_write( map, CMD(0xF0), chip->start ); |
916 | /* FIXME - should have reset delay before continuing */ |
917 | |
918 | - if (++retry_cnt <= MAX_WORD_RETRIES) |
919 | + if (++retry_cnt <= MAX_RETRIES) |
920 | goto retry; |
921 | |
922 | ret = -EIO; |
923 | @@ -2106,7 +2106,7 @@ static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, |
924 | map_write(map, CMD(0xF0), chip->start); |
925 | /* FIXME - should have reset delay before continuing */ |
926 | |
927 | - if (++retry_cnt <= MAX_WORD_RETRIES) |
928 | + if (++retry_cnt <= MAX_RETRIES) |
929 | goto retry; |
930 | |
931 | ret = -EIO; |
932 | @@ -2241,6 +2241,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) |
933 | unsigned long int adr; |
934 | DECLARE_WAITQUEUE(wait, current); |
935 | int ret = 0; |
936 | + int retry_cnt = 0; |
937 | |
938 | adr = cfi->addr_unlock1; |
939 | |
940 | @@ -2258,6 +2259,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) |
941 | ENABLE_VPP(map); |
942 | xip_disable(map, chip, adr); |
943 | |
944 | + retry: |
945 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); |
946 | cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); |
947 | cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); |
948 | @@ -2294,12 +2296,13 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) |
949 | chip->erase_suspended = 0; |
950 | } |
951 | |
952 | - if (chip_ready(map, adr)) |
953 | + if (chip_good(map, adr, map_word_ff(map))) |
954 | break; |
955 | |
956 | if (time_after(jiffies, timeo)) { |
957 | printk(KERN_WARNING "MTD %s(): software timeout\n", |
958 | __func__ ); |
959 | + ret = -EIO; |
960 | break; |
961 | } |
962 | |
963 | @@ -2307,12 +2310,15 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) |
964 | UDELAY(map, chip, adr, 1000000/HZ); |
965 | } |
966 | /* Did we succeed? */ |
967 | - if (!chip_good(map, adr, map_word_ff(map))) { |
968 | + if (ret) { |
969 | /* reset on all failures. */ |
970 | map_write( map, CMD(0xF0), chip->start ); |
971 | /* FIXME - should have reset delay before continuing */ |
972 | |
973 | - ret = -EIO; |
974 | + if (++retry_cnt <= MAX_RETRIES) { |
975 | + ret = 0; |
976 | + goto retry; |
977 | + } |
978 | } |
979 | |
980 | chip->state = FL_READY; |
981 | @@ -2331,6 +2337,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, |
982 | unsigned long timeo = jiffies + HZ; |
983 | DECLARE_WAITQUEUE(wait, current); |
984 | int ret = 0; |
985 | + int retry_cnt = 0; |
986 | |
987 | adr += chip->start; |
988 | |
989 | @@ -2348,6 +2355,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, |
990 | ENABLE_VPP(map); |
991 | xip_disable(map, chip, adr); |
992 | |
993 | + retry: |
994 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); |
995 | cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); |
996 | cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); |
997 | @@ -2384,7 +2392,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, |
998 | chip->erase_suspended = 0; |
999 | } |
1000 | |
1001 | - if (chip_ready(map, adr)) { |
1002 | + if (chip_good(map, adr, map_word_ff(map))) { |
1003 | xip_enable(map, chip, adr); |
1004 | break; |
1005 | } |
1006 | @@ -2393,6 +2401,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, |
1007 | xip_enable(map, chip, adr); |
1008 | printk(KERN_WARNING "MTD %s(): software timeout\n", |
1009 | __func__ ); |
1010 | + ret = -EIO; |
1011 | break; |
1012 | } |
1013 | |
1014 | @@ -2400,12 +2409,15 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, |
1015 | UDELAY(map, chip, adr, 1000000/HZ); |
1016 | } |
1017 | /* Did we succeed? */ |
1018 | - if (!chip_good(map, adr, map_word_ff(map))) { |
1019 | + if (ret) { |
1020 | /* reset on all failures. */ |
1021 | map_write( map, CMD(0xF0), chip->start ); |
1022 | /* FIXME - should have reset delay before continuing */ |
1023 | |
1024 | - ret = -EIO; |
1025 | + if (++retry_cnt <= MAX_RETRIES) { |
1026 | + ret = 0; |
1027 | + goto retry; |
1028 | + } |
1029 | } |
1030 | |
1031 | chip->state = FL_READY; |
1032 | diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c |
1033 | index 1abdbf267c19..054974055ea4 100644 |
1034 | --- a/drivers/pci/pci-acpi.c |
1035 | +++ b/drivers/pci/pci-acpi.c |
1036 | @@ -598,6 +598,18 @@ static bool acpi_pci_need_resume(struct pci_dev *dev) |
1037 | { |
1038 | struct acpi_device *adev = ACPI_COMPANION(&dev->dev); |
1039 | |
1040 | + /* |
1041 | + * In some cases (eg. Samsung 305V4A) leaving a bridge in suspend over |
1042 | + * system-wide suspend/resume confuses the platform firmware, so avoid |
1043 | + * doing that, unless the bridge has a driver that should take care of |
1044 | + * the PM handling. According to Section 16.1.6 of ACPI 6.2, endpoint |
1045 | + * devices are expected to be in D3 before invoking the S3 entry path |
1046 | + * from the firmware, so they should not be affected by this issue. |
1047 | + */ |
1048 | + if (pci_is_bridge(dev) && !dev->driver && |
1049 | + acpi_target_system_state() != ACPI_STATE_S0) |
1050 | + return true; |
1051 | + |
1052 | if (!adev || !acpi_device_power_manageable(adev)) |
1053 | return false; |
1054 | |
1055 | diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c |
1056 | index e7961cbd2c55..1d20aad3aa92 100644 |
1057 | --- a/drivers/scsi/aacraid/aachba.c |
1058 | +++ b/drivers/scsi/aacraid/aachba.c |
1059 | @@ -1974,7 +1974,6 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev) |
1060 | u32 lun_count, nexus; |
1061 | u32 i, bus, target; |
1062 | u8 expose_flag, attribs; |
1063 | - u8 devtype; |
1064 | |
1065 | lun_count = aac_get_safw_phys_lun_count(dev); |
1066 | |
1067 | @@ -1992,23 +1991,23 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev) |
1068 | continue; |
1069 | |
1070 | if (expose_flag != 0) { |
1071 | - devtype = AAC_DEVTYPE_RAID_MEMBER; |
1072 | - goto update_devtype; |
1073 | + dev->hba_map[bus][target].devtype = |
1074 | + AAC_DEVTYPE_RAID_MEMBER; |
1075 | + continue; |
1076 | } |
1077 | |
1078 | if (nexus != 0 && (attribs & 8)) { |
1079 | - devtype = AAC_DEVTYPE_NATIVE_RAW; |
1080 | + dev->hba_map[bus][target].devtype = |
1081 | + AAC_DEVTYPE_NATIVE_RAW; |
1082 | dev->hba_map[bus][target].rmw_nexus = |
1083 | nexus; |
1084 | } else |
1085 | - devtype = AAC_DEVTYPE_ARC_RAW; |
1086 | + dev->hba_map[bus][target].devtype = |
1087 | + AAC_DEVTYPE_ARC_RAW; |
1088 | |
1089 | dev->hba_map[bus][target].scan_counter = dev->scan_counter; |
1090 | |
1091 | aac_set_safw_target_qd(dev, bus, target); |
1092 | - |
1093 | -update_devtype: |
1094 | - dev->hba_map[bus][target].devtype = devtype; |
1095 | } |
1096 | } |
1097 | |
1098 | diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c |
1099 | index 5c40d809830f..ecc87a53294f 100644 |
1100 | --- a/drivers/scsi/sg.c |
1101 | +++ b/drivers/scsi/sg.c |
1102 | @@ -51,6 +51,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */ |
1103 | #include <linux/atomic.h> |
1104 | #include <linux/ratelimit.h> |
1105 | #include <linux/uio.h> |
1106 | +#include <linux/cred.h> /* for sg_check_file_access() */ |
1107 | |
1108 | #include "scsi.h" |
1109 | #include <scsi/scsi_dbg.h> |
1110 | @@ -210,6 +211,33 @@ static void sg_device_destroy(struct kref *kref); |
1111 | sdev_prefix_printk(prefix, (sdp)->device, \ |
1112 | (sdp)->disk->disk_name, fmt, ##a) |
1113 | |
1114 | +/* |
1115 | + * The SCSI interfaces that use read() and write() as an asynchronous variant of |
1116 | + * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways |
1117 | + * to trigger read() and write() calls from various contexts with elevated |
1118 | + * privileges. This can lead to kernel memory corruption (e.g. if these |
1119 | + * interfaces are called through splice()) and privilege escalation inside |
1120 | + * userspace (e.g. if a process with access to such a device passes a file |
1121 | + * descriptor to a SUID binary as stdin/stdout/stderr). |
1122 | + * |
1123 | + * This function provides protection for the legacy API by restricting the |
1124 | + * calling context. |
1125 | + */ |
1126 | +static int sg_check_file_access(struct file *filp, const char *caller) |
1127 | +{ |
1128 | + if (filp->f_cred != current_real_cred()) { |
1129 | + pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", |
1130 | + caller, task_tgid_vnr(current), current->comm); |
1131 | + return -EPERM; |
1132 | + } |
1133 | + if (uaccess_kernel()) { |
1134 | + pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n", |
1135 | + caller, task_tgid_vnr(current), current->comm); |
1136 | + return -EACCES; |
1137 | + } |
1138 | + return 0; |
1139 | +} |
1140 | + |
1141 | static int sg_allow_access(struct file *filp, unsigned char *cmd) |
1142 | { |
1143 | struct sg_fd *sfp = filp->private_data; |
1144 | @@ -394,6 +422,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) |
1145 | struct sg_header *old_hdr = NULL; |
1146 | int retval = 0; |
1147 | |
1148 | + /* |
1149 | + * This could cause a response to be stranded. Close the associated |
1150 | + * file descriptor to free up any resources being held. |
1151 | + */ |
1152 | + retval = sg_check_file_access(filp, __func__); |
1153 | + if (retval) |
1154 | + return retval; |
1155 | + |
1156 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) |
1157 | return -ENXIO; |
1158 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, |
1159 | @@ -581,9 +617,11 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) |
1160 | struct sg_header old_hdr; |
1161 | sg_io_hdr_t *hp; |
1162 | unsigned char cmnd[SG_MAX_CDB_SIZE]; |
1163 | + int retval; |
1164 | |
1165 | - if (unlikely(uaccess_kernel())) |
1166 | - return -EINVAL; |
1167 | + retval = sg_check_file_access(filp, __func__); |
1168 | + if (retval) |
1169 | + return retval; |
1170 | |
1171 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) |
1172 | return -ENXIO; |
1173 | diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c |
1174 | index ea194aa01a64..257b0daff01f 100644 |
1175 | --- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c |
1176 | +++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c |
1177 | @@ -642,7 +642,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev, |
1178 | /* Make sure D/A update mode is direct update */ |
1179 | outb(0, dev->iobase + DAQP_AUX_REG); |
1180 | |
1181 | - for (i = 0; i > insn->n; i++) { |
1182 | + for (i = 0; i < insn->n; i++) { |
1183 | unsigned int val = data[i]; |
1184 | int ret; |
1185 | |
1186 | diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c |
1187 | index 01ac306131c1..10db5656fd5d 100644 |
1188 | --- a/drivers/target/target_core_pr.c |
1189 | +++ b/drivers/target/target_core_pr.c |
1190 | @@ -3727,11 +3727,16 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd) |
1191 | * Check for overflow of 8byte PRI READ_KEYS payload and |
1192 | * next reservation key list descriptor. |
1193 | */ |
1194 | - if ((add_len + 8) > (cmd->data_length - 8)) |
1195 | - break; |
1196 | - |
1197 | - put_unaligned_be64(pr_reg->pr_res_key, &buf[off]); |
1198 | - off += 8; |
1199 | + if (off + 8 <= cmd->data_length) { |
1200 | + put_unaligned_be64(pr_reg->pr_res_key, &buf[off]); |
1201 | + off += 8; |
1202 | + } |
1203 | + /* |
1204 | + * SPC5r17: 6.16.2 READ KEYS service action |
1205 | + * The ADDITIONAL LENGTH field indicates the number of bytes in |
1206 | + * the Reservation key list. The contents of the ADDITIONAL |
1207 | + * LENGTH field are not altered based on the allocation length |
1208 | + */ |
1209 | add_len += 8; |
1210 | } |
1211 | spin_unlock(&dev->t10_pr.registration_lock); |
1212 | diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c |
1213 | index 3c082451ab1a..0586ad5eb590 100644 |
1214 | --- a/drivers/vfio/vfio_iommu_type1.c |
1215 | +++ b/drivers/vfio/vfio_iommu_type1.c |
1216 | @@ -346,18 +346,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, |
1217 | struct page *page[1]; |
1218 | struct vm_area_struct *vma; |
1219 | struct vm_area_struct *vmas[1]; |
1220 | + unsigned int flags = 0; |
1221 | int ret; |
1222 | |
1223 | + if (prot & IOMMU_WRITE) |
1224 | + flags |= FOLL_WRITE; |
1225 | + |
1226 | + down_read(&mm->mmap_sem); |
1227 | if (mm == current->mm) { |
1228 | - ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE), |
1229 | - page, vmas); |
1230 | + ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas); |
1231 | } else { |
1232 | - unsigned int flags = 0; |
1233 | - |
1234 | - if (prot & IOMMU_WRITE) |
1235 | - flags |= FOLL_WRITE; |
1236 | - |
1237 | - down_read(&mm->mmap_sem); |
1238 | ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page, |
1239 | vmas, NULL); |
1240 | /* |
1241 | @@ -371,8 +369,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, |
1242 | ret = -EOPNOTSUPP; |
1243 | put_page(page[0]); |
1244 | } |
1245 | - up_read(&mm->mmap_sem); |
1246 | } |
1247 | + up_read(&mm->mmap_sem); |
1248 | |
1249 | if (ret == 1) { |
1250 | *pfn = page_to_pfn(page[0]); |
1251 | diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h |
1252 | index cb950a5fa078..c7ee09d9a236 100644 |
1253 | --- a/fs/cifs/cifsglob.h |
1254 | +++ b/fs/cifs/cifsglob.h |
1255 | @@ -1362,6 +1362,7 @@ typedef int (mid_handle_t)(struct TCP_Server_Info *server, |
1256 | /* one of these for every pending CIFS request to the server */ |
1257 | struct mid_q_entry { |
1258 | struct list_head qhead; /* mids waiting on reply from this server */ |
1259 | + struct kref refcount; |
1260 | struct TCP_Server_Info *server; /* server corresponding to this mid */ |
1261 | __u64 mid; /* multiplex id */ |
1262 | __u32 pid; /* process id */ |
1263 | diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h |
1264 | index 365a414a75e9..c4e5c69810f9 100644 |
1265 | --- a/fs/cifs/cifsproto.h |
1266 | +++ b/fs/cifs/cifsproto.h |
1267 | @@ -76,6 +76,7 @@ extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer, |
1268 | struct TCP_Server_Info *server); |
1269 | extern void DeleteMidQEntry(struct mid_q_entry *midEntry); |
1270 | extern void cifs_delete_mid(struct mid_q_entry *mid); |
1271 | +extern void cifs_mid_q_entry_release(struct mid_q_entry *midEntry); |
1272 | extern void cifs_wake_up_task(struct mid_q_entry *mid); |
1273 | extern int cifs_handle_standard(struct TCP_Server_Info *server, |
1274 | struct mid_q_entry *mid); |
1275 | diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c |
1276 | index 1529a088383d..9540699ce85a 100644 |
1277 | --- a/fs/cifs/cifssmb.c |
1278 | +++ b/fs/cifs/cifssmb.c |
1279 | @@ -151,8 +151,14 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) |
1280 | * greater than cifs socket timeout which is 7 seconds |
1281 | */ |
1282 | while (server->tcpStatus == CifsNeedReconnect) { |
1283 | - wait_event_interruptible_timeout(server->response_q, |
1284 | - (server->tcpStatus != CifsNeedReconnect), 10 * HZ); |
1285 | + rc = wait_event_interruptible_timeout(server->response_q, |
1286 | + (server->tcpStatus != CifsNeedReconnect), |
1287 | + 10 * HZ); |
1288 | + if (rc < 0) { |
1289 | + cifs_dbg(FYI, "%s: aborting reconnect due to a received" |
1290 | + " signal by the process\n", __func__); |
1291 | + return -ERESTARTSYS; |
1292 | + } |
1293 | |
1294 | /* are we still trying to reconnect? */ |
1295 | if (server->tcpStatus != CifsNeedReconnect) |
1296 | diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c |
1297 | index 7a10a5d0731f..5e1c09a3e0ea 100644 |
1298 | --- a/fs/cifs/connect.c |
1299 | +++ b/fs/cifs/connect.c |
1300 | @@ -906,6 +906,7 @@ cifs_demultiplex_thread(void *p) |
1301 | continue; |
1302 | server->total_read += length; |
1303 | |
1304 | + mid_entry = NULL; |
1305 | if (server->ops->is_transform_hdr && |
1306 | server->ops->receive_transform && |
1307 | server->ops->is_transform_hdr(buf)) { |
1308 | @@ -920,8 +921,11 @@ cifs_demultiplex_thread(void *p) |
1309 | length = mid_entry->receive(server, mid_entry); |
1310 | } |
1311 | |
1312 | - if (length < 0) |
1313 | + if (length < 0) { |
1314 | + if (mid_entry) |
1315 | + cifs_mid_q_entry_release(mid_entry); |
1316 | continue; |
1317 | + } |
1318 | |
1319 | if (server->large_buf) |
1320 | buf = server->bigbuf; |
1321 | @@ -938,6 +942,8 @@ cifs_demultiplex_thread(void *p) |
1322 | |
1323 | if (!mid_entry->multiRsp || mid_entry->multiEnd) |
1324 | mid_entry->callback(mid_entry); |
1325 | + |
1326 | + cifs_mid_q_entry_release(mid_entry); |
1327 | } else if (server->ops->is_oplock_break && |
1328 | server->ops->is_oplock_break(buf, server)) { |
1329 | cifs_dbg(FYI, "Received oplock break\n"); |
1330 | diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c |
1331 | index aff8ce8ba34d..646dcd149de1 100644 |
1332 | --- a/fs/cifs/smb1ops.c |
1333 | +++ b/fs/cifs/smb1ops.c |
1334 | @@ -107,6 +107,7 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer) |
1335 | if (compare_mid(mid->mid, buf) && |
1336 | mid->mid_state == MID_REQUEST_SUBMITTED && |
1337 | le16_to_cpu(mid->command) == buf->Command) { |
1338 | + kref_get(&mid->refcount); |
1339 | spin_unlock(&GlobalMid_Lock); |
1340 | return mid; |
1341 | } |
1342 | diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c |
1343 | index 4ee32488ff74..824ec1742557 100644 |
1344 | --- a/fs/cifs/smb2ops.c |
1345 | +++ b/fs/cifs/smb2ops.c |
1346 | @@ -203,6 +203,7 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf) |
1347 | if ((mid->mid == wire_mid) && |
1348 | (mid->mid_state == MID_REQUEST_SUBMITTED) && |
1349 | (mid->command == shdr->Command)) { |
1350 | + kref_get(&mid->refcount); |
1351 | spin_unlock(&GlobalMid_Lock); |
1352 | return mid; |
1353 | } |
1354 | @@ -654,6 +655,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, |
1355 | |
1356 | rc = SMB2_set_ea(xid, tcon, fid.persistent_fid, fid.volatile_fid, ea, |
1357 | len); |
1358 | + kfree(ea); |
1359 | + |
1360 | SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); |
1361 | |
1362 | return rc; |
1363 | diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c |
1364 | index 32d7fd830aae..71013c5268b9 100644 |
1365 | --- a/fs/cifs/smb2pdu.c |
1366 | +++ b/fs/cifs/smb2pdu.c |
1367 | @@ -154,7 +154,7 @@ smb2_hdr_assemble(struct smb2_sync_hdr *shdr, __le16 smb2_cmd, |
1368 | static int |
1369 | smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) |
1370 | { |
1371 | - int rc = 0; |
1372 | + int rc; |
1373 | struct nls_table *nls_codepage; |
1374 | struct cifs_ses *ses; |
1375 | struct TCP_Server_Info *server; |
1376 | @@ -165,10 +165,10 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) |
1377 | * for those three - in the calling routine. |
1378 | */ |
1379 | if (tcon == NULL) |
1380 | - return rc; |
1381 | + return 0; |
1382 | |
1383 | if (smb2_command == SMB2_TREE_CONNECT) |
1384 | - return rc; |
1385 | + return 0; |
1386 | |
1387 | if (tcon->tidStatus == CifsExiting) { |
1388 | /* |
1389 | @@ -211,8 +211,14 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) |
1390 | return -EAGAIN; |
1391 | } |
1392 | |
1393 | - wait_event_interruptible_timeout(server->response_q, |
1394 | - (server->tcpStatus != CifsNeedReconnect), 10 * HZ); |
1395 | + rc = wait_event_interruptible_timeout(server->response_q, |
1396 | + (server->tcpStatus != CifsNeedReconnect), |
1397 | + 10 * HZ); |
1398 | + if (rc < 0) { |
1399 | + cifs_dbg(FYI, "%s: aborting reconnect due to a received" |
1400 | + " signal by the process\n", __func__); |
1401 | + return -ERESTARTSYS; |
1402 | + } |
1403 | |
1404 | /* are we still trying to reconnect? */ |
1405 | if (server->tcpStatus != CifsNeedReconnect) |
1406 | @@ -230,7 +236,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) |
1407 | } |
1408 | |
1409 | if (!tcon->ses->need_reconnect && !tcon->need_reconnect) |
1410 | - return rc; |
1411 | + return 0; |
1412 | |
1413 | nls_codepage = load_nls_default(); |
1414 | |
1415 | @@ -339,7 +345,10 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, |
1416 | return rc; |
1417 | |
1418 | /* BB eventually switch this to SMB2 specific small buf size */ |
1419 | - *request_buf = cifs_small_buf_get(); |
1420 | + if (smb2_command == SMB2_SET_INFO) |
1421 | + *request_buf = cifs_buf_get(); |
1422 | + else |
1423 | + *request_buf = cifs_small_buf_get(); |
1424 | if (*request_buf == NULL) { |
1425 | /* BB should we add a retry in here if not a writepage? */ |
1426 | return -ENOMEM; |
1427 | @@ -3363,7 +3372,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon, |
1428 | |
1429 | rc = smb2_send_recv(xid, ses, iov, num, &resp_buftype, flags, |
1430 | &rsp_iov); |
1431 | - cifs_small_buf_release(req); |
1432 | + cifs_buf_release(req); |
1433 | rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base; |
1434 | |
1435 | if (rc != 0) |
1436 | diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c |
1437 | index 8806f3f76c1d..97f24d82ae6b 100644 |
1438 | --- a/fs/cifs/smb2transport.c |
1439 | +++ b/fs/cifs/smb2transport.c |
1440 | @@ -548,6 +548,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr, |
1441 | |
1442 | temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); |
1443 | memset(temp, 0, sizeof(struct mid_q_entry)); |
1444 | + kref_init(&temp->refcount); |
1445 | temp->mid = le64_to_cpu(shdr->MessageId); |
1446 | temp->pid = current->pid; |
1447 | temp->command = shdr->Command; /* Always LE */ |
1448 | diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c |
1449 | index 927226a2122f..60faf2fcec7f 100644 |
1450 | --- a/fs/cifs/transport.c |
1451 | +++ b/fs/cifs/transport.c |
1452 | @@ -61,6 +61,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) |
1453 | |
1454 | temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); |
1455 | memset(temp, 0, sizeof(struct mid_q_entry)); |
1456 | + kref_init(&temp->refcount); |
1457 | temp->mid = get_mid(smb_buffer); |
1458 | temp->pid = current->pid; |
1459 | temp->command = cpu_to_le16(smb_buffer->Command); |
1460 | @@ -82,6 +83,21 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) |
1461 | return temp; |
1462 | } |
1463 | |
1464 | +static void _cifs_mid_q_entry_release(struct kref *refcount) |
1465 | +{ |
1466 | + struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry, |
1467 | + refcount); |
1468 | + |
1469 | + mempool_free(mid, cifs_mid_poolp); |
1470 | +} |
1471 | + |
1472 | +void cifs_mid_q_entry_release(struct mid_q_entry *midEntry) |
1473 | +{ |
1474 | + spin_lock(&GlobalMid_Lock); |
1475 | + kref_put(&midEntry->refcount, _cifs_mid_q_entry_release); |
1476 | + spin_unlock(&GlobalMid_Lock); |
1477 | +} |
1478 | + |
1479 | void |
1480 | DeleteMidQEntry(struct mid_q_entry *midEntry) |
1481 | { |
1482 | @@ -110,7 +126,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry) |
1483 | } |
1484 | } |
1485 | #endif |
1486 | - mempool_free(midEntry, cifs_mid_poolp); |
1487 | + cifs_mid_q_entry_release(midEntry); |
1488 | } |
1489 | |
1490 | void |
1491 | diff --git a/fs/ext2/super.c b/fs/ext2/super.c |
1492 | index de1694512f1f..c09289a42dc5 100644 |
1493 | --- a/fs/ext2/super.c |
1494 | +++ b/fs/ext2/super.c |
1495 | @@ -961,8 +961,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) |
1496 | blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); |
1497 | |
1498 | if (sbi->s_mount_opt & EXT2_MOUNT_DAX) { |
1499 | - err = bdev_dax_supported(sb, blocksize); |
1500 | - if (err) { |
1501 | + if (!bdev_dax_supported(sb->s_bdev, blocksize)) { |
1502 | ext2_msg(sb, KERN_ERR, |
1503 | "DAX unsupported by block device. Turning off DAX."); |
1504 | sbi->s_mount_opt &= ~EXT2_MOUNT_DAX; |
1505 | diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c |
1506 | index 508b905d744d..f8b5635f0396 100644 |
1507 | --- a/fs/ext4/balloc.c |
1508 | +++ b/fs/ext4/balloc.c |
1509 | @@ -184,7 +184,6 @@ static int ext4_init_block_bitmap(struct super_block *sb, |
1510 | unsigned int bit, bit_max; |
1511 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
1512 | ext4_fsblk_t start, tmp; |
1513 | - int flex_bg = 0; |
1514 | struct ext4_group_info *grp; |
1515 | |
1516 | J_ASSERT_BH(bh, buffer_locked(bh)); |
1517 | @@ -217,22 +216,19 @@ static int ext4_init_block_bitmap(struct super_block *sb, |
1518 | |
1519 | start = ext4_group_first_block_no(sb, block_group); |
1520 | |
1521 | - if (ext4_has_feature_flex_bg(sb)) |
1522 | - flex_bg = 1; |
1523 | - |
1524 | /* Set bits for block and inode bitmaps, and inode table */ |
1525 | tmp = ext4_block_bitmap(sb, gdp); |
1526 | - if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) |
1527 | + if (ext4_block_in_group(sb, tmp, block_group)) |
1528 | ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); |
1529 | |
1530 | tmp = ext4_inode_bitmap(sb, gdp); |
1531 | - if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) |
1532 | + if (ext4_block_in_group(sb, tmp, block_group)) |
1533 | ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); |
1534 | |
1535 | tmp = ext4_inode_table(sb, gdp); |
1536 | for (; tmp < ext4_inode_table(sb, gdp) + |
1537 | sbi->s_itb_per_group; tmp++) { |
1538 | - if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) |
1539 | + if (ext4_block_in_group(sb, tmp, block_group)) |
1540 | ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); |
1541 | } |
1542 | |
1543 | @@ -455,7 +451,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) |
1544 | goto verify; |
1545 | } |
1546 | ext4_lock_group(sb, block_group); |
1547 | - if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { |
1548 | + if (ext4_has_group_desc_csum(sb) && |
1549 | + (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { |
1550 | + if (block_group == 0) { |
1551 | + ext4_unlock_group(sb, block_group); |
1552 | + unlock_buffer(bh); |
1553 | + ext4_error(sb, "Block bitmap for bg 0 marked " |
1554 | + "uninitialized"); |
1555 | + err = -EFSCORRUPTED; |
1556 | + goto out; |
1557 | + } |
1558 | err = ext4_init_block_bitmap(sb, bh, block_group, desc); |
1559 | set_bitmap_uptodate(bh); |
1560 | set_buffer_uptodate(bh); |
1561 | diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h |
1562 | index a42e71203e53..51fcfdefc3a6 100644 |
1563 | --- a/fs/ext4/ext4.h |
1564 | +++ b/fs/ext4/ext4.h |
1565 | @@ -1501,11 +1501,6 @@ static inline struct ext4_inode_info *EXT4_I(struct inode *inode) |
1566 | static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) |
1567 | { |
1568 | return ino == EXT4_ROOT_INO || |
1569 | - ino == EXT4_USR_QUOTA_INO || |
1570 | - ino == EXT4_GRP_QUOTA_INO || |
1571 | - ino == EXT4_BOOT_LOADER_INO || |
1572 | - ino == EXT4_JOURNAL_INO || |
1573 | - ino == EXT4_RESIZE_INO || |
1574 | (ino >= EXT4_FIRST_INO(sb) && |
1575 | ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)); |
1576 | } |
1577 | @@ -3005,9 +3000,6 @@ extern int ext4_inline_data_fiemap(struct inode *inode, |
1578 | struct iomap; |
1579 | extern int ext4_inline_data_iomap(struct inode *inode, struct iomap *iomap); |
1580 | |
1581 | -extern int ext4_try_to_evict_inline_data(handle_t *handle, |
1582 | - struct inode *inode, |
1583 | - int needed); |
1584 | extern int ext4_inline_data_truncate(struct inode *inode, int *has_inline); |
1585 | |
1586 | extern int ext4_convert_inline_data(struct inode *inode); |
1587 | diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h |
1588 | index 98fb0c119c68..adf6668b596f 100644 |
1589 | --- a/fs/ext4/ext4_extents.h |
1590 | +++ b/fs/ext4/ext4_extents.h |
1591 | @@ -91,6 +91,7 @@ struct ext4_extent_header { |
1592 | }; |
1593 | |
1594 | #define EXT4_EXT_MAGIC cpu_to_le16(0xf30a) |
1595 | +#define EXT4_MAX_EXTENT_DEPTH 5 |
1596 | |
1597 | #define EXT4_EXTENT_TAIL_OFFSET(hdr) \ |
1598 | (sizeof(struct ext4_extent_header) + \ |
1599 | diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c |
1600 | index c969275ce3ee..08226f72b7ee 100644 |
1601 | --- a/fs/ext4/extents.c |
1602 | +++ b/fs/ext4/extents.c |
1603 | @@ -869,6 +869,12 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block, |
1604 | |
1605 | eh = ext_inode_hdr(inode); |
1606 | depth = ext_depth(inode); |
1607 | + if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) { |
1608 | + EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d", |
1609 | + depth); |
1610 | + ret = -EFSCORRUPTED; |
1611 | + goto err; |
1612 | + } |
1613 | |
1614 | if (path) { |
1615 | ext4_ext_drop_refs(path); |
1616 | diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c |
1617 | index df92e3ec9913..478b8f21c814 100644 |
1618 | --- a/fs/ext4/ialloc.c |
1619 | +++ b/fs/ext4/ialloc.c |
1620 | @@ -155,7 +155,16 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) |
1621 | } |
1622 | |
1623 | ext4_lock_group(sb, block_group); |
1624 | - if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { |
1625 | + if (ext4_has_group_desc_csum(sb) && |
1626 | + (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) { |
1627 | + if (block_group == 0) { |
1628 | + ext4_unlock_group(sb, block_group); |
1629 | + unlock_buffer(bh); |
1630 | + ext4_error(sb, "Inode bitmap for bg 0 marked " |
1631 | + "uninitialized"); |
1632 | + err = -EFSCORRUPTED; |
1633 | + goto out; |
1634 | + } |
1635 | memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8); |
1636 | ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), |
1637 | sb->s_blocksize * 8, bh->b_data); |
1638 | @@ -1000,7 +1009,8 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir, |
1639 | |
1640 | /* recheck and clear flag under lock if we still need to */ |
1641 | ext4_lock_group(sb, group); |
1642 | - if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { |
1643 | + if (ext4_has_group_desc_csum(sb) && |
1644 | + (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { |
1645 | gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); |
1646 | ext4_free_group_clusters_set(sb, gdp, |
1647 | ext4_free_clusters_after_init(sb, group, gdp)); |
1648 | diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c |
1649 | index 44b4fcdc3755..851bc552d849 100644 |
1650 | --- a/fs/ext4/inline.c |
1651 | +++ b/fs/ext4/inline.c |
1652 | @@ -437,6 +437,7 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle, |
1653 | |
1654 | memset((void *)ext4_raw_inode(&is.iloc)->i_block, |
1655 | 0, EXT4_MIN_INLINE_DATA_SIZE); |
1656 | + memset(ei->i_data, 0, EXT4_MIN_INLINE_DATA_SIZE); |
1657 | |
1658 | if (ext4_has_feature_extents(inode->i_sb)) { |
1659 | if (S_ISDIR(inode->i_mode) || |
1660 | @@ -886,11 +887,11 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping, |
1661 | flags |= AOP_FLAG_NOFS; |
1662 | |
1663 | if (ret == -ENOSPC) { |
1664 | + ext4_journal_stop(handle); |
1665 | ret = ext4_da_convert_inline_data_to_extent(mapping, |
1666 | inode, |
1667 | flags, |
1668 | fsdata); |
1669 | - ext4_journal_stop(handle); |
1670 | if (ret == -ENOSPC && |
1671 | ext4_should_retry_alloc(inode->i_sb, &retries)) |
1672 | goto retry_journal; |
1673 | @@ -1890,42 +1891,6 @@ int ext4_inline_data_fiemap(struct inode *inode, |
1674 | return (error < 0 ? error : 0); |
1675 | } |
1676 | |
1677 | -/* |
1678 | - * Called during xattr set, and if we can sparse space 'needed', |
1679 | - * just create the extent tree evict the data to the outer block. |
1680 | - * |
1681 | - * We use jbd2 instead of page cache to move data to the 1st block |
1682 | - * so that the whole transaction can be committed as a whole and |
1683 | - * the data isn't lost because of the delayed page cache write. |
1684 | - */ |
1685 | -int ext4_try_to_evict_inline_data(handle_t *handle, |
1686 | - struct inode *inode, |
1687 | - int needed) |
1688 | -{ |
1689 | - int error; |
1690 | - struct ext4_xattr_entry *entry; |
1691 | - struct ext4_inode *raw_inode; |
1692 | - struct ext4_iloc iloc; |
1693 | - |
1694 | - error = ext4_get_inode_loc(inode, &iloc); |
1695 | - if (error) |
1696 | - return error; |
1697 | - |
1698 | - raw_inode = ext4_raw_inode(&iloc); |
1699 | - entry = (struct ext4_xattr_entry *)((void *)raw_inode + |
1700 | - EXT4_I(inode)->i_inline_off); |
1701 | - if (EXT4_XATTR_LEN(entry->e_name_len) + |
1702 | - EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)) < needed) { |
1703 | - error = -ENOSPC; |
1704 | - goto out; |
1705 | - } |
1706 | - |
1707 | - error = ext4_convert_inline_data_nolock(handle, inode, &iloc); |
1708 | -out: |
1709 | - brelse(iloc.bh); |
1710 | - return error; |
1711 | -} |
1712 | - |
1713 | int ext4_inline_data_truncate(struct inode *inode, int *has_inline) |
1714 | { |
1715 | handle_t *handle; |
1716 | diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c |
1717 | index c73cb9346aee..06b963d2fc36 100644 |
1718 | --- a/fs/ext4/inode.c |
1719 | +++ b/fs/ext4/inode.c |
1720 | @@ -402,9 +402,9 @@ static int __check_block_validity(struct inode *inode, const char *func, |
1721 | if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, |
1722 | map->m_len)) { |
1723 | ext4_error_inode(inode, func, line, map->m_pblk, |
1724 | - "lblock %lu mapped to illegal pblock " |
1725 | + "lblock %lu mapped to illegal pblock %llu " |
1726 | "(length %d)", (unsigned long) map->m_lblk, |
1727 | - map->m_len); |
1728 | + map->m_pblk, map->m_len); |
1729 | return -EFSCORRUPTED; |
1730 | } |
1731 | return 0; |
1732 | @@ -4506,7 +4506,8 @@ static int __ext4_get_inode_loc(struct inode *inode, |
1733 | int inodes_per_block, inode_offset; |
1734 | |
1735 | iloc->bh = NULL; |
1736 | - if (!ext4_valid_inum(sb, inode->i_ino)) |
1737 | + if (inode->i_ino < EXT4_ROOT_INO || |
1738 | + inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)) |
1739 | return -EFSCORRUPTED; |
1740 | |
1741 | iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); |
1742 | diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c |
1743 | index 769a62708b1c..39187e7b3748 100644 |
1744 | --- a/fs/ext4/mballoc.c |
1745 | +++ b/fs/ext4/mballoc.c |
1746 | @@ -2444,7 +2444,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, |
1747 | * initialize bb_free to be able to skip |
1748 | * empty groups without initialization |
1749 | */ |
1750 | - if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { |
1751 | + if (ext4_has_group_desc_csum(sb) && |
1752 | + (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { |
1753 | meta_group_info[i]->bb_free = |
1754 | ext4_free_clusters_after_init(sb, group, desc); |
1755 | } else { |
1756 | @@ -3011,7 +3012,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, |
1757 | #endif |
1758 | ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, |
1759 | ac->ac_b_ex.fe_len); |
1760 | - if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { |
1761 | + if (ext4_has_group_desc_csum(sb) && |
1762 | + (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { |
1763 | gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); |
1764 | ext4_free_group_clusters_set(sb, gdp, |
1765 | ext4_free_clusters_after_init(sb, |
1766 | diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
1767 | index eb104e8476f0..74a6d884ede4 100644 |
1768 | --- a/fs/ext4/super.c |
1769 | +++ b/fs/ext4/super.c |
1770 | @@ -2307,6 +2307,7 @@ static int ext4_check_descriptors(struct super_block *sb, |
1771 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
1772 | ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); |
1773 | ext4_fsblk_t last_block; |
1774 | + ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1; |
1775 | ext4_fsblk_t block_bitmap; |
1776 | ext4_fsblk_t inode_bitmap; |
1777 | ext4_fsblk_t inode_table; |
1778 | @@ -2339,6 +2340,14 @@ static int ext4_check_descriptors(struct super_block *sb, |
1779 | if (!sb_rdonly(sb)) |
1780 | return 0; |
1781 | } |
1782 | + if (block_bitmap >= sb_block + 1 && |
1783 | + block_bitmap <= last_bg_block) { |
1784 | + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " |
1785 | + "Block bitmap for group %u overlaps " |
1786 | + "block group descriptors", i); |
1787 | + if (!sb_rdonly(sb)) |
1788 | + return 0; |
1789 | + } |
1790 | if (block_bitmap < first_block || block_bitmap > last_block) { |
1791 | ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " |
1792 | "Block bitmap for group %u not in group " |
1793 | @@ -2353,6 +2362,14 @@ static int ext4_check_descriptors(struct super_block *sb, |
1794 | if (!sb_rdonly(sb)) |
1795 | return 0; |
1796 | } |
1797 | + if (inode_bitmap >= sb_block + 1 && |
1798 | + inode_bitmap <= last_bg_block) { |
1799 | + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " |
1800 | + "Inode bitmap for group %u overlaps " |
1801 | + "block group descriptors", i); |
1802 | + if (!sb_rdonly(sb)) |
1803 | + return 0; |
1804 | + } |
1805 | if (inode_bitmap < first_block || inode_bitmap > last_block) { |
1806 | ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " |
1807 | "Inode bitmap for group %u not in group " |
1808 | @@ -2367,6 +2384,14 @@ static int ext4_check_descriptors(struct super_block *sb, |
1809 | if (!sb_rdonly(sb)) |
1810 | return 0; |
1811 | } |
1812 | + if (inode_table >= sb_block + 1 && |
1813 | + inode_table <= last_bg_block) { |
1814 | + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " |
1815 | + "Inode table for group %u overlaps " |
1816 | + "block group descriptors", i); |
1817 | + if (!sb_rdonly(sb)) |
1818 | + return 0; |
1819 | + } |
1820 | if (inode_table < first_block || |
1821 | inode_table + sbi->s_itb_per_group - 1 > last_block) { |
1822 | ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " |
1823 | @@ -3073,13 +3098,22 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb) |
1824 | ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count; |
1825 | struct ext4_group_desc *gdp = NULL; |
1826 | |
1827 | + if (!ext4_has_group_desc_csum(sb)) |
1828 | + return ngroups; |
1829 | + |
1830 | for (group = 0; group < ngroups; group++) { |
1831 | gdp = ext4_get_group_desc(sb, group, NULL); |
1832 | if (!gdp) |
1833 | continue; |
1834 | |
1835 | - if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) |
1836 | + if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)) |
1837 | + continue; |
1838 | + if (group != 0) |
1839 | break; |
1840 | + ext4_error(sb, "Inode table for bg 0 marked as " |
1841 | + "needing zeroing"); |
1842 | + if (sb_rdonly(sb)) |
1843 | + return ngroups; |
1844 | } |
1845 | |
1846 | return group; |
1847 | @@ -3718,6 +3752,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
1848 | le32_to_cpu(es->s_log_block_size)); |
1849 | goto failed_mount; |
1850 | } |
1851 | + if (le32_to_cpu(es->s_log_cluster_size) > |
1852 | + (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { |
1853 | + ext4_msg(sb, KERN_ERR, |
1854 | + "Invalid log cluster size: %u", |
1855 | + le32_to_cpu(es->s_log_cluster_size)); |
1856 | + goto failed_mount; |
1857 | + } |
1858 | |
1859 | if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) { |
1860 | ext4_msg(sb, KERN_ERR, |
1861 | @@ -3732,8 +3773,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
1862 | " that may contain inline data"); |
1863 | sbi->s_mount_opt &= ~EXT4_MOUNT_DAX; |
1864 | } |
1865 | - err = bdev_dax_supported(sb, blocksize); |
1866 | - if (err) { |
1867 | + if (!bdev_dax_supported(sb->s_bdev, blocksize)) { |
1868 | ext4_msg(sb, KERN_ERR, |
1869 | "DAX unsupported by block device. Turning off DAX."); |
1870 | sbi->s_mount_opt &= ~EXT4_MOUNT_DAX; |
1871 | @@ -3783,6 +3823,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
1872 | } else { |
1873 | sbi->s_inode_size = le16_to_cpu(es->s_inode_size); |
1874 | sbi->s_first_ino = le32_to_cpu(es->s_first_ino); |
1875 | + if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) { |
1876 | + ext4_msg(sb, KERN_ERR, "invalid first ino: %u", |
1877 | + sbi->s_first_ino); |
1878 | + goto failed_mount; |
1879 | + } |
1880 | if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || |
1881 | (!is_power_of_2(sbi->s_inode_size)) || |
1882 | (sbi->s_inode_size > blocksize)) { |
1883 | @@ -3859,13 +3904,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
1884 | "block size (%d)", clustersize, blocksize); |
1885 | goto failed_mount; |
1886 | } |
1887 | - if (le32_to_cpu(es->s_log_cluster_size) > |
1888 | - (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { |
1889 | - ext4_msg(sb, KERN_ERR, |
1890 | - "Invalid log cluster size: %u", |
1891 | - le32_to_cpu(es->s_log_cluster_size)); |
1892 | - goto failed_mount; |
1893 | - } |
1894 | sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - |
1895 | le32_to_cpu(es->s_log_block_size); |
1896 | sbi->s_clusters_per_group = |
1897 | @@ -3886,10 +3924,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
1898 | } |
1899 | } else { |
1900 | if (clustersize != blocksize) { |
1901 | - ext4_warning(sb, "fragment/cluster size (%d) != " |
1902 | - "block size (%d)", clustersize, |
1903 | - blocksize); |
1904 | - clustersize = blocksize; |
1905 | + ext4_msg(sb, KERN_ERR, |
1906 | + "fragment/cluster size (%d) != " |
1907 | + "block size (%d)", clustersize, blocksize); |
1908 | + goto failed_mount; |
1909 | } |
1910 | if (sbi->s_blocks_per_group > blocksize * 8) { |
1911 | ext4_msg(sb, KERN_ERR, |
1912 | @@ -3943,6 +3981,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
1913 | ext4_blocks_count(es)); |
1914 | goto failed_mount; |
1915 | } |
1916 | + if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) && |
1917 | + (sbi->s_cluster_ratio == 1)) { |
1918 | + ext4_msg(sb, KERN_WARNING, "bad geometry: first data " |
1919 | + "block is 0 with a 1k block and cluster size"); |
1920 | + goto failed_mount; |
1921 | + } |
1922 | + |
1923 | blocks_count = (ext4_blocks_count(es) - |
1924 | le32_to_cpu(es->s_first_data_block) + |
1925 | EXT4_BLOCKS_PER_GROUP(sb) - 1); |
1926 | @@ -3978,6 +4023,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
1927 | ret = -ENOMEM; |
1928 | goto failed_mount; |
1929 | } |
1930 | + if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) != |
1931 | + le32_to_cpu(es->s_inodes_count)) { |
1932 | + ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu", |
1933 | + le32_to_cpu(es->s_inodes_count), |
1934 | + ((u64)sbi->s_groups_count * sbi->s_inodes_per_group)); |
1935 | + ret = -EINVAL; |
1936 | + goto failed_mount; |
1937 | + } |
1938 | |
1939 | bgl_lock_init(sbi->s_blockgroup_lock); |
1940 | |
1941 | @@ -4709,6 +4762,14 @@ static int ext4_commit_super(struct super_block *sb, int sync) |
1942 | |
1943 | if (!sbh || block_device_ejected(sb)) |
1944 | return error; |
1945 | + |
1946 | + /* |
1947 | + * The superblock bh should be mapped, but it might not be if the |
1948 | + * device was hot-removed. Not much we can do but fail the I/O. |
1949 | + */ |
1950 | + if (!buffer_mapped(sbh)) |
1951 | + return error; |
1952 | + |
1953 | /* |
1954 | * If the file system is mounted read-only, don't update the |
1955 | * superblock write time. This avoids updating the superblock |
1956 | diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c |
1957 | index fc4ced59c565..723df14f4084 100644 |
1958 | --- a/fs/ext4/xattr.c |
1959 | +++ b/fs/ext4/xattr.c |
1960 | @@ -230,12 +230,12 @@ __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh, |
1961 | { |
1962 | int error = -EFSCORRUPTED; |
1963 | |
1964 | - if (buffer_verified(bh)) |
1965 | - return 0; |
1966 | - |
1967 | if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || |
1968 | BHDR(bh)->h_blocks != cpu_to_le32(1)) |
1969 | goto errout; |
1970 | + if (buffer_verified(bh)) |
1971 | + return 0; |
1972 | + |
1973 | error = -EFSBADCRC; |
1974 | if (!ext4_xattr_block_csum_verify(inode, bh)) |
1975 | goto errout; |
1976 | @@ -1560,7 +1560,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, |
1977 | handle_t *handle, struct inode *inode, |
1978 | bool is_block) |
1979 | { |
1980 | - struct ext4_xattr_entry *last; |
1981 | + struct ext4_xattr_entry *last, *next; |
1982 | struct ext4_xattr_entry *here = s->here; |
1983 | size_t min_offs = s->end - s->base, name_len = strlen(i->name); |
1984 | int in_inode = i->in_inode; |
1985 | @@ -1595,7 +1595,13 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, |
1986 | |
1987 | /* Compute min_offs and last. */ |
1988 | last = s->first; |
1989 | - for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { |
1990 | + for (; !IS_LAST_ENTRY(last); last = next) { |
1991 | + next = EXT4_XATTR_NEXT(last); |
1992 | + if ((void *)next >= s->end) { |
1993 | + EXT4_ERROR_INODE(inode, "corrupted xattr entries"); |
1994 | + ret = -EFSCORRUPTED; |
1995 | + goto out; |
1996 | + } |
1997 | if (!last->e_value_inum && last->e_value_size) { |
1998 | size_t offs = le16_to_cpu(last->e_value_offs); |
1999 | if (offs < min_offs) |
2000 | @@ -2206,23 +2212,8 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode, |
2001 | if (EXT4_I(inode)->i_extra_isize == 0) |
2002 | return -ENOSPC; |
2003 | error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */); |
2004 | - if (error) { |
2005 | - if (error == -ENOSPC && |
2006 | - ext4_has_inline_data(inode)) { |
2007 | - error = ext4_try_to_evict_inline_data(handle, inode, |
2008 | - EXT4_XATTR_LEN(strlen(i->name) + |
2009 | - EXT4_XATTR_SIZE(i->value_len))); |
2010 | - if (error) |
2011 | - return error; |
2012 | - error = ext4_xattr_ibody_find(inode, i, is); |
2013 | - if (error) |
2014 | - return error; |
2015 | - error = ext4_xattr_set_entry(i, s, handle, inode, |
2016 | - false /* is_block */); |
2017 | - } |
2018 | - if (error) |
2019 | - return error; |
2020 | - } |
2021 | + if (error) |
2022 | + return error; |
2023 | header = IHDR(inode, ext4_raw_inode(&is->iloc)); |
2024 | if (!IS_LAST_ENTRY(s->first)) { |
2025 | header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); |
2026 | @@ -2651,6 +2642,11 @@ static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode, |
2027 | last = IFIRST(header); |
2028 | /* Find the entry best suited to be pushed into EA block */ |
2029 | for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { |
2030 | + /* never move system.data out of the inode */ |
2031 | + if ((last->e_name_len == 4) && |
2032 | + (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) && |
2033 | + !memcmp(last->e_name, "data", 4)) |
2034 | + continue; |
2035 | total_size = EXT4_XATTR_LEN(last->e_name_len); |
2036 | if (!last->e_value_inum) |
2037 | total_size += EXT4_XATTR_SIZE( |
2038 | diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c |
2039 | index 8aa453784402..c51bf0d2aa9b 100644 |
2040 | --- a/fs/jbd2/transaction.c |
2041 | +++ b/fs/jbd2/transaction.c |
2042 | @@ -1363,6 +1363,13 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) |
2043 | if (jh->b_transaction == transaction && |
2044 | jh->b_jlist != BJ_Metadata) { |
2045 | jbd_lock_bh_state(bh); |
2046 | + if (jh->b_transaction == transaction && |
2047 | + jh->b_jlist != BJ_Metadata) |
2048 | + pr_err("JBD2: assertion failure: h_type=%u " |
2049 | + "h_line_no=%u block_no=%llu jlist=%u\n", |
2050 | + handle->h_type, handle->h_line_no, |
2051 | + (unsigned long long) bh->b_blocknr, |
2052 | + jh->b_jlist); |
2053 | J_ASSERT_JH(jh, jh->b_transaction != transaction || |
2054 | jh->b_jlist == BJ_Metadata); |
2055 | jbd_unlock_bh_state(bh); |
2056 | @@ -1382,11 +1389,11 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) |
2057 | * of the transaction. This needs to be done |
2058 | * once a transaction -bzzz |
2059 | */ |
2060 | - jh->b_modified = 1; |
2061 | if (handle->h_buffer_credits <= 0) { |
2062 | ret = -ENOSPC; |
2063 | goto out_unlock_bh; |
2064 | } |
2065 | + jh->b_modified = 1; |
2066 | handle->h_buffer_credits--; |
2067 | } |
2068 | |
2069 | diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c |
2070 | index cec550c8468f..1d85efacfc8e 100644 |
2071 | --- a/fs/userfaultfd.c |
2072 | +++ b/fs/userfaultfd.c |
2073 | @@ -220,24 +220,26 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, |
2074 | unsigned long reason) |
2075 | { |
2076 | struct mm_struct *mm = ctx->mm; |
2077 | - pte_t *pte; |
2078 | + pte_t *ptep, pte; |
2079 | bool ret = true; |
2080 | |
2081 | VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); |
2082 | |
2083 | - pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma)); |
2084 | - if (!pte) |
2085 | + ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma)); |
2086 | + |
2087 | + if (!ptep) |
2088 | goto out; |
2089 | |
2090 | ret = false; |
2091 | + pte = huge_ptep_get(ptep); |
2092 | |
2093 | /* |
2094 | * Lockless access: we're in a wait_event so it's ok if it |
2095 | * changes under us. |
2096 | */ |
2097 | - if (huge_pte_none(*pte)) |
2098 | + if (huge_pte_none(pte)) |
2099 | ret = true; |
2100 | - if (!huge_pte_write(*pte) && (reason & VM_UFFD_WP)) |
2101 | + if (!huge_pte_write(pte) && (reason & VM_UFFD_WP)) |
2102 | ret = true; |
2103 | out: |
2104 | return ret; |
2105 | diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c |
2106 | index 89fb1eb80aae..2c70a0a4f59f 100644 |
2107 | --- a/fs/xfs/xfs_ioctl.c |
2108 | +++ b/fs/xfs/xfs_ioctl.c |
2109 | @@ -1103,7 +1103,8 @@ xfs_ioctl_setattr_dax_invalidate( |
2110 | if (fa->fsx_xflags & FS_XFLAG_DAX) { |
2111 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) |
2112 | return -EINVAL; |
2113 | - if (bdev_dax_supported(sb, sb->s_blocksize) < 0) |
2114 | + if (!bdev_dax_supported(xfs_find_bdev_for_inode(VFS_I(ip)), |
2115 | + sb->s_blocksize)) |
2116 | return -EINVAL; |
2117 | } |
2118 | |
2119 | diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c |
2120 | index a3ed3c811dfa..6e83acf74a95 100644 |
2121 | --- a/fs/xfs/xfs_iops.c |
2122 | +++ b/fs/xfs/xfs_iops.c |
2123 | @@ -1195,6 +1195,30 @@ static const struct inode_operations xfs_inline_symlink_inode_operations = { |
2124 | .update_time = xfs_vn_update_time, |
2125 | }; |
2126 | |
2127 | +/* Figure out if this file actually supports DAX. */ |
2128 | +static bool |
2129 | +xfs_inode_supports_dax( |
2130 | + struct xfs_inode *ip) |
2131 | +{ |
2132 | + struct xfs_mount *mp = ip->i_mount; |
2133 | + |
2134 | + /* Only supported on non-reflinked files. */ |
2135 | + if (!S_ISREG(VFS_I(ip)->i_mode) || xfs_is_reflink_inode(ip)) |
2136 | + return false; |
2137 | + |
2138 | + /* DAX mount option or DAX iflag must be set. */ |
2139 | + if (!(mp->m_flags & XFS_MOUNT_DAX) && |
2140 | + !(ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)) |
2141 | + return false; |
2142 | + |
2143 | + /* Block size must match page size */ |
2144 | + if (mp->m_sb.sb_blocksize != PAGE_SIZE) |
2145 | + return false; |
2146 | + |
2147 | + /* Device has to support DAX too. */ |
2148 | + return xfs_find_daxdev_for_inode(VFS_I(ip)) != NULL; |
2149 | +} |
2150 | + |
2151 | STATIC void |
2152 | xfs_diflags_to_iflags( |
2153 | struct inode *inode, |
2154 | @@ -1213,11 +1237,7 @@ xfs_diflags_to_iflags( |
2155 | inode->i_flags |= S_SYNC; |
2156 | if (flags & XFS_DIFLAG_NOATIME) |
2157 | inode->i_flags |= S_NOATIME; |
2158 | - if (S_ISREG(inode->i_mode) && |
2159 | - ip->i_mount->m_sb.sb_blocksize == PAGE_SIZE && |
2160 | - !xfs_is_reflink_inode(ip) && |
2161 | - (ip->i_mount->m_flags & XFS_MOUNT_DAX || |
2162 | - ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)) |
2163 | + if (xfs_inode_supports_dax(ip)) |
2164 | inode->i_flags |= S_DAX; |
2165 | } |
2166 | |
2167 | diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c |
2168 | index d71424052917..86915dc40eed 100644 |
2169 | --- a/fs/xfs/xfs_super.c |
2170 | +++ b/fs/xfs/xfs_super.c |
2171 | @@ -1690,11 +1690,17 @@ xfs_fs_fill_super( |
2172 | sb->s_flags |= SB_I_VERSION; |
2173 | |
2174 | if (mp->m_flags & XFS_MOUNT_DAX) { |
2175 | + bool rtdev_is_dax = false, datadev_is_dax; |
2176 | + |
2177 | xfs_warn(mp, |
2178 | "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); |
2179 | |
2180 | - error = bdev_dax_supported(sb, sb->s_blocksize); |
2181 | - if (error) { |
2182 | + datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev, |
2183 | + sb->s_blocksize); |
2184 | + if (mp->m_rtdev_targp) |
2185 | + rtdev_is_dax = bdev_dax_supported( |
2186 | + mp->m_rtdev_targp->bt_bdev, sb->s_blocksize); |
2187 | + if (!rtdev_is_dax && !datadev_is_dax) { |
2188 | xfs_alert(mp, |
2189 | "DAX unsupported by block device. Turning off DAX."); |
2190 | mp->m_flags &= ~XFS_MOUNT_DAX; |
2191 | diff --git a/include/linux/dax.h b/include/linux/dax.h |
2192 | index f9eb22ad341e..c99692ddd4b5 100644 |
2193 | --- a/include/linux/dax.h |
2194 | +++ b/include/linux/dax.h |
2195 | @@ -64,10 +64,10 @@ static inline bool dax_write_cache_enabled(struct dax_device *dax_dev) |
2196 | struct writeback_control; |
2197 | int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); |
2198 | #if IS_ENABLED(CONFIG_FS_DAX) |
2199 | -int __bdev_dax_supported(struct super_block *sb, int blocksize); |
2200 | -static inline int bdev_dax_supported(struct super_block *sb, int blocksize) |
2201 | +bool __bdev_dax_supported(struct block_device *bdev, int blocksize); |
2202 | +static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize) |
2203 | { |
2204 | - return __bdev_dax_supported(sb, blocksize); |
2205 | + return __bdev_dax_supported(bdev, blocksize); |
2206 | } |
2207 | |
2208 | static inline struct dax_device *fs_dax_get_by_host(const char *host) |
2209 | @@ -84,9 +84,10 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev); |
2210 | int dax_writeback_mapping_range(struct address_space *mapping, |
2211 | struct block_device *bdev, struct writeback_control *wbc); |
2212 | #else |
2213 | -static inline int bdev_dax_supported(struct super_block *sb, int blocksize) |
2214 | +static inline bool bdev_dax_supported(struct block_device *bdev, |
2215 | + int blocksize) |
2216 | { |
2217 | - return -EOPNOTSUPP; |
2218 | + return false; |
2219 | } |
2220 | |
2221 | static inline struct dax_device *fs_dax_get_by_host(const char *host) |
2222 | diff --git a/include/linux/hid.h b/include/linux/hid.h |
2223 | index 26240a22978a..2a4c0900e46a 100644 |
2224 | --- a/include/linux/hid.h |
2225 | +++ b/include/linux/hid.h |
2226 | @@ -502,6 +502,7 @@ struct hid_output_fifo { |
2227 | |
2228 | #define HID_STAT_ADDED BIT(0) |
2229 | #define HID_STAT_PARSED BIT(1) |
2230 | +#define HID_STAT_REPROBED BIT(3) |
2231 | |
2232 | struct hid_input { |
2233 | struct list_head list; |
2234 | @@ -568,7 +569,7 @@ struct hid_device { /* device report descriptor */ |
2235 | bool battery_avoid_query; |
2236 | #endif |
2237 | |
2238 | - unsigned int status; /* see STAT flags above */ |
2239 | + unsigned long status; /* see STAT flags above */ |
2240 | unsigned claimed; /* Claimed by hidinput, hiddev? */ |
2241 | unsigned quirks; /* Various quirks the device can pull on us */ |
2242 | bool io_started; /* If IO has started */ |
2243 | diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c |
2244 | index b9061ed59bbd..c7bbc8997db8 100644 |
2245 | --- a/kernel/trace/trace_events_hist.c |
2246 | +++ b/kernel/trace/trace_events_hist.c |
2247 | @@ -393,7 +393,7 @@ static void hist_err_event(char *str, char *system, char *event, char *var) |
2248 | else if (system) |
2249 | snprintf(err, MAX_FILTER_STR_VAL, "%s.%s", system, event); |
2250 | else |
2251 | - strncpy(err, var, MAX_FILTER_STR_VAL); |
2252 | + strscpy(err, var, MAX_FILTER_STR_VAL); |
2253 | |
2254 | hist_err(str, err); |
2255 | } |
2256 | diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c |
2257 | index 23c0b0cb5fb9..169b3c44ee97 100644 |
2258 | --- a/kernel/trace/trace_functions_graph.c |
2259 | +++ b/kernel/trace/trace_functions_graph.c |
2260 | @@ -831,6 +831,7 @@ print_graph_entry_leaf(struct trace_iterator *iter, |
2261 | struct ftrace_graph_ret *graph_ret; |
2262 | struct ftrace_graph_ent *call; |
2263 | unsigned long long duration; |
2264 | + int cpu = iter->cpu; |
2265 | int i; |
2266 | |
2267 | graph_ret = &ret_entry->ret; |
2268 | @@ -839,7 +840,6 @@ print_graph_entry_leaf(struct trace_iterator *iter, |
2269 | |
2270 | if (data) { |
2271 | struct fgraph_cpu_data *cpu_data; |
2272 | - int cpu = iter->cpu; |
2273 | |
2274 | cpu_data = per_cpu_ptr(data->cpu_data, cpu); |
2275 | |
2276 | @@ -869,6 +869,9 @@ print_graph_entry_leaf(struct trace_iterator *iter, |
2277 | |
2278 | trace_seq_printf(s, "%ps();\n", (void *)call->func); |
2279 | |
2280 | + print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET, |
2281 | + cpu, iter->ent->pid, flags); |
2282 | + |
2283 | return trace_handle_return(s); |
2284 | } |
2285 | |
2286 | diff --git a/mm/debug.c b/mm/debug.c |
2287 | index 56e2d9125ea5..38c926520c97 100644 |
2288 | --- a/mm/debug.c |
2289 | +++ b/mm/debug.c |
2290 | @@ -43,12 +43,25 @@ const struct trace_print_flags vmaflag_names[] = { |
2291 | |
2292 | void __dump_page(struct page *page, const char *reason) |
2293 | { |
2294 | + bool page_poisoned = PagePoisoned(page); |
2295 | + int mapcount; |
2296 | + |
2297 | + /* |
2298 | + * If struct page is poisoned don't access Page*() functions as that |
2299 | + * leads to recursive loop. Page*() check for poisoned pages, and calls |
2300 | + * dump_page() when detected. |
2301 | + */ |
2302 | + if (page_poisoned) { |
2303 | + pr_emerg("page:%px is uninitialized and poisoned", page); |
2304 | + goto hex_only; |
2305 | + } |
2306 | + |
2307 | /* |
2308 | * Avoid VM_BUG_ON() in page_mapcount(). |
2309 | * page->_mapcount space in struct page is used by sl[aou]b pages to |
2310 | * encode own info. |
2311 | */ |
2312 | - int mapcount = PageSlab(page) ? 0 : page_mapcount(page); |
2313 | + mapcount = PageSlab(page) ? 0 : page_mapcount(page); |
2314 | |
2315 | pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx", |
2316 | page, page_ref_count(page), mapcount, |
2317 | @@ -60,6 +73,7 @@ void __dump_page(struct page *page, const char *reason) |
2318 | |
2319 | pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags); |
2320 | |
2321 | +hex_only: |
2322 | print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32, |
2323 | sizeof(unsigned long), page, |
2324 | sizeof(struct page), false); |
2325 | @@ -68,7 +82,7 @@ void __dump_page(struct page *page, const char *reason) |
2326 | pr_alert("page dumped because: %s\n", reason); |
2327 | |
2328 | #ifdef CONFIG_MEMCG |
2329 | - if (page->mem_cgroup) |
2330 | + if (!page_poisoned && page->mem_cgroup) |
2331 | pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup); |
2332 | #endif |
2333 | } |
2334 | diff --git a/mm/hugetlb.c b/mm/hugetlb.c |
2335 | index 218679138255..a2d9eb6a0af9 100644 |
2336 | --- a/mm/hugetlb.c |
2337 | +++ b/mm/hugetlb.c |
2338 | @@ -2163,6 +2163,7 @@ static void __init gather_bootmem_prealloc(void) |
2339 | */ |
2340 | if (hstate_is_gigantic(h)) |
2341 | adjust_managed_page_count(page, 1 << h->order); |
2342 | + cond_resched(); |
2343 | } |
2344 | } |
2345 | |
2346 | diff --git a/mm/vmstat.c b/mm/vmstat.c |
2347 | index a2b9518980ce..1377a89eb84c 100644 |
2348 | --- a/mm/vmstat.c |
2349 | +++ b/mm/vmstat.c |
2350 | @@ -1844,11 +1844,9 @@ static void vmstat_update(struct work_struct *w) |
2351 | * to occur in the future. Keep on running the |
2352 | * update worker thread. |
2353 | */ |
2354 | - preempt_disable(); |
2355 | queue_delayed_work_on(smp_processor_id(), mm_percpu_wq, |
2356 | this_cpu_ptr(&vmstat_work), |
2357 | round_jiffies_relative(sysctl_stat_interval)); |
2358 | - preempt_enable(); |
2359 | } |
2360 | } |
2361 | |
2362 | diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c |
2363 | index 6d0357817cda..a82dfb8f8790 100644 |
2364 | --- a/net/netfilter/nf_log.c |
2365 | +++ b/net/netfilter/nf_log.c |
2366 | @@ -457,14 +457,17 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write, |
2367 | rcu_assign_pointer(net->nf.nf_loggers[tindex], logger); |
2368 | mutex_unlock(&nf_log_mutex); |
2369 | } else { |
2370 | + struct ctl_table tmp = *table; |
2371 | + |
2372 | + tmp.data = buf; |
2373 | mutex_lock(&nf_log_mutex); |
2374 | logger = nft_log_dereference(net->nf.nf_loggers[tindex]); |
2375 | if (!logger) |
2376 | - table->data = "NONE"; |
2377 | + strlcpy(buf, "NONE", sizeof(buf)); |
2378 | else |
2379 | - table->data = logger->name; |
2380 | - r = proc_dostring(table, write, buffer, lenp, ppos); |
2381 | + strlcpy(buf, logger->name, sizeof(buf)); |
2382 | mutex_unlock(&nf_log_mutex); |
2383 | + r = proc_dostring(&tmp, write, buffer, lenp, ppos); |
2384 | } |
2385 | |
2386 | return r; |