Contents of /trunk/kernel-alx/patches-4.19/0115-4.19.16-all-fixes.patch
Parent Directory | Revision Log
Revision 3394 -
(show annotations)
(download)
Fri Aug 2 11:47:29 2019 UTC (5 years, 1 month ago) by niro
File size: 59485 byte(s)
-linux-4.19.16
1 | diff --git a/Makefile b/Makefile |
2 | index 0e30d48274fa..e8cb4875b86d 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 4 |
8 | PATCHLEVEL = 19 |
9 | -SUBLEVEL = 15 |
10 | +SUBLEVEL = 16 |
11 | EXTRAVERSION = |
12 | NAME = "People's Front" |
13 | |
14 | diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c |
15 | index 7be666062c7c..010212d35700 100644 |
16 | --- a/arch/arm64/kernel/sys_compat.c |
17 | +++ b/arch/arm64/kernel/sys_compat.c |
18 | @@ -66,12 +66,11 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags) |
19 | /* |
20 | * Handle all unrecognised system calls. |
21 | */ |
22 | -long compat_arm_syscall(struct pt_regs *regs) |
23 | +long compat_arm_syscall(struct pt_regs *regs, int scno) |
24 | { |
25 | siginfo_t info; |
26 | - unsigned int no = regs->regs[7]; |
27 | |
28 | - switch (no) { |
29 | + switch (scno) { |
30 | /* |
31 | * Flush a region from virtual address 'r0' to virtual address 'r1' |
32 | * _exclusive_. There is no alignment requirement on either address; |
33 | @@ -107,7 +106,7 @@ long compat_arm_syscall(struct pt_regs *regs) |
34 | * way the calling program can gracefully determine whether |
35 | * a feature is supported. |
36 | */ |
37 | - if (no < __ARM_NR_COMPAT_END) |
38 | + if (scno < __ARM_NR_COMPAT_END) |
39 | return -ENOSYS; |
40 | break; |
41 | } |
42 | @@ -119,6 +118,6 @@ long compat_arm_syscall(struct pt_regs *regs) |
43 | info.si_addr = (void __user *)instruction_pointer(regs) - |
44 | (compat_thumb_mode(regs) ? 2 : 4); |
45 | |
46 | - arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, no); |
47 | + arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, scno); |
48 | return 0; |
49 | } |
50 | diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c |
51 | index 032d22312881..5610ac01c1ec 100644 |
52 | --- a/arch/arm64/kernel/syscall.c |
53 | +++ b/arch/arm64/kernel/syscall.c |
54 | @@ -13,16 +13,15 @@ |
55 | #include <asm/thread_info.h> |
56 | #include <asm/unistd.h> |
57 | |
58 | -long compat_arm_syscall(struct pt_regs *regs); |
59 | - |
60 | +long compat_arm_syscall(struct pt_regs *regs, int scno); |
61 | long sys_ni_syscall(void); |
62 | |
63 | -asmlinkage long do_ni_syscall(struct pt_regs *regs) |
64 | +static long do_ni_syscall(struct pt_regs *regs, int scno) |
65 | { |
66 | #ifdef CONFIG_COMPAT |
67 | long ret; |
68 | if (is_compat_task()) { |
69 | - ret = compat_arm_syscall(regs); |
70 | + ret = compat_arm_syscall(regs, scno); |
71 | if (ret != -ENOSYS) |
72 | return ret; |
73 | } |
74 | @@ -47,7 +46,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno, |
75 | syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)]; |
76 | ret = __invoke_syscall(regs, syscall_fn); |
77 | } else { |
78 | - ret = do_ni_syscall(regs); |
79 | + ret = do_ni_syscall(regs, scno); |
80 | } |
81 | |
82 | regs->regs[0] = ret; |
83 | diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
84 | index abb92c341693..807d06a7acac 100644 |
85 | --- a/arch/x86/kernel/cpu/bugs.c |
86 | +++ b/arch/x86/kernel/cpu/bugs.c |
87 | @@ -213,7 +213,7 @@ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = |
88 | static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init = |
89 | SPECTRE_V2_USER_NONE; |
90 | |
91 | -#ifdef RETPOLINE |
92 | +#ifdef CONFIG_RETPOLINE |
93 | static bool spectre_v2_bad_module; |
94 | |
95 | bool retpoline_module_ok(bool has_retpoline) |
96 | diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c |
97 | index e938576e58cb..e48eebc27b81 100644 |
98 | --- a/drivers/acpi/arm64/iort.c |
99 | +++ b/drivers/acpi/arm64/iort.c |
100 | @@ -951,9 +951,10 @@ static int rc_dma_get_range(struct device *dev, u64 *size) |
101 | { |
102 | struct acpi_iort_node *node; |
103 | struct acpi_iort_root_complex *rc; |
104 | + struct pci_bus *pbus = to_pci_dev(dev)->bus; |
105 | |
106 | node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, |
107 | - iort_match_node_callback, dev); |
108 | + iort_match_node_callback, &pbus->dev); |
109 | if (!node || node->revision < 1) |
110 | return -ENODEV; |
111 | |
112 | diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c |
113 | index 316e55174aa9..bb5391f59b8b 100644 |
114 | --- a/drivers/acpi/pmic/intel_pmic_xpower.c |
115 | +++ b/drivers/acpi/pmic/intel_pmic_xpower.c |
116 | @@ -27,8 +27,11 @@ |
117 | #define GPI1_LDO_ON (3 << 0) |
118 | #define GPI1_LDO_OFF (4 << 0) |
119 | |
120 | -#define AXP288_ADC_TS_PIN_GPADC 0xf2 |
121 | -#define AXP288_ADC_TS_PIN_ON 0xf3 |
122 | +#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0) |
123 | +#define AXP288_ADC_TS_CURRENT_OFF (0 << 0) |
124 | +#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0) |
125 | +#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0) |
126 | +#define AXP288_ADC_TS_CURRENT_ON (3 << 0) |
127 | |
128 | static struct pmic_table power_table[] = { |
129 | { |
130 | @@ -211,22 +214,44 @@ static int intel_xpower_pmic_update_power(struct regmap *regmap, int reg, |
131 | */ |
132 | static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg) |
133 | { |
134 | + int ret, adc_ts_pin_ctrl; |
135 | u8 buf[2]; |
136 | - int ret; |
137 | |
138 | - ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, |
139 | - AXP288_ADC_TS_PIN_GPADC); |
140 | + /* |
141 | + * The current-source used for the battery temp-sensor (TS) is shared |
142 | + * with the GPADC. For proper fuel-gauge and charger operation the TS |
143 | + * current-source needs to be permanently on. But to read the GPADC we |
144 | + * need to temporary switch the TS current-source to ondemand, so that |
145 | + * the GPADC can use it, otherwise we will always read an all 0 value. |
146 | + * |
147 | + * Note that the switching from on to on-ondemand is not necessary |
148 | + * when the TS current-source is off (this happens on devices which |
149 | + * do not use the TS-pin). |
150 | + */ |
151 | + ret = regmap_read(regmap, AXP288_ADC_TS_PIN_CTRL, &adc_ts_pin_ctrl); |
152 | if (ret) |
153 | return ret; |
154 | |
155 | - /* After switching to the GPADC pin give things some time to settle */ |
156 | - usleep_range(6000, 10000); |
157 | + if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) { |
158 | + ret = regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL, |
159 | + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, |
160 | + AXP288_ADC_TS_CURRENT_ON_ONDEMAND); |
161 | + if (ret) |
162 | + return ret; |
163 | + |
164 | + /* Wait a bit after switching the current-source */ |
165 | + usleep_range(6000, 10000); |
166 | + } |
167 | |
168 | ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2); |
169 | if (ret == 0) |
170 | ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f); |
171 | |
172 | - regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON); |
173 | + if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) { |
174 | + regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL, |
175 | + AXP288_ADC_TS_CURRENT_ON_OFF_MASK, |
176 | + AXP288_ADC_TS_CURRENT_ON); |
177 | + } |
178 | |
179 | return ret; |
180 | } |
181 | diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c |
182 | index 1b475bc1ae16..665e93ca0b40 100644 |
183 | --- a/drivers/acpi/power.c |
184 | +++ b/drivers/acpi/power.c |
185 | @@ -131,6 +131,23 @@ void acpi_power_resources_list_free(struct list_head *list) |
186 | } |
187 | } |
188 | |
189 | +static bool acpi_power_resource_is_dup(union acpi_object *package, |
190 | + unsigned int start, unsigned int i) |
191 | +{ |
192 | + acpi_handle rhandle, dup; |
193 | + unsigned int j; |
194 | + |
195 | + /* The caller is expected to check the package element types */ |
196 | + rhandle = package->package.elements[i].reference.handle; |
197 | + for (j = start; j < i; j++) { |
198 | + dup = package->package.elements[j].reference.handle; |
199 | + if (dup == rhandle) |
200 | + return true; |
201 | + } |
202 | + |
203 | + return false; |
204 | +} |
205 | + |
206 | int acpi_extract_power_resources(union acpi_object *package, unsigned int start, |
207 | struct list_head *list) |
208 | { |
209 | @@ -150,6 +167,11 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start, |
210 | err = -ENODEV; |
211 | break; |
212 | } |
213 | + |
214 | + /* Some ACPI tables contain duplicate power resource references */ |
215 | + if (acpi_power_resource_is_dup(package, start, i)) |
216 | + continue; |
217 | + |
218 | err = acpi_add_power_resource(rhandle); |
219 | if (err) |
220 | break; |
221 | diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c |
222 | index 73ed5f3a862d..585378bc988c 100644 |
223 | --- a/drivers/block/rbd.c |
224 | +++ b/drivers/block/rbd.c |
225 | @@ -5982,7 +5982,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus, |
226 | struct list_head *tmp; |
227 | int dev_id; |
228 | char opt_buf[6]; |
229 | - bool already = false; |
230 | bool force = false; |
231 | int ret; |
232 | |
233 | @@ -6015,13 +6014,13 @@ static ssize_t do_rbd_remove(struct bus_type *bus, |
234 | spin_lock_irq(&rbd_dev->lock); |
235 | if (rbd_dev->open_count && !force) |
236 | ret = -EBUSY; |
237 | - else |
238 | - already = test_and_set_bit(RBD_DEV_FLAG_REMOVING, |
239 | - &rbd_dev->flags); |
240 | + else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING, |
241 | + &rbd_dev->flags)) |
242 | + ret = -EINPROGRESS; |
243 | spin_unlock_irq(&rbd_dev->lock); |
244 | } |
245 | spin_unlock(&rbd_dev_list_lock); |
246 | - if (ret < 0 || already) |
247 | + if (ret) |
248 | return ret; |
249 | |
250 | if (force) { |
251 | diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c |
252 | index 50b1551ba894..3f0693439486 100644 |
253 | --- a/drivers/cpufreq/scmi-cpufreq.c |
254 | +++ b/drivers/cpufreq/scmi-cpufreq.c |
255 | @@ -52,9 +52,9 @@ scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) |
256 | int ret; |
257 | struct scmi_data *priv = policy->driver_data; |
258 | struct scmi_perf_ops *perf_ops = handle->perf_ops; |
259 | - u64 freq = policy->freq_table[index].frequency * 1000; |
260 | + u64 freq = policy->freq_table[index].frequency; |
261 | |
262 | - ret = perf_ops->freq_set(handle, priv->domain_id, freq, false); |
263 | + ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false); |
264 | if (!ret) |
265 | arch_set_freq_scale(policy->related_cpus, freq, |
266 | policy->cpuinfo.max_freq); |
267 | diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig |
268 | index cb88528e7b10..e44e567bd789 100644 |
269 | --- a/drivers/gpu/drm/Kconfig |
270 | +++ b/drivers/gpu/drm/Kconfig |
271 | @@ -110,6 +110,26 @@ config DRM_FBDEV_OVERALLOC |
272 | is 100. Typical values for double buffering will be 200, |
273 | triple buffering 300. |
274 | |
275 | +config DRM_FBDEV_LEAK_PHYS_SMEM |
276 | + bool "Shamelessly allow leaking of fbdev physical address (DANGEROUS)" |
277 | + depends on DRM_FBDEV_EMULATION && EXPERT |
278 | + default n |
279 | + help |
280 | + In order to keep user-space compatibility, we want in certain |
281 | + use-cases to keep leaking the fbdev physical address to the |
282 | + user-space program handling the fbdev buffer. |
283 | + This affects, not only, Amlogic, Allwinner or Rockchip devices |
284 | + with ARM Mali GPUs using an userspace Blob. |
285 | + This option is not supported by upstream developers and should be |
286 | + removed as soon as possible and be considered as a broken and |
287 | + legacy behaviour from a modern fbdev device driver. |
288 | + |
289 | + Please send any bug reports when using this to your proprietary |
290 | + software vendor that requires this. |
291 | + |
292 | + If in doubt, say "N" or spread the word to your closed source |
293 | + library vendor. |
294 | + |
295 | config DRM_LOAD_EDID_FIRMWARE |
296 | bool "Allow to specify an EDID data set instead of probing for it" |
297 | depends on DRM |
298 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |
299 | index 8e26e1ca14c6..b40e9c76af0c 100644 |
300 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |
301 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |
302 | @@ -753,6 +753,7 @@ static const struct pci_device_id pciidlist[] = { |
303 | /* VEGAM */ |
304 | {0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, |
305 | {0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, |
306 | + {0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, |
307 | /* Vega 10 */ |
308 | {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, |
309 | {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, |
310 | diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |
311 | index d792735f1365..a851bb07443f 100644 |
312 | --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |
313 | +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |
314 | @@ -565,22 +565,36 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) |
315 | { |
316 | struct amdgpu_dm_connector *aconnector; |
317 | struct drm_connector *connector; |
318 | + struct drm_dp_mst_topology_mgr *mgr; |
319 | + int ret; |
320 | + bool need_hotplug = false; |
321 | |
322 | drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); |
323 | |
324 | - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
325 | - aconnector = to_amdgpu_dm_connector(connector); |
326 | - if (aconnector->dc_link->type == dc_connection_mst_branch && |
327 | - !aconnector->mst_port) { |
328 | + list_for_each_entry(connector, &dev->mode_config.connector_list, |
329 | + head) { |
330 | + aconnector = to_amdgpu_dm_connector(connector); |
331 | + if (aconnector->dc_link->type != dc_connection_mst_branch || |
332 | + aconnector->mst_port) |
333 | + continue; |
334 | |
335 | - if (suspend) |
336 | - drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr); |
337 | - else |
338 | - drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr); |
339 | - } |
340 | + mgr = &aconnector->mst_mgr; |
341 | + |
342 | + if (suspend) { |
343 | + drm_dp_mst_topology_mgr_suspend(mgr); |
344 | + } else { |
345 | + ret = drm_dp_mst_topology_mgr_resume(mgr); |
346 | + if (ret < 0) { |
347 | + drm_dp_mst_topology_mgr_set_mst(mgr, false); |
348 | + need_hotplug = true; |
349 | + } |
350 | + } |
351 | } |
352 | |
353 | drm_modeset_unlock(&dev->mode_config.connection_mutex); |
354 | + |
355 | + if (need_hotplug) |
356 | + drm_kms_helper_hotplug_event(dev); |
357 | } |
358 | |
359 | static int dm_hw_init(void *handle) |
360 | @@ -736,7 +750,6 @@ static int dm_resume(void *handle) |
361 | struct drm_plane_state *new_plane_state; |
362 | struct dm_plane_state *dm_new_plane_state; |
363 | enum dc_connection_type new_connection_type = dc_connection_none; |
364 | - int ret; |
365 | int i; |
366 | |
367 | /* power on hardware */ |
368 | @@ -809,13 +822,13 @@ static int dm_resume(void *handle) |
369 | } |
370 | } |
371 | |
372 | - ret = drm_atomic_helper_resume(ddev, dm->cached_state); |
373 | + drm_atomic_helper_resume(ddev, dm->cached_state); |
374 | |
375 | dm->cached_state = NULL; |
376 | |
377 | amdgpu_dm_irq_resume_late(adev); |
378 | |
379 | - return ret; |
380 | + return 0; |
381 | } |
382 | |
383 | static const struct amd_ip_funcs amdgpu_dm_funcs = { |
384 | diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c |
385 | index fced3c1c2ef5..7c89785fd731 100644 |
386 | --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c |
387 | +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c |
388 | @@ -2457,11 +2457,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) |
389 | { |
390 | struct dc *core_dc = pipe_ctx->stream->ctx->dc; |
391 | |
392 | + core_dc->hwss.blank_stream(pipe_ctx); |
393 | + |
394 | if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) |
395 | deallocate_mst_payload(pipe_ctx); |
396 | |
397 | - core_dc->hwss.blank_stream(pipe_ctx); |
398 | - |
399 | core_dc->hwss.disable_stream(pipe_ctx, option); |
400 | |
401 | disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal); |
402 | diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c |
403 | index 9214c8b02484..b5b9f15549c2 100644 |
404 | --- a/drivers/gpu/drm/drm_fb_helper.c |
405 | +++ b/drivers/gpu/drm/drm_fb_helper.c |
406 | @@ -56,6 +56,25 @@ MODULE_PARM_DESC(drm_fbdev_overalloc, |
407 | "Overallocation of the fbdev buffer (%) [default=" |
408 | __MODULE_STRING(CONFIG_DRM_FBDEV_OVERALLOC) "]"); |
409 | |
410 | +/* |
411 | + * In order to keep user-space compatibility, we want in certain use-cases |
412 | + * to keep leaking the fbdev physical address to the user-space program |
413 | + * handling the fbdev buffer. |
414 | + * This is a bad habit essentially kept into closed source opengl driver |
415 | + * that should really be moved into open-source upstream projects instead |
416 | + * of using legacy physical addresses in user space to communicate with |
417 | + * other out-of-tree kernel modules. |
418 | + * |
419 | + * This module_param *should* be removed as soon as possible and be |
420 | + * considered as a broken and legacy behaviour from a modern fbdev device. |
421 | + */ |
422 | +#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) |
423 | +static bool drm_leak_fbdev_smem = false; |
424 | +module_param_unsafe(drm_leak_fbdev_smem, bool, 0600); |
425 | +MODULE_PARM_DESC(fbdev_emulation, |
426 | + "Allow unsafe leaking fbdev physical smem address [default=false]"); |
427 | +#endif |
428 | + |
429 | static LIST_HEAD(kernel_fb_helper_list); |
430 | static DEFINE_MUTEX(kernel_fb_helper_lock); |
431 | |
432 | @@ -1602,6 +1621,64 @@ static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1, |
433 | var_1->transp.msb_right == var_2->transp.msb_right; |
434 | } |
435 | |
436 | +static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var, |
437 | + u8 depth) |
438 | +{ |
439 | + switch (depth) { |
440 | + case 8: |
441 | + var->red.offset = 0; |
442 | + var->green.offset = 0; |
443 | + var->blue.offset = 0; |
444 | + var->red.length = 8; /* 8bit DAC */ |
445 | + var->green.length = 8; |
446 | + var->blue.length = 8; |
447 | + var->transp.offset = 0; |
448 | + var->transp.length = 0; |
449 | + break; |
450 | + case 15: |
451 | + var->red.offset = 10; |
452 | + var->green.offset = 5; |
453 | + var->blue.offset = 0; |
454 | + var->red.length = 5; |
455 | + var->green.length = 5; |
456 | + var->blue.length = 5; |
457 | + var->transp.offset = 15; |
458 | + var->transp.length = 1; |
459 | + break; |
460 | + case 16: |
461 | + var->red.offset = 11; |
462 | + var->green.offset = 5; |
463 | + var->blue.offset = 0; |
464 | + var->red.length = 5; |
465 | + var->green.length = 6; |
466 | + var->blue.length = 5; |
467 | + var->transp.offset = 0; |
468 | + break; |
469 | + case 24: |
470 | + var->red.offset = 16; |
471 | + var->green.offset = 8; |
472 | + var->blue.offset = 0; |
473 | + var->red.length = 8; |
474 | + var->green.length = 8; |
475 | + var->blue.length = 8; |
476 | + var->transp.offset = 0; |
477 | + var->transp.length = 0; |
478 | + break; |
479 | + case 32: |
480 | + var->red.offset = 16; |
481 | + var->green.offset = 8; |
482 | + var->blue.offset = 0; |
483 | + var->red.length = 8; |
484 | + var->green.length = 8; |
485 | + var->blue.length = 8; |
486 | + var->transp.offset = 24; |
487 | + var->transp.length = 8; |
488 | + break; |
489 | + default: |
490 | + break; |
491 | + } |
492 | +} |
493 | + |
494 | /** |
495 | * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var |
496 | * @var: screeninfo to check |
497 | @@ -1631,6 +1708,20 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, |
498 | return -EINVAL; |
499 | } |
500 | |
501 | + /* |
502 | + * Workaround for SDL 1.2, which is known to be setting all pixel format |
503 | + * fields values to zero in some cases. We treat this situation as a |
504 | + * kind of "use some reasonable autodetected values". |
505 | + */ |
506 | + if (!var->red.offset && !var->green.offset && |
507 | + !var->blue.offset && !var->transp.offset && |
508 | + !var->red.length && !var->green.length && |
509 | + !var->blue.length && !var->transp.length && |
510 | + !var->red.msb_right && !var->green.msb_right && |
511 | + !var->blue.msb_right && !var->transp.msb_right) { |
512 | + drm_fb_helper_fill_pixel_fmt(var, fb->format->depth); |
513 | + } |
514 | + |
515 | /* |
516 | * drm fbdev emulation doesn't support changing the pixel format at all, |
517 | * so reject all pixel format changing requests. |
518 | @@ -1942,59 +2033,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe |
519 | info->var.yoffset = 0; |
520 | info->var.activate = FB_ACTIVATE_NOW; |
521 | |
522 | - switch (fb->format->depth) { |
523 | - case 8: |
524 | - info->var.red.offset = 0; |
525 | - info->var.green.offset = 0; |
526 | - info->var.blue.offset = 0; |
527 | - info->var.red.length = 8; /* 8bit DAC */ |
528 | - info->var.green.length = 8; |
529 | - info->var.blue.length = 8; |
530 | - info->var.transp.offset = 0; |
531 | - info->var.transp.length = 0; |
532 | - break; |
533 | - case 15: |
534 | - info->var.red.offset = 10; |
535 | - info->var.green.offset = 5; |
536 | - info->var.blue.offset = 0; |
537 | - info->var.red.length = 5; |
538 | - info->var.green.length = 5; |
539 | - info->var.blue.length = 5; |
540 | - info->var.transp.offset = 15; |
541 | - info->var.transp.length = 1; |
542 | - break; |
543 | - case 16: |
544 | - info->var.red.offset = 11; |
545 | - info->var.green.offset = 5; |
546 | - info->var.blue.offset = 0; |
547 | - info->var.red.length = 5; |
548 | - info->var.green.length = 6; |
549 | - info->var.blue.length = 5; |
550 | - info->var.transp.offset = 0; |
551 | - break; |
552 | - case 24: |
553 | - info->var.red.offset = 16; |
554 | - info->var.green.offset = 8; |
555 | - info->var.blue.offset = 0; |
556 | - info->var.red.length = 8; |
557 | - info->var.green.length = 8; |
558 | - info->var.blue.length = 8; |
559 | - info->var.transp.offset = 0; |
560 | - info->var.transp.length = 0; |
561 | - break; |
562 | - case 32: |
563 | - info->var.red.offset = 16; |
564 | - info->var.green.offset = 8; |
565 | - info->var.blue.offset = 0; |
566 | - info->var.red.length = 8; |
567 | - info->var.green.length = 8; |
568 | - info->var.blue.length = 8; |
569 | - info->var.transp.offset = 24; |
570 | - info->var.transp.length = 8; |
571 | - break; |
572 | - default: |
573 | - break; |
574 | - } |
575 | + drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth); |
576 | |
577 | info->var.xres = fb_width; |
578 | info->var.yres = fb_height; |
579 | @@ -3041,6 +3080,12 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, |
580 | fbi->screen_size = fb->height * fb->pitches[0]; |
581 | fbi->fix.smem_len = fbi->screen_size; |
582 | fbi->screen_buffer = buffer->vaddr; |
583 | + /* Shamelessly leak the physical address to user-space */ |
584 | +#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) |
585 | + if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0) |
586 | + fbi->fix.smem_start = |
587 | + page_to_phys(virt_to_page(fbi->screen_buffer)); |
588 | +#endif |
589 | strcpy(fbi->fix.id, "DRM emulated"); |
590 | |
591 | drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth); |
592 | diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c |
593 | index 5f57f4e1fbc8..87411a5aba77 100644 |
594 | --- a/drivers/gpu/drm/i915/i915_gem_gtt.c |
595 | +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c |
596 | @@ -2128,6 +2128,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) |
597 | int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) |
598 | { |
599 | struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); |
600 | + int err; |
601 | |
602 | /* |
603 | * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt |
604 | @@ -2143,9 +2144,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) |
605 | * allocator works in address space sizes, so it's multiplied by page |
606 | * size. We allocate at the top of the GTT to avoid fragmentation. |
607 | */ |
608 | - return i915_vma_pin(ppgtt->vma, |
609 | - 0, GEN6_PD_ALIGN, |
610 | - PIN_GLOBAL | PIN_HIGH); |
611 | + err = i915_vma_pin(ppgtt->vma, |
612 | + 0, GEN6_PD_ALIGN, |
613 | + PIN_GLOBAL | PIN_HIGH); |
614 | + if (err) |
615 | + goto unpin; |
616 | + |
617 | + return 0; |
618 | + |
619 | +unpin: |
620 | + ppgtt->pin_count = 0; |
621 | + return err; |
622 | } |
623 | |
624 | void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base) |
625 | diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c |
626 | index 1aca742fde4a..ccd76c71af09 100644 |
627 | --- a/drivers/i2c/i2c-dev.c |
628 | +++ b/drivers/i2c/i2c-dev.c |
629 | @@ -470,9 +470,15 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
630 | data_arg.data); |
631 | } |
632 | case I2C_RETRIES: |
633 | + if (arg > INT_MAX) |
634 | + return -EINVAL; |
635 | + |
636 | client->adapter->retries = arg; |
637 | break; |
638 | case I2C_TIMEOUT: |
639 | + if (arg > INT_MAX) |
640 | + return -EINVAL; |
641 | + |
642 | /* For historical reasons, user-space sets the timeout |
643 | * value in units of 10 ms. |
644 | */ |
645 | diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c |
646 | index 8815f3e2b718..880e75f63a19 100644 |
647 | --- a/drivers/mtd/nand/raw/qcom_nandc.c |
648 | +++ b/drivers/mtd/nand/raw/qcom_nandc.c |
649 | @@ -2839,6 +2839,16 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc, |
650 | if (ret) |
651 | return ret; |
652 | |
653 | + if (nandc->props->is_bam) { |
654 | + free_bam_transaction(nandc); |
655 | + nandc->bam_txn = alloc_bam_transaction(nandc); |
656 | + if (!nandc->bam_txn) { |
657 | + dev_err(nandc->dev, |
658 | + "failed to allocate bam transaction\n"); |
659 | + return -ENOMEM; |
660 | + } |
661 | + } |
662 | + |
663 | ret = mtd_device_register(mtd, NULL, 0); |
664 | if (ret) |
665 | nand_cleanup(chip); |
666 | @@ -2853,16 +2863,6 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc) |
667 | struct qcom_nand_host *host; |
668 | int ret; |
669 | |
670 | - if (nandc->props->is_bam) { |
671 | - free_bam_transaction(nandc); |
672 | - nandc->bam_txn = alloc_bam_transaction(nandc); |
673 | - if (!nandc->bam_txn) { |
674 | - dev_err(nandc->dev, |
675 | - "failed to allocate bam transaction\n"); |
676 | - return -ENOMEM; |
677 | - } |
678 | - } |
679 | - |
680 | for_each_available_child_of_node(dn, child) { |
681 | host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); |
682 | if (!host) { |
683 | diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c |
684 | index 29a05759a294..0fa9e8fdce66 100644 |
685 | --- a/drivers/pci/controller/dwc/pcie-designware-host.c |
686 | +++ b/drivers/pci/controller/dwc/pcie-designware-host.c |
687 | @@ -99,9 +99,6 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) |
688 | (i * MAX_MSI_IRQS_PER_CTRL) + |
689 | pos); |
690 | generic_handle_irq(irq); |
691 | - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + |
692 | - (i * MSI_REG_CTRL_BLOCK_SIZE), |
693 | - 4, 1 << pos); |
694 | pos++; |
695 | } |
696 | } |
697 | @@ -168,8 +165,8 @@ static void dw_pci_bottom_mask(struct irq_data *data) |
698 | bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; |
699 | |
700 | pp->irq_status[ctrl] &= ~(1 << bit); |
701 | - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, |
702 | - pp->irq_status[ctrl]); |
703 | + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, |
704 | + ~pp->irq_status[ctrl]); |
705 | } |
706 | |
707 | raw_spin_unlock_irqrestore(&pp->lock, flags); |
708 | @@ -191,8 +188,8 @@ static void dw_pci_bottom_unmask(struct irq_data *data) |
709 | bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; |
710 | |
711 | pp->irq_status[ctrl] |= 1 << bit; |
712 | - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, |
713 | - pp->irq_status[ctrl]); |
714 | + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, |
715 | + ~pp->irq_status[ctrl]); |
716 | } |
717 | |
718 | raw_spin_unlock_irqrestore(&pp->lock, flags); |
719 | @@ -200,13 +197,22 @@ static void dw_pci_bottom_unmask(struct irq_data *data) |
720 | |
721 | static void dw_pci_bottom_ack(struct irq_data *d) |
722 | { |
723 | - struct msi_desc *msi = irq_data_get_msi_desc(d); |
724 | - struct pcie_port *pp; |
725 | + struct pcie_port *pp = irq_data_get_irq_chip_data(d); |
726 | + unsigned int res, bit, ctrl; |
727 | + unsigned long flags; |
728 | + |
729 | + ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; |
730 | + res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; |
731 | + bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; |
732 | + |
733 | + raw_spin_lock_irqsave(&pp->lock, flags); |
734 | |
735 | - pp = msi_desc_to_pci_sysdata(msi); |
736 | + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit); |
737 | |
738 | if (pp->ops->msi_irq_ack) |
739 | pp->ops->msi_irq_ack(d->hwirq, pp); |
740 | + |
741 | + raw_spin_unlock_irqrestore(&pp->lock, flags); |
742 | } |
743 | |
744 | static struct irq_chip dw_pci_msi_bottom_irq_chip = { |
745 | @@ -658,10 +664,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp) |
746 | num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; |
747 | |
748 | /* Initialize IRQ Status array */ |
749 | - for (ctrl = 0; ctrl < num_ctrls; ctrl++) |
750 | - dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + |
751 | + for (ctrl = 0; ctrl < num_ctrls; ctrl++) { |
752 | + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + |
753 | (ctrl * MSI_REG_CTRL_BLOCK_SIZE), |
754 | - 4, &pp->irq_status[ctrl]); |
755 | + 4, ~0); |
756 | + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + |
757 | + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), |
758 | + 4, ~0); |
759 | + pp->irq_status[ctrl] = 0; |
760 | + } |
761 | |
762 | /* Setup RC BARs */ |
763 | dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004); |
764 | diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c |
765 | index 2a48b09ea9ae..470ea2c0c433 100644 |
766 | --- a/drivers/staging/rtl8188eu/core/rtw_security.c |
767 | +++ b/drivers/staging/rtl8188eu/core/rtw_security.c |
768 | @@ -154,7 +154,7 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe) |
769 | |
770 | pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset; |
771 | |
772 | - crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep"); |
773 | + crypto_ops = lib80211_get_crypto_ops("WEP"); |
774 | |
775 | if (!crypto_ops) |
776 | return; |
777 | @@ -210,7 +210,7 @@ int rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe) |
778 | void *crypto_private = NULL; |
779 | int status = _SUCCESS; |
780 | const int keyindex = prxattrib->key_index; |
781 | - struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep"); |
782 | + struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("WEP"); |
783 | char iv[4], icv[4]; |
784 | |
785 | if (!crypto_ops) { |
786 | @@ -1292,7 +1292,7 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe) |
787 | struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt; |
788 | void *crypto_private = NULL; |
789 | u8 *key, *pframe = skb->data; |
790 | - struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("CCMP"), "lib80211_crypt_ccmp"); |
791 | + struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("CCMP"); |
792 | struct security_priv *psecuritypriv = &padapter->securitypriv; |
793 | char iv[8], icv[8]; |
794 | |
795 | diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c |
796 | index 2886b7b477c7..08b8aa5299b5 100644 |
797 | --- a/drivers/usb/class/cdc-acm.c |
798 | +++ b/drivers/usb/class/cdc-acm.c |
799 | @@ -1880,6 +1880,13 @@ static const struct usb_device_id acm_ids[] = { |
800 | .driver_info = IGNORE_DEVICE, |
801 | }, |
802 | |
803 | + { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */ |
804 | + .driver_info = SEND_ZERO_PACKET, |
805 | + }, |
806 | + { USB_DEVICE(0x1bc7, 0x0023), /* Telit 3G ACM + ECM composition */ |
807 | + .driver_info = SEND_ZERO_PACKET, |
808 | + }, |
809 | + |
810 | /* control interfaces without any protocol set */ |
811 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, |
812 | USB_CDC_PROTO_NONE) }, |
813 | diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c |
814 | index 514c5214ddb2..8bc35d53408b 100644 |
815 | --- a/drivers/usb/core/quirks.c |
816 | +++ b/drivers/usb/core/quirks.c |
817 | @@ -394,7 +394,8 @@ static const struct usb_device_id usb_quirk_list[] = { |
818 | { USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET }, |
819 | |
820 | /* Corsair K70 RGB */ |
821 | - { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, |
822 | + { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT | |
823 | + USB_QUIRK_DELAY_CTRL_MSG }, |
824 | |
825 | /* Corsair Strafe */ |
826 | { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT | |
827 | diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c |
828 | index e227bb5b794f..101ebac43c87 100644 |
829 | --- a/drivers/usb/storage/scsiglue.c |
830 | +++ b/drivers/usb/storage/scsiglue.c |
831 | @@ -235,8 +235,12 @@ static int slave_configure(struct scsi_device *sdev) |
832 | if (!(us->fflags & US_FL_NEEDS_CAP16)) |
833 | sdev->try_rc_10_first = 1; |
834 | |
835 | - /* assume SPC3 or latter devices support sense size > 18 */ |
836 | - if (sdev->scsi_level > SCSI_SPC_2) |
837 | + /* |
838 | + * assume SPC3 or latter devices support sense size > 18 |
839 | + * unless US_FL_BAD_SENSE quirk is specified. |
840 | + */ |
841 | + if (sdev->scsi_level > SCSI_SPC_2 && |
842 | + !(us->fflags & US_FL_BAD_SENSE)) |
843 | us->fflags |= US_FL_SANE_SENSE; |
844 | |
845 | /* |
846 | diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h |
847 | index f7f83b21dc74..ea0d27a94afe 100644 |
848 | --- a/drivers/usb/storage/unusual_devs.h |
849 | +++ b/drivers/usb/storage/unusual_devs.h |
850 | @@ -1265,6 +1265,18 @@ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff, |
851 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
852 | US_FL_FIX_CAPACITY ), |
853 | |
854 | +/* |
855 | + * Reported by Icenowy Zheng <icenowy@aosc.io> |
856 | + * The SMI SM3350 USB-UFS bridge controller will enter a wrong state |
857 | + * that do not process read/write command if a long sense is requested, |
858 | + * so force to use 18-byte sense. |
859 | + */ |
860 | +UNUSUAL_DEV( 0x090c, 0x3350, 0x0000, 0xffff, |
861 | + "SMI", |
862 | + "SM3350 UFS-to-USB-Mass-Storage bridge", |
863 | + USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
864 | + US_FL_BAD_SENSE ), |
865 | + |
866 | /* |
867 | * Reported by Paul Hartman <paul.hartman+linux@gmail.com> |
868 | * This card reader returns "Illegal Request, Logical Block Address |
869 | diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c |
870 | index d9fd3188615d..64cbc2d007c9 100644 |
871 | --- a/drivers/vfio/vfio_iommu_type1.c |
872 | +++ b/drivers/vfio/vfio_iommu_type1.c |
873 | @@ -878,7 +878,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, |
874 | return -EINVAL; |
875 | if (!unmap->size || unmap->size & mask) |
876 | return -EINVAL; |
877 | - if (unmap->iova + unmap->size < unmap->iova || |
878 | + if (unmap->iova + unmap->size - 1 < unmap->iova || |
879 | unmap->size > SIZE_MAX) |
880 | return -EINVAL; |
881 | |
882 | diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c |
883 | index fa18520529f3..7ad6f2eec711 100644 |
884 | --- a/fs/btrfs/ctree.c |
885 | +++ b/fs/btrfs/ctree.c |
886 | @@ -1051,19 +1051,21 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, |
887 | parent_start = parent->start; |
888 | |
889 | /* |
890 | - * If we are COWing a node/leaf from the extent, chunk or device trees, |
891 | - * make sure that we do not finish block group creation of pending block |
892 | - * groups. We do this to avoid a deadlock. |
893 | + * If we are COWing a node/leaf from the extent, chunk, device or free |
894 | + * space trees, make sure that we do not finish block group creation of |
895 | + * pending block groups. We do this to avoid a deadlock. |
896 | * COWing can result in allocation of a new chunk, and flushing pending |
897 | * block groups (btrfs_create_pending_block_groups()) can be triggered |
898 | * when finishing allocation of a new chunk. Creation of a pending block |
899 | - * group modifies the extent, chunk and device trees, therefore we could |
900 | - * deadlock with ourselves since we are holding a lock on an extent |
901 | - * buffer that btrfs_create_pending_block_groups() may try to COW later. |
902 | + * group modifies the extent, chunk, device and free space trees, |
903 | + * therefore we could deadlock with ourselves since we are holding a |
904 | + * lock on an extent buffer that btrfs_create_pending_block_groups() may |
905 | + * try to COW later. |
906 | */ |
907 | if (root == fs_info->extent_root || |
908 | root == fs_info->chunk_root || |
909 | - root == fs_info->dev_root) |
910 | + root == fs_info->dev_root || |
911 | + root == fs_info->free_space_root) |
912 | trans->can_flush_pending_bgs = false; |
913 | |
914 | cow = btrfs_alloc_tree_block(trans, root, parent_start, |
915 | diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c |
916 | index ff434663d65b..e1fcb28ad4cc 100644 |
917 | --- a/fs/btrfs/qgroup.c |
918 | +++ b/fs/btrfs/qgroup.c |
919 | @@ -1013,16 +1013,22 @@ out_add_root: |
920 | btrfs_abort_transaction(trans, ret); |
921 | goto out_free_path; |
922 | } |
923 | - spin_lock(&fs_info->qgroup_lock); |
924 | - fs_info->quota_root = quota_root; |
925 | - set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); |
926 | - spin_unlock(&fs_info->qgroup_lock); |
927 | |
928 | ret = btrfs_commit_transaction(trans); |
929 | trans = NULL; |
930 | if (ret) |
931 | goto out_free_path; |
932 | |
933 | + /* |
934 | + * Set quota enabled flag after committing the transaction, to avoid |
935 | + * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot |
936 | + * creation. |
937 | + */ |
938 | + spin_lock(&fs_info->qgroup_lock); |
939 | + fs_info->quota_root = quota_root; |
940 | + set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); |
941 | + spin_unlock(&fs_info->qgroup_lock); |
942 | + |
943 | ret = qgroup_rescan_init(fs_info, 0, 1); |
944 | if (!ret) { |
945 | qgroup_rescan_zero_tracking(fs_info); |
946 | diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c |
947 | index f4405e430da6..223334f08530 100644 |
948 | --- a/fs/btrfs/volumes.c |
949 | +++ b/fs/btrfs/volumes.c |
950 | @@ -3712,6 +3712,7 @@ int btrfs_balance(struct btrfs_fs_info *fs_info, |
951 | int ret; |
952 | u64 num_devices; |
953 | unsigned seq; |
954 | + bool reducing_integrity; |
955 | |
956 | if (btrfs_fs_closing(fs_info) || |
957 | atomic_read(&fs_info->balance_pause_req) || |
958 | @@ -3796,24 +3797,30 @@ int btrfs_balance(struct btrfs_fs_info *fs_info, |
959 | !(bctl->sys.target & allowed)) || |
960 | ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && |
961 | (fs_info->avail_metadata_alloc_bits & allowed) && |
962 | - !(bctl->meta.target & allowed))) { |
963 | - if (bctl->flags & BTRFS_BALANCE_FORCE) { |
964 | - btrfs_info(fs_info, |
965 | - "balance: force reducing metadata integrity"); |
966 | - } else { |
967 | - btrfs_err(fs_info, |
968 | - "balance: reduces metadata integrity, use --force if you want this"); |
969 | - ret = -EINVAL; |
970 | - goto out; |
971 | - } |
972 | - } |
973 | + !(bctl->meta.target & allowed))) |
974 | + reducing_integrity = true; |
975 | + else |
976 | + reducing_integrity = false; |
977 | + |
978 | + /* if we're not converting, the target field is uninitialized */ |
979 | + meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? |
980 | + bctl->meta.target : fs_info->avail_metadata_alloc_bits; |
981 | + data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? |
982 | + bctl->data.target : fs_info->avail_data_alloc_bits; |
983 | } while (read_seqretry(&fs_info->profiles_lock, seq)); |
984 | |
985 | - /* if we're not converting, the target field is uninitialized */ |
986 | - meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? |
987 | - bctl->meta.target : fs_info->avail_metadata_alloc_bits; |
988 | - data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? |
989 | - bctl->data.target : fs_info->avail_data_alloc_bits; |
990 | + if (reducing_integrity) { |
991 | + if (bctl->flags & BTRFS_BALANCE_FORCE) { |
992 | + btrfs_info(fs_info, |
993 | + "balance: force reducing metadata integrity"); |
994 | + } else { |
995 | + btrfs_err(fs_info, |
996 | + "balance: reduces metadata integrity, use --force if you want this"); |
997 | + ret = -EINVAL; |
998 | + goto out; |
999 | + } |
1000 | + } |
1001 | + |
1002 | if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < |
1003 | btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { |
1004 | int meta_index = btrfs_bg_flags_to_raid_index(meta_target); |
1005 | diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c |
1006 | index ea78c3d6dcfc..f141b45ce349 100644 |
1007 | --- a/fs/btrfs/xattr.c |
1008 | +++ b/fs/btrfs/xattr.c |
1009 | @@ -11,6 +11,7 @@ |
1010 | #include <linux/security.h> |
1011 | #include <linux/posix_acl_xattr.h> |
1012 | #include <linux/iversion.h> |
1013 | +#include <linux/sched/mm.h> |
1014 | #include "ctree.h" |
1015 | #include "btrfs_inode.h" |
1016 | #include "transaction.h" |
1017 | @@ -422,9 +423,15 @@ static int btrfs_initxattrs(struct inode *inode, |
1018 | { |
1019 | const struct xattr *xattr; |
1020 | struct btrfs_trans_handle *trans = fs_info; |
1021 | + unsigned int nofs_flag; |
1022 | char *name; |
1023 | int err = 0; |
1024 | |
1025 | + /* |
1026 | + * We're holding a transaction handle, so use a NOFS memory allocation |
1027 | + * context to avoid deadlock if reclaim happens. |
1028 | + */ |
1029 | + nofs_flag = memalloc_nofs_save(); |
1030 | for (xattr = xattr_array; xattr->name != NULL; xattr++) { |
1031 | name = kmalloc(XATTR_SECURITY_PREFIX_LEN + |
1032 | strlen(xattr->name) + 1, GFP_KERNEL); |
1033 | @@ -440,6 +447,7 @@ static int btrfs_initxattrs(struct inode *inode, |
1034 | if (err < 0) |
1035 | break; |
1036 | } |
1037 | + memalloc_nofs_restore(nofs_flag); |
1038 | return err; |
1039 | } |
1040 | |
1041 | diff --git a/fs/cifs/file.c b/fs/cifs/file.c |
1042 | index 8d41ca7bfcf1..7b637fc27990 100644 |
1043 | --- a/fs/cifs/file.c |
1044 | +++ b/fs/cifs/file.c |
1045 | @@ -1120,10 +1120,10 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile) |
1046 | |
1047 | /* |
1048 | * Accessing maxBuf is racy with cifs_reconnect - need to store value |
1049 | - * and check it for zero before using. |
1050 | + * and check it before using. |
1051 | */ |
1052 | max_buf = tcon->ses->server->maxBuf; |
1053 | - if (!max_buf) { |
1054 | + if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) { |
1055 | free_xid(xid); |
1056 | return -EINVAL; |
1057 | } |
1058 | @@ -1460,10 +1460,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, |
1059 | |
1060 | /* |
1061 | * Accessing maxBuf is racy with cifs_reconnect - need to store value |
1062 | - * and check it for zero before using. |
1063 | + * and check it before using. |
1064 | */ |
1065 | max_buf = tcon->ses->server->maxBuf; |
1066 | - if (!max_buf) |
1067 | + if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) |
1068 | return -EINVAL; |
1069 | |
1070 | max_num = (max_buf - sizeof(struct smb_hdr)) / |
1071 | diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c |
1072 | index 4ed10dd086e6..2fc3d31967ee 100644 |
1073 | --- a/fs/cifs/smb2file.c |
1074 | +++ b/fs/cifs/smb2file.c |
1075 | @@ -122,10 +122,10 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, |
1076 | |
1077 | /* |
1078 | * Accessing maxBuf is racy with cifs_reconnect - need to store value |
1079 | - * and check it for zero before using. |
1080 | + * and check it before using. |
1081 | */ |
1082 | max_buf = tcon->ses->server->maxBuf; |
1083 | - if (!max_buf) |
1084 | + if (max_buf < sizeof(struct smb2_lock_element)) |
1085 | return -EINVAL; |
1086 | |
1087 | max_num = max_buf / sizeof(struct smb2_lock_element); |
1088 | diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c |
1089 | index f54d07bda067..dba986524917 100644 |
1090 | --- a/fs/cifs/smb2pdu.c |
1091 | +++ b/fs/cifs/smb2pdu.c |
1092 | @@ -3185,12 +3185,14 @@ smb2_async_readv(struct cifs_readdata *rdata) |
1093 | if (rdata->credits) { |
1094 | shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes, |
1095 | SMB2_MAX_BUFFER_SIZE)); |
1096 | - shdr->CreditRequest = shdr->CreditCharge; |
1097 | + shdr->CreditRequest = |
1098 | + cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1); |
1099 | spin_lock(&server->req_lock); |
1100 | server->credits += rdata->credits - |
1101 | le16_to_cpu(shdr->CreditCharge); |
1102 | spin_unlock(&server->req_lock); |
1103 | wake_up(&server->request_q); |
1104 | + rdata->credits = le16_to_cpu(shdr->CreditCharge); |
1105 | flags |= CIFS_HAS_CREDITS; |
1106 | } |
1107 | |
1108 | @@ -3462,12 +3464,14 @@ smb2_async_writev(struct cifs_writedata *wdata, |
1109 | if (wdata->credits) { |
1110 | shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, |
1111 | SMB2_MAX_BUFFER_SIZE)); |
1112 | - shdr->CreditRequest = shdr->CreditCharge; |
1113 | + shdr->CreditRequest = |
1114 | + cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1); |
1115 | spin_lock(&server->req_lock); |
1116 | server->credits += wdata->credits - |
1117 | le16_to_cpu(shdr->CreditCharge); |
1118 | spin_unlock(&server->req_lock); |
1119 | wake_up(&server->request_q); |
1120 | + wdata->credits = le16_to_cpu(shdr->CreditCharge); |
1121 | flags |= CIFS_HAS_CREDITS; |
1122 | } |
1123 | |
1124 | diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c |
1125 | index 333729cf46cd..66348b3d28e6 100644 |
1126 | --- a/fs/cifs/transport.c |
1127 | +++ b/fs/cifs/transport.c |
1128 | @@ -378,7 +378,7 @@ smbd_done: |
1129 | if (rc < 0 && rc != -EINTR) |
1130 | cifs_dbg(VFS, "Error %d sending data on socket to server\n", |
1131 | rc); |
1132 | - else |
1133 | + else if (rc > 0) |
1134 | rc = 0; |
1135 | |
1136 | return rc; |
1137 | @@ -786,7 +786,8 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, |
1138 | int i, j, rc = 0; |
1139 | int timeout, optype; |
1140 | struct mid_q_entry *midQ[MAX_COMPOUND]; |
1141 | - unsigned int credits = 0; |
1142 | + bool cancelled_mid[MAX_COMPOUND] = {false}; |
1143 | + unsigned int credits[MAX_COMPOUND] = {0}; |
1144 | char *buf; |
1145 | |
1146 | timeout = flags & CIFS_TIMEOUT_MASK; |
1147 | @@ -804,13 +805,31 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, |
1148 | return -ENOENT; |
1149 | |
1150 | /* |
1151 | - * Ensure that we do not send more than 50 overlapping requests |
1152 | - * to the same server. We may make this configurable later or |
1153 | - * use ses->maxReq. |
1154 | + * Ensure we obtain 1 credit per request in the compound chain. |
1155 | + * It can be optimized further by waiting for all the credits |
1156 | + * at once but this can wait long enough if we don't have enough |
1157 | + * credits due to some heavy operations in progress or the server |
1158 | + * not granting us much, so a fallback to the current approach is |
1159 | + * needed anyway. |
1160 | */ |
1161 | - rc = wait_for_free_request(ses->server, timeout, optype); |
1162 | - if (rc) |
1163 | - return rc; |
1164 | + for (i = 0; i < num_rqst; i++) { |
1165 | + rc = wait_for_free_request(ses->server, timeout, optype); |
1166 | + if (rc) { |
1167 | + /* |
1168 | + * We haven't sent an SMB packet to the server yet but |
1169 | + * we already obtained credits for i requests in the |
1170 | + * compound chain - need to return those credits back |
1171 | + * for future use. Note that we need to call add_credits |
1172 | + * multiple times to match the way we obtained credits |
1173 | + * in the first place and to account for in flight |
1174 | + * requests correctly. |
1175 | + */ |
1176 | + for (j = 0; j < i; j++) |
1177 | + add_credits(ses->server, 1, optype); |
1178 | + return rc; |
1179 | + } |
1180 | + credits[i] = 1; |
1181 | + } |
1182 | |
1183 | /* |
1184 | * Make sure that we sign in the same order that we send on this socket |
1185 | @@ -826,8 +845,10 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, |
1186 | for (j = 0; j < i; j++) |
1187 | cifs_delete_mid(midQ[j]); |
1188 | mutex_unlock(&ses->server->srv_mutex); |
1189 | + |
1190 | /* Update # of requests on wire to server */ |
1191 | - add_credits(ses->server, 1, optype); |
1192 | + for (j = 0; j < num_rqst; j++) |
1193 | + add_credits(ses->server, credits[j], optype); |
1194 | return PTR_ERR(midQ[i]); |
1195 | } |
1196 | |
1197 | @@ -874,19 +895,16 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, |
1198 | if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) { |
1199 | midQ[i]->mid_flags |= MID_WAIT_CANCELLED; |
1200 | midQ[i]->callback = DeleteMidQEntry; |
1201 | - spin_unlock(&GlobalMid_Lock); |
1202 | - add_credits(ses->server, 1, optype); |
1203 | - return rc; |
1204 | + cancelled_mid[i] = true; |
1205 | } |
1206 | spin_unlock(&GlobalMid_Lock); |
1207 | } |
1208 | } |
1209 | |
1210 | for (i = 0; i < num_rqst; i++) |
1211 | - if (midQ[i]->resp_buf) |
1212 | - credits += ses->server->ops->get_credits(midQ[i]); |
1213 | - if (!credits) |
1214 | - credits = 1; |
1215 | + if (!cancelled_mid[i] && midQ[i]->resp_buf |
1216 | + && (midQ[i]->mid_state == MID_RESPONSE_RECEIVED)) |
1217 | + credits[i] = ses->server->ops->get_credits(midQ[i]); |
1218 | |
1219 | for (i = 0; i < num_rqst; i++) { |
1220 | if (rc < 0) |
1221 | @@ -894,8 +912,9 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, |
1222 | |
1223 | rc = cifs_sync_mid_result(midQ[i], ses->server); |
1224 | if (rc != 0) { |
1225 | - add_credits(ses->server, credits, optype); |
1226 | - return rc; |
1227 | + /* mark this mid as cancelled to not free it below */ |
1228 | + cancelled_mid[i] = true; |
1229 | + goto out; |
1230 | } |
1231 | |
1232 | if (!midQ[i]->resp_buf || |
1233 | @@ -942,9 +961,11 @@ out: |
1234 | * This is prevented above by using a noop callback that will not |
1235 | * wake this thread except for the very last PDU. |
1236 | */ |
1237 | - for (i = 0; i < num_rqst; i++) |
1238 | - cifs_delete_mid(midQ[i]); |
1239 | - add_credits(ses->server, credits, optype); |
1240 | + for (i = 0; i < num_rqst; i++) { |
1241 | + if (!cancelled_mid[i]) |
1242 | + cifs_delete_mid(midQ[i]); |
1243 | + add_credits(ses->server, credits[i], optype); |
1244 | + } |
1245 | |
1246 | return rc; |
1247 | } |
1248 | diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c |
1249 | index 26a7fe5c4fd3..712f00995390 100644 |
1250 | --- a/fs/ext4/fsync.c |
1251 | +++ b/fs/ext4/fsync.c |
1252 | @@ -116,8 +116,16 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) |
1253 | goto out; |
1254 | } |
1255 | |
1256 | + ret = file_write_and_wait_range(file, start, end); |
1257 | + if (ret) |
1258 | + return ret; |
1259 | + |
1260 | if (!journal) { |
1261 | - ret = __generic_file_fsync(file, start, end, datasync); |
1262 | + struct writeback_control wbc = { |
1263 | + .sync_mode = WB_SYNC_ALL |
1264 | + }; |
1265 | + |
1266 | + ret = ext4_write_inode(inode, &wbc); |
1267 | if (!ret) |
1268 | ret = ext4_sync_parent(inode); |
1269 | if (test_opt(inode->i_sb, BARRIER)) |
1270 | @@ -125,9 +133,6 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) |
1271 | goto out; |
1272 | } |
1273 | |
1274 | - ret = file_write_and_wait_range(file, start, end); |
1275 | - if (ret) |
1276 | - return ret; |
1277 | /* |
1278 | * data=writeback,ordered: |
1279 | * The caller's filemap_fdatawrite()/wait will sync the data. |
1280 | @@ -159,6 +164,9 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) |
1281 | ret = err; |
1282 | } |
1283 | out: |
1284 | + err = file_check_and_advance_wb_err(file); |
1285 | + if (ret == 0) |
1286 | + ret = err; |
1287 | trace_ext4_sync_file_exit(inode, ret); |
1288 | return ret; |
1289 | } |
1290 | diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c |
1291 | index 27373d88b5f0..56f6e1782d5f 100644 |
1292 | --- a/fs/ext4/inline.c |
1293 | +++ b/fs/ext4/inline.c |
1294 | @@ -1890,12 +1890,12 @@ int ext4_inline_data_fiemap(struct inode *inode, |
1295 | physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data; |
1296 | physical += offsetof(struct ext4_inode, i_block); |
1297 | |
1298 | - if (physical) |
1299 | - error = fiemap_fill_next_extent(fieinfo, start, physical, |
1300 | - inline_len, flags); |
1301 | brelse(iloc.bh); |
1302 | out: |
1303 | up_read(&EXT4_I(inode)->xattr_sem); |
1304 | + if (physical) |
1305 | + error = fiemap_fill_next_extent(fieinfo, start, physical, |
1306 | + inline_len, flags); |
1307 | return (error < 0 ? error : 0); |
1308 | } |
1309 | |
1310 | diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c |
1311 | index 36abbdafb26e..2c43c5b92229 100644 |
1312 | --- a/fs/ext4/inode.c |
1313 | +++ b/fs/ext4/inode.c |
1314 | @@ -2748,7 +2748,8 @@ static int ext4_writepages(struct address_space *mapping, |
1315 | * We may need to convert up to one extent per block in |
1316 | * the page and we may dirty the inode. |
1317 | */ |
1318 | - rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits); |
1319 | + rsv_blocks = 1 + ext4_chunk_trans_blocks(inode, |
1320 | + PAGE_SIZE >> inode->i_blkbits); |
1321 | } |
1322 | |
1323 | /* |
1324 | @@ -4802,7 +4803,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, |
1325 | gid_t i_gid; |
1326 | projid_t i_projid; |
1327 | |
1328 | - if (((flags & EXT4_IGET_NORMAL) && |
1329 | + if ((!(flags & EXT4_IGET_SPECIAL) && |
1330 | (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) || |
1331 | (ino < EXT4_ROOT_INO) || |
1332 | (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) { |
1333 | diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
1334 | index ee0f30852835..a1cf7d68b4f0 100644 |
1335 | --- a/fs/ext4/super.c |
1336 | +++ b/fs/ext4/super.c |
1337 | @@ -4904,7 +4904,7 @@ static int ext4_commit_super(struct super_block *sb, int sync) |
1338 | ext4_superblock_csum_set(sb); |
1339 | if (sync) |
1340 | lock_buffer(sbh); |
1341 | - if (buffer_write_io_error(sbh)) { |
1342 | + if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) { |
1343 | /* |
1344 | * Oh, dear. A previous attempt to write the |
1345 | * superblock failed. This could happen because the |
1346 | diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h |
1347 | index 4d36b27214fd..0242f6eec4ea 100644 |
1348 | --- a/include/linux/compiler-gcc.h |
1349 | +++ b/include/linux/compiler-gcc.h |
1350 | @@ -75,7 +75,7 @@ |
1351 | #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) |
1352 | #endif |
1353 | |
1354 | -#ifdef RETPOLINE |
1355 | +#ifdef CONFIG_RETPOLINE |
1356 | #define __noretpoline __attribute__((indirect_branch("keep"))) |
1357 | #endif |
1358 | |
1359 | diff --git a/include/linux/module.h b/include/linux/module.h |
1360 | index e19ae08c7fb8..904f94628132 100644 |
1361 | --- a/include/linux/module.h |
1362 | +++ b/include/linux/module.h |
1363 | @@ -818,7 +818,7 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr, |
1364 | static inline void module_bug_cleanup(struct module *mod) {} |
1365 | #endif /* CONFIG_GENERIC_BUG */ |
1366 | |
1367 | -#ifdef RETPOLINE |
1368 | +#ifdef CONFIG_RETPOLINE |
1369 | extern bool retpoline_module_ok(bool has_retpoline); |
1370 | #else |
1371 | static inline bool retpoline_module_ok(bool has_retpoline) |
1372 | diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h |
1373 | index 73e130a840ce..fdb6b317d974 100644 |
1374 | --- a/include/linux/sunrpc/svc.h |
1375 | +++ b/include/linux/sunrpc/svc.h |
1376 | @@ -295,9 +295,12 @@ struct svc_rqst { |
1377 | struct svc_cacherep * rq_cacherep; /* cache info */ |
1378 | struct task_struct *rq_task; /* service thread */ |
1379 | spinlock_t rq_lock; /* per-request lock */ |
1380 | + struct net *rq_bc_net; /* pointer to backchannel's |
1381 | + * net namespace |
1382 | + */ |
1383 | }; |
1384 | |
1385 | -#define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net) |
1386 | +#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net) |
1387 | |
1388 | /* |
1389 | * Rigorous type checking on sockaddr type conversions |
1390 | diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h |
1391 | index bbb08a3ef5cc..a2644c494a9c 100644 |
1392 | --- a/include/trace/events/sunrpc.h |
1393 | +++ b/include/trace/events/sunrpc.h |
1394 | @@ -582,7 +582,8 @@ TRACE_EVENT(svc_process, |
1395 | __field(u32, vers) |
1396 | __field(u32, proc) |
1397 | __string(service, name) |
1398 | - __string(addr, rqst->rq_xprt->xpt_remotebuf) |
1399 | + __string(addr, rqst->rq_xprt ? |
1400 | + rqst->rq_xprt->xpt_remotebuf : "(null)") |
1401 | ), |
1402 | |
1403 | TP_fast_assign( |
1404 | @@ -590,7 +591,8 @@ TRACE_EVENT(svc_process, |
1405 | __entry->vers = rqst->rq_vers; |
1406 | __entry->proc = rqst->rq_proc; |
1407 | __assign_str(service, name); |
1408 | - __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); |
1409 | + __assign_str(addr, rqst->rq_xprt ? |
1410 | + rqst->rq_xprt->xpt_remotebuf : "(null)"); |
1411 | ), |
1412 | |
1413 | TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%u", |
1414 | diff --git a/mm/memory.c b/mm/memory.c |
1415 | index 5c5df53dbdf9..281172540a9c 100644 |
1416 | --- a/mm/memory.c |
1417 | +++ b/mm/memory.c |
1418 | @@ -3237,6 +3237,29 @@ static vm_fault_t __do_fault(struct vm_fault *vmf) |
1419 | struct vm_area_struct *vma = vmf->vma; |
1420 | vm_fault_t ret; |
1421 | |
1422 | + /* |
1423 | + * Preallocate pte before we take page_lock because this might lead to |
1424 | + * deadlocks for memcg reclaim which waits for pages under writeback: |
1425 | + * lock_page(A) |
1426 | + * SetPageWriteback(A) |
1427 | + * unlock_page(A) |
1428 | + * lock_page(B) |
1429 | + * lock_page(B) |
1430 | + * pte_alloc_pne |
1431 | + * shrink_page_list |
1432 | + * wait_on_page_writeback(A) |
1433 | + * SetPageWriteback(B) |
1434 | + * unlock_page(B) |
1435 | + * # flush A, B to clear the writeback |
1436 | + */ |
1437 | + if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { |
1438 | + vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm, |
1439 | + vmf->address); |
1440 | + if (!vmf->prealloc_pte) |
1441 | + return VM_FAULT_OOM; |
1442 | + smp_wmb(); /* See comment in __pte_alloc() */ |
1443 | + } |
1444 | + |
1445 | ret = vma->vm_ops->fault(vmf); |
1446 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | |
1447 | VM_FAULT_DONE_COW))) |
1448 | diff --git a/mm/slab.c b/mm/slab.c |
1449 | index d73c7a4820a4..fad6839e8eab 100644 |
1450 | --- a/mm/slab.c |
1451 | +++ b/mm/slab.c |
1452 | @@ -679,8 +679,10 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries, |
1453 | struct alien_cache *alc = NULL; |
1454 | |
1455 | alc = kmalloc_node(memsize, gfp, node); |
1456 | - init_arraycache(&alc->ac, entries, batch); |
1457 | - spin_lock_init(&alc->lock); |
1458 | + if (alc) { |
1459 | + init_arraycache(&alc->ac, entries, batch); |
1460 | + spin_lock_init(&alc->lock); |
1461 | + } |
1462 | return alc; |
1463 | } |
1464 | |
1465 | diff --git a/mm/usercopy.c b/mm/usercopy.c |
1466 | index 852eb4e53f06..14faadcedd06 100644 |
1467 | --- a/mm/usercopy.c |
1468 | +++ b/mm/usercopy.c |
1469 | @@ -247,7 +247,8 @@ static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks); |
1470 | /* |
1471 | * Validates that the given object is: |
1472 | * - not bogus address |
1473 | - * - known-safe heap or stack object |
1474 | + * - fully contained by stack (or stack frame, when available) |
1475 | + * - fully within SLAB object (or object whitelist area, when available) |
1476 | * - not in kernel text |
1477 | */ |
1478 | void __check_object_size(const void *ptr, unsigned long n, bool to_user) |
1479 | @@ -262,9 +263,6 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user) |
1480 | /* Check for invalid addresses. */ |
1481 | check_bogus_address((const unsigned long)ptr, n, to_user); |
1482 | |
1483 | - /* Check for bad heap object. */ |
1484 | - check_heap_object(ptr, n, to_user); |
1485 | - |
1486 | /* Check for bad stack object. */ |
1487 | switch (check_stack_object(ptr, n)) { |
1488 | case NOT_STACK: |
1489 | @@ -282,6 +280,9 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user) |
1490 | usercopy_abort("process stack", NULL, to_user, 0, n); |
1491 | } |
1492 | |
1493 | + /* Check for bad heap object. */ |
1494 | + check_heap_object(ptr, n, to_user); |
1495 | + |
1496 | /* Check for object in kernel to avoid text exposure. */ |
1497 | check_kernel_text_object((const unsigned long)ptr, n, to_user); |
1498 | } |
1499 | diff --git a/mm/util.c b/mm/util.c |
1500 | index 9e3ebd2ef65f..6a24a1025d77 100644 |
1501 | --- a/mm/util.c |
1502 | +++ b/mm/util.c |
1503 | @@ -485,7 +485,7 @@ bool page_mapped(struct page *page) |
1504 | return true; |
1505 | if (PageHuge(page)) |
1506 | return false; |
1507 | - for (i = 0; i < hpage_nr_pages(page); i++) { |
1508 | + for (i = 0; i < (1 << compound_order(page)); i++) { |
1509 | if (atomic_read(&page[i]._mapcount) >= 0) |
1510 | return true; |
1511 | } |
1512 | diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c |
1513 | index d13e05f1a990..d65f8d35de87 100644 |
1514 | --- a/net/sunrpc/svc.c |
1515 | +++ b/net/sunrpc/svc.c |
1516 | @@ -1144,6 +1144,8 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) |
1517 | static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {} |
1518 | #endif |
1519 | |
1520 | +extern void svc_tcp_prep_reply_hdr(struct svc_rqst *); |
1521 | + |
1522 | /* |
1523 | * Common routine for processing the RPC request. |
1524 | */ |
1525 | @@ -1172,7 +1174,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) |
1526 | clear_bit(RQ_DROPME, &rqstp->rq_flags); |
1527 | |
1528 | /* Setup reply header */ |
1529 | - rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp); |
1530 | + if (rqstp->rq_prot == IPPROTO_TCP) |
1531 | + svc_tcp_prep_reply_hdr(rqstp); |
1532 | |
1533 | svc_putu32(resv, rqstp->rq_xid); |
1534 | |
1535 | @@ -1244,7 +1247,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) |
1536 | * for lower versions. RPC_PROG_MISMATCH seems to be the closest |
1537 | * fit. |
1538 | */ |
1539 | - if (versp->vs_need_cong_ctrl && |
1540 | + if (versp->vs_need_cong_ctrl && rqstp->rq_xprt && |
1541 | !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags)) |
1542 | goto err_bad_vers; |
1543 | |
1544 | @@ -1336,7 +1339,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) |
1545 | return 0; |
1546 | |
1547 | close: |
1548 | - if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags)) |
1549 | + if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags)) |
1550 | svc_close_xprt(rqstp->rq_xprt); |
1551 | dprintk("svc: svc_process close\n"); |
1552 | return 0; |
1553 | @@ -1459,10 +1462,10 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req, |
1554 | dprintk("svc: %s(%p)\n", __func__, req); |
1555 | |
1556 | /* Build the svc_rqst used by the common processing routine */ |
1557 | - rqstp->rq_xprt = serv->sv_bc_xprt; |
1558 | rqstp->rq_xid = req->rq_xid; |
1559 | rqstp->rq_prot = req->rq_xprt->prot; |
1560 | rqstp->rq_server = serv; |
1561 | + rqstp->rq_bc_net = req->rq_xprt->xprt_net; |
1562 | |
1563 | rqstp->rq_addrlen = sizeof(req->rq_xprt->addr); |
1564 | memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen); |
1565 | diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c |
1566 | index 83ccd0221c98..6cf0fd37cbf0 100644 |
1567 | --- a/net/sunrpc/svc_xprt.c |
1568 | +++ b/net/sunrpc/svc_xprt.c |
1569 | @@ -469,10 +469,11 @@ out: |
1570 | */ |
1571 | void svc_reserve(struct svc_rqst *rqstp, int space) |
1572 | { |
1573 | + struct svc_xprt *xprt = rqstp->rq_xprt; |
1574 | + |
1575 | space += rqstp->rq_res.head[0].iov_len; |
1576 | |
1577 | - if (space < rqstp->rq_reserved) { |
1578 | - struct svc_xprt *xprt = rqstp->rq_xprt; |
1579 | + if (xprt && space < rqstp->rq_reserved) { |
1580 | atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved); |
1581 | rqstp->rq_reserved = space; |
1582 | |
1583 | diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c |
1584 | index fc1c0d9ef57d..97a8282955a8 100644 |
1585 | --- a/net/sunrpc/svcsock.c |
1586 | +++ b/net/sunrpc/svcsock.c |
1587 | @@ -1198,7 +1198,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp) |
1588 | /* |
1589 | * Setup response header. TCP has a 4B record length field. |
1590 | */ |
1591 | -static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp) |
1592 | +void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp) |
1593 | { |
1594 | struct kvec *resv = &rqstp->rq_res.head[0]; |
1595 | |
1596 | diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c |
1597 | index 0d998c54564d..5a5b3780456f 100644 |
1598 | --- a/scripts/mod/modpost.c |
1599 | +++ b/scripts/mod/modpost.c |
1600 | @@ -2157,7 +2157,7 @@ static void add_intree_flag(struct buffer *b, int is_intree) |
1601 | /* Cannot check for assembler */ |
1602 | static void add_retpoline(struct buffer *b) |
1603 | { |
1604 | - buf_printf(b, "\n#ifdef RETPOLINE\n"); |
1605 | + buf_printf(b, "\n#ifdef CONFIG_RETPOLINE\n"); |
1606 | buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n"); |
1607 | buf_printf(b, "#endif\n"); |
1608 | } |
1609 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
1610 | index 854d63c01dd2..8b9f2487969b 100644 |
1611 | --- a/sound/pci/hda/patch_realtek.c |
1612 | +++ b/sound/pci/hda/patch_realtek.c |
1613 | @@ -4102,6 +4102,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) |
1614 | case 0x10ec0295: |
1615 | case 0x10ec0289: |
1616 | case 0x10ec0299: |
1617 | + alc_process_coef_fw(codec, alc225_pre_hsmode); |
1618 | alc_process_coef_fw(codec, coef0225); |
1619 | break; |
1620 | case 0x10ec0867: |
1621 | @@ -5380,6 +5381,13 @@ static void alc285_fixup_invalidate_dacs(struct hda_codec *codec, |
1622 | snd_hda_override_wcaps(codec, 0x03, 0); |
1623 | } |
1624 | |
1625 | +static void alc_fixup_disable_mic_vref(struct hda_codec *codec, |
1626 | + const struct hda_fixup *fix, int action) |
1627 | +{ |
1628 | + if (action == HDA_FIXUP_ACT_PRE_PROBE) |
1629 | + snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ); |
1630 | +} |
1631 | + |
1632 | /* for hda_fixup_thinkpad_acpi() */ |
1633 | #include "thinkpad_helper.c" |
1634 | |
1635 | @@ -5492,6 +5500,7 @@ enum { |
1636 | ALC293_FIXUP_LENOVO_SPK_NOISE, |
1637 | ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, |
1638 | ALC255_FIXUP_DELL_SPK_NOISE, |
1639 | + ALC225_FIXUP_DISABLE_MIC_VREF, |
1640 | ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, |
1641 | ALC295_FIXUP_DISABLE_DAC3, |
1642 | ALC280_FIXUP_HP_HEADSET_MIC, |
1643 | @@ -6191,6 +6200,12 @@ static const struct hda_fixup alc269_fixups[] = { |
1644 | .chained = true, |
1645 | .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE |
1646 | }, |
1647 | + [ALC225_FIXUP_DISABLE_MIC_VREF] = { |
1648 | + .type = HDA_FIXUP_FUNC, |
1649 | + .v.func = alc_fixup_disable_mic_vref, |
1650 | + .chained = true, |
1651 | + .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE |
1652 | + }, |
1653 | [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = { |
1654 | .type = HDA_FIXUP_VERBS, |
1655 | .v.verbs = (const struct hda_verb[]) { |
1656 | @@ -6200,7 +6215,7 @@ static const struct hda_fixup alc269_fixups[] = { |
1657 | {} |
1658 | }, |
1659 | .chained = true, |
1660 | - .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE |
1661 | + .chain_id = ALC225_FIXUP_DISABLE_MIC_VREF |
1662 | }, |
1663 | [ALC280_FIXUP_HP_HEADSET_MIC] = { |
1664 | .type = HDA_FIXUP_FUNC, |
1665 | @@ -6503,6 +6518,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
1666 | SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), |
1667 | SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), |
1668 | SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB), |
1669 | + SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), |
1670 | SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
1671 | SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
1672 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), |
1673 | diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c |
1674 | index 8fb31a7cc22c..91495045ad5a 100644 |
1675 | --- a/virt/kvm/arm/arm.c |
1676 | +++ b/virt/kvm/arm/arm.c |
1677 | @@ -66,7 +66,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu); |
1678 | static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); |
1679 | static u32 kvm_next_vmid; |
1680 | static unsigned int kvm_vmid_bits __read_mostly; |
1681 | -static DEFINE_RWLOCK(kvm_vmid_lock); |
1682 | +static DEFINE_SPINLOCK(kvm_vmid_lock); |
1683 | |
1684 | static bool vgic_present; |
1685 | |
1686 | @@ -482,7 +482,9 @@ void force_vm_exit(const cpumask_t *mask) |
1687 | */ |
1688 | static bool need_new_vmid_gen(struct kvm *kvm) |
1689 | { |
1690 | - return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen)); |
1691 | + u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen); |
1692 | + smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */ |
1693 | + return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen); |
1694 | } |
1695 | |
1696 | /** |
1697 | @@ -497,16 +499,11 @@ static void update_vttbr(struct kvm *kvm) |
1698 | { |
1699 | phys_addr_t pgd_phys; |
1700 | u64 vmid; |
1701 | - bool new_gen; |
1702 | |
1703 | - read_lock(&kvm_vmid_lock); |
1704 | - new_gen = need_new_vmid_gen(kvm); |
1705 | - read_unlock(&kvm_vmid_lock); |
1706 | - |
1707 | - if (!new_gen) |
1708 | + if (!need_new_vmid_gen(kvm)) |
1709 | return; |
1710 | |
1711 | - write_lock(&kvm_vmid_lock); |
1712 | + spin_lock(&kvm_vmid_lock); |
1713 | |
1714 | /* |
1715 | * We need to re-check the vmid_gen here to ensure that if another vcpu |
1716 | @@ -514,7 +511,7 @@ static void update_vttbr(struct kvm *kvm) |
1717 | * use the same vmid. |
1718 | */ |
1719 | if (!need_new_vmid_gen(kvm)) { |
1720 | - write_unlock(&kvm_vmid_lock); |
1721 | + spin_unlock(&kvm_vmid_lock); |
1722 | return; |
1723 | } |
1724 | |
1725 | @@ -537,7 +534,6 @@ static void update_vttbr(struct kvm *kvm) |
1726 | kvm_call_hyp(__kvm_flush_vm_context); |
1727 | } |
1728 | |
1729 | - kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen); |
1730 | kvm->arch.vmid = kvm_next_vmid; |
1731 | kvm_next_vmid++; |
1732 | kvm_next_vmid &= (1 << kvm_vmid_bits) - 1; |
1733 | @@ -548,7 +544,10 @@ static void update_vttbr(struct kvm *kvm) |
1734 | vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits); |
1735 | kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid; |
1736 | |
1737 | - write_unlock(&kvm_vmid_lock); |
1738 | + smp_wmb(); |
1739 | + WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen)); |
1740 | + |
1741 | + spin_unlock(&kvm_vmid_lock); |
1742 | } |
1743 | |
1744 | static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) |