Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0289-5.4.190-all-fixes.patch



Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 60261 byte(s)
-add missing
1 diff --git a/Makefile b/Makefile
2 index cbb71900d3dcf..fd239ec16278b 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 189
10 +SUBLEVEL = 190
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
15 index 5b3549f1236c5..b2ede9bf82dff 100644
16 --- a/arch/arm/mach-davinci/board-da850-evm.c
17 +++ b/arch/arm/mach-davinci/board-da850-evm.c
18 @@ -1101,11 +1101,13 @@ static int __init da850_evm_config_emac(void)
19 int ret;
20 u32 val;
21 struct davinci_soc_info *soc_info = &davinci_soc_info;
22 - u8 rmii_en = soc_info->emac_pdata->rmii_en;
23 + u8 rmii_en;
24
25 if (!machine_is_davinci_da850_evm())
26 return 0;
27
28 + rmii_en = soc_info->emac_pdata->rmii_en;
29 +
30 cfg_chip3_base = DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG);
31
32 val = __raw_readl(cfg_chip3_base);
33 diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
34 index 73039949b5ce2..5f8e4c2df53cc 100644
35 --- a/arch/arm64/kernel/alternative.c
36 +++ b/arch/arm64/kernel/alternative.c
37 @@ -41,7 +41,7 @@ bool alternative_is_applied(u16 cpufeature)
38 /*
39 * Check if the target PC is within an alternative block.
40 */
41 -static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
42 +static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
43 {
44 unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
45 return !(pc >= replptr && pc <= (replptr + alt->alt_len));
46 @@ -49,7 +49,7 @@ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
47
48 #define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
49
50 -static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
51 +static __always_inline u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
52 {
53 u32 insn;
54
55 @@ -94,7 +94,7 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnp
56 return insn;
57 }
58
59 -static void patch_alternative(struct alt_instr *alt,
60 +static noinstr void patch_alternative(struct alt_instr *alt,
61 __le32 *origptr, __le32 *updptr, int nr_inst)
62 {
63 __le32 *replptr;
64 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
65 index 6ba5adb96a3be..0d8f9246ce153 100644
66 --- a/arch/powerpc/include/asm/page.h
67 +++ b/arch/powerpc/include/asm/page.h
68 @@ -132,7 +132,11 @@ static inline bool pfn_valid(unsigned long pfn)
69 #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
70 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
71
72 -#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
73 +#define virt_addr_valid(vaddr) ({ \
74 + unsigned long _addr = (unsigned long)vaddr; \
75 + _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory && \
76 + pfn_valid(virt_to_pfn(_addr)); \
77 +})
78
79 /*
80 * On Book-E parts we need __va to parse the device tree and we can't
81 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
82 index dca1590f295d0..af8a1bac93458 100644
83 --- a/drivers/ata/libata-core.c
84 +++ b/drivers/ata/libata-core.c
85 @@ -4580,6 +4580,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
86 ATA_HORKAGE_ZERO_AFTER_TRIM, },
87 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
88 ATA_HORKAGE_ZERO_AFTER_TRIM, },
89 + { "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
90 + ATA_HORKAGE_NO_DMA_LOG |
91 + ATA_HORKAGE_ZERO_AFTER_TRIM, },
92 { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
93 ATA_HORKAGE_ZERO_AFTER_TRIM, },
94 { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
95 diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
96 index 13c6eee481da7..d71c7b9b96650 100644
97 --- a/drivers/gpio/gpiolib-acpi.c
98 +++ b/drivers/gpio/gpiolib-acpi.c
99 @@ -275,8 +275,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
100 pin = agpio->pin_table[0];
101
102 if (pin <= 255) {
103 - char ev_name[5];
104 - sprintf(ev_name, "_%c%02hhX",
105 + char ev_name[8];
106 + sprintf(ev_name, "_%c%02X",
107 agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
108 pin);
109 if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
110 diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
111 index 5b393622f5920..a0f0a17e224fe 100644
112 --- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
113 +++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
114 @@ -119,6 +119,7 @@
115 #define CONNECTOR_OBJECT_ID_eDP 0x14
116 #define CONNECTOR_OBJECT_ID_MXM 0x15
117 #define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
118 +#define CONNECTOR_OBJECT_ID_USBC 0x17
119
120 /* deleted */
121
122 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
123 index e8e1720104160..ffd7547135225 100644
124 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
125 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
126 @@ -633,7 +633,7 @@ MODULE_PARM_DESC(sched_policy,
127 * Maximum number of processes that HWS can schedule concurrently. The maximum is the
128 * number of VMIDs assigned to the HWS, which is also the default.
129 */
130 -int hws_max_conc_proc = 8;
131 +int hws_max_conc_proc = -1;
132 module_param(hws_max_conc_proc, int, 0444);
133 MODULE_PARM_DESC(hws_max_conc_proc,
134 "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");
135 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
136 index ad9483b9eea32..60ee1a8321129 100644
137 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
138 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
139 @@ -609,15 +609,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
140 - kfd->vm_info.first_vmid_kfd + 1;
141
142 /* Verify module parameters regarding mapped process number*/
143 - if ((hws_max_conc_proc < 0)
144 - || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
145 - dev_err(kfd_device,
146 - "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
147 - hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
148 - kfd->vm_info.vmid_num_kfd);
149 + if (hws_max_conc_proc >= 0)
150 + kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd);
151 + else
152 kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
153 - } else
154 - kfd->max_proc_per_quantum = hws_max_conc_proc;
155
156 /* Allocate global GWS that is shared by all KFD processes */
157 if (hws_gws_support && amdgpu_amdkfd_alloc_gws(kfd->kgd,
158 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
159 index d674d4b3340fa..adbb2fec2e0f2 100644
160 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
161 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
162 @@ -532,6 +532,8 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
163 event_waiters = kmalloc_array(num_events,
164 sizeof(struct kfd_event_waiter),
165 GFP_KERNEL);
166 + if (!event_waiters)
167 + return NULL;
168
169 for (i = 0; (event_waiters) && (i < num_events) ; i++) {
170 init_wait(&event_waiters[i].wait);
171 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
172 index c5231c50c4126..de33864af70b8 100644
173 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
174 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
175 @@ -1210,7 +1210,8 @@ static int dm_resume(void *handle)
176 * this is the case when traversing through already created
177 * MST connectors, should be skipped
178 */
179 - if (aconnector->mst_port)
180 + if (aconnector->dc_link &&
181 + aconnector->dc_link->type == dc_connection_mst_branch)
182 continue;
183
184 mutex_lock(&aconnector->hpd_lock);
185 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
186 index 95a5310e9e661..de246e183d6ba 100644
187 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
188 +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
189 @@ -1546,8 +1546,8 @@ bool dc_is_stream_unchanged(
190 if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
191 return false;
192
193 - // Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks
194 - if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
195 + /*compare audio info*/
196 + if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0)
197 return false;
198
199 return true;
200 diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
201 index bc5ebea1abede..fa3acf60e7bd2 100644
202 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
203 +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
204 @@ -2202,14 +2202,18 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
205 &blnd_cfg.black_color);
206 }
207
208 - if (per_pixel_alpha)
209 - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
210 - else
211 - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
212 -
213 blnd_cfg.overlap_only = false;
214 blnd_cfg.global_gain = 0xff;
215
216 + if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
217 + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
218 + blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
219 + } else if (per_pixel_alpha) {
220 + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
221 + } else {
222 + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
223 + }
224 +
225 if (pipe_ctx->plane_state->global_alpha)
226 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
227 else
228 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
229 index 03a2e1d7f0673..f7965a5d24442 100644
230 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
231 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
232 @@ -1740,14 +1740,18 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
233 pipe_ctx, &blnd_cfg.black_color);
234 }
235
236 - if (per_pixel_alpha)
237 - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
238 - else
239 - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
240 -
241 blnd_cfg.overlap_only = false;
242 blnd_cfg.global_gain = 0xff;
243
244 + if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
245 + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
246 + blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
247 + } else if (per_pixel_alpha) {
248 + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
249 + } else {
250 + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
251 + }
252 +
253 if (pipe_ctx->plane_state->global_alpha)
254 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
255 else
256 diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
257 index d885d642ed7fc..537736713598b 100644
258 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
259 +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
260 @@ -85,7 +85,8 @@
261 //PB7 = MD0
262 #define MASK_VTEM_MD0__VRR_EN 0x01
263 #define MASK_VTEM_MD0__M_CONST 0x02
264 -#define MASK_VTEM_MD0__RESERVED2 0x0C
265 +#define MASK_VTEM_MD0__QMS_EN 0x04
266 +#define MASK_VTEM_MD0__RESERVED2 0x08
267 #define MASK_VTEM_MD0__FVA_FACTOR_M1 0xF0
268
269 //MD1
270 @@ -94,7 +95,7 @@
271 //MD2
272 #define MASK_VTEM_MD2__BASE_REFRESH_RATE_98 0x03
273 #define MASK_VTEM_MD2__RB 0x04
274 -#define MASK_VTEM_MD2__RESERVED3 0xF8
275 +#define MASK_VTEM_MD2__NEXT_TFR 0xF8
276
277 //MD3
278 #define MASK_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF
279 diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
280 index 73127948f54d9..f3ff2cdc288ba 100644
281 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
282 +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
283 @@ -625,7 +625,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
284 return connector;
285
286 fail:
287 - connector->funcs->destroy(msm_dsi->connector);
288 + connector->funcs->destroy(connector);
289 return ERR_PTR(ret);
290 }
291
292 diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
293 index b4a31d506fccf..74eca68891add 100644
294 --- a/drivers/gpu/ipu-v3/ipu-di.c
295 +++ b/drivers/gpu/ipu-v3/ipu-di.c
296 @@ -451,8 +451,9 @@ static void ipu_di_config_clock(struct ipu_di *di,
297
298 error = rate / (sig->mode.pixelclock / 1000);
299
300 - dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %d.%u%%\n",
301 - rate, div, (signed)(error - 1000) / 10, error % 10);
302 + dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %c%d.%d%%\n",
303 + rate, div, error < 1000 ? '-' : '+',
304 + abs(error - 1000) / 10, abs(error - 1000) % 10);
305
306 /* Allow a 1% error */
307 if (error < 1010 && error >= 990) {
308 diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
309 index 9a03b163cbbda..59f1e64908b1d 100644
310 --- a/drivers/hv/ring_buffer.c
311 +++ b/drivers/hv/ring_buffer.c
312 @@ -378,7 +378,16 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
313 static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
314 {
315 u32 priv_read_loc = rbi->priv_read_index;
316 - u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
317 + u32 write_loc;
318 +
319 + /*
320 + * The Hyper-V host writes the packet data, then uses
321 + * store_release() to update the write_index. Use load_acquire()
322 + * here to prevent loads of the packet data from being re-ordered
323 + * before the read of the write_index and potentially getting
324 + * stale data.
325 + */
326 + write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);
327
328 if (write_loc >= priv_read_loc)
329 return write_loc - priv_read_loc;
330 diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
331 index 20f2772c0e79b..2c909522f0f38 100644
332 --- a/drivers/i2c/busses/i2c-pasemi.c
333 +++ b/drivers/i2c/busses/i2c-pasemi.c
334 @@ -137,6 +137,12 @@ static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter,
335
336 TXFIFO_WR(smbus, msg->buf[msg->len-1] |
337 (stop ? MTXFIFO_STOP : 0));
338 +
339 + if (stop) {
340 + err = pasemi_smb_waitready(smbus);
341 + if (err)
342 + goto reset_out;
343 + }
344 }
345
346 return 0;
347 diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
348 index 9f4d657dd36c8..28a9eeae83b66 100644
349 --- a/drivers/md/dm-integrity.c
350 +++ b/drivers/md/dm-integrity.c
351 @@ -4054,6 +4054,7 @@ try_smaller_buffer:
352 }
353
354 if (ic->internal_hash) {
355 + size_t recalc_tags_size;
356 ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
357 if (!ic->recalc_wq ) {
358 ti->error = "Cannot allocate workqueue";
359 @@ -4067,8 +4068,10 @@ try_smaller_buffer:
360 r = -ENOMEM;
361 goto bad;
362 }
363 - ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
364 - ic->tag_size, GFP_KERNEL);
365 + recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size;
366 + if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
367 + recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
368 + ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL);
369 if (!ic->recalc_tags) {
370 ti->error = "Cannot allocate tags for recalculating";
371 r = -ENOMEM;
372 diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
373 index 89646896a1833..6f9cf6270a437 100644
374 --- a/drivers/memory/atmel-ebi.c
375 +++ b/drivers/memory/atmel-ebi.c
376 @@ -545,20 +545,27 @@ static int atmel_ebi_probe(struct platform_device *pdev)
377 smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0);
378
379 ebi->smc.regmap = syscon_node_to_regmap(smc_np);
380 - if (IS_ERR(ebi->smc.regmap))
381 - return PTR_ERR(ebi->smc.regmap);
382 + if (IS_ERR(ebi->smc.regmap)) {
383 + ret = PTR_ERR(ebi->smc.regmap);
384 + goto put_node;
385 + }
386
387 ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np);
388 - if (IS_ERR(ebi->smc.layout))
389 - return PTR_ERR(ebi->smc.layout);
390 + if (IS_ERR(ebi->smc.layout)) {
391 + ret = PTR_ERR(ebi->smc.layout);
392 + goto put_node;
393 + }
394
395 ebi->smc.clk = of_clk_get(smc_np, 0);
396 if (IS_ERR(ebi->smc.clk)) {
397 - if (PTR_ERR(ebi->smc.clk) != -ENOENT)
398 - return PTR_ERR(ebi->smc.clk);
399 + if (PTR_ERR(ebi->smc.clk) != -ENOENT) {
400 + ret = PTR_ERR(ebi->smc.clk);
401 + goto put_node;
402 + }
403
404 ebi->smc.clk = NULL;
405 }
406 + of_node_put(smc_np);
407 ret = clk_prepare_enable(ebi->smc.clk);
408 if (ret)
409 return ret;
410 @@ -609,6 +616,10 @@ static int atmel_ebi_probe(struct platform_device *pdev)
411 }
412
413 return of_platform_populate(np, NULL, NULL, dev);
414 +
415 +put_node:
416 + of_node_put(smc_np);
417 + return ret;
418 }
419
420 static __maybe_unused int atmel_ebi_resume(struct device *dev)
421 diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
422 index 95f408d0e103c..7cc4c30af1a71 100644
423 --- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
424 +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
425 @@ -649,6 +649,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
426 return 0;
427
428 errout:
429 + mutex_destroy(&mlxsw_i2c->cmd.lock);
430 i2c_set_clientdata(client, NULL);
431
432 return err;
433 diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
434 index b9c4d48e28e42..120ed4633a096 100644
435 --- a/drivers/net/ethernet/micrel/Kconfig
436 +++ b/drivers/net/ethernet/micrel/Kconfig
437 @@ -37,6 +37,7 @@ config KS8851
438 config KS8851_MLL
439 tristate "Micrel KS8851 MLL"
440 depends on HAS_IOMEM
441 + depends on PTP_1588_CLOCK_OPTIONAL
442 select MII
443 ---help---
444 This platform driver is for Micrel KS8851 Address/data bus
445 diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
446 index cd478d2cd871a..00f6d347eaf75 100644
447 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
448 +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
449 @@ -57,10 +57,6 @@
450 #define TSE_PCS_USE_SGMII_ENA BIT(0)
451 #define TSE_PCS_IF_USE_SGMII 0x03
452
453 -#define SGMII_ADAPTER_CTRL_REG 0x00
454 -#define SGMII_ADAPTER_DISABLE 0x0001
455 -#define SGMII_ADAPTER_ENABLE 0x0000
456 -
457 #define AUTONEGO_LINK_TIMER 20
458
459 static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs)
460 @@ -202,12 +198,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
461 unsigned int speed)
462 {
463 void __iomem *tse_pcs_base = pcs->tse_pcs_base;
464 - void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
465 u32 val;
466
467 - writew(SGMII_ADAPTER_ENABLE,
468 - sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
469 -
470 pcs->autoneg = phy_dev->autoneg;
471
472 if (phy_dev->autoneg == AUTONEG_ENABLE) {
473 diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
474 index 442812c0a4bdc..694ac25ef426b 100644
475 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
476 +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
477 @@ -10,6 +10,10 @@
478 #include <linux/phy.h>
479 #include <linux/timer.h>
480
481 +#define SGMII_ADAPTER_CTRL_REG 0x00
482 +#define SGMII_ADAPTER_ENABLE 0x0000
483 +#define SGMII_ADAPTER_DISABLE 0x0001
484 +
485 struct tse_pcs {
486 struct device *dev;
487 void __iomem *tse_pcs_base;
488 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
489 index 70d41783329dd..72e47621d27c7 100644
490 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
491 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
492 @@ -18,9 +18,6 @@
493
494 #include "altr_tse_pcs.h"
495
496 -#define SGMII_ADAPTER_CTRL_REG 0x00
497 -#define SGMII_ADAPTER_DISABLE 0x0001
498 -
499 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
500 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
501 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
502 @@ -62,16 +59,14 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
503 {
504 struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
505 void __iomem *splitter_base = dwmac->splitter_base;
506 - void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base;
507 void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base;
508 struct device *dev = dwmac->dev;
509 struct net_device *ndev = dev_get_drvdata(dev);
510 struct phy_device *phy_dev = ndev->phydev;
511 u32 val;
512
513 - if ((tse_pcs_base) && (sgmii_adapter_base))
514 - writew(SGMII_ADAPTER_DISABLE,
515 - sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
516 + writew(SGMII_ADAPTER_DISABLE,
517 + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
518
519 if (splitter_base) {
520 val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
521 @@ -93,7 +88,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
522 writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
523 }
524
525 - if (tse_pcs_base && sgmii_adapter_base)
526 + writew(SGMII_ADAPTER_ENABLE,
527 + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
528 + if (phy_dev)
529 tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
530 }
531
532 diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
533 index 8e56a41dd7585..096617982998f 100644
534 --- a/drivers/net/slip/slip.c
535 +++ b/drivers/net/slip/slip.c
536 @@ -471,7 +471,7 @@ static void sl_tx_timeout(struct net_device *dev)
537 spin_lock(&sl->lock);
538
539 if (netif_queue_stopped(dev)) {
540 - if (!netif_running(dev))
541 + if (!netif_running(dev) || !sl->tty)
542 goto out;
543
544 /* May be we must check transmitter timeout here ?
545 diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
546 index 7e44110746dd0..68912e266826b 100644
547 --- a/drivers/net/usb/aqc111.c
548 +++ b/drivers/net/usb/aqc111.c
549 @@ -1102,10 +1102,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
550 if (start_of_descs != desc_offset)
551 goto err;
552
553 - /* self check desc_offset from header*/
554 - if (desc_offset >= skb_len)
555 + /* self check desc_offset from header and make sure that the
556 + * bounds of the metadata array are inside the SKB
557 + */
558 + if (pkt_count * 2 + desc_offset >= skb_len)
559 goto err;
560
561 + /* Packets must not overlap the metadata array */
562 + skb_trim(skb, desc_offset);
563 +
564 if (pkt_count == 0)
565 goto err;
566
567 diff --git a/drivers/net/veth.c b/drivers/net/veth.c
568 index 10a876f8831c7..683425e3a353c 100644
569 --- a/drivers/net/veth.c
570 +++ b/drivers/net/veth.c
571 @@ -245,7 +245,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
572
573 rcu_read_lock();
574 rcv = rcu_dereference(priv->peer);
575 - if (unlikely(!rcv)) {
576 + if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
577 kfree_skb(skb);
578 goto drop;
579 }
580 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
581 index ec13bd8d5487d..eb5751a45f266 100644
582 --- a/drivers/net/wireless/ath/ath9k/main.c
583 +++ b/drivers/net/wireless/ath/ath9k/main.c
584 @@ -836,7 +836,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
585 continue;
586
587 txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
588 - fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
589 + fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0];
590 if (fi->keyix == keyix)
591 return true;
592 }
593 diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
594 index 14e6871a14054..fdb2152345eba 100644
595 --- a/drivers/net/wireless/ath/ath9k/xmit.c
596 +++ b/drivers/net/wireless/ath/ath9k/xmit.c
597 @@ -141,8 +141,8 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
598 {
599 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
600 BUILD_BUG_ON(sizeof(struct ath_frame_info) >
601 - sizeof(tx_info->rate_driver_data));
602 - return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
603 + sizeof(tx_info->status.status_driver_data));
604 + return (struct ath_frame_info *) &tx_info->status.status_driver_data[0];
605 }
606
607 static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
608 @@ -2498,6 +2498,16 @@ skip_tx_complete:
609 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
610 }
611
612 +static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info)
613 +{
614 + void *ptr = &tx_info->status;
615 +
616 + memset(ptr + sizeof(tx_info->status.rates), 0,
617 + sizeof(tx_info->status) -
618 + sizeof(tx_info->status.rates) -
619 + sizeof(tx_info->status.status_driver_data));
620 +}
621 +
622 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
623 struct ath_tx_status *ts, int nframes, int nbad,
624 int txok)
625 @@ -2509,6 +2519,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
626 struct ath_hw *ah = sc->sc_ah;
627 u8 i, tx_rateindex;
628
629 + ath_clear_tx_status(tx_info);
630 +
631 if (txok)
632 tx_info->status.ack_signal = ts->ts_rssi;
633
634 @@ -2523,6 +2535,13 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
635 tx_info->status.ampdu_len = nframes;
636 tx_info->status.ampdu_ack_len = nframes - nbad;
637
638 + tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
639 +
640 + for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
641 + tx_info->status.rates[i].count = 0;
642 + tx_info->status.rates[i].idx = -1;
643 + }
644 +
645 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
646 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
647 /*
648 @@ -2544,16 +2563,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
649 tx_info->status.rates[tx_rateindex].count =
650 hw->max_rate_tries;
651 }
652 -
653 - for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
654 - tx_info->status.rates[i].count = 0;
655 - tx_info->status.rates[i].idx = -1;
656 - }
657 -
658 - tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
659 -
660 - /* we report airtime in ath_tx_count_airtime(), don't report twice */
661 - tx_info->status.tx_time = 0;
662 }
663
664 static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
665 diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
666 index 726ed8f59868c..912a220a9db92 100644
667 --- a/drivers/perf/fsl_imx8_ddr_perf.c
668 +++ b/drivers/perf/fsl_imx8_ddr_perf.c
669 @@ -29,7 +29,7 @@
670 #define CNTL_OVER_MASK 0xFFFFFFFE
671
672 #define CNTL_CSV_SHIFT 24
673 -#define CNTL_CSV_MASK (0xFF << CNTL_CSV_SHIFT)
674 +#define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT)
675
676 #define EVENT_CYCLES_ID 0
677 #define EVENT_CYCLES_COUNTER 0
678 diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
679 index cadea0344486f..40befdd9dfa92 100644
680 --- a/drivers/regulator/wm8994-regulator.c
681 +++ b/drivers/regulator/wm8994-regulator.c
682 @@ -71,6 +71,35 @@ static const struct regulator_ops wm8994_ldo2_ops = {
683 };
684
685 static const struct regulator_desc wm8994_ldo_desc[] = {
686 + {
687 + .name = "LDO1",
688 + .id = 1,
689 + .type = REGULATOR_VOLTAGE,
690 + .n_voltages = WM8994_LDO1_MAX_SELECTOR + 1,
691 + .vsel_reg = WM8994_LDO_1,
692 + .vsel_mask = WM8994_LDO1_VSEL_MASK,
693 + .ops = &wm8994_ldo1_ops,
694 + .min_uV = 2400000,
695 + .uV_step = 100000,
696 + .enable_time = 3000,
697 + .off_on_delay = 36000,
698 + .owner = THIS_MODULE,
699 + },
700 + {
701 + .name = "LDO2",
702 + .id = 2,
703 + .type = REGULATOR_VOLTAGE,
704 + .n_voltages = WM8994_LDO2_MAX_SELECTOR + 1,
705 + .vsel_reg = WM8994_LDO_2,
706 + .vsel_mask = WM8994_LDO2_VSEL_MASK,
707 + .ops = &wm8994_ldo2_ops,
708 + .enable_time = 3000,
709 + .off_on_delay = 36000,
710 + .owner = THIS_MODULE,
711 + },
712 +};
713 +
714 +static const struct regulator_desc wm8958_ldo_desc[] = {
715 {
716 .name = "LDO1",
717 .id = 1,
718 @@ -172,9 +201,16 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
719 * regulator core and we need not worry about it on the
720 * error path.
721 */
722 - ldo->regulator = devm_regulator_register(&pdev->dev,
723 - &wm8994_ldo_desc[id],
724 - &config);
725 + if (ldo->wm8994->type == WM8994) {
726 + ldo->regulator = devm_regulator_register(&pdev->dev,
727 + &wm8994_ldo_desc[id],
728 + &config);
729 + } else {
730 + ldo->regulator = devm_regulator_register(&pdev->dev,
731 + &wm8958_ldo_desc[id],
732 + &config);
733 + }
734 +
735 if (IS_ERR(ldo->regulator)) {
736 ret = PTR_ERR(ldo->regulator);
737 dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
738 diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
739 index a929fe76102b0..d5b2917aea44f 100644
740 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
741 +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
742 @@ -35,7 +35,7 @@
743
744 #define IBMVSCSIS_VERSION "v0.2"
745
746 -#define INITIAL_SRP_LIMIT 800
747 +#define INITIAL_SRP_LIMIT 1024
748 #define DEFAULT_MAX_SECTORS 256
749 #define MAX_TXU 1024 * 1024
750
751 diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
752 index 3d43ac9772f7e..aa62cc8ffd0af 100644
753 --- a/drivers/scsi/megaraid/megaraid_sas.h
754 +++ b/drivers/scsi/megaraid/megaraid_sas.h
755 @@ -2551,6 +2551,9 @@ struct megasas_instance_template {
756 #define MEGASAS_IS_LOGICAL(sdev) \
757 ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
758
759 +#define MEGASAS_IS_LUN_VALID(sdev) \
760 + (((sdev)->lun == 0) ? 1 : 0)
761 +
762 #define MEGASAS_DEV_INDEX(scp) \
763 (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
764 scp->device->id)
765 diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
766 index 6700d43b12ff5..a261ce511e9ed 100644
767 --- a/drivers/scsi/megaraid/megaraid_sas_base.c
768 +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
769 @@ -2102,6 +2102,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
770 goto scan_target;
771 }
772 return -ENXIO;
773 + } else if (!MEGASAS_IS_LUN_VALID(sdev)) {
774 + sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
775 + return -ENXIO;
776 }
777
778 scan_target:
779 @@ -2132,6 +2135,10 @@ static void megasas_slave_destroy(struct scsi_device *sdev)
780 instance = megasas_lookup_instance(sdev->host->host_no);
781
782 if (MEGASAS_IS_LOGICAL(sdev)) {
783 + if (!MEGASAS_IS_LUN_VALID(sdev)) {
784 + sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
785 + return;
786 + }
787 ld_tgt_id = MEGASAS_TARGET_ID(sdev);
788 instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
789 if (megasas_dbg_lvl & LD_PD_DEBUG)
790 diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
791 index c16d7fb0fdcbb..0c5e2c6105867 100644
792 --- a/drivers/scsi/mvsas/mv_init.c
793 +++ b/drivers/scsi/mvsas/mv_init.c
794 @@ -646,6 +646,7 @@ static struct pci_device_id mvs_pci_table[] = {
795 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
796 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
797 { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
798 + { PCI_VDEVICE(TTI, 0x2640), chip_6440 },
799 { PCI_VDEVICE(TTI, 0x2710), chip_9480 },
800 { PCI_VDEVICE(TTI, 0x2720), chip_9480 },
801 { PCI_VDEVICE(TTI, 0x2721), chip_9480 },
802 diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
803 index 71144e33272a3..077c56cbed4e1 100644
804 --- a/drivers/target/target_core_user.c
805 +++ b/drivers/target/target_core_user.c
806 @@ -1488,6 +1488,7 @@ static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
807 mutex_lock(&udev->cmdr_lock);
808 page = tcmu_get_block_page(udev, dbi);
809 if (likely(page)) {
810 + get_page(page);
811 mutex_unlock(&udev->cmdr_lock);
812 return page;
813 }
814 @@ -1526,6 +1527,7 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
815 /* For the vmalloc()ed cmd area pages */
816 addr = (void *)(unsigned long)info->mem[mi].addr + offset;
817 page = vmalloc_to_page(addr);
818 + get_page(page);
819 } else {
820 uint32_t dbi;
821
822 @@ -1536,7 +1538,6 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
823 return VM_FAULT_SIGBUS;
824 }
825
826 - get_page(page);
827 vmf->page = page;
828 return 0;
829 }
830 diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
831 index e98d6ea35ea80..bcf19dfb0af35 100644
832 --- a/fs/btrfs/block-group.c
833 +++ b/fs/btrfs/block-group.c
834 @@ -2388,7 +2388,6 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
835 struct btrfs_path *path = NULL;
836 LIST_HEAD(dirty);
837 struct list_head *io = &cur_trans->io_bgs;
838 - int num_started = 0;
839 int loops = 0;
840
841 spin_lock(&cur_trans->dirty_bgs_lock);
842 @@ -2455,7 +2454,6 @@ again:
843 cache->io_ctl.inode = NULL;
844 ret = btrfs_write_out_cache(trans, cache, path);
845 if (ret == 0 && cache->io_ctl.inode) {
846 - num_started++;
847 should_put = 0;
848
849 /*
850 @@ -2556,7 +2554,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
851 int should_put;
852 struct btrfs_path *path;
853 struct list_head *io = &cur_trans->io_bgs;
854 - int num_started = 0;
855
856 path = btrfs_alloc_path();
857 if (!path)
858 @@ -2614,7 +2611,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
859 cache->io_ctl.inode = NULL;
860 ret = btrfs_write_out_cache(trans, cache, path);
861 if (ret == 0 && cache->io_ctl.inode) {
862 - num_started++;
863 should_put = 0;
864 list_add_tail(&cache->io_list, io);
865 } else {
866 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
867 index 344d18de1f08c..8898682c91038 100644
868 --- a/fs/btrfs/volumes.c
869 +++ b/fs/btrfs/volumes.c
870 @@ -4320,10 +4320,12 @@ static int balance_kthread(void *data)
871 struct btrfs_fs_info *fs_info = data;
872 int ret = 0;
873
874 + sb_start_write(fs_info->sb);
875 mutex_lock(&fs_info->balance_mutex);
876 if (fs_info->balance_ctl)
877 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
878 mutex_unlock(&fs_info->balance_mutex);
879 + sb_end_write(fs_info->sb);
880
881 return ret;
882 }
883 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
884 index b736acd3917bb..a24bcbbb50337 100644
885 --- a/fs/cifs/link.c
886 +++ b/fs/cifs/link.c
887 @@ -97,6 +97,9 @@ parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len,
888 if (rc != 1)
889 return -EINVAL;
890
891 + if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
892 + return -EINVAL;
893 +
894 rc = symlink_hash(link_len, link_str, md5_hash);
895 if (rc) {
896 cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc);
897 diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
898 index 46294ef620ff9..268674c1d5685 100644
899 --- a/include/asm-generic/tlb.h
900 +++ b/include/asm-generic/tlb.h
901 @@ -547,10 +547,14 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
902 #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
903 do { \
904 unsigned long _sz = huge_page_size(h); \
905 - if (_sz == PMD_SIZE) \
906 - tlb_flush_pmd_range(tlb, address, _sz); \
907 - else if (_sz == PUD_SIZE) \
908 + if (_sz >= P4D_SIZE) \
909 + tlb_flush_p4d_range(tlb, address, _sz); \
910 + else if (_sz >= PUD_SIZE) \
911 tlb_flush_pud_range(tlb, address, _sz); \
912 + else if (_sz >= PMD_SIZE) \
913 + tlb_flush_pmd_range(tlb, address, _sz); \
914 + else \
915 + tlb_flush_pte_range(tlb, address, _sz); \
916 __tlb_remove_tlb_entry(tlb, ptep, address); \
917 } while (0)
918
919 diff --git a/include/net/ax25.h b/include/net/ax25.h
920 index 8b7eb46ad72d8..aadff553e4b73 100644
921 --- a/include/net/ax25.h
922 +++ b/include/net/ax25.h
923 @@ -236,6 +236,7 @@ typedef struct ax25_dev {
924 #if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
925 ax25_dama_info dama;
926 #endif
927 + refcount_t refcount;
928 } ax25_dev;
929
930 typedef struct ax25_cb {
931 @@ -290,6 +291,17 @@ static __inline__ void ax25_cb_put(ax25_cb *ax25)
932 }
933 }
934
935 +static inline void ax25_dev_hold(ax25_dev *ax25_dev)
936 +{
937 + refcount_inc(&ax25_dev->refcount);
938 +}
939 +
940 +static inline void ax25_dev_put(ax25_dev *ax25_dev)
941 +{
942 + if (refcount_dec_and_test(&ax25_dev->refcount)) {
943 + kfree(ax25_dev);
944 + }
945 +}
946 static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev)
947 {
948 skb->dev = dev;
949 diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
950 index 78f6437cbc3a8..02171416c68eb 100644
951 --- a/include/net/flow_dissector.h
952 +++ b/include/net/flow_dissector.h
953 @@ -51,6 +51,8 @@ struct flow_dissector_key_vlan {
954 vlan_dei:1,
955 vlan_priority:3;
956 __be16 vlan_tpid;
957 + __be16 vlan_eth_type;
958 + u16 padding;
959 };
960
961 struct flow_dissector_key_mpls {
962 diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
963 index 0a093a675b632..f04cfc2e9e01a 100644
964 --- a/kernel/dma/direct.c
965 +++ b/kernel/dma/direct.c
966 @@ -306,7 +306,8 @@ void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
967 dma_direct_sync_single_for_cpu(dev, addr, size, dir);
968
969 if (unlikely(is_swiotlb_buffer(phys)))
970 - swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
971 + swiotlb_tbl_unmap_single(dev, phys, size, size, dir,
972 + attrs | DMA_ATTR_SKIP_CPU_SYNC);
973 }
974 EXPORT_SYMBOL(dma_direct_unmap_page);
975
976 diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
977 index 4d89ad4fae3bb..5fb78addff51b 100644
978 --- a/kernel/irq/affinity.c
979 +++ b/kernel/irq/affinity.c
980 @@ -269,8 +269,9 @@ static int __irq_build_affinity_masks(unsigned int startvec,
981 */
982 if (numvecs <= nodes) {
983 for_each_node_mask(n, nodemsk) {
984 - cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
985 - node_to_cpumask[n]);
986 + /* Ensure that only CPUs which are in both masks are set */
987 + cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
988 + cpumask_or(&masks[curvec].mask, &masks[curvec].mask, nmsk);
989 if (++curvec == last_affv)
990 curvec = firstvec;
991 }
992 diff --git a/kernel/smp.c b/kernel/smp.c
993 index 3a390932f8b25..be65b76cb8036 100644
994 --- a/kernel/smp.c
995 +++ b/kernel/smp.c
996 @@ -222,7 +222,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
997
998 /* There shouldn't be any pending callbacks on an offline CPU. */
999 if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
1000 - !warned && !llist_empty(head))) {
1001 + !warned && entry != NULL)) {
1002 warned = true;
1003 WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
1004
1005 diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
1006 index 4419486d7413c..5eb04bb598026 100644
1007 --- a/kernel/time/tick-sched.c
1008 +++ b/kernel/time/tick-sched.c
1009 @@ -131,7 +131,7 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
1010 */
1011 if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
1012 #ifdef CONFIG_NO_HZ_FULL
1013 - WARN_ON(tick_nohz_full_running);
1014 + WARN_ON_ONCE(tick_nohz_full_running);
1015 #endif
1016 tick_do_timer_cpu = cpu;
1017 }
1018 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
1019 index d8cde7292bf92..3761c79137b17 100644
1020 --- a/mm/kmemleak.c
1021 +++ b/mm/kmemleak.c
1022 @@ -1123,7 +1123,7 @@ EXPORT_SYMBOL(kmemleak_no_scan);
1023 void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
1024 gfp_t gfp)
1025 {
1026 - if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1027 + if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
1028 kmemleak_alloc(__va(phys), size, min_count, gfp);
1029 }
1030 EXPORT_SYMBOL(kmemleak_alloc_phys);
1031 @@ -1137,7 +1137,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
1032 */
1033 void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
1034 {
1035 - if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1036 + if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
1037 kmemleak_free_part(__va(phys), size);
1038 }
1039 EXPORT_SYMBOL(kmemleak_free_part_phys);
1040 @@ -1149,7 +1149,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
1041 */
1042 void __ref kmemleak_not_leak_phys(phys_addr_t phys)
1043 {
1044 - if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1045 + if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
1046 kmemleak_not_leak(__va(phys));
1047 }
1048 EXPORT_SYMBOL(kmemleak_not_leak_phys);
1049 @@ -1161,7 +1161,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
1050 */
1051 void __ref kmemleak_ignore_phys(phys_addr_t phys)
1052 {
1053 - if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1054 + if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
1055 kmemleak_ignore(__va(phys));
1056 }
1057 EXPORT_SYMBOL(kmemleak_ignore_phys);
1058 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
1059 index 5038611563dfb..7048ea59d58bd 100644
1060 --- a/mm/page_alloc.c
1061 +++ b/mm/page_alloc.c
1062 @@ -5481,7 +5481,7 @@ static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
1063 do {
1064 zone_type--;
1065 zone = pgdat->node_zones + zone_type;
1066 - if (managed_zone(zone)) {
1067 + if (populated_zone(zone)) {
1068 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
1069 check_highest_zone(zone_type);
1070 }
1071 diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
1072 index 093b73c454d28..aff991ca0e4a4 100644
1073 --- a/net/ax25/af_ax25.c
1074 +++ b/net/ax25/af_ax25.c
1075 @@ -89,17 +89,21 @@ again:
1076 sk = s->sk;
1077 if (!sk) {
1078 spin_unlock_bh(&ax25_list_lock);
1079 - s->ax25_dev = NULL;
1080 ax25_disconnect(s, ENETUNREACH);
1081 + s->ax25_dev = NULL;
1082 spin_lock_bh(&ax25_list_lock);
1083 goto again;
1084 }
1085 sock_hold(sk);
1086 spin_unlock_bh(&ax25_list_lock);
1087 lock_sock(sk);
1088 + ax25_disconnect(s, ENETUNREACH);
1089 s->ax25_dev = NULL;
1090 + if (sk->sk_socket) {
1091 + dev_put(ax25_dev->dev);
1092 + ax25_dev_put(ax25_dev);
1093 + }
1094 release_sock(sk);
1095 - ax25_disconnect(s, ENETUNREACH);
1096 spin_lock_bh(&ax25_list_lock);
1097 sock_put(sk);
1098 /* The entry could have been deleted from the
1099 @@ -365,21 +369,25 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
1100 if (copy_from_user(&ax25_ctl, arg, sizeof(ax25_ctl)))
1101 return -EFAULT;
1102
1103 - if ((ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr)) == NULL)
1104 - return -ENODEV;
1105 -
1106 if (ax25_ctl.digi_count > AX25_MAX_DIGIS)
1107 return -EINVAL;
1108
1109 if (ax25_ctl.arg > ULONG_MAX / HZ && ax25_ctl.cmd != AX25_KILL)
1110 return -EINVAL;
1111
1112 + ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr);
1113 + if (!ax25_dev)
1114 + return -ENODEV;
1115 +
1116 digi.ndigi = ax25_ctl.digi_count;
1117 for (k = 0; k < digi.ndigi; k++)
1118 digi.calls[k] = ax25_ctl.digi_addr[k];
1119
1120 - if ((ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev)) == NULL)
1121 + ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev);
1122 + if (!ax25) {
1123 + ax25_dev_put(ax25_dev);
1124 return -ENOTCONN;
1125 + }
1126
1127 switch (ax25_ctl.cmd) {
1128 case AX25_KILL:
1129 @@ -446,6 +454,7 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
1130 }
1131
1132 out_put:
1133 + ax25_dev_put(ax25_dev);
1134 ax25_cb_put(ax25);
1135 return ret;
1136
1137 @@ -971,14 +980,16 @@ static int ax25_release(struct socket *sock)
1138 {
1139 struct sock *sk = sock->sk;
1140 ax25_cb *ax25;
1141 + ax25_dev *ax25_dev;
1142
1143 if (sk == NULL)
1144 return 0;
1145
1146 sock_hold(sk);
1147 - sock_orphan(sk);
1148 lock_sock(sk);
1149 + sock_orphan(sk);
1150 ax25 = sk_to_ax25(sk);
1151 + ax25_dev = ax25->ax25_dev;
1152
1153 if (sk->sk_type == SOCK_SEQPACKET) {
1154 switch (ax25->state) {
1155 @@ -1040,6 +1051,15 @@ static int ax25_release(struct socket *sock)
1156 sk->sk_state_change(sk);
1157 ax25_destroy_socket(ax25);
1158 }
1159 + if (ax25_dev) {
1160 + del_timer_sync(&ax25->timer);
1161 + del_timer_sync(&ax25->t1timer);
1162 + del_timer_sync(&ax25->t2timer);
1163 + del_timer_sync(&ax25->t3timer);
1164 + del_timer_sync(&ax25->idletimer);
1165 + dev_put(ax25_dev->dev);
1166 + ax25_dev_put(ax25_dev);
1167 + }
1168
1169 sock->sk = NULL;
1170 release_sock(sk);
1171 @@ -1116,8 +1136,10 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1172 }
1173 }
1174
1175 - if (ax25_dev != NULL)
1176 + if (ax25_dev) {
1177 ax25_fillin_cb(ax25, ax25_dev);
1178 + dev_hold(ax25_dev->dev);
1179 + }
1180
1181 done:
1182 ax25_cb_add(ax25);
1183 diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
1184 index 4ac2e0847652a..d2e0cc67d91a7 100644
1185 --- a/net/ax25/ax25_dev.c
1186 +++ b/net/ax25/ax25_dev.c
1187 @@ -37,6 +37,7 @@ ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
1188 for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
1189 if (ax25cmp(addr, (ax25_address *)ax25_dev->dev->dev_addr) == 0) {
1190 res = ax25_dev;
1191 + ax25_dev_hold(ax25_dev);
1192 }
1193 spin_unlock_bh(&ax25_dev_lock);
1194
1195 @@ -56,6 +57,7 @@ void ax25_dev_device_up(struct net_device *dev)
1196 return;
1197 }
1198
1199 + refcount_set(&ax25_dev->refcount, 1);
1200 dev->ax25_ptr = ax25_dev;
1201 ax25_dev->dev = dev;
1202 dev_hold(dev);
1203 @@ -84,6 +86,7 @@ void ax25_dev_device_up(struct net_device *dev)
1204 ax25_dev->next = ax25_dev_list;
1205 ax25_dev_list = ax25_dev;
1206 spin_unlock_bh(&ax25_dev_lock);
1207 + ax25_dev_hold(ax25_dev);
1208
1209 ax25_register_dev_sysctl(ax25_dev);
1210 }
1211 @@ -113,9 +116,10 @@ void ax25_dev_device_down(struct net_device *dev)
1212 if ((s = ax25_dev_list) == ax25_dev) {
1213 ax25_dev_list = s->next;
1214 spin_unlock_bh(&ax25_dev_lock);
1215 + ax25_dev_put(ax25_dev);
1216 dev->ax25_ptr = NULL;
1217 dev_put(dev);
1218 - kfree(ax25_dev);
1219 + ax25_dev_put(ax25_dev);
1220 return;
1221 }
1222
1223 @@ -123,9 +127,10 @@ void ax25_dev_device_down(struct net_device *dev)
1224 if (s->next == ax25_dev) {
1225 s->next = ax25_dev->next;
1226 spin_unlock_bh(&ax25_dev_lock);
1227 + ax25_dev_put(ax25_dev);
1228 dev->ax25_ptr = NULL;
1229 dev_put(dev);
1230 - kfree(ax25_dev);
1231 + ax25_dev_put(ax25_dev);
1232 return;
1233 }
1234
1235 @@ -133,6 +138,7 @@ void ax25_dev_device_down(struct net_device *dev)
1236 }
1237 spin_unlock_bh(&ax25_dev_lock);
1238 dev->ax25_ptr = NULL;
1239 + ax25_dev_put(ax25_dev);
1240 }
1241
1242 int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd)
1243 @@ -144,20 +150,32 @@ int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd)
1244
1245 switch (cmd) {
1246 case SIOCAX25ADDFWD:
1247 - if ((fwd_dev = ax25_addr_ax25dev(&fwd->port_to)) == NULL)
1248 + fwd_dev = ax25_addr_ax25dev(&fwd->port_to);
1249 + if (!fwd_dev) {
1250 + ax25_dev_put(ax25_dev);
1251 return -EINVAL;
1252 - if (ax25_dev->forward != NULL)
1253 + }
1254 + if (ax25_dev->forward) {
1255 + ax25_dev_put(fwd_dev);
1256 + ax25_dev_put(ax25_dev);
1257 return -EINVAL;
1258 + }
1259 ax25_dev->forward = fwd_dev->dev;
1260 + ax25_dev_put(fwd_dev);
1261 + ax25_dev_put(ax25_dev);
1262 break;
1263
1264 case SIOCAX25DELFWD:
1265 - if (ax25_dev->forward == NULL)
1266 + if (!ax25_dev->forward) {
1267 + ax25_dev_put(ax25_dev);
1268 return -EINVAL;
1269 + }
1270 ax25_dev->forward = NULL;
1271 + ax25_dev_put(ax25_dev);
1272 break;
1273
1274 default:
1275 + ax25_dev_put(ax25_dev);
1276 return -EINVAL;
1277 }
1278
1279 diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
1280 index b40e0bce67ead..dc2168d2a32a9 100644
1281 --- a/net/ax25/ax25_route.c
1282 +++ b/net/ax25/ax25_route.c
1283 @@ -75,11 +75,13 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
1284 ax25_dev *ax25_dev;
1285 int i;
1286
1287 - if ((ax25_dev = ax25_addr_ax25dev(&route->port_addr)) == NULL)
1288 - return -EINVAL;
1289 if (route->digi_count > AX25_MAX_DIGIS)
1290 return -EINVAL;
1291
1292 + ax25_dev = ax25_addr_ax25dev(&route->port_addr);
1293 + if (!ax25_dev)
1294 + return -EINVAL;
1295 +
1296 write_lock_bh(&ax25_route_lock);
1297
1298 ax25_rt = ax25_route_list;
1299 @@ -91,6 +93,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
1300 if (route->digi_count != 0) {
1301 if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
1302 write_unlock_bh(&ax25_route_lock);
1303 + ax25_dev_put(ax25_dev);
1304 return -ENOMEM;
1305 }
1306 ax25_rt->digipeat->lastrepeat = -1;
1307 @@ -101,6 +104,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
1308 }
1309 }
1310 write_unlock_bh(&ax25_route_lock);
1311 + ax25_dev_put(ax25_dev);
1312 return 0;
1313 }
1314 ax25_rt = ax25_rt->next;
1315 @@ -108,6 +112,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
1316
1317 if ((ax25_rt = kmalloc(sizeof(ax25_route), GFP_ATOMIC)) == NULL) {
1318 write_unlock_bh(&ax25_route_lock);
1319 + ax25_dev_put(ax25_dev);
1320 return -ENOMEM;
1321 }
1322
1323 @@ -120,6 +125,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
1324 if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
1325 write_unlock_bh(&ax25_route_lock);
1326 kfree(ax25_rt);
1327 + ax25_dev_put(ax25_dev);
1328 return -ENOMEM;
1329 }
1330 ax25_rt->digipeat->lastrepeat = -1;
1331 @@ -132,6 +138,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
1332 ax25_rt->next = ax25_route_list;
1333 ax25_route_list = ax25_rt;
1334 write_unlock_bh(&ax25_route_lock);
1335 + ax25_dev_put(ax25_dev);
1336
1337 return 0;
1338 }
1339 @@ -173,6 +180,7 @@ static int ax25_rt_del(struct ax25_routes_struct *route)
1340 }
1341 }
1342 write_unlock_bh(&ax25_route_lock);
1343 + ax25_dev_put(ax25_dev);
1344
1345 return 0;
1346 }
1347 @@ -215,6 +223,7 @@ static int ax25_rt_opt(struct ax25_route_opt_struct *rt_option)
1348
1349 out:
1350 write_unlock_bh(&ax25_route_lock);
1351 + ax25_dev_put(ax25_dev);
1352 return err;
1353 }
1354
1355 diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
1356 index 15ab812c4fe4b..3a476e4f6cd0b 100644
1357 --- a/net/ax25/ax25_subr.c
1358 +++ b/net/ax25/ax25_subr.c
1359 @@ -261,12 +261,20 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
1360 {
1361 ax25_clear_queues(ax25);
1362
1363 - if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
1364 - ax25_stop_heartbeat(ax25);
1365 - ax25_stop_t1timer(ax25);
1366 - ax25_stop_t2timer(ax25);
1367 - ax25_stop_t3timer(ax25);
1368 - ax25_stop_idletimer(ax25);
1369 + if (reason == ENETUNREACH) {
1370 + del_timer_sync(&ax25->timer);
1371 + del_timer_sync(&ax25->t1timer);
1372 + del_timer_sync(&ax25->t2timer);
1373 + del_timer_sync(&ax25->t3timer);
1374 + del_timer_sync(&ax25->idletimer);
1375 + } else {
1376 + if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
1377 + ax25_stop_heartbeat(ax25);
1378 + ax25_stop_t1timer(ax25);
1379 + ax25_stop_t2timer(ax25);
1380 + ax25_stop_t3timer(ax25);
1381 + ax25_stop_idletimer(ax25);
1382 + }
1383
1384 ax25->state = AX25_STATE_0;
1385
1386 diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
1387 index b740a74f06f22..4dac27c986231 100644
1388 --- a/net/core/flow_dissector.c
1389 +++ b/net/core/flow_dissector.c
1390 @@ -1149,6 +1149,7 @@ proto_again:
1391 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1392 }
1393 key_vlan->vlan_tpid = saved_vlan_tpid;
1394 + key_vlan->vlan_eth_type = proto;
1395 }
1396
1397 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1398 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
1399 index 3606796009572..5585e3a94f3ca 100644
1400 --- a/net/ipv6/ip6_output.c
1401 +++ b/net/ipv6/ip6_output.c
1402 @@ -506,7 +506,7 @@ int ip6_forward(struct sk_buff *skb)
1403 goto drop;
1404
1405 if (!net->ipv6.devconf_all->disable_policy &&
1406 - !idev->cnf.disable_policy &&
1407 + (!idev || !idev->cnf.disable_policy) &&
1408 !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
1409 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
1410 goto drop;
1411 diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
1412 index b8ecb002e6238..b2e922fcc70da 100644
1413 --- a/net/nfc/nci/core.c
1414 +++ b/net/nfc/nci/core.c
1415 @@ -548,6 +548,10 @@ static int nci_close_device(struct nci_dev *ndev)
1416 mutex_lock(&ndev->req_lock);
1417
1418 if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
1419 + /* Need to flush the cmd wq in case
1420 + * there is a queued/running cmd_work
1421 + */
1422 + flush_workqueue(ndev->cmd_wq);
1423 del_timer_sync(&ndev->cmd_timer);
1424 del_timer_sync(&ndev->data_timer);
1425 mutex_unlock(&ndev->req_lock);
1426 diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
1427 index 80205b138d113..919c7fa5f02d6 100644
1428 --- a/net/sched/cls_api.c
1429 +++ b/net/sched/cls_api.c
1430 @@ -1639,10 +1639,10 @@ static int tcf_chain_tp_insert(struct tcf_chain *chain,
1431 if (chain->flushing)
1432 return -EAGAIN;
1433
1434 + RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1435 if (*chain_info->pprev == chain->filter_chain)
1436 tcf_chain0_head_change(chain, tp);
1437 tcf_proto_get(tp);
1438 - RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1439 rcu_assign_pointer(*chain_info->pprev, tp);
1440
1441 return 0;
1442 diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
1443 index 26979b4853bdb..007fbc1993522 100644
1444 --- a/net/sched/cls_flower.c
1445 +++ b/net/sched/cls_flower.c
1446 @@ -784,6 +784,7 @@ static int fl_set_key_mpls(struct nlattr **tb,
1447 static void fl_set_key_vlan(struct nlattr **tb,
1448 __be16 ethertype,
1449 int vlan_id_key, int vlan_prio_key,
1450 + int vlan_next_eth_type_key,
1451 struct flow_dissector_key_vlan *key_val,
1452 struct flow_dissector_key_vlan *key_mask)
1453 {
1454 @@ -802,6 +803,11 @@ static void fl_set_key_vlan(struct nlattr **tb,
1455 }
1456 key_val->vlan_tpid = ethertype;
1457 key_mask->vlan_tpid = cpu_to_be16(~0);
1458 + if (tb[vlan_next_eth_type_key]) {
1459 + key_val->vlan_eth_type =
1460 + nla_get_be16(tb[vlan_next_eth_type_key]);
1461 + key_mask->vlan_eth_type = cpu_to_be16(~0);
1462 + }
1463 }
1464
1465 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
1466 @@ -1076,8 +1082,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
1467
1468 if (eth_type_vlan(ethertype)) {
1469 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1470 - TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
1471 - &mask->vlan);
1472 + TCA_FLOWER_KEY_VLAN_PRIO,
1473 + TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1474 + &key->vlan, &mask->vlan);
1475
1476 if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1477 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1478 @@ -1085,6 +1092,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
1479 fl_set_key_vlan(tb, ethertype,
1480 TCA_FLOWER_KEY_CVLAN_ID,
1481 TCA_FLOWER_KEY_CVLAN_PRIO,
1482 + TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1483 &key->cvlan, &mask->cvlan);
1484 fl_set_key_val(tb, &key->basic.n_proto,
1485 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1486 @@ -2272,13 +2280,13 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
1487 goto nla_put_failure;
1488
1489 if (mask->basic.n_proto) {
1490 - if (mask->cvlan.vlan_tpid) {
1491 + if (mask->cvlan.vlan_eth_type) {
1492 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1493 key->basic.n_proto))
1494 goto nla_put_failure;
1495 - } else if (mask->vlan.vlan_tpid) {
1496 + } else if (mask->vlan.vlan_eth_type) {
1497 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1498 - key->basic.n_proto))
1499 + key->vlan.vlan_eth_type))
1500 goto nla_put_failure;
1501 }
1502 }
1503 diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
1504 index b268e61304515..4c26f7fb32b34 100644
1505 --- a/net/sched/sch_taprio.c
1506 +++ b/net/sched/sch_taprio.c
1507 @@ -427,7 +427,8 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1508 if (unlikely(!child))
1509 return qdisc_drop(skb, sch, to_free);
1510
1511 - if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
1512 + /* sk_flags are only safe to use on full sockets. */
1513 + if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
1514 if (!is_valid_interval(skb, sch))
1515 return qdisc_drop(skb, sch, to_free);
1516 } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
1517 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
1518 index 565aa77fe5cbe..c76b40322ac7d 100644
1519 --- a/net/sctp/socket.c
1520 +++ b/net/sctp/socket.c
1521 @@ -5682,7 +5682,7 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
1522 * Set the daddr and initialize id to something more random and also
1523 * copy over any ip options.
1524 */
1525 - sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
1526 + sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sock->sk);
1527 sp->pf->copy_ip_options(sk, sock->sk);
1528
1529 /* Populate the fields of the newsk from the oldsk and migrate the
1530 diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
1531 index 571e6d84da3ba..660608202f284 100644
1532 --- a/net/smc/smc_pnet.c
1533 +++ b/net/smc/smc_pnet.c
1534 @@ -295,8 +295,9 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
1535 list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
1536 if (!strncmp(ibdev->ibdev->name, ib_name,
1537 sizeof(ibdev->ibdev->name)) ||
1538 - !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name,
1539 - IB_DEVICE_NAME_MAX - 1)) {
1540 + (ibdev->ibdev->dev.parent &&
1541 + !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name,
1542 + IB_DEVICE_NAME_MAX - 1))) {
1543 goto out;
1544 }
1545 }
1546 diff --git a/net/wireless/scan.c b/net/wireless/scan.c
1547 index 6cefaad3b7f84..6bb9437af28bf 100644
1548 --- a/net/wireless/scan.c
1549 +++ b/net/wireless/scan.c
1550 @@ -1457,11 +1457,13 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
1551 /* this is a nontransmitting bss, we need to add it to
1552 * transmitting bss' list if it is not there
1553 */
1554 + spin_lock_bh(&rdev->bss_lock);
1555 if (cfg80211_add_nontrans_list(non_tx_data->tx_bss,
1556 &res->pub)) {
1557 if (__cfg80211_unlink_bss(rdev, res))
1558 rdev->bss_generation++;
1559 }
1560 + spin_unlock_bh(&rdev->bss_lock);
1561 }
1562
1563 trace_cfg80211_return_bss(&res->pub);
1564 diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
1565 index cbe1d6c4b1a51..c84bef1d28955 100644
1566 --- a/scripts/gcc-plugins/latent_entropy_plugin.c
1567 +++ b/scripts/gcc-plugins/latent_entropy_plugin.c
1568 @@ -86,25 +86,31 @@ static struct plugin_info latent_entropy_plugin_info = {
1569 .help = "disable\tturn off latent entropy instrumentation\n",
1570 };
1571
1572 -static unsigned HOST_WIDE_INT seed;
1573 -/*
1574 - * get_random_seed() (this is a GCC function) generates the seed.
1575 - * This is a simple random generator without any cryptographic security because
1576 - * the entropy doesn't come from here.
1577 - */
1578 +static unsigned HOST_WIDE_INT deterministic_seed;
1579 +static unsigned HOST_WIDE_INT rnd_buf[32];
1580 +static size_t rnd_idx = ARRAY_SIZE(rnd_buf);
1581 +static int urandom_fd = -1;
1582 +
1583 static unsigned HOST_WIDE_INT get_random_const(void)
1584 {
1585 - unsigned int i;
1586 - unsigned HOST_WIDE_INT ret = 0;
1587 -
1588 - for (i = 0; i < 8 * sizeof(ret); i++) {
1589 - ret = (ret << 1) | (seed & 1);
1590 - seed >>= 1;
1591 - if (ret & 1)
1592 - seed ^= 0xD800000000000000ULL;
1593 + if (deterministic_seed) {
1594 + unsigned HOST_WIDE_INT w = deterministic_seed;
1595 + w ^= w << 13;
1596 + w ^= w >> 7;
1597 + w ^= w << 17;
1598 + deterministic_seed = w;
1599 + return deterministic_seed;
1600 }
1601
1602 - return ret;
1603 + if (urandom_fd < 0) {
1604 + urandom_fd = open("/dev/urandom", O_RDONLY);
1605 + gcc_assert(urandom_fd >= 0);
1606 + }
1607 + if (rnd_idx >= ARRAY_SIZE(rnd_buf)) {
1608 + gcc_assert(read(urandom_fd, rnd_buf, sizeof(rnd_buf)) == sizeof(rnd_buf));
1609 + rnd_idx = 0;
1610 + }
1611 + return rnd_buf[rnd_idx++];
1612 }
1613
1614 static tree tree_get_random_const(tree type)
1615 @@ -549,8 +555,6 @@ static void latent_entropy_start_unit(void *gcc_data __unused,
1616 tree type, id;
1617 int quals;
1618
1619 - seed = get_random_seed(false);
1620 -
1621 if (in_lto_p)
1622 return;
1623
1624 @@ -585,6 +589,12 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
1625 const struct plugin_argument * const argv = plugin_info->argv;
1626 int i;
1627
1628 + /*
1629 + * Call get_random_seed() with noinit=true, so that this returns
1630 + * 0 in the case where no seed has been passed via -frandom-seed.
1631 + */
1632 + deterministic_seed = get_random_seed(true);
1633 +
1634 static const struct ggc_root_tab gt_ggc_r_gt_latent_entropy[] = {
1635 {
1636 .base = &latent_entropy_decl,
1637 diff --git a/sound/core/pcm_misc.c b/sound/core/pcm_misc.c
1638 index c4eb561d20086..0956be39b0355 100644
1639 --- a/sound/core/pcm_misc.c
1640 +++ b/sound/core/pcm_misc.c
1641 @@ -423,7 +423,7 @@ int snd_pcm_format_set_silence(snd_pcm_format_t format, void *data, unsigned int
1642 return 0;
1643 width = pcm_formats[(INT)format].phys; /* physical width */
1644 pat = pcm_formats[(INT)format].silence;
1645 - if (! width)
1646 + if (!width || !pat)
1647 return -EINVAL;
1648 /* signed or 1 byte data */
1649 if (pcm_formats[(INT)format].signd == 1 || width <= 8) {
1650 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1651 index 05ca4196cb0fd..851ea79da31cd 100644
1652 --- a/sound/pci/hda/patch_realtek.c
1653 +++ b/sound/pci/hda/patch_realtek.c
1654 @@ -2568,6 +2568,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
1655 SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
1656 SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
1657 SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
1658 + SND_PCI_QUIRK(0x1558, 0x65f5, "Clevo PD50PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
1659 SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
1660 SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
1661 SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
1662 diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c
1663 index b019e0b8221c7..84fda3b490735 100644
1664 --- a/tools/testing/selftests/mqueue/mq_perf_tests.c
1665 +++ b/tools/testing/selftests/mqueue/mq_perf_tests.c
1666 @@ -180,6 +180,9 @@ void shutdown(int exit_val, char *err_cause, int line_no)
1667 if (in_shutdown++)
1668 return;
1669
1670 + /* Free the cpu_set allocated using CPU_ALLOC in main function */
1671 + CPU_FREE(cpu_set);
1672 +
1673 for (i = 0; i < num_cpus_to_pin; i++)
1674 if (cpu_threads[i]) {
1675 pthread_kill(cpu_threads[i], SIGUSR1);
1676 @@ -551,6 +554,12 @@ int main(int argc, char *argv[])
1677 perror("sysconf(_SC_NPROCESSORS_ONLN)");
1678 exit(1);
1679 }
1680 +
1681 + if (getuid() != 0)
1682 + ksft_exit_skip("Not running as root, but almost all tests "
1683 + "require root in order to modify\nsystem settings. "
1684 + "Exiting.\n");
1685 +
1686 cpus_online = min(MAX_CPUS, sysconf(_SC_NPROCESSORS_ONLN));
1687 cpu_set = CPU_ALLOC(cpus_online);
1688 if (cpu_set == NULL) {
1689 @@ -589,7 +598,7 @@ int main(int argc, char *argv[])
1690 cpu_set)) {
1691 fprintf(stderr, "Any given CPU may "
1692 "only be given once.\n");
1693 - exit(1);
1694 + goto err_code;
1695 } else
1696 CPU_SET_S(cpus_to_pin[cpu],
1697 cpu_set_size, cpu_set);
1698 @@ -607,7 +616,7 @@ int main(int argc, char *argv[])
1699 queue_path = malloc(strlen(option) + 2);
1700 if (!queue_path) {
1701 perror("malloc()");
1702 - exit(1);
1703 + goto err_code;
1704 }
1705 queue_path[0] = '/';
1706 queue_path[1] = 0;
1707 @@ -622,17 +631,12 @@ int main(int argc, char *argv[])
1708 fprintf(stderr, "Must pass at least one CPU to continuous "
1709 "mode.\n");
1710 poptPrintUsage(popt_context, stderr, 0);
1711 - exit(1);
1712 + goto err_code;
1713 } else if (!continuous_mode) {
1714 num_cpus_to_pin = 1;
1715 cpus_to_pin[0] = cpus_online - 1;
1716 }
1717
1718 - if (getuid() != 0)
1719 - ksft_exit_skip("Not running as root, but almost all tests "
1720 - "require root in order to modify\nsystem settings. "
1721 - "Exiting.\n");
1722 -
1723 max_msgs = fopen(MAX_MSGS, "r+");
1724 max_msgsize = fopen(MAX_MSGSIZE, "r+");
1725 if (!max_msgs)
1726 @@ -740,4 +744,9 @@ int main(int argc, char *argv[])
1727 sleep(1);
1728 }
1729 shutdown(0, "", 0);
1730 +
1731 +err_code:
1732 + CPU_FREE(cpu_set);
1733 + exit(1);
1734 +
1735 }