Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0315-4.9.216-all-fixes.patch

Revision 3590
Thu Aug 13 10:21:32 2020 UTC by niro
File size: 100543 bytes
linux-216
1 diff --git a/Makefile b/Makefile
2 index b594484788a8..f0290097784a 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 215
9 +SUBLEVEL = 216
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
14 index 27133c3a4b12..0de4ba698d1d 100644
15 --- a/arch/arm/boot/dts/ls1021a.dtsi
16 +++ b/arch/arm/boot/dts/ls1021a.dtsi
17 @@ -505,7 +505,7 @@
18 };
19
20 mdio0: mdio@2d24000 {
21 - compatible = "fsl,etsec2-mdio";
22 + compatible = "gianfar";
23 device_type = "mdio";
24 #address-cells = <1>;
25 #size-cells = <0>;
26 @@ -513,7 +513,7 @@
27 };
28
29 mdio1: mdio@2d64000 {
30 - compatible = "fsl,etsec2-mdio";
31 + compatible = "gianfar";
32 device_type = "mdio";
33 #address-cells = <1>;
34 #size-cells = <0>;
35 diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
36 index cab128913e72..3a4014870a91 100644
37 --- a/arch/arm/mach-imx/Makefile
38 +++ b/arch/arm/mach-imx/Makefile
39 @@ -86,6 +86,8 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
40 obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o
41 obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
42 endif
43 +AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a
44 +obj-$(CONFIG_SOC_IMX6) += resume-imx6.o
45 obj-$(CONFIG_SOC_IMX6) += pm-imx6.o
46
47 obj-$(CONFIG_SOC_IMX1) += mach-imx1.o
48 diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
49 index c4436d9c52ff..a3f6885cefbf 100644
50 --- a/arch/arm/mach-imx/common.h
51 +++ b/arch/arm/mach-imx/common.h
52 @@ -112,17 +112,17 @@ void imx_cpu_die(unsigned int cpu);
53 int imx_cpu_kill(unsigned int cpu);
54
55 #ifdef CONFIG_SUSPEND
56 -void v7_cpu_resume(void);
57 void imx53_suspend(void __iomem *ocram_vbase);
58 extern const u32 imx53_suspend_sz;
59 void imx6_suspend(void __iomem *ocram_vbase);
60 #else
61 -static inline void v7_cpu_resume(void) {}
62 static inline void imx53_suspend(void __iomem *ocram_vbase) {}
63 static const u32 imx53_suspend_sz;
64 static inline void imx6_suspend(void __iomem *ocram_vbase) {}
65 #endif
66
67 +void v7_cpu_resume(void);
68 +
69 void imx6_pm_ccm_init(const char *ccm_compat);
70 void imx6q_pm_init(void);
71 void imx6dl_pm_init(void);
72 diff --git a/arch/arm/mach-imx/resume-imx6.S b/arch/arm/mach-imx/resume-imx6.S
73 new file mode 100644
74 index 000000000000..5bd1ba7ef15b
75 --- /dev/null
76 +++ b/arch/arm/mach-imx/resume-imx6.S
77 @@ -0,0 +1,24 @@
78 +/* SPDX-License-Identifier: GPL-2.0-or-later */
79 +/*
80 + * Copyright 2014 Freescale Semiconductor, Inc.
81 + */
82 +
83 +#include <linux/linkage.h>
84 +#include <asm/assembler.h>
85 +#include <asm/asm-offsets.h>
86 +#include <asm/hardware/cache-l2x0.h>
87 +#include "hardware.h"
88 +
89 +/*
90 + * The following code must assume it is running from physical address
91 + * where absolute virtual addresses to the data section have to be
92 + * turned into relative ones.
93 + */
94 +
95 +ENTRY(v7_cpu_resume)
96 + bl v7_invalidate_l1
97 +#ifdef CONFIG_CACHE_L2X0
98 + bl l2c310_early_resume
99 +#endif
100 + b cpu_resume
101 +ENDPROC(v7_cpu_resume)
102 diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S
103 index 76ee2ceec8d5..7d84b617af48 100644
104 --- a/arch/arm/mach-imx/suspend-imx6.S
105 +++ b/arch/arm/mach-imx/suspend-imx6.S
106 @@ -333,17 +333,3 @@ resume:
107
108 ret lr
109 ENDPROC(imx6_suspend)
110 -
111 -/*
112 - * The following code must assume it is running from physical address
113 - * where absolute virtual addresses to the data section have to be
114 - * turned into relative ones.
115 - */
116 -
117 -ENTRY(v7_cpu_resume)
118 - bl v7_invalidate_l1
119 -#ifdef CONFIG_CACHE_L2X0
120 - bl l2c310_early_resume
121 -#endif
122 - b cpu_resume
123 -ENDPROC(v7_cpu_resume)
124 diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
125 index 544ea21bfef9..b2683aca401f 100644
126 --- a/arch/mips/kernel/vpe.c
127 +++ b/arch/mips/kernel/vpe.c
128 @@ -134,7 +134,7 @@ void release_vpe(struct vpe *v)
129 {
130 list_del(&v->list);
131 if (v->load_addr)
132 - release_progmem(v);
133 + release_progmem(v->load_addr);
134 kfree(v);
135 }
136
137 diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
138 index 7471ed48f41f..514e04b62261 100644
139 --- a/arch/powerpc/kernel/cputable.c
140 +++ b/arch/powerpc/kernel/cputable.c
141 @@ -2199,11 +2199,13 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
142 * oprofile_cpu_type already has a value, then we are
143 * possibly overriding a real PVR with a logical one,
144 * and, in that case, keep the current value for
145 - * oprofile_cpu_type.
146 + * oprofile_cpu_type. Futhermore, let's ensure that the
147 + * fix for the PMAO bug is enabled on compatibility mode.
148 */
149 if (old.oprofile_cpu_type != NULL) {
150 t->oprofile_cpu_type = old.oprofile_cpu_type;
151 t->oprofile_type = old.oprofile_type;
152 + t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
153 }
154 }
155
156 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
157 index 477df9782fdf..f490a4fab2f7 100644
158 --- a/arch/x86/kernel/cpu/common.c
159 +++ b/arch/x86/kernel/cpu/common.c
160 @@ -388,7 +388,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
161 * cpuid bit to be set. We need to ensure that we
162 * update that bit in this CPU's "cpu_info".
163 */
164 - get_cpu_cap(c);
165 + set_cpu_cap(c, X86_FEATURE_OSPKE);
166 }
167
168 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
169 diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
170 index aaf2f810d170..b28f45aca2ef 100644
171 --- a/crypto/algif_skcipher.c
172 +++ b/crypto/algif_skcipher.c
173 @@ -538,7 +538,7 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
174 lock_sock(sk);
175 tx_nents = skcipher_all_sg_nents(ctx);
176 sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
177 - if (unlikely(!sreq->tsg))
178 + if (unlikely(ZERO_OR_NULL_PTR(sreq->tsg)))
179 goto unlock;
180 sg_init_table(sreq->tsg, tx_nents);
181 memcpy(iv, ctx->iv, ivsize);
182 diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
183 index 396e358c2cee..7ef0a0e105e1 100644
184 --- a/drivers/acpi/acpi_watchdog.c
185 +++ b/drivers/acpi/acpi_watchdog.c
186 @@ -129,12 +129,11 @@ void __init acpi_watchdog_init(void)
187 gas = &entries[i].register_region;
188
189 res.start = gas->address;
190 + res.end = res.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
191 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
192 res.flags = IORESOURCE_MEM;
193 - res.end = res.start + ALIGN(gas->access_width, 4) - 1;
194 } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
195 res.flags = IORESOURCE_IO;
196 - res.end = res.start + gas->access_width - 1;
197 } else {
198 pr_warn("Unsupported address space: %u\n",
199 gas->space_id);
200 diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
201 index 996b9ae15404..a4ef9a6bd367 100644
202 --- a/drivers/char/ipmi/ipmi_ssif.c
203 +++ b/drivers/char/ipmi/ipmi_ssif.c
204 @@ -746,10 +746,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
205 flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
206 msg = ssif_info->curr_msg;
207 if (msg) {
208 + if (data) {
209 + if (len > IPMI_MAX_MSG_LENGTH)
210 + len = IPMI_MAX_MSG_LENGTH;
211 + memcpy(msg->rsp, data, len);
212 + } else {
213 + len = 0;
214 + }
215 msg->rsp_size = len;
216 - if (msg->rsp_size > IPMI_MAX_MSG_LENGTH)
217 - msg->rsp_size = IPMI_MAX_MSG_LENGTH;
218 - memcpy(msg->rsp, data, msg->rsp_size);
219 ssif_info->curr_msg = NULL;
220 }
221
222 diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
223 index 6d7d2d54eacf..f0932f25a9b1 100644
224 --- a/drivers/dma/coh901318.c
225 +++ b/drivers/dma/coh901318.c
226 @@ -1944,8 +1944,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
227 return;
228 }
229
230 - spin_lock(&cohc->lock);
231 -
232 /*
233 * When we reach this point, at least one queue item
234 * should have been moved over from cohc->queue to
235 @@ -1966,8 +1964,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
236 if (coh901318_queue_start(cohc) == NULL)
237 cohc->busy = 0;
238
239 - spin_unlock(&cohc->lock);
240 -
241 /*
242 * This tasklet will remove items from cohc->active
243 * and thus terminates them.
244 diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
245 index 22f7f0c68a48..4eaf92b2b886 100644
246 --- a/drivers/dma/tegra20-apb-dma.c
247 +++ b/drivers/dma/tegra20-apb-dma.c
248 @@ -288,7 +288,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
249
250 /* Do not allocate if desc are waiting for ack */
251 list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
252 - if (async_tx_test_ack(&dma_desc->txd)) {
253 + if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
254 list_del(&dma_desc->node);
255 spin_unlock_irqrestore(&tdc->lock, flags);
256 dma_desc->txd.flags = 0;
257 @@ -755,10 +755,6 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
258 bool was_busy;
259
260 spin_lock_irqsave(&tdc->lock, flags);
261 - if (list_empty(&tdc->pending_sg_req)) {
262 - spin_unlock_irqrestore(&tdc->lock, flags);
263 - return 0;
264 - }
265
266 if (!tdc->busy)
267 goto skip_dma_stop;
268 diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
269 index c8d1f19c9a6d..d46b9e75a847 100644
270 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
271 +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
272 @@ -306,7 +306,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
273 return num;
274 }
275
276 -static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
277 +static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector,
278 struct drm_display_mode *mode)
279 {
280 int id = dsi_mgr_connector_get_id(connector);
281 @@ -438,6 +438,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
282 struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
283 struct mipi_dsi_host *host = msm_dsi->host;
284 struct drm_panel *panel = msm_dsi->panel;
285 + struct msm_dsi_pll *src_pll;
286 bool is_dual_dsi = IS_DUAL_DSI();
287 int ret;
288
289 @@ -471,6 +472,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
290 id, ret);
291 }
292
293 + /* Save PLL status if it is a clock source */
294 + src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
295 + msm_dsi_pll_save_state(src_pll);
296 +
297 ret = msm_dsi_host_power_off(host);
298 if (ret)
299 pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
300 diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
301 index 6abf315fd6da..ce32f41fc28a 100644
302 --- a/drivers/gpu/drm/msm/msm_drv.c
303 +++ b/drivers/gpu/drm/msm/msm_drv.c
304 @@ -396,6 +396,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
305 if (ret)
306 goto fail;
307
308 + if (!dev->dma_parms) {
309 + dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
310 + GFP_KERNEL);
311 + if (!dev->dma_parms)
312 + return -ENOMEM;
313 + }
314 + dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
315 +
316 msm_gem_shrinker_init(ddev);
317
318 switch (get_mdp_ver(pdev)) {
319 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
320 index e382d6f23097..b4b9d8152536 100644
321 --- a/drivers/hid/hid-core.c
322 +++ b/drivers/hid/hid-core.c
323 @@ -1547,7 +1547,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
324
325 rsize = ((report->size - 1) >> 3) + 1;
326
327 - if (rsize > HID_MAX_BUFFER_SIZE)
328 + if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
329 + rsize = HID_MAX_BUFFER_SIZE - 1;
330 + else if (rsize > HID_MAX_BUFFER_SIZE)
331 rsize = HID_MAX_BUFFER_SIZE;
332
333 if (csize < rsize) {
334 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
335 index 8903ea09ac58..dbdd265075da 100644
336 --- a/drivers/hid/usbhid/hiddev.c
337 +++ b/drivers/hid/usbhid/hiddev.c
338 @@ -962,9 +962,9 @@ void hiddev_disconnect(struct hid_device *hid)
339 hiddev->exist = 0;
340
341 if (hiddev->open) {
342 - mutex_unlock(&hiddev->existancelock);
343 usbhid_close(hiddev->hid);
344 wake_up_interruptible(&hiddev->wait);
345 + mutex_unlock(&hiddev->existancelock);
346 } else {
347 mutex_unlock(&hiddev->existancelock);
348 kfree(hiddev);
349 diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
350 index 5929e126da63..d9923d63eb4f 100644
351 --- a/drivers/hwmon/adt7462.c
352 +++ b/drivers/hwmon/adt7462.c
353 @@ -426,7 +426,7 @@ static int ADT7462_REG_VOLT(struct adt7462_data *data, int which)
354 return 0x95;
355 break;
356 }
357 - return -ENODEV;
358 + return 0;
359 }
360
361 /* Provide labels for sysfs */
362 diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
363 index 30132c3957cd..41ca9ff7b5da 100644
364 --- a/drivers/i2c/busses/i2c-jz4780.c
365 +++ b/drivers/i2c/busses/i2c-jz4780.c
366 @@ -82,25 +82,6 @@
367 #define JZ4780_I2C_STA_TFNF BIT(1)
368 #define JZ4780_I2C_STA_ACT BIT(0)
369
370 -static const char * const jz4780_i2c_abrt_src[] = {
371 - "ABRT_7B_ADDR_NOACK",
372 - "ABRT_10ADDR1_NOACK",
373 - "ABRT_10ADDR2_NOACK",
374 - "ABRT_XDATA_NOACK",
375 - "ABRT_GCALL_NOACK",
376 - "ABRT_GCALL_READ",
377 - "ABRT_HS_ACKD",
378 - "SBYTE_ACKDET",
379 - "ABRT_HS_NORSTRT",
380 - "SBYTE_NORSTRT",
381 - "ABRT_10B_RD_NORSTRT",
382 - "ABRT_MASTER_DIS",
383 - "ARB_LOST",
384 - "SLVFLUSH_TXFIFO",
385 - "SLV_ARBLOST",
386 - "SLVRD_INTX",
387 -};
388 -
389 #define JZ4780_I2C_INTST_IGC BIT(11)
390 #define JZ4780_I2C_INTST_ISTT BIT(10)
391 #define JZ4780_I2C_INTST_ISTP BIT(9)
392 @@ -538,21 +519,8 @@ done:
393
394 static void jz4780_i2c_txabrt(struct jz4780_i2c *i2c, int src)
395 {
396 - int i;
397 -
398 - dev_err(&i2c->adap.dev, "txabrt: 0x%08x\n", src);
399 - dev_err(&i2c->adap.dev, "device addr=%x\n",
400 - jz4780_i2c_readw(i2c, JZ4780_I2C_TAR));
401 - dev_err(&i2c->adap.dev, "send cmd count:%d %d\n",
402 - i2c->cmd, i2c->cmd_buf[i2c->cmd]);
403 - dev_err(&i2c->adap.dev, "receive data count:%d %d\n",
404 - i2c->cmd, i2c->data_buf[i2c->cmd]);
405 -
406 - for (i = 0; i < 16; i++) {
407 - if (src & BIT(i))
408 - dev_dbg(&i2c->adap.dev, "I2C TXABRT[%d]=%s\n",
409 - i, jz4780_i2c_abrt_src[i]);
410 - }
411 + dev_dbg(&i2c->adap.dev, "txabrt: 0x%08x, cmd: %d, send: %d, recv: %d\n",
412 + src, i2c->cmd, i2c->cmd_buf[i2c->cmd], i2c->data_buf[i2c->cmd]);
413 }
414
415 static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c,
416 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
417 index 71c7c4c328ef..304429fd04dd 100644
418 --- a/drivers/infiniband/core/cm.c
419 +++ b/drivers/infiniband/core/cm.c
420 @@ -1073,6 +1073,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
421 /* Sharing an ib_cm_id with different handlers is not
422 * supported */
423 spin_unlock_irqrestore(&cm.lock, flags);
424 + ib_destroy_cm_id(cm_id);
425 return ERR_PTR(-EINVAL);
426 }
427 atomic_inc(&cm_id_priv->refcount);
428 diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
429 index 5495e22839a7..1f71c306923f 100644
430 --- a/drivers/infiniband/core/iwcm.c
431 +++ b/drivers/infiniband/core/iwcm.c
432 @@ -137,8 +137,10 @@ static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
433 {
434 struct list_head *e, *tmp;
435
436 - list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
437 + list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
438 + list_del(e);
439 kfree(list_entry(e, struct iwcm_work, free_list));
440 + }
441 }
442
443 static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
444 diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
445 index 58b97226050f..1b7d77080d6b 100644
446 --- a/drivers/md/dm-cache-target.c
447 +++ b/drivers/md/dm-cache-target.c
448 @@ -2192,8 +2192,8 @@ static void wait_for_migrations(struct cache *cache)
449
450 static void stop_worker(struct cache *cache)
451 {
452 - cancel_delayed_work(&cache->waker);
453 - flush_workqueue(cache->wq);
454 + cancel_delayed_work_sync(&cache->waker);
455 + drain_workqueue(cache->wq);
456 }
457
458 static void requeue_deferred_cells(struct cache *cache)
459 diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
460 index 912dc09bc7a7..905911f78693 100644
461 --- a/drivers/net/ethernet/amazon/ena/ena_com.c
462 +++ b/drivers/net/ethernet/amazon/ena/ena_com.c
463 @@ -199,6 +199,11 @@ static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
464 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
465 u16 command_id, bool capture)
466 {
467 + if (unlikely(!queue->comp_ctx)) {
468 + pr_err("Completion context is NULL\n");
469 + return NULL;
470 + }
471 +
472 if (unlikely(command_id >= queue->q_depth)) {
473 pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
474 command_id, queue->q_depth);
475 @@ -839,6 +844,24 @@ static int ena_com_get_feature(struct ena_com_dev *ena_dev,
476 0);
477 }
478
479 +static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
480 +{
481 + struct ena_admin_feature_rss_flow_hash_control *hash_key =
482 + (ena_dev->rss).hash_key;
483 +
484 + netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
485 + /* The key is stored in the device in u32 array
486 + * as well as the API requires the key to be passed in this
487 + * format. Thus the size of our array should be divided by 4
488 + */
489 + hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
490 +}
491 +
492 +int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
493 +{
494 + return ena_dev->rss.hash_func;
495 +}
496 +
497 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
498 {
499 struct ena_rss *rss = &ena_dev->rss;
500 @@ -2034,15 +2057,16 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
501
502 switch (func) {
503 case ENA_ADMIN_TOEPLITZ:
504 - if (key_len > sizeof(hash_key->key)) {
505 - pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
506 - key_len, sizeof(hash_key->key));
507 - return -EINVAL;
508 + if (key) {
509 + if (key_len != sizeof(hash_key->key)) {
510 + pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
511 + key_len, sizeof(hash_key->key));
512 + return -EINVAL;
513 + }
514 + memcpy(hash_key->key, key, key_len);
515 + rss->hash_init_val = init_val;
516 + hash_key->keys_num = key_len >> 2;
517 }
518 -
519 - memcpy(hash_key->key, key, key_len);
520 - rss->hash_init_val = init_val;
521 - hash_key->keys_num = key_len >> 2;
522 break;
523 case ENA_ADMIN_CRC32:
524 rss->hash_init_val = init_val;
525 @@ -2079,7 +2103,11 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
526 if (unlikely(rc))
527 return rc;
528
529 - rss->hash_func = get_resp.u.flow_hash_func.selected_func;
530 + /* ffs() returns 1 in case the lsb is set */
531 + rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
532 + if (rss->hash_func)
533 + rss->hash_func--;
534 +
535 if (func)
536 *func = rss->hash_func;
537
538 @@ -2366,6 +2394,8 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
539 if (unlikely(rc))
540 goto err_hash_key;
541
542 + ena_com_hash_key_fill_default_key(ena_dev);
543 +
544 rc = ena_com_hash_ctrl_init(ena_dev);
545 if (unlikely(rc))
546 goto err_hash_ctrl;
547 diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
548 index 509d7b8e15ab..98b2ad20f599 100644
549 --- a/drivers/net/ethernet/amazon/ena/ena_com.h
550 +++ b/drivers/net/ethernet/amazon/ena/ena_com.h
551 @@ -41,6 +41,7 @@
552 #include <linux/spinlock.h>
553 #include <linux/types.h>
554 #include <linux/wait.h>
555 +#include <linux/netdevice.h>
556
557 #include "ena_common_defs.h"
558 #include "ena_admin_defs.h"
559 @@ -622,6 +623,14 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
560 */
561 void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
562
563 +/* ena_com_get_current_hash_function - Get RSS hash function
564 + * @ena_dev: ENA communication layer struct
565 + *
566 + * Return the current hash function.
567 + * @return: 0 or one of the ena_admin_hash_functions values.
568 + */
569 +int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);
570 +
571 /* ena_com_fill_hash_function - Fill RSS hash function
572 * @ena_dev: ENA communication layer struct
573 * @func: The hash function (Toeplitz or crc)
574 diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
575 index 06fd061a20e9..0ef0a7b75751 100644
576 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
577 +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
578 @@ -651,6 +651,28 @@ static u32 ena_get_rxfh_key_size(struct net_device *netdev)
579 return ENA_HASH_KEY_SIZE;
580 }
581
582 +static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
583 +{
584 + struct ena_com_dev *ena_dev = adapter->ena_dev;
585 + int i, rc;
586 +
587 + if (!indir)
588 + return 0;
589 +
590 + rc = ena_com_indirect_table_get(ena_dev, indir);
591 + if (rc)
592 + return rc;
593 +
594 + /* Our internal representation of the indices is: even indices
595 + * for Tx and uneven indices for Rx. We need to convert the Rx
596 + * indices to be consecutive
597 + */
598 + for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
599 + indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]);
600 +
601 + return rc;
602 +}
603 +
604 static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
605 u8 *hfunc)
606 {
607 @@ -659,11 +681,25 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
608 u8 func;
609 int rc;
610
611 - rc = ena_com_indirect_table_get(adapter->ena_dev, indir);
612 + rc = ena_indirection_table_get(adapter, indir);
613 if (rc)
614 return rc;
615
616 + /* We call this function in order to check if the device
617 + * supports getting/setting the hash function.
618 + */
619 rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
620 +
621 + if (rc) {
622 + if (rc == -EOPNOTSUPP) {
623 + key = NULL;
624 + hfunc = NULL;
625 + rc = 0;
626 + }
627 +
628 + return rc;
629 + }
630 +
631 if (rc)
632 return rc;
633
634 @@ -715,6 +751,9 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
635 }
636
637 switch (hfunc) {
638 + case ETH_RSS_HASH_NO_CHANGE:
639 + func = ena_com_get_current_hash_function(ena_dev);
640 + break;
641 case ETH_RSS_HASH_TOP:
642 func = ENA_ADMIN_TOEPLITZ;
643 break;
644 @@ -819,6 +858,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
645 .get_channels = ena_get_channels,
646 .get_tunable = ena_get_tunable,
647 .set_tunable = ena_set_tunable,
648 + .get_ts_info = ethtool_op_get_ts_info,
649 };
650
651 void ena_set_ethtool_ops(struct net_device *netdev)
652 diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
653 index 008f2d594d40..326c2e1437b3 100644
654 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
655 +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
656 @@ -110,6 +110,8 @@
657
658 #define ENA_IO_TXQ_IDX(q) (2 * (q))
659 #define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
660 +#define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q) ((q) / 2)
661 +#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q) (((q) - 1) / 2)
662
663 #define ENA_MGMNT_IRQ_IDX 0
664 #define ENA_IO_IRQ_FIRST_IDX 1
665 diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
666 index de4b5d267c30..17c07837033f 100644
667 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
668 +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
669 @@ -1711,7 +1711,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
670 int ret;
671
672 ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
673 - XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
674 + XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);
675 if (!ndev)
676 return -ENOMEM;
677
678 diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
679 index 8dc1f0277117..d94e151cff12 100644
680 --- a/drivers/net/ethernet/micrel/ks8851_mll.c
681 +++ b/drivers/net/ethernet/micrel/ks8851_mll.c
682 @@ -474,24 +474,6 @@ static int msg_enable;
683 * chip is busy transferring packet data (RX/TX FIFO accesses).
684 */
685
686 -/**
687 - * ks_rdreg8 - read 8 bit register from device
688 - * @ks : The chip information
689 - * @offset: The register address
690 - *
691 - * Read a 8bit register from the chip, returning the result
692 - */
693 -static u8 ks_rdreg8(struct ks_net *ks, int offset)
694 -{
695 - u16 data;
696 - u8 shift_bit = offset & 0x03;
697 - u8 shift_data = (offset & 1) << 3;
698 - ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
699 - iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
700 - data = ioread16(ks->hw_addr);
701 - return (u8)(data >> shift_data);
702 -}
703 -
704 /**
705 * ks_rdreg16 - read 16 bit register from device
706 * @ks : The chip information
707 @@ -502,27 +484,11 @@ static u8 ks_rdreg8(struct ks_net *ks, int offset)
708
709 static u16 ks_rdreg16(struct ks_net *ks, int offset)
710 {
711 - ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
712 + ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02));
713 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
714 return ioread16(ks->hw_addr);
715 }
716
717 -/**
718 - * ks_wrreg8 - write 8bit register value to chip
719 - * @ks: The chip information
720 - * @offset: The register address
721 - * @value: The value to write
722 - *
723 - */
724 -static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
725 -{
726 - u8 shift_bit = (offset & 0x03);
727 - u16 value_write = (u16)(value << ((offset & 1) << 3));
728 - ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
729 - iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
730 - iowrite16(value_write, ks->hw_addr);
731 -}
732 -
733 /**
734 * ks_wrreg16 - write 16bit register value to chip
735 * @ks: The chip information
736 @@ -533,7 +499,7 @@ static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
737
738 static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
739 {
740 - ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
741 + ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02));
742 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
743 iowrite16(value, ks->hw_addr);
744 }
745 @@ -549,7 +515,7 @@ static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
746 {
747 len >>= 1;
748 while (len--)
749 - *wptr++ = (u16)ioread16(ks->hw_addr);
750 + *wptr++ = be16_to_cpu(ioread16(ks->hw_addr));
751 }
752
753 /**
754 @@ -563,7 +529,7 @@ static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
755 {
756 len >>= 1;
757 while (len--)
758 - iowrite16(*wptr++, ks->hw_addr);
759 + iowrite16(cpu_to_be16(*wptr++), ks->hw_addr);
760 }
761
762 static void ks_disable_int(struct ks_net *ks)
763 @@ -642,8 +608,7 @@ static void ks_read_config(struct ks_net *ks)
764 u16 reg_data = 0;
765
766 /* Regardless of bus width, 8 bit read should always work.*/
767 - reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
768 - reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
769 + reg_data = ks_rdreg16(ks, KS_CCR);
770
771 /* addr/data bus are multiplexed */
772 ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
773 @@ -747,7 +712,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
774
775 /* 1. set sudo DMA mode */
776 ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
777 - ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
778 + ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
779
780 /* 2. read prepend data */
781 /**
782 @@ -764,7 +729,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
783 ks_inblk(ks, buf, ALIGN(len, 4));
784
785 /* 4. reset sudo DMA Mode */
786 - ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
787 + ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
788 }
789
790 /**
791 @@ -997,13 +962,13 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
792 ks->txh.txw[1] = cpu_to_le16(len);
793
794 /* 1. set sudo-DMA mode */
795 - ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
796 + ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
797 /* 2. write status/lenth info */
798 ks_outblk(ks, ks->txh.txw, 4);
799 /* 3. write pkt data */
800 ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
801 /* 4. reset sudo-DMA mode */
802 - ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
803 + ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
804 /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
805 ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
806 /* 6. wait until TXQCR_METFE is auto-cleared */
807 diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
808 index 46fe1ae919a3..51ce3ea17fb3 100644
809 --- a/drivers/net/phy/mdio-bcm-iproc.c
810 +++ b/drivers/net/phy/mdio-bcm-iproc.c
811 @@ -188,6 +188,23 @@ static int iproc_mdio_remove(struct platform_device *pdev)
812 return 0;
813 }
814
815 +#ifdef CONFIG_PM_SLEEP
816 +int iproc_mdio_resume(struct device *dev)
817 +{
818 + struct platform_device *pdev = to_platform_device(dev);
819 + struct iproc_mdio_priv *priv = platform_get_drvdata(pdev);
820 +
821 + /* restore the mii clock configuration */
822 + iproc_mdio_config_clk(priv->base);
823 +
824 + return 0;
825 +}
826 +
827 +static const struct dev_pm_ops iproc_mdio_pm_ops = {
828 + .resume = iproc_mdio_resume
829 +};
830 +#endif /* CONFIG_PM_SLEEP */
831 +
832 static const struct of_device_id iproc_mdio_of_match[] = {
833 { .compatible = "brcm,iproc-mdio", },
834 { /* sentinel */ },
835 @@ -198,6 +215,9 @@ static struct platform_driver iproc_mdio_driver = {
836 .driver = {
837 .name = "iproc-mdio",
838 .of_match_table = iproc_mdio_of_match,
839 +#ifdef CONFIG_PM_SLEEP
840 + .pm = &iproc_mdio_pm_ops,
841 +#endif
842 },
843 .probe = iproc_mdio_probe,
844 .remove = iproc_mdio_remove,
845 diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
846 index 2af09c3851a5..cc841126147e 100644
847 --- a/drivers/net/slip/slip.c
848 +++ b/drivers/net/slip/slip.c
849 @@ -868,7 +868,6 @@ err_free_chan:
850 tty->disc_data = NULL;
851 clear_bit(SLF_INUSE, &sl->flags);
852 sl_free_netdev(sl->dev);
853 - free_netdev(sl->dev);
854
855 err_exit:
856 rtnl_unlock();
857 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
858 index 17be1f6a813f..44b16d945e33 100644
859 --- a/drivers/net/tun.c
860 +++ b/drivers/net/tun.c
861 @@ -1106,6 +1106,13 @@ static void tun_net_init(struct net_device *dev)
862 }
863 }
864
865 +static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
866 +{
867 + struct sock *sk = tfile->socket.sk;
868 +
869 + return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
870 +}
871 +
872 /* Character device part */
873
874 /* Poll */
875 @@ -1128,10 +1135,14 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
876 if (!skb_array_empty(&tfile->tx_array))
877 mask |= POLLIN | POLLRDNORM;
878
879 - if (tun->dev->flags & IFF_UP &&
880 - (sock_writeable(sk) ||
881 - (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
882 - sock_writeable(sk))))
883 + /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
884 + * guarantee EPOLLOUT to be raised by either here or
885 + * tun_sock_write_space(). Then process could get notification
886 + * after it writes to a down device and meets -EIO.
887 + */
888 + if (tun_sock_writeable(tun, tfile) ||
889 + (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
890 + tun_sock_writeable(tun, tfile)))
891 mask |= POLLOUT | POLLWRNORM;
892
893 if (tun->dev->reg_state != NETREG_REGISTERED)
894 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
895 index de7b431fdd6b..97f6b8130db3 100644
896 --- a/drivers/net/usb/qmi_wwan.c
897 +++ b/drivers/net/usb/qmi_wwan.c
898 @@ -951,6 +951,7 @@ static const struct usb_device_id products[] = {
899 {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
900 {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
901 {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
902 + {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
903 {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
904 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
905 {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
906 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
907 index a2ebe46bcfc5..395bbe2c0f98 100644
908 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
909 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
910 @@ -898,9 +898,13 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
911 return err;
912 }
913 def_rxq = trans_pcie->rxq;
914 - if (!rba->alloc_wq)
915 + if (!rba->alloc_wq) {
916 rba->alloc_wq = alloc_workqueue("rb_allocator",
917 WQ_HIGHPRI | WQ_UNBOUND, 1);
918 + if (!rba->alloc_wq)
919 + return -ENOMEM;
920 + }
921 +
922 INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
923
924 cancel_work_sync(&rba->rx_alloc);
925 diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
926 index f837c39a8017..0f6905123b0a 100644
927 --- a/drivers/nfc/pn544/i2c.c
928 +++ b/drivers/nfc/pn544/i2c.c
929 @@ -240,6 +240,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
930
931 out:
932 gpio_set_value_cansleep(phy->gpio_en, !phy->en_polarity);
933 + usleep_range(10000, 15000);
934 }
935
936 static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode)
937 diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
938 index 9082476b51db..4e9f794176d3 100644
939 --- a/drivers/s390/cio/blacklist.c
940 +++ b/drivers/s390/cio/blacklist.c
941 @@ -302,8 +302,10 @@ static void *
942 cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
943 {
944 struct ccwdev_iter *iter;
945 + loff_t p = *offset;
946
947 - if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
948 + (*offset)++;
949 + if (p >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
950 return NULL;
951 iter = it;
952 if (iter->devno == __MAX_SUBCHANNEL) {
953 @@ -313,7 +315,6 @@ cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
954 return NULL;
955 } else
956 iter->devno++;
957 - (*offset)++;
958 return iter;
959 }
960
961 diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
962 index e8819aa20415..c4e9eba36023 100644
963 --- a/drivers/tty/serial/8250/8250_core.c
964 +++ b/drivers/tty/serial/8250/8250_core.c
965 @@ -181,7 +181,7 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
966 struct hlist_head *h;
967 struct hlist_node *n;
968 struct irq_info *i;
969 - int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
970 + int ret;
971
972 mutex_lock(&hash_mutex);
973
974 @@ -216,9 +216,8 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
975 INIT_LIST_HEAD(&up->list);
976 i->head = &up->list;
977 spin_unlock_irq(&i->lock);
978 - irq_flags |= up->port.irqflags;
979 ret = request_irq(up->port.irq, serial8250_interrupt,
980 - irq_flags, "serial", i);
981 + up->port.irqflags, "serial", i);
982 if (ret < 0)
983 serial_do_unlink(i, up);
984 }
985 diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
986 index 8f1233324586..c7a7574172fa 100644
987 --- a/drivers/tty/serial/8250/8250_port.c
988 +++ b/drivers/tty/serial/8250/8250_port.c
989 @@ -2199,6 +2199,10 @@ int serial8250_do_startup(struct uart_port *port)
990 }
991 }
992
993 + /* Check if we need to have shared IRQs */
994 + if (port->irq && (up->port.flags & UPF_SHARE_IRQ))
995 + up->port.irqflags |= IRQF_SHARED;
996 +
997 if (port->irq) {
998 unsigned char iir1;
999 /*
1000 diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
1001 index d4462512605b..246f4aab7407 100644
1002 --- a/drivers/tty/serial/ar933x_uart.c
1003 +++ b/drivers/tty/serial/ar933x_uart.c
1004 @@ -289,6 +289,10 @@ static void ar933x_uart_set_termios(struct uart_port *port,
1005 ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
1006 AR933X_UART_CS_HOST_INT_EN);
1007
1008 + /* enable RX and TX ready overide */
1009 + ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
1010 + AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
1011 +
1012 /* reenable the UART */
1013 ar933x_uart_rmw(up, AR933X_UART_CS_REG,
1014 AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S,
1015 @@ -421,6 +425,10 @@ static int ar933x_uart_startup(struct uart_port *port)
1016 ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
1017 AR933X_UART_CS_HOST_INT_EN);
1018
1019 + /* enable RX and TX ready overide */
1020 + ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
1021 + AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
1022 +
1023 /* Enable RX interrupts */
1024 up->ier = AR933X_UART_INT_RX_VALID;
1025 ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
1026 diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
1027 index 401c983ec5f3..a10e4aa9e18e 100644
1028 --- a/drivers/tty/serial/mvebu-uart.c
1029 +++ b/drivers/tty/serial/mvebu-uart.c
1030 @@ -581,7 +581,7 @@ static int mvebu_uart_probe(struct platform_device *pdev)
1031
1032 port->membase = devm_ioremap_resource(&pdev->dev, reg);
1033 if (IS_ERR(port->membase))
1034 - return -PTR_ERR(port->membase);
1035 + return PTR_ERR(port->membase);
1036
1037 data = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart_data),
1038 GFP_KERNEL);
1039 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
1040 index 53cbf4ebef10..b6ff01131eef 100644
1041 --- a/drivers/tty/sysrq.c
1042 +++ b/drivers/tty/sysrq.c
1043 @@ -543,7 +543,6 @@ void __handle_sysrq(int key, bool check_mask)
1044 */
1045 orig_log_level = console_loglevel;
1046 console_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
1047 - pr_info("SysRq : ");
1048
1049 op_p = __sysrq_get_key_op(key);
1050 if (op_p) {
1051 @@ -552,14 +551,15 @@ void __handle_sysrq(int key, bool check_mask)
1052 * should not) and is the invoked operation enabled?
1053 */
1054 if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
1055 - pr_cont("%s\n", op_p->action_msg);
1056 + pr_info("%s\n", op_p->action_msg);
1057 console_loglevel = orig_log_level;
1058 op_p->handler(key);
1059 } else {
1060 - pr_cont("This sysrq operation is disabled.\n");
1061 + pr_info("This sysrq operation is disabled.\n");
1062 + console_loglevel = orig_log_level;
1063 }
1064 } else {
1065 - pr_cont("HELP : ");
1066 + pr_info("HELP : ");
1067 /* Only print the help msg once per handler */
1068 for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++) {
1069 if (sysrq_key_table[i]) {
1070 diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
1071 index 6ac05021c4a7..1edc1a36db4a 100644
1072 --- a/drivers/tty/vt/selection.c
1073 +++ b/drivers/tty/vt/selection.c
1074 @@ -13,6 +13,7 @@
1075 #include <linux/tty.h>
1076 #include <linux/sched.h>
1077 #include <linux/mm.h>
1078 +#include <linux/mutex.h>
1079 #include <linux/slab.h>
1080 #include <linux/types.h>
1081
1082 @@ -40,6 +41,7 @@ static volatile int sel_start = -1; /* cleared by clear_selection */
1083 static int sel_end;
1084 static int sel_buffer_lth;
1085 static char *sel_buffer;
1086 +static DEFINE_MUTEX(sel_lock);
1087
1088 /* clear_selection, highlight and highlight_pointer can be called
1089 from interrupt (via scrollback/front) */
1090 @@ -156,14 +158,14 @@ static int store_utf8(u16 c, char *p)
1091 * The entire selection process is managed under the console_lock. It's
1092 * a lot under the lock but its hardly a performance path
1093 */
1094 -int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty)
1095 +static int __set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty)
1096 {
1097 struct vc_data *vc = vc_cons[fg_console].d;
1098 int sel_mode, new_sel_start, new_sel_end, spc;
1099 char *bp, *obp;
1100 int i, ps, pe, multiplier;
1101 u16 c;
1102 - int mode;
1103 + int mode, ret = 0;
1104
1105 poke_blanked_console();
1106
1107 @@ -324,7 +326,21 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t
1108 }
1109 }
1110 sel_buffer_lth = bp - sel_buffer;
1111 - return 0;
1112 +
1113 + return ret;
1114 +}
1115 +
1116 +int set_selection(const struct tiocl_selection __user *v, struct tty_struct *tty)
1117 +{
1118 + int ret;
1119 +
1120 + mutex_lock(&sel_lock);
1121 + console_lock();
1122 + ret = __set_selection(v, tty);
1123 + console_unlock();
1124 + mutex_unlock(&sel_lock);
1125 +
1126 + return ret;
1127 }
1128
1129 /* Insert the contents of the selection buffer into the
1130 @@ -353,6 +369,7 @@ int paste_selection(struct tty_struct *tty)
1131 tty_buffer_lock_exclusive(&vc->port);
1132
1133 add_wait_queue(&vc->paste_wait, &wait);
1134 + mutex_lock(&sel_lock);
1135 while (sel_buffer && sel_buffer_lth > pasted) {
1136 set_current_state(TASK_INTERRUPTIBLE);
1137 if (signal_pending(current)) {
1138 @@ -360,7 +377,9 @@ int paste_selection(struct tty_struct *tty)
1139 break;
1140 }
1141 if (tty_throttled(tty)) {
1142 + mutex_unlock(&sel_lock);
1143 schedule();
1144 + mutex_lock(&sel_lock);
1145 continue;
1146 }
1147 __set_current_state(TASK_RUNNING);
1148 @@ -369,6 +388,7 @@ int paste_selection(struct tty_struct *tty)
1149 count);
1150 pasted += count;
1151 }
1152 + mutex_unlock(&sel_lock);
1153 remove_wait_queue(&vc->paste_wait, &wait);
1154 __set_current_state(TASK_RUNNING);
1155
1156 diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
1157 index 232cb0a760b9..c1d3b685a587 100644
1158 --- a/drivers/tty/vt/vt.c
1159 +++ b/drivers/tty/vt/vt.c
1160 @@ -2690,9 +2690,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
1161 switch (type)
1162 {
1163 case TIOCL_SETSEL:
1164 - console_lock();
1165 ret = set_selection((struct tiocl_selection __user *)(p+1), tty);
1166 - console_unlock();
1167 break;
1168 case TIOCL_PASTESEL:
1169 ret = paste_selection(tty);
1170 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
1171 index 3fcc3e74ae2e..c958cf42a1bb 100644
1172 --- a/drivers/usb/core/hub.c
1173 +++ b/drivers/usb/core/hub.c
1174 @@ -954,13 +954,17 @@ int usb_remove_device(struct usb_device *udev)
1175 {
1176 struct usb_hub *hub;
1177 struct usb_interface *intf;
1178 + int ret;
1179
1180 if (!udev->parent) /* Can't remove a root hub */
1181 return -EINVAL;
1182 hub = usb_hub_to_struct_hub(udev->parent);
1183 intf = to_usb_interface(hub->intfdev);
1184
1185 - usb_autopm_get_interface(intf);
1186 + ret = usb_autopm_get_interface(intf);
1187 + if (ret < 0)
1188 + return ret;
1189 +
1190 set_bit(udev->portnum, hub->removed_bits);
1191 hub_port_logical_disconnect(hub, udev->portnum);
1192 usb_autopm_put_interface(intf);
1193 diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
1194 index 460c855be0d0..53c1f6e604b1 100644
1195 --- a/drivers/usb/core/port.c
1196 +++ b/drivers/usb/core/port.c
1197 @@ -179,7 +179,10 @@ static int usb_port_runtime_resume(struct device *dev)
1198 if (!port_dev->is_superspeed && peer)
1199 pm_runtime_get_sync(&peer->dev);
1200
1201 - usb_autopm_get_interface(intf);
1202 + retval = usb_autopm_get_interface(intf);
1203 + if (retval < 0)
1204 + return retval;
1205 +
1206 retval = usb_hub_set_port_power(hdev, hub, port1, true);
1207 msleep(hub_power_on_good_delay(hub));
1208 if (udev && !retval) {
1209 @@ -232,7 +235,10 @@ static int usb_port_runtime_suspend(struct device *dev)
1210 if (usb_port_block_power_off)
1211 return -EBUSY;
1212
1213 - usb_autopm_get_interface(intf);
1214 + retval = usb_autopm_get_interface(intf);
1215 + if (retval < 0)
1216 + return retval;
1217 +
1218 retval = usb_hub_set_port_power(hdev, hub, port1, false);
1219 usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
1220 if (!port_dev->is_superspeed)
1221 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
1222 index ad8307140df8..64c03e871f2d 100644
1223 --- a/drivers/usb/core/quirks.c
1224 +++ b/drivers/usb/core/quirks.c
1225 @@ -86,6 +86,9 @@ static const struct usb_device_id usb_quirk_list[] = {
1226 /* Logitech PTZ Pro Camera */
1227 { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
1228
1229 + /* Logitech Screen Share */
1230 + { USB_DEVICE(0x046d, 0x086c), .driver_info = USB_QUIRK_NO_LPM },
1231 +
1232 /* Logitech Quickcam Fusion */
1233 { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
1234
1235 diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
1236 index 4d7df2f6caf5..3a0452ff1a56 100644
1237 --- a/drivers/usb/gadget/composite.c
1238 +++ b/drivers/usb/gadget/composite.c
1239 @@ -438,9 +438,13 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
1240 if (!val)
1241 return 0;
1242 if (speed < USB_SPEED_SUPER)
1243 - return DIV_ROUND_UP(val, 2);
1244 + return min(val, 500U) / 2;
1245 else
1246 - return DIV_ROUND_UP(val, 8);
1247 + /*
1248 + * USB 3.x supports up to 900mA, but since 900 isn't divisible
1249 + * by 8 the integral division will effectively cap to 896mA.
1250 + */
1251 + return min(val, 900U) / 8;
1252 }
1253
1254 static int config_buf(struct usb_configuration *config,
1255 @@ -833,6 +837,10 @@ static int set_config(struct usb_composite_dev *cdev,
1256
1257 /* when we return, be sure our power usage is valid */
1258 power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
1259 + if (gadget->speed < USB_SPEED_SUPER)
1260 + power = min(power, 500U);
1261 + else
1262 + power = min(power, 900U);
1263 done:
1264 usb_gadget_vbus_draw(gadget, power);
1265 if (result >= 0 && cdev->delayed_status)
1266 @@ -2272,7 +2280,7 @@ void composite_resume(struct usb_gadget *gadget)
1267 {
1268 struct usb_composite_dev *cdev = get_gadget_data(gadget);
1269 struct usb_function *f;
1270 - u16 maxpower;
1271 + unsigned maxpower;
1272
1273 /* REVISIT: should we have config level
1274 * suspend/resume callbacks?
1275 @@ -2286,10 +2294,14 @@ void composite_resume(struct usb_gadget *gadget)
1276 f->resume(f);
1277 }
1278
1279 - maxpower = cdev->config->MaxPower;
1280 + maxpower = cdev->config->MaxPower ?
1281 + cdev->config->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
1282 + if (gadget->speed < USB_SPEED_SUPER)
1283 + maxpower = min(maxpower, 500U);
1284 + else
1285 + maxpower = min(maxpower, 900U);
1286
1287 - usb_gadget_vbus_draw(gadget, maxpower ?
1288 - maxpower : CONFIG_USB_GADGET_VBUS_DRAW);
1289 + usb_gadget_vbus_draw(gadget, maxpower);
1290 }
1291
1292 cdev->suspended = 0;
1293 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
1294 index d1278d2d544b..b5747f1270a6 100644
1295 --- a/drivers/usb/gadget/function/f_fs.c
1296 +++ b/drivers/usb/gadget/function/f_fs.c
1297 @@ -1077,18 +1077,19 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
1298 {
1299 struct ffs_io_data *io_data = kiocb->private;
1300 struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
1301 + unsigned long flags;
1302 int value;
1303
1304 ENTER();
1305
1306 - spin_lock_irq(&epfile->ffs->eps_lock);
1307 + spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
1308
1309 if (likely(io_data && io_data->ep && io_data->req))
1310 value = usb_ep_dequeue(io_data->ep, io_data->req);
1311 else
1312 value = -EINVAL;
1313
1314 - spin_unlock_irq(&epfile->ffs->eps_lock);
1315 + spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
1316
1317 return value;
1318 }
1319 diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
1320 index 510a54f88963..5d7d0f2e80a5 100644
1321 --- a/drivers/usb/gadget/function/u_serial.c
1322 +++ b/drivers/usb/gadget/function/u_serial.c
1323 @@ -715,8 +715,10 @@ static int gs_start_io(struct gs_port *port)
1324 port->n_read = 0;
1325 started = gs_start_rx(port);
1326
1327 - /* unblock any pending writes into our circular buffer */
1328 if (started) {
1329 + gs_start_tx(port);
1330 + /* Unblock any pending writes into our circular buffer, in case
1331 + * we didn't in gs_start_tx() */
1332 tty_wakeup(port->port.tty);
1333 } else {
1334 gs_free_requests(ep, head, &port->read_allocated);
1335 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1336 index 3ebf6307217c..a52ae34fb1c3 100644
1337 --- a/drivers/usb/storage/unusual_devs.h
1338 +++ b/drivers/usb/storage/unusual_devs.h
1339 @@ -1277,6 +1277,12 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999,
1340 USB_SC_RBC, USB_PR_BULK, NULL,
1341 0 ),
1342
1343 +UNUSUAL_DEV(0x090c, 0x1000, 0x1100, 0x1100,
1344 + "Samsung",
1345 + "Flash Drive FIT",
1346 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1347 + US_FL_MAX_SECTORS_64),
1348 +
1349 /* aeb */
1350 UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
1351 "Feiya",
1352 diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
1353 index dd8798bf88e7..861f43f8f9ce 100644
1354 --- a/drivers/vhost/net.c
1355 +++ b/drivers/vhost/net.c
1356 @@ -914,11 +914,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
1357
1358 static struct socket *get_raw_socket(int fd)
1359 {
1360 - struct {
1361 - struct sockaddr_ll sa;
1362 - char buf[MAX_ADDR_LEN];
1363 - } uaddr;
1364 - int uaddr_len = sizeof uaddr, r;
1365 + int r;
1366 struct socket *sock = sockfd_lookup(fd, &r);
1367
1368 if (!sock)
1369 @@ -930,12 +926,7 @@ static struct socket *get_raw_socket(int fd)
1370 goto err;
1371 }
1372
1373 - r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
1374 - &uaddr_len, 0);
1375 - if (r)
1376 - goto err;
1377 -
1378 - if (uaddr.sa.sll_family != AF_PACKET) {
1379 + if (sock->sk->sk_family != AF_PACKET) {
1380 r = -EPFNOSUPPORT;
1381 goto err;
1382 }
1383 diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
1384 index dda1c4b3a229..42c0a26646f6 100644
1385 --- a/drivers/video/console/vgacon.c
1386 +++ b/drivers/video/console/vgacon.c
1387 @@ -1323,6 +1323,9 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
1388 static int vgacon_resize(struct vc_data *c, unsigned int width,
1389 unsigned int height, unsigned int user)
1390 {
1391 + if ((width << 1) * height > vga_vram_size)
1392 + return -EINVAL;
1393 +
1394 if (width % 2 || width > screen_info.orig_video_cols ||
1395 height > (screen_info.orig_video_lines * vga_default_font_height)/
1396 c->vc_font.height)
1397 diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
1398 index 7386111220d5..daeb645fcea8 100644
1399 --- a/drivers/watchdog/da9062_wdt.c
1400 +++ b/drivers/watchdog/da9062_wdt.c
1401 @@ -126,13 +126,6 @@ static int da9062_wdt_stop(struct watchdog_device *wdd)
1402 struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
1403 int ret;
1404
1405 - ret = da9062_reset_watchdog_timer(wdt);
1406 - if (ret) {
1407 - dev_err(wdt->hw->dev, "Failed to ping the watchdog (err = %d)\n",
1408 - ret);
1409 - return ret;
1410 - }
1411 -
1412 ret = regmap_update_bits(wdt->hw->regmap,
1413 DA9062AA_CONTROL_D,
1414 DA9062AA_TWDSCALE_MASK,
1415 diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
1416 index 0da9943d405f..c310e841561c 100644
1417 --- a/drivers/watchdog/wdat_wdt.c
1418 +++ b/drivers/watchdog/wdat_wdt.c
1419 @@ -392,7 +392,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
1420
1421 memset(&r, 0, sizeof(r));
1422 r.start = gas->address;
1423 - r.end = r.start + gas->access_width - 1;
1424 + r.end = r.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
1425 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1426 r.flags = IORESOURCE_MEM;
1427 } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1428 diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
1429 index 15bac390dff9..10aedc2a4c2d 100644
1430 --- a/fs/cifs/cifsacl.c
1431 +++ b/fs/cifs/cifsacl.c
1432 @@ -603,7 +603,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
1433 ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
1434 *pmode |= (S_IXUGO & (*pbits_to_set));
1435
1436 - cifs_dbg(NOISY, "access flags 0x%x mode now 0x%x\n", flags, *pmode);
1437 + cifs_dbg(NOISY, "access flags 0x%x mode now %04o\n", flags, *pmode);
1438 return;
1439 }
1440
1441 @@ -632,7 +632,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
1442 if (mode & S_IXUGO)
1443 *pace_flags |= SET_FILE_EXEC_RIGHTS;
1444
1445 - cifs_dbg(NOISY, "mode: 0x%x, access flags now 0x%x\n",
1446 + cifs_dbg(NOISY, "mode: %04o, access flags now 0x%x\n",
1447 mode, *pace_flags);
1448 return;
1449 }
1450 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
1451 index 961fcb40183a..f2707ff795d4 100644
1452 --- a/fs/cifs/connect.c
1453 +++ b/fs/cifs/connect.c
1454 @@ -3401,7 +3401,7 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
1455 cifs_sb->mnt_gid = pvolume_info->linux_gid;
1456 cifs_sb->mnt_file_mode = pvolume_info->file_mode;
1457 cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
1458 - cifs_dbg(FYI, "file mode: 0x%hx dir mode: 0x%hx\n",
1459 + cifs_dbg(FYI, "file mode: %04ho dir mode: %04ho\n",
1460 cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
1461
1462 cifs_sb->actimeo = pvolume_info->actimeo;
1463 diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
1464 index b1c0961e6b3f..dfa85ad5b481 100644
1465 --- a/fs/cifs/inode.c
1466 +++ b/fs/cifs/inode.c
1467 @@ -1573,7 +1573,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
1468 struct TCP_Server_Info *server;
1469 char *full_path;
1470
1471 - cifs_dbg(FYI, "In cifs_mkdir, mode = 0x%hx inode = 0x%p\n",
1472 + cifs_dbg(FYI, "In cifs_mkdir, mode = %04ho inode = 0x%p\n",
1473 mode, inode);
1474
1475 cifs_sb = CIFS_SB(inode->i_sb);
1476 @@ -1990,6 +1990,7 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
1477 struct inode *inode = d_inode(dentry);
1478 struct super_block *sb = dentry->d_sb;
1479 char *full_path = NULL;
1480 + int count = 0;
1481
1482 if (inode == NULL)
1483 return -ENOENT;
1484 @@ -2011,15 +2012,18 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
1485 full_path, inode, inode->i_count.counter,
1486 dentry, cifs_get_time(dentry), jiffies);
1487
1488 +again:
1489 if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
1490 rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
1491 else
1492 rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
1493 xid, NULL);
1494 -
1495 + if (rc == -EAGAIN && count++ < 10)
1496 + goto again;
1497 out:
1498 kfree(full_path);
1499 free_xid(xid);
1500 +
1501 return rc;
1502 }
1503
1504 diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
1505 index 3f3ec50bf773..b134315fb69d 100644
1506 --- a/fs/ecryptfs/keystore.c
1507 +++ b/fs/ecryptfs/keystore.c
1508 @@ -1285,7 +1285,7 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat *crypt_stat,
1509 printk(KERN_ERR "Enter w/ first byte != 0x%.2x\n",
1510 ECRYPTFS_TAG_1_PACKET_TYPE);
1511 rc = -EINVAL;
1512 - goto out_free;
1513 + goto out;
1514 }
1515 /* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or
1516 * at end of function upon failure */
1517 @@ -1335,7 +1335,7 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat *crypt_stat,
1518 printk(KERN_WARNING "Tag 1 packet contains key larger "
1519 "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES");
1520 rc = -EINVAL;
1521 - goto out;
1522 + goto out_free;
1523 }
1524 memcpy((*new_auth_tok)->session_key.encrypted_key,
1525 &data[(*packet_size)], (body_size - (ECRYPTFS_SIG_SIZE + 2)));
1526 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
1527 index 2455fe1446d6..de601f3c023d 100644
1528 --- a/fs/ext4/balloc.c
1529 +++ b/fs/ext4/balloc.c
1530 @@ -279,6 +279,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
1531 ext4_group_t ngroups = ext4_get_groups_count(sb);
1532 struct ext4_group_desc *desc;
1533 struct ext4_sb_info *sbi = EXT4_SB(sb);
1534 + struct buffer_head *bh_p;
1535
1536 if (block_group >= ngroups) {
1537 ext4_error(sb, "block_group >= groups_count - block_group = %u,"
1538 @@ -289,7 +290,14 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
1539
1540 group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
1541 offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
1542 - if (!sbi->s_group_desc[group_desc]) {
1543 + bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc);
1544 + /*
1545 + * sbi_array_rcu_deref returns with rcu unlocked, this is ok since
1546 + * the pointer being dereferenced won't be dereferenced again. By
1547 + * looking at the usage in add_new_gdb() the value isn't modified,
1548 + * just the pointer, and so it remains valid.
1549 + */
1550 + if (!bh_p) {
1551 ext4_error(sb, "Group descriptor not loaded - "
1552 "block_group = %u, group_desc = %u, desc = %u",
1553 block_group, group_desc, offset);
1554 @@ -297,10 +305,10 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
1555 }
1556
1557 desc = (struct ext4_group_desc *)(
1558 - (__u8 *)sbi->s_group_desc[group_desc]->b_data +
1559 + (__u8 *)bh_p->b_data +
1560 offset * EXT4_DESC_SIZE(sb));
1561 if (bh)
1562 - *bh = sbi->s_group_desc[group_desc];
1563 + *bh = bh_p;
1564 return desc;
1565 }
1566
1567 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
1568 index 9713d3d41412..eb0ec5068423 100644
1569 --- a/fs/ext4/ext4.h
1570 +++ b/fs/ext4/ext4.h
1571 @@ -1367,7 +1367,7 @@ struct ext4_sb_info {
1572 loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */
1573 struct buffer_head * s_sbh; /* Buffer containing the super block */
1574 struct ext4_super_block *s_es; /* Pointer to the super block in the buffer */
1575 - struct buffer_head **s_group_desc;
1576 + struct buffer_head * __rcu *s_group_desc;
1577 unsigned int s_mount_opt;
1578 unsigned int s_mount_opt2;
1579 unsigned int s_mount_flags;
1580 @@ -1427,7 +1427,7 @@ struct ext4_sb_info {
1581 #endif
1582
1583 /* for buddy allocator */
1584 - struct ext4_group_info ***s_group_info;
1585 + struct ext4_group_info ** __rcu *s_group_info;
1586 struct inode *s_buddy_cache;
1587 spinlock_t s_md_lock;
1588 unsigned short *s_mb_offsets;
1589 @@ -1475,7 +1475,7 @@ struct ext4_sb_info {
1590 unsigned int s_extent_max_zeroout_kb;
1591
1592 unsigned int s_log_groups_per_flex;
1593 - struct flex_groups *s_flex_groups;
1594 + struct flex_groups * __rcu *s_flex_groups;
1595 ext4_group_t s_flex_groups_allocated;
1596
1597 /* workqueue for reserved extent conversions (buffered io) */
1598 @@ -1549,6 +1549,23 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
1599 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
1600 }
1601
1602 +/*
1603 + * Returns: sbi->field[index]
1604 + * Used to access an array element from the following sbi fields which require
1605 + * rcu protection to avoid dereferencing an invalid pointer due to reassignment
1606 + * - s_group_desc
1607 + * - s_group_info
1608 + * - s_flex_group
1609 + */
1610 +#define sbi_array_rcu_deref(sbi, field, index) \
1611 +({ \
1612 + typeof(*((sbi)->field)) _v; \
1613 + rcu_read_lock(); \
1614 + _v = ((typeof(_v)*)rcu_dereference((sbi)->field))[index]; \
1615 + rcu_read_unlock(); \
1616 + _v; \
1617 +})
1618 +
1619 /*
1620 * Inode dynamic state flags
1621 */
1622 @@ -2558,6 +2575,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
1623 extern bool ext4_empty_dir(struct inode *inode);
1624
1625 /* resize.c */
1626 +extern void ext4_kvfree_array_rcu(void *to_free);
1627 extern int ext4_group_add(struct super_block *sb,
1628 struct ext4_new_group_data *input);
1629 extern int ext4_group_extend(struct super_block *sb,
1630 @@ -2798,13 +2816,13 @@ static inline
1631 struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
1632 ext4_group_t group)
1633 {
1634 - struct ext4_group_info ***grp_info;
1635 + struct ext4_group_info **grp_info;
1636 long indexv, indexh;
1637 BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
1638 - grp_info = EXT4_SB(sb)->s_group_info;
1639 indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
1640 indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
1641 - return grp_info[indexv][indexh];
1642 + grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
1643 + return grp_info[indexh];
1644 }
1645
1646 /*
1647 diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
1648 index 4f78e099de1d..c5af7bbf906f 100644
1649 --- a/fs/ext4/ialloc.c
1650 +++ b/fs/ext4/ialloc.c
1651 @@ -331,11 +331,13 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
1652
1653 percpu_counter_inc(&sbi->s_freeinodes_counter);
1654 if (sbi->s_log_groups_per_flex) {
1655 - ext4_group_t f = ext4_flex_group(sbi, block_group);
1656 + struct flex_groups *fg;
1657
1658 - atomic_inc(&sbi->s_flex_groups[f].free_inodes);
1659 + fg = sbi_array_rcu_deref(sbi, s_flex_groups,
1660 + ext4_flex_group(sbi, block_group));
1661 + atomic_inc(&fg->free_inodes);
1662 if (is_directory)
1663 - atomic_dec(&sbi->s_flex_groups[f].used_dirs);
1664 + atomic_dec(&fg->used_dirs);
1665 }
1666 BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
1667 fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
1668 @@ -376,12 +378,13 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
1669 int flex_size, struct orlov_stats *stats)
1670 {
1671 struct ext4_group_desc *desc;
1672 - struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
1673
1674 if (flex_size > 1) {
1675 - stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
1676 - stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
1677 - stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
1678 + struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
1679 + s_flex_groups, g);
1680 + stats->free_inodes = atomic_read(&fg->free_inodes);
1681 + stats->free_clusters = atomic64_read(&fg->free_clusters);
1682 + stats->used_dirs = atomic_read(&fg->used_dirs);
1683 return;
1684 }
1685
1686 @@ -988,7 +991,8 @@ got:
1687 if (sbi->s_log_groups_per_flex) {
1688 ext4_group_t f = ext4_flex_group(sbi, group);
1689
1690 - atomic_inc(&sbi->s_flex_groups[f].used_dirs);
1691 + atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
1692 + f)->used_dirs);
1693 }
1694 }
1695 if (ext4_has_group_desc_csum(sb)) {
1696 @@ -1011,7 +1015,8 @@ got:
1697
1698 if (sbi->s_log_groups_per_flex) {
1699 flex_group = ext4_flex_group(sbi, group);
1700 - atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
1701 + atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
1702 + flex_group)->free_inodes);
1703 }
1704
1705 inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
1706 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
1707 index a49d0e5d7baf..c18668e3135e 100644
1708 --- a/fs/ext4/mballoc.c
1709 +++ b/fs/ext4/mballoc.c
1710 @@ -2377,7 +2377,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
1711 {
1712 struct ext4_sb_info *sbi = EXT4_SB(sb);
1713 unsigned size;
1714 - struct ext4_group_info ***new_groupinfo;
1715 + struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
1716
1717 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
1718 EXT4_DESC_PER_BLOCK_BITS(sb);
1719 @@ -2390,13 +2390,16 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
1720 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
1721 return -ENOMEM;
1722 }
1723 - if (sbi->s_group_info) {
1724 - memcpy(new_groupinfo, sbi->s_group_info,
1725 + rcu_read_lock();
1726 + old_groupinfo = rcu_dereference(sbi->s_group_info);
1727 + if (old_groupinfo)
1728 + memcpy(new_groupinfo, old_groupinfo,
1729 sbi->s_group_info_size * sizeof(*sbi->s_group_info));
1730 - kvfree(sbi->s_group_info);
1731 - }
1732 - sbi->s_group_info = new_groupinfo;
1733 + rcu_read_unlock();
1734 + rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
1735 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
1736 + if (old_groupinfo)
1737 + ext4_kvfree_array_rcu(old_groupinfo);
1738 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
1739 sbi->s_group_info_size);
1740 return 0;
1741 @@ -2408,6 +2411,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
1742 {
1743 int i;
1744 int metalen = 0;
1745 + int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
1746 struct ext4_sb_info *sbi = EXT4_SB(sb);
1747 struct ext4_group_info **meta_group_info;
1748 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
1749 @@ -2426,12 +2430,12 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
1750 "for a buddy group");
1751 goto exit_meta_group_info;
1752 }
1753 - sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
1754 - meta_group_info;
1755 + rcu_read_lock();
1756 + rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
1757 + rcu_read_unlock();
1758 }
1759
1760 - meta_group_info =
1761 - sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
1762 + meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
1763 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
1764
1765 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
1766 @@ -2479,8 +2483,13 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
1767 exit_group_info:
1768 /* If a meta_group_info table has been allocated, release it now */
1769 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
1770 - kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
1771 - sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
1772 + struct ext4_group_info ***group_info;
1773 +
1774 + rcu_read_lock();
1775 + group_info = rcu_dereference(sbi->s_group_info);
1776 + kfree(group_info[idx]);
1777 + group_info[idx] = NULL;
1778 + rcu_read_unlock();
1779 }
1780 exit_meta_group_info:
1781 return -ENOMEM;
1782 @@ -2493,6 +2502,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
1783 struct ext4_sb_info *sbi = EXT4_SB(sb);
1784 int err;
1785 struct ext4_group_desc *desc;
1786 + struct ext4_group_info ***group_info;
1787 struct kmem_cache *cachep;
1788
1789 err = ext4_mb_alloc_groupinfo(sb, ngroups);
1790 @@ -2527,11 +2537,16 @@ err_freebuddy:
1791 while (i-- > 0)
1792 kmem_cache_free(cachep, ext4_get_group_info(sb, i));
1793 i = sbi->s_group_info_size;
1794 + rcu_read_lock();
1795 + group_info = rcu_dereference(sbi->s_group_info);
1796 while (i-- > 0)
1797 - kfree(sbi->s_group_info[i]);
1798 + kfree(group_info[i]);
1799 + rcu_read_unlock();
1800 iput(sbi->s_buddy_cache);
1801 err_freesgi:
1802 - kvfree(sbi->s_group_info);
1803 + rcu_read_lock();
1804 + kvfree(rcu_dereference(sbi->s_group_info));
1805 + rcu_read_unlock();
1806 return -ENOMEM;
1807 }
1808
1809 @@ -2720,7 +2735,7 @@ int ext4_mb_release(struct super_block *sb)
1810 ext4_group_t ngroups = ext4_get_groups_count(sb);
1811 ext4_group_t i;
1812 int num_meta_group_infos;
1813 - struct ext4_group_info *grinfo;
1814 + struct ext4_group_info *grinfo, ***group_info;
1815 struct ext4_sb_info *sbi = EXT4_SB(sb);
1816 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
1817
1818 @@ -2738,9 +2753,12 @@ int ext4_mb_release(struct super_block *sb)
1819 num_meta_group_infos = (ngroups +
1820 EXT4_DESC_PER_BLOCK(sb) - 1) >>
1821 EXT4_DESC_PER_BLOCK_BITS(sb);
1822 + rcu_read_lock();
1823 + group_info = rcu_dereference(sbi->s_group_info);
1824 for (i = 0; i < num_meta_group_infos; i++)
1825 - kfree(sbi->s_group_info[i]);
1826 - kvfree(sbi->s_group_info);
1827 + kfree(group_info[i]);
1828 + kvfree(group_info);
1829 + rcu_read_unlock();
1830 }
1831 kfree(sbi->s_mb_offsets);
1832 kfree(sbi->s_mb_maxs);
1833 @@ -2998,7 +3016,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
1834 ext4_group_t flex_group = ext4_flex_group(sbi,
1835 ac->ac_b_ex.fe_group);
1836 atomic64_sub(ac->ac_b_ex.fe_len,
1837 - &sbi->s_flex_groups[flex_group].free_clusters);
1838 + &sbi_array_rcu_deref(sbi, s_flex_groups,
1839 + flex_group)->free_clusters);
1840 }
1841
1842 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
1843 @@ -4888,7 +4907,8 @@ do_more:
1844 if (sbi->s_log_groups_per_flex) {
1845 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
1846 atomic64_add(count_clusters,
1847 - &sbi->s_flex_groups[flex_group].free_clusters);
1848 + &sbi_array_rcu_deref(sbi, s_flex_groups,
1849 + flex_group)->free_clusters);
1850 }
1851
1852 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
1853 @@ -5033,7 +5053,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
1854 if (sbi->s_log_groups_per_flex) {
1855 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
1856 atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
1857 - &sbi->s_flex_groups[flex_group].free_clusters);
1858 + &sbi_array_rcu_deref(sbi, s_flex_groups,
1859 + flex_group)->free_clusters);
1860 }
1861
1862 ext4_mb_unload_buddy(&e4b);
1863 diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
1864 index aef2a24dc9f9..845d9841c91c 100644
1865 --- a/fs/ext4/resize.c
1866 +++ b/fs/ext4/resize.c
1867 @@ -16,6 +16,33 @@
1868
1869 #include "ext4_jbd2.h"
1870
1871 +struct ext4_rcu_ptr {
1872 + struct rcu_head rcu;
1873 + void *ptr;
1874 +};
1875 +
1876 +static void ext4_rcu_ptr_callback(struct rcu_head *head)
1877 +{
1878 + struct ext4_rcu_ptr *ptr;
1879 +
1880 + ptr = container_of(head, struct ext4_rcu_ptr, rcu);
1881 + kvfree(ptr->ptr);
1882 + kfree(ptr);
1883 +}
1884 +
1885 +void ext4_kvfree_array_rcu(void *to_free)
1886 +{
1887 + struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
1888 +
1889 + if (ptr) {
1890 + ptr->ptr = to_free;
1891 + call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
1892 + return;
1893 + }
1894 + synchronize_rcu();
1895 + kvfree(to_free);
1896 +}
1897 +
1898 int ext4_resize_begin(struct super_block *sb)
1899 {
1900 struct ext4_sb_info *sbi = EXT4_SB(sb);
1901 @@ -541,8 +568,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
1902 brelse(gdb);
1903 goto out;
1904 }
1905 - memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
1906 - gdb->b_size);
1907 + memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
1908 + s_group_desc, j)->b_data, gdb->b_size);
1909 set_buffer_uptodate(gdb);
1910
1911 err = ext4_handle_dirty_metadata(handle, NULL, gdb);
1912 @@ -849,13 +876,15 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
1913 }
1914 brelse(dind);
1915
1916 - o_group_desc = EXT4_SB(sb)->s_group_desc;
1917 + rcu_read_lock();
1918 + o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
1919 memcpy(n_group_desc, o_group_desc,
1920 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
1921 + rcu_read_unlock();
1922 n_group_desc[gdb_num] = gdb_bh;
1923 - EXT4_SB(sb)->s_group_desc = n_group_desc;
1924 + rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
1925 EXT4_SB(sb)->s_gdb_count++;
1926 - kvfree(o_group_desc);
1927 + ext4_kvfree_array_rcu(o_group_desc);
1928
1929 le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
1930 err = ext4_handle_dirty_super(handle, sb);
1931 @@ -903,9 +932,11 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
1932 return err;
1933 }
1934
1935 - o_group_desc = EXT4_SB(sb)->s_group_desc;
1936 + rcu_read_lock();
1937 + o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
1938 memcpy(n_group_desc, o_group_desc,
1939 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
1940 + rcu_read_unlock();
1941 n_group_desc[gdb_num] = gdb_bh;
1942
1943 BUFFER_TRACE(gdb_bh, "get_write_access");
1944 @@ -916,9 +947,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
1945 return err;
1946 }
1947
1948 - EXT4_SB(sb)->s_group_desc = n_group_desc;
1949 + rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
1950 EXT4_SB(sb)->s_gdb_count++;
1951 - kvfree(o_group_desc);
1952 + ext4_kvfree_array_rcu(o_group_desc);
1953 return err;
1954 }
1955
1956 @@ -1180,7 +1211,8 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
1957 * use non-sparse filesystems anymore. This is already checked above.
1958 */
1959 if (gdb_off) {
1960 - gdb_bh = sbi->s_group_desc[gdb_num];
1961 + gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
1962 + gdb_num);
1963 BUFFER_TRACE(gdb_bh, "get_write_access");
1964 err = ext4_journal_get_write_access(handle, gdb_bh);
1965
1966 @@ -1262,7 +1294,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
1967 /*
1968 * get_write_access() has been called on gdb_bh by ext4_add_new_desc().
1969 */
1970 - gdb_bh = sbi->s_group_desc[gdb_num];
1971 + gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
1972 /* Update group descriptor block for new group */
1973 gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
1974 gdb_off * EXT4_DESC_SIZE(sb));
1975 @@ -1390,11 +1422,14 @@ static void ext4_update_super(struct super_block *sb,
1976 percpu_counter_read(&sbi->s_freeclusters_counter));
1977 if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
1978 ext4_group_t flex_group;
1979 + struct flex_groups *fg;
1980 +
1981 flex_group = ext4_flex_group(sbi, group_data[0].group);
1982 + fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
1983 atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
1984 - &sbi->s_flex_groups[flex_group].free_clusters);
1985 + &fg->free_clusters);
1986 atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
1987 - &sbi->s_flex_groups[flex_group].free_inodes);
1988 + &fg->free_inodes);
1989 }
1990
1991 /*
1992 @@ -1489,7 +1524,8 @@ exit_journal:
1993 for (; gdb_num <= gdb_num_end; gdb_num++) {
1994 struct buffer_head *gdb_bh;
1995
1996 - gdb_bh = sbi->s_group_desc[gdb_num];
1997 + gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
1998 + gdb_num);
1999 if (old_gdb == gdb_bh->b_blocknr)
2000 continue;
2001 update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
2002 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2003 index b69a78c061cb..75f71e52ffc7 100644
2004 --- a/fs/ext4/super.c
2005 +++ b/fs/ext4/super.c
2006 @@ -826,6 +826,8 @@ static void ext4_put_super(struct super_block *sb)
2007 {
2008 struct ext4_sb_info *sbi = EXT4_SB(sb);
2009 struct ext4_super_block *es = sbi->s_es;
2010 + struct buffer_head **group_desc;
2011 + struct flex_groups **flex_groups;
2012 int aborted = 0;
2013 int i, err;
2014
2015 @@ -857,10 +859,18 @@ static void ext4_put_super(struct super_block *sb)
2016 if (!(sb->s_flags & MS_RDONLY))
2017 ext4_commit_super(sb, 1);
2018
2019 + rcu_read_lock();
2020 + group_desc = rcu_dereference(sbi->s_group_desc);
2021 for (i = 0; i < sbi->s_gdb_count; i++)
2022 - brelse(sbi->s_group_desc[i]);
2023 - kvfree(sbi->s_group_desc);
2024 - kvfree(sbi->s_flex_groups);
2025 + brelse(group_desc[i]);
2026 + kvfree(group_desc);
2027 + flex_groups = rcu_dereference(sbi->s_flex_groups);
2028 + if (flex_groups) {
2029 + for (i = 0; i < sbi->s_flex_groups_allocated; i++)
2030 + kvfree(flex_groups[i]);
2031 + kvfree(flex_groups);
2032 + }
2033 + rcu_read_unlock();
2034 percpu_counter_destroy(&sbi->s_freeclusters_counter);
2035 percpu_counter_destroy(&sbi->s_freeinodes_counter);
2036 percpu_counter_destroy(&sbi->s_dirs_counter);
2037 @@ -2109,8 +2119,8 @@ done:
2038 int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
2039 {
2040 struct ext4_sb_info *sbi = EXT4_SB(sb);
2041 - struct flex_groups *new_groups;
2042 - int size;
2043 + struct flex_groups **old_groups, **new_groups;
2044 + int size, i, j;
2045
2046 if (!sbi->s_log_groups_per_flex)
2047 return 0;
2048 @@ -2119,22 +2129,37 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
2049 if (size <= sbi->s_flex_groups_allocated)
2050 return 0;
2051
2052 - size = roundup_pow_of_two(size * sizeof(struct flex_groups));
2053 - new_groups = ext4_kvzalloc(size, GFP_KERNEL);
2054 + new_groups = ext4_kvzalloc(roundup_pow_of_two(size *
2055 + sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
2056 if (!new_groups) {
2057 - ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
2058 - size / (int) sizeof(struct flex_groups));
2059 + ext4_msg(sb, KERN_ERR,
2060 + "not enough memory for %d flex group pointers", size);
2061 return -ENOMEM;
2062 }
2063 -
2064 - if (sbi->s_flex_groups) {
2065 - memcpy(new_groups, sbi->s_flex_groups,
2066 - (sbi->s_flex_groups_allocated *
2067 - sizeof(struct flex_groups)));
2068 - kvfree(sbi->s_flex_groups);
2069 + for (i = sbi->s_flex_groups_allocated; i < size; i++) {
2070 + new_groups[i] = ext4_kvzalloc(roundup_pow_of_two(
2071 + sizeof(struct flex_groups)),
2072 + GFP_KERNEL);
2073 + if (!new_groups[i]) {
2074 + for (j = sbi->s_flex_groups_allocated; j < i; j++)
2075 + kvfree(new_groups[j]);
2076 + kvfree(new_groups);
2077 + ext4_msg(sb, KERN_ERR,
2078 + "not enough memory for %d flex groups", size);
2079 + return -ENOMEM;
2080 + }
2081 }
2082 - sbi->s_flex_groups = new_groups;
2083 - sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
2084 + rcu_read_lock();
2085 + old_groups = rcu_dereference(sbi->s_flex_groups);
2086 + if (old_groups)
2087 + memcpy(new_groups, old_groups,
2088 + (sbi->s_flex_groups_allocated *
2089 + sizeof(struct flex_groups *)));
2090 + rcu_read_unlock();
2091 + rcu_assign_pointer(sbi->s_flex_groups, new_groups);
2092 + sbi->s_flex_groups_allocated = size;
2093 + if (old_groups)
2094 + ext4_kvfree_array_rcu(old_groups);
2095 return 0;
2096 }
2097
2098 @@ -2142,6 +2167,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
2099 {
2100 struct ext4_sb_info *sbi = EXT4_SB(sb);
2101 struct ext4_group_desc *gdp = NULL;
2102 + struct flex_groups *fg;
2103 ext4_group_t flex_group;
2104 int i, err;
2105
2106 @@ -2159,12 +2185,11 @@ static int ext4_fill_flex_info(struct super_block *sb)
2107 gdp = ext4_get_group_desc(sb, i, NULL);
2108
2109 flex_group = ext4_flex_group(sbi, i);
2110 - atomic_add(ext4_free_inodes_count(sb, gdp),
2111 - &sbi->s_flex_groups[flex_group].free_inodes);
2112 + fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
2113 + atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
2114 atomic64_add(ext4_free_group_clusters(sb, gdp),
2115 - &sbi->s_flex_groups[flex_group].free_clusters);
2116 - atomic_add(ext4_used_dirs_count(sb, gdp),
2117 - &sbi->s_flex_groups[flex_group].used_dirs);
2118 + &fg->free_clusters);
2119 + atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
2120 }
2121
2122 return 1;
2123 @@ -3403,9 +3428,10 @@ static void ext4_set_resv_clusters(struct super_block *sb)
2124 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2125 {
2126 char *orig_data = kstrdup(data, GFP_KERNEL);
2127 - struct buffer_head *bh;
2128 + struct buffer_head *bh, **group_desc;
2129 struct ext4_super_block *es = NULL;
2130 struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
2131 + struct flex_groups **flex_groups;
2132 ext4_fsblk_t block;
2133 ext4_fsblk_t sb_block = get_sb_block(&data);
2134 ext4_fsblk_t logical_sb_block;
2135 @@ -3955,9 +3981,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2136 goto failed_mount;
2137 }
2138 }
2139 - sbi->s_group_desc = ext4_kvmalloc(db_count *
2140 + rcu_assign_pointer(sbi->s_group_desc,
2141 + ext4_kvmalloc(db_count *
2142 sizeof(struct buffer_head *),
2143 - GFP_KERNEL);
2144 + GFP_KERNEL));
2145 if (sbi->s_group_desc == NULL) {
2146 ext4_msg(sb, KERN_ERR, "not enough memory");
2147 ret = -ENOMEM;
2148 @@ -3967,14 +3994,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2149 bgl_lock_init(sbi->s_blockgroup_lock);
2150
2151 for (i = 0; i < db_count; i++) {
2152 + struct buffer_head *bh;
2153 +
2154 block = descriptor_loc(sb, logical_sb_block, i);
2155 - sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
2156 - if (!sbi->s_group_desc[i]) {
2157 + bh = sb_bread_unmovable(sb, block);
2158 + if (!bh) {
2159 ext4_msg(sb, KERN_ERR,
2160 "can't read group descriptor %d", i);
2161 db_count = i;
2162 goto failed_mount2;
2163 }
2164 + rcu_read_lock();
2165 + rcu_dereference(sbi->s_group_desc)[i] = bh;
2166 + rcu_read_unlock();
2167 }
2168 sbi->s_gdb_count = db_count;
2169 if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
2170 @@ -4316,8 +4348,14 @@ failed_mount7:
2171 ext4_unregister_li_request(sb);
2172 failed_mount6:
2173 ext4_mb_release(sb);
2174 - if (sbi->s_flex_groups)
2175 - kvfree(sbi->s_flex_groups);
2176 + rcu_read_lock();
2177 + flex_groups = rcu_dereference(sbi->s_flex_groups);
2178 + if (flex_groups) {
2179 + for (i = 0; i < sbi->s_flex_groups_allocated; i++)
2180 + kvfree(flex_groups[i]);
2181 + kvfree(flex_groups);
2182 + }
2183 + rcu_read_unlock();
2184 percpu_counter_destroy(&sbi->s_freeclusters_counter);
2185 percpu_counter_destroy(&sbi->s_freeinodes_counter);
2186 percpu_counter_destroy(&sbi->s_dirs_counter);
2187 @@ -4349,9 +4387,12 @@ failed_mount3:
2188 if (sbi->s_mmp_tsk)
2189 kthread_stop(sbi->s_mmp_tsk);
2190 failed_mount2:
2191 + rcu_read_lock();
2192 + group_desc = rcu_dereference(sbi->s_group_desc);
2193 for (i = 0; i < db_count; i++)
2194 - brelse(sbi->s_group_desc[i]);
2195 - kvfree(sbi->s_group_desc);
2196 + brelse(group_desc[i]);
2197 + kvfree(group_desc);
2198 + rcu_read_unlock();
2199 failed_mount:
2200 if (sbi->s_chksum_driver)
2201 crypto_free_shash(sbi->s_chksum_driver);
2202 diff --git a/fs/fat/inode.c b/fs/fat/inode.c
2203 index 88720011a6eb..f0387d040331 100644
2204 --- a/fs/fat/inode.c
2205 +++ b/fs/fat/inode.c
2206 @@ -736,6 +736,13 @@ static struct inode *fat_alloc_inode(struct super_block *sb)
2207 return NULL;
2208
2209 init_rwsem(&ei->truncate_lock);
2210 + /* Zeroing to allow iput() even if partial initialized inode. */
2211 + ei->mmu_private = 0;
2212 + ei->i_start = 0;
2213 + ei->i_logstart = 0;
2214 + ei->i_attrs = 0;
2215 + ei->i_pos = 0;
2216 +
2217 return &ei->vfs_inode;
2218 }
2219
2220 @@ -1366,16 +1373,6 @@ out:
2221 return 0;
2222 }
2223
2224 -static void fat_dummy_inode_init(struct inode *inode)
2225 -{
2226 - /* Initialize this dummy inode to work as no-op. */
2227 - MSDOS_I(inode)->mmu_private = 0;
2228 - MSDOS_I(inode)->i_start = 0;
2229 - MSDOS_I(inode)->i_logstart = 0;
2230 - MSDOS_I(inode)->i_attrs = 0;
2231 - MSDOS_I(inode)->i_pos = 0;
2232 -}
2233 -
2234 static int fat_read_root(struct inode *inode)
2235 {
2236 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
2237 @@ -1820,13 +1817,11 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
2238 fat_inode = new_inode(sb);
2239 if (!fat_inode)
2240 goto out_fail;
2241 - fat_dummy_inode_init(fat_inode);
2242 sbi->fat_inode = fat_inode;
2243
2244 fsinfo_inode = new_inode(sb);
2245 if (!fsinfo_inode)
2246 goto out_fail;
2247 - fat_dummy_inode_init(fsinfo_inode);
2248 fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
2249 sbi->fsinfo_inode = fsinfo_inode;
2250 insert_inode_hash(fsinfo_inode);
2251 diff --git a/fs/namei.c b/fs/namei.c
2252 index 757a50ecf0f4..0953281430b1 100644
2253 --- a/fs/namei.c
2254 +++ b/fs/namei.c
2255 @@ -1370,7 +1370,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
2256 nd->path.dentry = parent;
2257 nd->seq = seq;
2258 if (unlikely(!path_connected(&nd->path)))
2259 - return -ENOENT;
2260 + return -ECHILD;
2261 break;
2262 } else {
2263 struct mount *mnt = real_mount(nd->path.mnt);
2264 diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
2265 index 1d798abae710..f502d257d494 100644
2266 --- a/include/acpi/actypes.h
2267 +++ b/include/acpi/actypes.h
2268 @@ -551,6 +551,8 @@ typedef u64 acpi_integer;
2269 #define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
2270 #define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
2271
2272 +#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1))
2273 +
2274 /*******************************************************************************
2275 *
2276 * Miscellaneous constants
2277 diff --git a/include/linux/bitops.h b/include/linux/bitops.h
2278 index 76ad8a957ffa..cee74a52b9eb 100644
2279 --- a/include/linux/bitops.h
2280 +++ b/include/linux/bitops.h
2281 @@ -3,7 +3,8 @@
2282 #include <asm/types.h>
2283 #include <linux/bits.h>
2284
2285 -#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
2286 +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
2287 +#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
2288
2289 extern unsigned int __sw_hweight8(unsigned int w);
2290 extern unsigned int __sw_hweight16(unsigned int w);
2291 diff --git a/include/linux/hid.h b/include/linux/hid.h
2292 index 04bdf5477ec5..eda06f7ee84a 100644
2293 --- a/include/linux/hid.h
2294 +++ b/include/linux/hid.h
2295 @@ -453,7 +453,7 @@ struct hid_report_enum {
2296 };
2297
2298 #define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */
2299 -#define HID_MAX_BUFFER_SIZE 4096 /* 4kb */
2300 +#define HID_MAX_BUFFER_SIZE 8192 /* 8kb */
2301 #define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */
2302 #define HID_OUTPUT_FIFO_SIZE 64
2303
2304 diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
2305 index 1505cf7a4aaf..7a85a4ef6868 100644
2306 --- a/include/net/flow_dissector.h
2307 +++ b/include/net/flow_dissector.h
2308 @@ -4,6 +4,7 @@
2309 #include <linux/types.h>
2310 #include <linux/in6.h>
2311 #include <linux/siphash.h>
2312 +#include <linux/string.h>
2313 #include <uapi/linux/if_ether.h>
2314
2315 /**
2316 @@ -204,4 +205,12 @@ static inline void *skb_flow_dissector_target(struct flow_dissector *flow_dissec
2317 return ((char *)target_container) + flow_dissector->offset[key_id];
2318 }
2319
2320 +static inline void
2321 +flow_dissector_init_keys(struct flow_dissector_key_control *key_control,
2322 + struct flow_dissector_key_basic *key_basic)
2323 +{
2324 + memset(key_control, 0, sizeof(*key_control));
2325 + memset(key_basic, 0, sizeof(*key_basic));
2326 +}
2327 +
2328 #endif
2329 diff --git a/kernel/audit.c b/kernel/audit.c
2330 index 3461a3d874fe..53dcaa3b67bc 100644
2331 --- a/kernel/audit.c
2332 +++ b/kernel/audit.c
2333 @@ -751,13 +751,11 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
2334 audit_log_end(ab);
2335 }
2336
2337 -static int audit_set_feature(struct sk_buff *skb)
2338 +static int audit_set_feature(struct audit_features *uaf)
2339 {
2340 - struct audit_features *uaf;
2341 int i;
2342
2343 BUILD_BUG_ON(AUDIT_LAST_FEATURE + 1 > ARRAY_SIZE(audit_feature_names));
2344 - uaf = nlmsg_data(nlmsg_hdr(skb));
2345
2346 /* if there is ever a version 2 we should handle that here */
2347
2348 @@ -823,6 +821,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2349 {
2350 u32 seq;
2351 void *data;
2352 + int data_len;
2353 int err;
2354 struct audit_buffer *ab;
2355 u16 msg_type = nlh->nlmsg_type;
2356 @@ -846,6 +845,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2357 }
2358 seq = nlh->nlmsg_seq;
2359 data = nlmsg_data(nlh);
2360 + data_len = nlmsg_len(nlh);
2361
2362 switch (msg_type) {
2363 case AUDIT_GET: {
2364 @@ -867,7 +867,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2365 struct audit_status s;
2366 memset(&s, 0, sizeof(s));
2367 /* guard against past and future API changes */
2368 - memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
2369 + memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
2370 if (s.mask & AUDIT_STATUS_ENABLED) {
2371 err = audit_set_enabled(s.enabled);
2372 if (err < 0)
2373 @@ -930,7 +930,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2374 return err;
2375 break;
2376 case AUDIT_SET_FEATURE:
2377 - err = audit_set_feature(skb);
2378 + if (data_len < sizeof(struct audit_features))
2379 + return -EINVAL;
2380 + err = audit_set_feature(data);
2381 if (err)
2382 return err;
2383 break;
2384 @@ -942,6 +944,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2385
2386 err = audit_filter(msg_type, AUDIT_FILTER_USER);
2387 if (err == 1) { /* match or error */
2388 + char *str = data;
2389 +
2390 err = 0;
2391 if (msg_type == AUDIT_USER_TTY) {
2392 err = tty_audit_push();
2393 @@ -950,19 +954,17 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2394 }
2395 mutex_unlock(&audit_cmd_mutex);
2396 audit_log_common_recv_msg(&ab, msg_type);
2397 - if (msg_type != AUDIT_USER_TTY)
2398 + if (msg_type != AUDIT_USER_TTY) {
2399 + /* ensure NULL termination */
2400 + str[data_len - 1] = '\0';
2401 audit_log_format(ab, " msg='%.*s'",
2402 AUDIT_MESSAGE_TEXT_MAX,
2403 - (char *)data);
2404 - else {
2405 - int size;
2406 -
2407 + str);
2408 + } else {
2409 audit_log_format(ab, " data=");
2410 - size = nlmsg_len(nlh);
2411 - if (size > 0 &&
2412 - ((unsigned char *)data)[size - 1] == '\0')
2413 - size--;
2414 - audit_log_n_untrustedstring(ab, data, size);
2415 + if (data_len > 0 && str[data_len - 1] == '\0')
2416 + data_len--;
2417 + audit_log_n_untrustedstring(ab, str, data_len);
2418 }
2419 audit_set_portid(ab, NETLINK_CB(skb).portid);
2420 audit_log_end(ab);
2421 @@ -971,7 +973,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2422 break;
2423 case AUDIT_ADD_RULE:
2424 case AUDIT_DEL_RULE:
2425 - if (nlmsg_len(nlh) < sizeof(struct audit_rule_data))
2426 + if (data_len < sizeof(struct audit_rule_data))
2427 return -EINVAL;
2428 if (audit_enabled == AUDIT_LOCKED) {
2429 audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
2430 @@ -980,7 +982,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2431 return -EPERM;
2432 }
2433 err = audit_rule_change(msg_type, NETLINK_CB(skb).portid,
2434 - seq, data, nlmsg_len(nlh));
2435 + seq, data, data_len);
2436 break;
2437 case AUDIT_LIST_RULES:
2438 err = audit_list_rules_send(skb, seq);
2439 @@ -994,7 +996,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2440 case AUDIT_MAKE_EQUIV: {
2441 void *bufp = data;
2442 u32 sizes[2];
2443 - size_t msglen = nlmsg_len(nlh);
2444 + size_t msglen = data_len;
2445 char *old, *new;
2446
2447 err = -EINVAL;
2448 @@ -1070,7 +1072,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2449
2450 memset(&s, 0, sizeof(s));
2451 /* guard against past and future API changes */
2452 - memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
2453 + memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
2454 /* check if new data is valid */
2455 if ((s.enabled != 0 && s.enabled != 1) ||
2456 (s.log_passwd != 0 && s.log_passwd != 1))
2457 diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
2458 index 42b7251c597f..a71ff9965cba 100644
2459 --- a/kernel/auditfilter.c
2460 +++ b/kernel/auditfilter.c
2461 @@ -434,6 +434,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
2462 bufp = data->buf;
2463 for (i = 0; i < data->field_count; i++) {
2464 struct audit_field *f = &entry->rule.fields[i];
2465 + u32 f_val;
2466
2467 err = -EINVAL;
2468
2469 @@ -442,12 +443,12 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
2470 goto exit_free;
2471
2472 f->type = data->fields[i];
2473 - f->val = data->values[i];
2474 + f_val = data->values[i];
2475
2476 /* Support legacy tests for a valid loginuid */
2477 - if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
2478 + if ((f->type == AUDIT_LOGINUID) && (f_val == AUDIT_UID_UNSET)) {
2479 f->type = AUDIT_LOGINUID_SET;
2480 - f->val = 0;
2481 + f_val = 0;
2482 entry->rule.pflags |= AUDIT_LOGINUID_LEGACY;
2483 }
2484
2485 @@ -463,7 +464,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
2486 case AUDIT_SUID:
2487 case AUDIT_FSUID:
2488 case AUDIT_OBJ_UID:
2489 - f->uid = make_kuid(current_user_ns(), f->val);
2490 + f->uid = make_kuid(current_user_ns(), f_val);
2491 if (!uid_valid(f->uid))
2492 goto exit_free;
2493 break;
2494 @@ -472,11 +473,12 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
2495 case AUDIT_SGID:
2496 case AUDIT_FSGID:
2497 case AUDIT_OBJ_GID:
2498 - f->gid = make_kgid(current_user_ns(), f->val);
2499 + f->gid = make_kgid(current_user_ns(), f_val);
2500 if (!gid_valid(f->gid))
2501 goto exit_free;
2502 break;
2503 case AUDIT_ARCH:
2504 + f->val = f_val;
2505 entry->rule.arch_f = f;
2506 break;
2507 case AUDIT_SUBJ_USER:
2508 @@ -489,11 +491,13 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
2509 case AUDIT_OBJ_TYPE:
2510 case AUDIT_OBJ_LEV_LOW:
2511 case AUDIT_OBJ_LEV_HIGH:
2512 - str = audit_unpack_string(&bufp, &remain, f->val);
2513 - if (IS_ERR(str))
2514 + str = audit_unpack_string(&bufp, &remain, f_val);
2515 + if (IS_ERR(str)) {
2516 + err = PTR_ERR(str);
2517 goto exit_free;
2518 - entry->rule.buflen += f->val;
2519 -
2520 + }
2521 + entry->rule.buflen += f_val;
2522 + f->lsm_str = str;
2523 err = security_audit_rule_init(f->type, f->op, str,
2524 (void **)&f->lsm_rule);
2525 /* Keep currently invalid fields around in case they
2526 @@ -502,68 +506,71 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
2527 pr_warn("audit rule for LSM \'%s\' is invalid\n",
2528 str);
2529 err = 0;
2530 - }
2531 - if (err) {
2532 - kfree(str);
2533 + } else if (err)
2534 goto exit_free;
2535 - } else
2536 - f->lsm_str = str;
2537 break;
2538 case AUDIT_WATCH:
2539 - str = audit_unpack_string(&bufp, &remain, f->val);
2540 - if (IS_ERR(str))
2541 + str = audit_unpack_string(&bufp, &remain, f_val);
2542 + if (IS_ERR(str)) {
2543 + err = PTR_ERR(str);
2544 goto exit_free;
2545 - entry->rule.buflen += f->val;
2546 -
2547 - err = audit_to_watch(&entry->rule, str, f->val, f->op);
2548 + }
2549 + err = audit_to_watch(&entry->rule, str, f_val, f->op);
2550 if (err) {
2551 kfree(str);
2552 goto exit_free;
2553 }
2554 + entry->rule.buflen += f_val;
2555 break;
2556 case AUDIT_DIR:
2557 - str = audit_unpack_string(&bufp, &remain, f->val);
2558 - if (IS_ERR(str))
2559 + str = audit_unpack_string(&bufp, &remain, f_val);
2560 + if (IS_ERR(str)) {
2561 + err = PTR_ERR(str);
2562 goto exit_free;
2563 - entry->rule.buflen += f->val;
2564 -
2565 + }
2566 err = audit_make_tree(&entry->rule, str, f->op);
2567 kfree(str);
2568 if (err)
2569 goto exit_free;
2570 + entry->rule.buflen += f_val;
2571 break;
2572 case AUDIT_INODE:
2573 + f->val = f_val;
2574 err = audit_to_inode(&entry->rule, f);
2575 if (err)
2576 goto exit_free;
2577 break;
2578 case AUDIT_FILTERKEY:
2579 - if (entry->rule.filterkey || f->val > AUDIT_MAX_KEY_LEN)
2580 + if (entry->rule.filterkey || f_val > AUDIT_MAX_KEY_LEN)
2581 goto exit_free;
2582 - str = audit_unpack_string(&bufp, &remain, f->val);
2583 - if (IS_ERR(str))
2584 + str = audit_unpack_string(&bufp, &remain, f_val);
2585 + if (IS_ERR(str)) {
2586 + err = PTR_ERR(str);
2587 goto exit_free;
2588 - entry->rule.buflen += f->val;
2589 + }
2590 + entry->rule.buflen += f_val;
2591 entry->rule.filterkey = str;
2592 break;
2593 case AUDIT_EXE:
2594 - if (entry->rule.exe || f->val > PATH_MAX)
2595 + if (entry->rule.exe || f_val > PATH_MAX)
2596 goto exit_free;
2597 - str = audit_unpack_string(&bufp, &remain, f->val);
2598 + str = audit_unpack_string(&bufp, &remain, f_val);
2599 if (IS_ERR(str)) {
2600 err = PTR_ERR(str);
2601 goto exit_free;
2602 }
2603 - entry->rule.buflen += f->val;
2604 -
2605 - audit_mark = audit_alloc_mark(&entry->rule, str, f->val);
2606 + audit_mark = audit_alloc_mark(&entry->rule, str, f_val);
2607 if (IS_ERR(audit_mark)) {
2608 kfree(str);
2609 err = PTR_ERR(audit_mark);
2610 goto exit_free;
2611 }
2612 + entry->rule.buflen += f_val;
2613 entry->rule.exe = audit_mark;
2614 break;
2615 + default:
2616 + f->val = f_val;
2617 + break;
2618 }
2619 }
2620
2621 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
2622 index f32f73fa5d3a..5fbd77d52602 100644
2623 --- a/mm/huge_memory.c
2624 +++ b/mm/huge_memory.c
2625 @@ -2095,7 +2095,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
2626 unsigned long flags;
2627 pgoff_t end;
2628
2629 - VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
2630 + VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
2631 VM_BUG_ON_PAGE(!PageLocked(page), page);
2632 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
2633 VM_BUG_ON_PAGE(!PageCompound(page), page);
2634 diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
2635 index be4629c344a6..9f172906cc88 100644
2636 --- a/net/core/fib_rules.c
2637 +++ b/net/core/fib_rules.c
2638 @@ -640,7 +640,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
2639
2640 frh = nlmsg_data(nlh);
2641 frh->family = ops->family;
2642 - frh->table = rule->table;
2643 + frh->table = rule->table < 256 ? rule->table : RT_TABLE_COMPAT;
2644 if (nla_put_u32(skb, FRA_TABLE, rule->table))
2645 goto nla_put_failure;
2646 if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
2647 diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
2648 index 5da864997495..85c7e250c7a8 100644
2649 --- a/net/ipv6/ip6_fib.c
2650 +++ b/net/ipv6/ip6_fib.c
2651 @@ -784,8 +784,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
2652 found++;
2653 break;
2654 }
2655 - if (rt_can_ecmp)
2656 - fallback_ins = fallback_ins ?: ins;
2657 + fallback_ins = fallback_ins ?: ins;
2658 goto next_iter;
2659 }
2660
2661 @@ -825,7 +824,9 @@ next_iter:
2662 }
2663
2664 if (fallback_ins && !found) {
2665 - /* No ECMP-able route found, replace first non-ECMP one */
2666 + /* No matching route with same ecmp-able-ness found, replace
2667 + * first matching route
2668 + */
2669 ins = fallback_ins;
2670 iter = *ins;
2671 found++;
2672 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2673 index 27c93baed708..2c4743f2d50e 100644
2674 --- a/net/ipv6/route.c
2675 +++ b/net/ipv6/route.c
2676 @@ -3069,6 +3069,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg)
2677 */
2678 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
2679 NLM_F_REPLACE);
2680 + cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
2681 nhn++;
2682 }
2683
2684 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
2685 index ca7de02e0a6e..52f9742c438a 100644
2686 --- a/net/mac80211/util.c
2687 +++ b/net/mac80211/util.c
2688 @@ -943,16 +943,22 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
2689 elem_parse_failed = true;
2690 break;
2691 case WLAN_EID_VHT_OPERATION:
2692 - if (elen >= sizeof(struct ieee80211_vht_operation))
2693 + if (elen >= sizeof(struct ieee80211_vht_operation)) {
2694 elems->vht_operation = (void *)pos;
2695 - else
2696 - elem_parse_failed = true;
2697 + if (calc_crc)
2698 + crc = crc32_be(crc, pos - 2, elen + 2);
2699 + break;
2700 + }
2701 + elem_parse_failed = true;
2702 break;
2703 case WLAN_EID_OPMODE_NOTIF:
2704 - if (elen > 0)
2705 + if (elen > 0) {
2706 elems->opmode_notif = pos;
2707 - else
2708 - elem_parse_failed = true;
2709 + if (calc_crc)
2710 + crc = crc32_be(crc, pos - 2, elen + 2);
2711 + break;
2712 + }
2713 + elem_parse_failed = true;
2714 break;
2715 case WLAN_EID_MESH_ID:
2716 elems->mesh_id = pos;
2717 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
2718 index 025487436438..205865292ba3 100644
2719 --- a/net/netlink/af_netlink.c
2720 +++ b/net/netlink/af_netlink.c
2721 @@ -1003,7 +1003,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
2722 if (nlk->netlink_bind && groups) {
2723 int group;
2724
2725 - for (group = 0; group < nlk->ngroups; group++) {
2726 + /* nl_groups is a u32, so cap the maximum groups we can bind */
2727 + for (group = 0; group < BITS_PER_TYPE(u32); group++) {
2728 if (!test_bit(group, &groups))
2729 continue;
2730 err = nlk->netlink_bind(net, group + 1);
2731 @@ -1022,7 +1023,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
2732 netlink_insert(sk, nladdr->nl_pid) :
2733 netlink_autobind(sock);
2734 if (err) {
2735 - netlink_undo_bind(nlk->ngroups, groups, sk);
2736 + netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk);
2737 return err;
2738 }
2739 }
2740 diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
2741 index de03b7b49e05..18904313bd4e 100644
2742 --- a/net/sched/cls_flower.c
2743 +++ b/net/sched/cls_flower.c
2744 @@ -141,6 +141,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
2745 if (!atomic_read(&head->ht.nelems))
2746 return -1;
2747
2748 + flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
2749 fl_clear_masked_range(&skb_key, &head->mask);
2750
2751 info = skb_tunnel_info(skb);
2752 diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
2753 index bfd068679710..1a3c75347f48 100644
2754 --- a/net/sctp/sm_statefuns.c
2755 +++ b/net/sctp/sm_statefuns.c
2756 @@ -177,6 +177,16 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk,
2757 return 1;
2758 }
2759
2760 +/* Check for format error in an ABORT chunk */
2761 +static inline bool sctp_err_chunk_valid(struct sctp_chunk *chunk)
2762 +{
2763 + struct sctp_errhdr *err;
2764 +
2765 + sctp_walk_errors(err, chunk->chunk_hdr);
2766 +
2767 + return (void *)err == (void *)chunk->chunk_end;
2768 +}
2769 +
2770 /**********************************************************
2771 * These are the state functions for handling chunk events.
2772 **********************************************************/
2773 @@ -2159,6 +2169,9 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
2774 sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
2775 return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
2776
2777 + if (!sctp_err_chunk_valid(chunk))
2778 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2779 +
2780 return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
2781 }
2782
2783 @@ -2201,6 +2214,9 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(struct net *net,
2784 sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
2785 return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
2786
2787 + if (!sctp_err_chunk_valid(chunk))
2788 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2789 +
2790 /* Stop the T2-shutdown timer. */
2791 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
2792 SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
2793 @@ -2466,6 +2482,9 @@ sctp_disposition_t sctp_sf_do_9_1_abort(struct net *net,
2794 sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
2795 return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
2796
2797 + if (!sctp_err_chunk_valid(chunk))
2798 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2799 +
2800 return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
2801 }
2802
2803 @@ -2482,15 +2501,9 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
2804
2805 /* See if we have an error cause code in the chunk. */
2806 len = ntohs(chunk->chunk_hdr->length);
2807 - if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) {
2808 -
2809 - sctp_errhdr_t *err;
2810 - sctp_walk_errors(err, chunk->chunk_hdr);
2811 - if ((void *)err != (void *)chunk->chunk_end)
2812 - return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2813
2814 + if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
2815 error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
2816 - }
2817
2818 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
2819 /* ASSOC_FAILED will DELETE_TCB. */
2820 diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
2821 index e9e91298c70d..3cedf2c2b60b 100644
2822 --- a/net/wireless/ethtool.c
2823 +++ b/net/wireless/ethtool.c
2824 @@ -6,9 +6,13 @@
2825 void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2826 {
2827 struct wireless_dev *wdev = dev->ieee80211_ptr;
2828 + struct device *pdev = wiphy_dev(wdev->wiphy);
2829
2830 - strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name,
2831 - sizeof(info->driver));
2832 + if (pdev->driver)
2833 + strlcpy(info->driver, pdev->driver->name,
2834 + sizeof(info->driver));
2835 + else
2836 + strlcpy(info->driver, "N/A", sizeof(info->driver));
2837
2838 strlcpy(info->version, init_utsname()->release, sizeof(info->version));
2839
2840 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
2841 index bb19be78aed7..9823bef65e5e 100644
2842 --- a/net/wireless/nl80211.c
2843 +++ b/net/wireless/nl80211.c
2844 @@ -333,6 +333,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
2845 [NL80211_ATTR_CONTROL_PORT_ETHERTYPE] = { .type = NLA_U16 },
2846 [NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG },
2847 [NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
2848 + [NL80211_ATTR_STATUS_CODE] = { .type = NLA_U16 },
2849 [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
2850 [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
2851 [NL80211_ATTR_PID] = { .type = NLA_U32 },
2852 diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
2853 index 72b19e62f626..c0807b82399a 100644
2854 --- a/sound/soc/codecs/pcm512x.c
2855 +++ b/sound/soc/codecs/pcm512x.c
2856 @@ -1441,13 +1441,15 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
2857 }
2858
2859 pcm512x->sclk = devm_clk_get(dev, NULL);
2860 - if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER)
2861 - return -EPROBE_DEFER;
2862 + if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER) {
2863 + ret = -EPROBE_DEFER;
2864 + goto err;
2865 + }
2866 if (!IS_ERR(pcm512x->sclk)) {
2867 ret = clk_prepare_enable(pcm512x->sclk);
2868 if (ret != 0) {
2869 dev_err(dev, "Failed to enable SCLK: %d\n", ret);
2870 - return ret;
2871 + goto err;
2872 }
2873 }
2874
2875 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
2876 index 08bfc91c686f..a04672411bef 100644
2877 --- a/sound/soc/soc-dapm.c
2878 +++ b/sound/soc/soc-dapm.c
2879 @@ -4363,7 +4363,7 @@ static void soc_dapm_shutdown_dapm(struct snd_soc_dapm_context *dapm)
2880 continue;
2881 if (w->power) {
2882 dapm_seq_insert(w, &down_list, false);
2883 - w->power = 0;
2884 + w->new_power = 0;
2885 powerdown = 1;
2886 }
2887 }
2888 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
2889 index 280bb5cab87f..9df0c8102dc0 100644
2890 --- a/sound/soc/soc-pcm.c
2891 +++ b/sound/soc/soc-pcm.c
2892 @@ -2979,16 +2979,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
2893 ssize_t offset = 0;
2894
2895 /* FE state */
2896 - offset += snprintf(buf + offset, size - offset,
2897 + offset += scnprintf(buf + offset, size - offset,
2898 "[%s - %s]\n", fe->dai_link->name,
2899 stream ? "Capture" : "Playback");
2900
2901 - offset += snprintf(buf + offset, size - offset, "State: %s\n",
2902 + offset += scnprintf(buf + offset, size - offset, "State: %s\n",
2903 dpcm_state_string(fe->dpcm[stream].state));
2904
2905 if ((fe->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
2906 (fe->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
2907 - offset += snprintf(buf + offset, size - offset,
2908 + offset += scnprintf(buf + offset, size - offset,
2909 "Hardware Params: "
2910 "Format = %s, Channels = %d, Rate = %d\n",
2911 snd_pcm_format_name(params_format(params)),
2912 @@ -2996,10 +2996,10 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
2913 params_rate(params));
2914
2915 /* BEs state */
2916 - offset += snprintf(buf + offset, size - offset, "Backends:\n");
2917 + offset += scnprintf(buf + offset, size - offset, "Backends:\n");
2918
2919 if (list_empty(&fe->dpcm[stream].be_clients)) {
2920 - offset += snprintf(buf + offset, size - offset,
2921 + offset += scnprintf(buf + offset, size - offset,
2922 " No active DSP links\n");
2923 goto out;
2924 }
2925 @@ -3008,16 +3008,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
2926 struct snd_soc_pcm_runtime *be = dpcm->be;
2927 params = &dpcm->hw_params;
2928
2929 - offset += snprintf(buf + offset, size - offset,
2930 + offset += scnprintf(buf + offset, size - offset,
2931 "- %s\n", be->dai_link->name);
2932
2933 - offset += snprintf(buf + offset, size - offset,
2934 + offset += scnprintf(buf + offset, size - offset,
2935 " State: %s\n",
2936 dpcm_state_string(be->dpcm[stream].state));
2937
2938 if ((be->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
2939 (be->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
2940 - offset += snprintf(buf + offset, size - offset,
2941 + offset += scnprintf(buf + offset, size - offset,
2942 " Hardware Params: "
2943 "Format = %s, Channels = %d, Rate = %d\n",
2944 snd_pcm_format_name(params_format(params)),
2945 diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
2946 index a53fef0c673b..ade6abda9f46 100644
2947 --- a/tools/perf/ui/browsers/hists.c
2948 +++ b/tools/perf/ui/browsers/hists.c
2949 @@ -2930,6 +2930,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
2950
2951 continue;
2952 }
2953 + actions->ms.map = map;
2954 top = pstack__peek(browser->pstack);
2955 if (top == &browser->hists->dso_filter) {
2956 /*
2957 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
2958 index c0dff5337a50..4e4bb5dd2dcd 100644
2959 --- a/virt/kvm/kvm_main.c
2960 +++ b/virt/kvm/kvm_main.c
2961 @@ -2045,12 +2045,12 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2962 if (slots->generation != ghc->generation)
2963 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
2964
2965 - if (unlikely(!ghc->memslot))
2966 - return kvm_write_guest(kvm, ghc->gpa, data, len);
2967 -
2968 if (kvm_is_error_hva(ghc->hva))
2969 return -EFAULT;
2970
2971 + if (unlikely(!ghc->memslot))
2972 + return kvm_write_guest(kvm, ghc->gpa, data, len);
2973 +
2974 r = __copy_to_user((void __user *)ghc->hva, data, len);
2975 if (r)
2976 return -EFAULT;
2977 @@ -2071,12 +2071,12 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2978 if (slots->generation != ghc->generation)
2979 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
2980
2981 - if (unlikely(!ghc->memslot))
2982 - return kvm_read_guest(kvm, ghc->gpa, data, len);
2983 -
2984 if (kvm_is_error_hva(ghc->hva))
2985 return -EFAULT;
2986
2987 + if (unlikely(!ghc->memslot))
2988 + return kvm_read_guest(kvm, ghc->gpa, data, len);
2989 +
2990 r = __copy_from_user(data, (void __user *)ghc->hva, len);
2991 if (r)
2992 return -EFAULT;