Magellan Linux

Contents of /trunk/kernel-alx/patches-4.14/0174-4.14.75-all-fixes.patch

Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 118185 bytes
-added up to patches-4.14.79
1 diff --git a/Makefile b/Makefile
2 index cc0e65a8d7bf..7fc373c011c0 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 14
9 -SUBLEVEL = 74
10 +SUBLEVEL = 75
11 EXTRAVERSION =
12 NAME = Petit Gorille
13
14 diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
15 index 11859287c52a..c98b59ac0612 100644
16 --- a/arch/arc/include/asm/atomic.h
17 +++ b/arch/arc/include/asm/atomic.h
18 @@ -84,7 +84,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
19 "1: llock %[orig], [%[ctr]] \n" \
20 " " #asm_op " %[val], %[orig], %[i] \n" \
21 " scond %[val], [%[ctr]] \n" \
22 - " \n" \
23 + " bnz 1b \n" \
24 : [val] "=&r" (val), \
25 [orig] "=&r" (orig) \
26 : [ctr] "r" (&v->counter), \
27 diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
28 index 1b5e0e843c3a..7e2b3e360086 100644
29 --- a/arch/arm64/include/asm/jump_label.h
30 +++ b/arch/arm64/include/asm/jump_label.h
31 @@ -28,7 +28,7 @@
32
33 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
34 {
35 - asm goto("1: nop\n\t"
36 + asm_volatile_goto("1: nop\n\t"
37 ".pushsection __jump_table, \"aw\"\n\t"
38 ".align 3\n\t"
39 ".quad 1b, %l[l_yes], %c0\n\t"
40 @@ -42,7 +42,7 @@ l_yes:
41
42 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
43 {
44 - asm goto("1: b %l[l_yes]\n\t"
45 + asm_volatile_goto("1: b %l[l_yes]\n\t"
46 ".pushsection __jump_table, \"aw\"\n\t"
47 ".align 3\n\t"
48 ".quad 1b, %l[l_yes], %c0\n\t"
49 diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
50 index 5e4a59b3ec1b..2691a1857d20 100644
51 --- a/arch/hexagon/include/asm/bitops.h
52 +++ b/arch/hexagon/include/asm/bitops.h
53 @@ -211,7 +211,7 @@ static inline long ffz(int x)
54 * This is defined the same way as ffs.
55 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
56 */
57 -static inline long fls(int x)
58 +static inline int fls(int x)
59 {
60 int r;
61
62 @@ -232,7 +232,7 @@ static inline long fls(int x)
63 * the libc and compiler builtin ffs routines, therefore
64 * differs in spirit from the above ffz (man ffs).
65 */
66 -static inline long ffs(int x)
67 +static inline int ffs(int x)
68 {
69 int r;
70
71 diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
72 index 546792d176a4..564651bded42 100644
73 --- a/arch/hexagon/kernel/dma.c
74 +++ b/arch/hexagon/kernel/dma.c
75 @@ -59,7 +59,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
76 panic("Can't create %s() memory pool!", __func__);
77 else
78 gen_pool_add(coherent_pool,
79 - pfn_to_virt(max_low_pfn),
80 + (unsigned long)pfn_to_virt(max_low_pfn),
81 hexagon_coherent_pool_size, -1);
82 }
83
84 diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
85 index df9b53f40b1e..7ac7e21b137e 100644
86 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
87 +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
88 @@ -355,7 +355,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
89 unsigned long pp, key;
90 unsigned long v, orig_v, gr;
91 __be64 *hptep;
92 - int index;
93 + long int index;
94 int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
95
96 /* Get SLB entry */
97 diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
98 index a4170048a30b..17fbd07e4245 100644
99 --- a/arch/x86/events/intel/lbr.c
100 +++ b/arch/x86/events/intel/lbr.c
101 @@ -1250,4 +1250,8 @@ void intel_pmu_lbr_init_knl(void)
102
103 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
104 x86_pmu.lbr_sel_map = snb_lbr_sel_map;
105 +
106 + /* Knights Landing does have MISPREDICT bit */
107 + if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
108 + x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
109 }
110 diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
111 index a8a2a271b63d..43fe195f6dca 100644
112 --- a/drivers/crypto/caam/caamalg.c
113 +++ b/drivers/crypto/caam/caamalg.c
114 @@ -1511,8 +1511,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
115 edesc->src_nents = src_nents;
116 edesc->dst_nents = dst_nents;
117 edesc->sec4_sg_bytes = sec4_sg_bytes;
118 - edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
119 - desc_bytes;
120 + edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
121 + desc_bytes);
122 edesc->iv_dir = DMA_TO_DEVICE;
123
124 /* Make sure IV is located in a DMAable area */
125 @@ -1715,8 +1715,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
126 edesc->src_nents = src_nents;
127 edesc->dst_nents = dst_nents;
128 edesc->sec4_sg_bytes = sec4_sg_bytes;
129 - edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
130 - desc_bytes;
131 + edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
132 + desc_bytes);
133 edesc->iv_dir = DMA_FROM_DEVICE;
134
135 /* Make sure IV is located in a DMAable area */
136 diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
137 index 764be3e6933c..a98a25733a22 100644
138 --- a/drivers/crypto/mxs-dcp.c
139 +++ b/drivers/crypto/mxs-dcp.c
140 @@ -63,7 +63,7 @@ struct dcp {
141 struct dcp_coherent_block *coh;
142
143 struct completion completion[DCP_MAX_CHANS];
144 - struct mutex mutex[DCP_MAX_CHANS];
145 + spinlock_t lock[DCP_MAX_CHANS];
146 struct task_struct *thread[DCP_MAX_CHANS];
147 struct crypto_queue queue[DCP_MAX_CHANS];
148 };
149 @@ -349,13 +349,20 @@ static int dcp_chan_thread_aes(void *data)
150
151 int ret;
152
153 - do {
154 - __set_current_state(TASK_INTERRUPTIBLE);
155 + while (!kthread_should_stop()) {
156 + set_current_state(TASK_INTERRUPTIBLE);
157
158 - mutex_lock(&sdcp->mutex[chan]);
159 + spin_lock(&sdcp->lock[chan]);
160 backlog = crypto_get_backlog(&sdcp->queue[chan]);
161 arq = crypto_dequeue_request(&sdcp->queue[chan]);
162 - mutex_unlock(&sdcp->mutex[chan]);
163 + spin_unlock(&sdcp->lock[chan]);
164 +
165 + if (!backlog && !arq) {
166 + schedule();
167 + continue;
168 + }
169 +
170 + set_current_state(TASK_RUNNING);
171
172 if (backlog)
173 backlog->complete(backlog, -EINPROGRESS);
174 @@ -363,11 +370,8 @@ static int dcp_chan_thread_aes(void *data)
175 if (arq) {
176 ret = mxs_dcp_aes_block_crypt(arq);
177 arq->complete(arq, ret);
178 - continue;
179 }
180 -
181 - schedule();
182 - } while (!kthread_should_stop());
183 + }
184
185 return 0;
186 }
187 @@ -409,9 +413,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
188 rctx->ecb = ecb;
189 actx->chan = DCP_CHAN_CRYPTO;
190
191 - mutex_lock(&sdcp->mutex[actx->chan]);
192 + spin_lock(&sdcp->lock[actx->chan]);
193 ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
194 - mutex_unlock(&sdcp->mutex[actx->chan]);
195 + spin_unlock(&sdcp->lock[actx->chan]);
196
197 wake_up_process(sdcp->thread[actx->chan]);
198
199 @@ -640,13 +644,20 @@ static int dcp_chan_thread_sha(void *data)
200 struct ahash_request *req;
201 int ret, fini;
202
203 - do {
204 - __set_current_state(TASK_INTERRUPTIBLE);
205 + while (!kthread_should_stop()) {
206 + set_current_state(TASK_INTERRUPTIBLE);
207
208 - mutex_lock(&sdcp->mutex[chan]);
209 + spin_lock(&sdcp->lock[chan]);
210 backlog = crypto_get_backlog(&sdcp->queue[chan]);
211 arq = crypto_dequeue_request(&sdcp->queue[chan]);
212 - mutex_unlock(&sdcp->mutex[chan]);
213 + spin_unlock(&sdcp->lock[chan]);
214 +
215 + if (!backlog && !arq) {
216 + schedule();
217 + continue;
218 + }
219 +
220 + set_current_state(TASK_RUNNING);
221
222 if (backlog)
223 backlog->complete(backlog, -EINPROGRESS);
224 @@ -658,12 +669,8 @@ static int dcp_chan_thread_sha(void *data)
225 ret = dcp_sha_req_to_buf(arq);
226 fini = rctx->fini;
227 arq->complete(arq, ret);
228 - if (!fini)
229 - continue;
230 }
231 -
232 - schedule();
233 - } while (!kthread_should_stop());
234 + }
235
236 return 0;
237 }
238 @@ -721,9 +728,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
239 rctx->init = 1;
240 }
241
242 - mutex_lock(&sdcp->mutex[actx->chan]);
243 + spin_lock(&sdcp->lock[actx->chan]);
244 ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
245 - mutex_unlock(&sdcp->mutex[actx->chan]);
246 + spin_unlock(&sdcp->lock[actx->chan]);
247
248 wake_up_process(sdcp->thread[actx->chan]);
249 mutex_unlock(&actx->mutex);
250 @@ -983,7 +990,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
251 platform_set_drvdata(pdev, sdcp);
252
253 for (i = 0; i < DCP_MAX_CHANS; i++) {
254 - mutex_init(&sdcp->mutex[i]);
255 + spin_lock_init(&sdcp->lock[i]);
256 init_completion(&sdcp->completion[i]);
257 crypto_init_queue(&sdcp->queue[i], 50);
258 }
259 diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
260 index f172171668ee..7c470ae97f60 100644
261 --- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
262 +++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
263 @@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
264 struct adf_hw_device_data *hw_data;
265 char name[ADF_DEVICE_NAME_LENGTH];
266 unsigned int i, bar_nr;
267 - int ret, bar_mask;
268 + unsigned long bar_mask;
269 + int ret;
270
271 switch (ent->device) {
272 case ADF_C3XXX_PCI_DEVICE_ID:
273 @@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
274 /* Find and map all the device's BARS */
275 i = 0;
276 bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
277 - for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
278 - ADF_PCI_MAX_BARS * 2) {
279 + for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
280 struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
281
282 bar->base_addr = pci_resource_start(pdev, bar_nr);
283 diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
284 index 24ec908eb26c..613c7d5644ce 100644
285 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
286 +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
287 @@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
288 struct adf_hw_device_data *hw_data;
289 char name[ADF_DEVICE_NAME_LENGTH];
290 unsigned int i, bar_nr;
291 - int ret, bar_mask;
292 + unsigned long bar_mask;
293 + int ret;
294
295 switch (ent->device) {
296 case ADF_C3XXXIOV_PCI_DEVICE_ID:
297 @@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
298 /* Find and map all the device's BARS */
299 i = 0;
300 bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
301 - for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
302 - ADF_PCI_MAX_BARS * 2) {
303 + for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
304 struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
305
306 bar->base_addr = pci_resource_start(pdev, bar_nr);
307 diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
308 index 58a984c9c3ec..cb11d85d7bb3 100644
309 --- a/drivers/crypto/qat/qat_c62x/adf_drv.c
310 +++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
311 @@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
312 struct adf_hw_device_data *hw_data;
313 char name[ADF_DEVICE_NAME_LENGTH];
314 unsigned int i, bar_nr;
315 - int ret, bar_mask;
316 + unsigned long bar_mask;
317 + int ret;
318
319 switch (ent->device) {
320 case ADF_C62X_PCI_DEVICE_ID:
321 @@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
322 /* Find and map all the device's BARS */
323 i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
324 bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
325 - for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
326 - ADF_PCI_MAX_BARS * 2) {
327 + for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
328 struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
329
330 bar->base_addr = pci_resource_start(pdev, bar_nr);
331 diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
332 index b9f3e0e4fde9..278452b8ef81 100644
333 --- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
334 +++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
335 @@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
336 struct adf_hw_device_data *hw_data;
337 char name[ADF_DEVICE_NAME_LENGTH];
338 unsigned int i, bar_nr;
339 - int ret, bar_mask;
340 + unsigned long bar_mask;
341 + int ret;
342
343 switch (ent->device) {
344 case ADF_C62XIOV_PCI_DEVICE_ID:
345 @@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
346 /* Find and map all the device's BARS */
347 i = 0;
348 bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
349 - for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
350 - ADF_PCI_MAX_BARS * 2) {
351 + for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
352 struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
353
354 bar->base_addr = pci_resource_start(pdev, bar_nr);
355 diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
356 index 2ce01f010c74..07b741aed108 100644
357 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
358 +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
359 @@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
360 struct adf_hw_device_data *hw_data;
361 char name[ADF_DEVICE_NAME_LENGTH];
362 unsigned int i, bar_nr;
363 - int ret, bar_mask;
364 + unsigned long bar_mask;
365 + int ret;
366
367 switch (ent->device) {
368 case ADF_DH895XCC_PCI_DEVICE_ID:
369 @@ -237,8 +238,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
370 /* Find and map all the device's BARS */
371 i = 0;
372 bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
373 - for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
374 - ADF_PCI_MAX_BARS * 2) {
375 + for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
376 struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
377
378 bar->base_addr = pci_resource_start(pdev, bar_nr);
379 diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
380 index 26ab17bfc6da..3da0f951cb59 100644
381 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
382 +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
383 @@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
384 struct adf_hw_device_data *hw_data;
385 char name[ADF_DEVICE_NAME_LENGTH];
386 unsigned int i, bar_nr;
387 - int ret, bar_mask;
388 + unsigned long bar_mask;
389 + int ret;
390
391 switch (ent->device) {
392 case ADF_DH895XCCIOV_PCI_DEVICE_ID:
393 @@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
394 /* Find and map all the device's BARS */
395 i = 0;
396 bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
397 - for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
398 - ADF_PCI_MAX_BARS * 2) {
399 + for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
400 struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
401
402 bar->base_addr = pci_resource_start(pdev, bar_nr);
403 diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
404 index e717f8dc3966..202d367a21e4 100644
405 --- a/drivers/gpio/gpio-adp5588.c
406 +++ b/drivers/gpio/gpio-adp5588.c
407 @@ -41,6 +41,8 @@ struct adp5588_gpio {
408 uint8_t int_en[3];
409 uint8_t irq_mask[3];
410 uint8_t irq_stat[3];
411 + uint8_t int_input_en[3];
412 + uint8_t int_lvl_cached[3];
413 };
414
415 static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
416 @@ -173,12 +175,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
417 struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
418 int i;
419
420 - for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
421 + for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
422 + if (dev->int_input_en[i]) {
423 + mutex_lock(&dev->lock);
424 + dev->dir[i] &= ~dev->int_input_en[i];
425 + dev->int_input_en[i] = 0;
426 + adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
427 + dev->dir[i]);
428 + mutex_unlock(&dev->lock);
429 + }
430 +
431 + if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
432 + dev->int_lvl_cached[i] = dev->int_lvl[i];
433 + adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
434 + dev->int_lvl[i]);
435 + }
436 +
437 if (dev->int_en[i] ^ dev->irq_mask[i]) {
438 dev->int_en[i] = dev->irq_mask[i];
439 adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
440 dev->int_en[i]);
441 }
442 + }
443
444 mutex_unlock(&dev->irq_lock);
445 }
446 @@ -221,9 +239,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
447 else
448 return -EINVAL;
449
450 - adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
451 - adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
452 - dev->int_lvl[bank]);
453 + dev->int_input_en[bank] |= bit;
454
455 return 0;
456 }
457 diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
458 index 70b3c556f6cf..33d4bd505b5b 100644
459 --- a/drivers/gpio/gpiolib-acpi.c
460 +++ b/drivers/gpio/gpiolib-acpi.c
461 @@ -25,7 +25,6 @@
462
463 struct acpi_gpio_event {
464 struct list_head node;
465 - struct list_head initial_sync_list;
466 acpi_handle handle;
467 unsigned int pin;
468 unsigned int irq;
469 @@ -49,10 +48,19 @@ struct acpi_gpio_chip {
470 struct mutex conn_lock;
471 struct gpio_chip *chip;
472 struct list_head events;
473 + struct list_head deferred_req_irqs_list_entry;
474 };
475
476 -static LIST_HEAD(acpi_gpio_initial_sync_list);
477 -static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
478 +/*
479 + * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
480 + * (so builtin drivers) we register the ACPI GpioInt event handlers from a
481 + * late_initcall_sync handler, so that other builtin drivers can register their
482 + * OpRegions before the event handlers can run. This list contains gpiochips
483 + * for which the acpi_gpiochip_request_interrupts() has been deferred.
484 + */
485 +static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
486 +static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
487 +static bool acpi_gpio_deferred_req_irqs_done;
488
489 static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
490 {
491 @@ -146,21 +154,6 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
492 return gpiochip_get_desc(chip, offset);
493 }
494
495 -static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
496 -{
497 - mutex_lock(&acpi_gpio_initial_sync_list_lock);
498 - list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
499 - mutex_unlock(&acpi_gpio_initial_sync_list_lock);
500 -}
501 -
502 -static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
503 -{
504 - mutex_lock(&acpi_gpio_initial_sync_list_lock);
505 - if (!list_empty(&event->initial_sync_list))
506 - list_del_init(&event->initial_sync_list);
507 - mutex_unlock(&acpi_gpio_initial_sync_list_lock);
508 -}
509 -
510 static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
511 {
512 struct acpi_gpio_event *event = data;
513 @@ -247,7 +240,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
514
515 gpiod_direction_input(desc);
516
517 - value = gpiod_get_value(desc);
518 + value = gpiod_get_value_cansleep(desc);
519
520 ret = gpiochip_lock_as_irq(chip, pin);
521 if (ret) {
522 @@ -290,7 +283,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
523 event->irq = irq;
524 event->pin = pin;
525 event->desc = desc;
526 - INIT_LIST_HEAD(&event->initial_sync_list);
527
528 ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
529 "ACPI:Event", event);
530 @@ -312,10 +304,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
531 * may refer to OperationRegions from other (builtin) drivers which
532 * may be probed after us.
533 */
534 - if (handler == acpi_gpio_irq_handler &&
535 - (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
536 - ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
537 - acpi_gpio_add_to_initial_sync_list(event);
538 + if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
539 + ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
540 + handler(event->irq, event);
541
542 return AE_OK;
543
544 @@ -344,6 +335,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
545 struct acpi_gpio_chip *acpi_gpio;
546 acpi_handle handle;
547 acpi_status status;
548 + bool defer;
549
550 if (!chip->parent || !chip->to_irq)
551 return;
552 @@ -356,6 +348,16 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
553 if (ACPI_FAILURE(status))
554 return;
555
556 + mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
557 + defer = !acpi_gpio_deferred_req_irqs_done;
558 + if (defer)
559 + list_add(&acpi_gpio->deferred_req_irqs_list_entry,
560 + &acpi_gpio_deferred_req_irqs_list);
561 + mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
562 +
563 + if (defer)
564 + return;
565 +
566 acpi_walk_resources(handle, "_AEI",
567 acpi_gpiochip_request_interrupt, acpi_gpio);
568 }
569 @@ -386,11 +388,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
570 if (ACPI_FAILURE(status))
571 return;
572
573 + mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
574 + if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry))
575 + list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
576 + mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
577 +
578 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
579 struct gpio_desc *desc;
580
581 - acpi_gpio_del_from_initial_sync_list(event);
582 -
583 if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
584 disable_irq_wake(event->irq);
585
586 @@ -1101,6 +1106,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
587
588 acpi_gpio->chip = chip;
589 INIT_LIST_HEAD(&acpi_gpio->events);
590 + INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry);
591
592 status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio);
593 if (ACPI_FAILURE(status)) {
594 @@ -1247,20 +1253,28 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
595 return con_id == NULL;
596 }
597
598 -/* Sync the initial state of handlers after all builtin drivers have probed */
599 -static int acpi_gpio_initial_sync(void)
600 +/* Run deferred acpi_gpiochip_request_interrupts() */
601 +static int acpi_gpio_handle_deferred_request_interrupts(void)
602 {
603 - struct acpi_gpio_event *event, *ep;
604 + struct acpi_gpio_chip *acpi_gpio, *tmp;
605 +
606 + mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
607 + list_for_each_entry_safe(acpi_gpio, tmp,
608 + &acpi_gpio_deferred_req_irqs_list,
609 + deferred_req_irqs_list_entry) {
610 + acpi_handle handle;
611
612 - mutex_lock(&acpi_gpio_initial_sync_list_lock);
613 - list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
614 - initial_sync_list) {
615 - acpi_evaluate_object(event->handle, NULL, NULL, NULL);
616 - list_del_init(&event->initial_sync_list);
617 + handle = ACPI_HANDLE(acpi_gpio->chip->parent);
618 + acpi_walk_resources(handle, "_AEI",
619 + acpi_gpiochip_request_interrupt, acpi_gpio);
620 +
621 + list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
622 }
623 - mutex_unlock(&acpi_gpio_initial_sync_list_lock);
624 +
625 + acpi_gpio_deferred_req_irqs_done = true;
626 + mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
627
628 return 0;
629 }
630 /* We must use _sync so that this runs after the first deferred_probe run */
631 -late_initcall_sync(acpi_gpio_initial_sync);
632 +late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
633 diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
634 index ba38f530e403..ee8c046cab62 100644
635 --- a/drivers/gpio/gpiolib-of.c
636 +++ b/drivers/gpio/gpiolib-of.c
637 @@ -31,6 +31,7 @@ static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
638 struct of_phandle_args *gpiospec = data;
639
640 return chip->gpiodev->dev.of_node == gpiospec->np &&
641 + chip->of_xlate &&
642 chip->of_xlate(chip, gpiospec, NULL) >= 0;
643 }
644
645 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
646 index 7e0bfd7347f6..7d5de4ef4f22 100644
647 --- a/drivers/gpio/gpiolib.c
648 +++ b/drivers/gpio/gpiolib.c
649 @@ -489,7 +489,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
650 if (ret)
651 goto out_free_descs;
652 lh->descs[i] = desc;
653 - count = i;
654 + count = i + 1;
655
656 if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
657 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
658 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
659 index 1ae5ae8c45a4..1a75a6b9ab2f 100644
660 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
661 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
662 @@ -569,6 +569,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
663 { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
664 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
665 { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
666 + { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
667 { 0x1002, 0x67DF, 0x1028, 0x0774, AMDGPU_PX_QUIRK_FORCE_ATPX },
668 { 0, 0, 0, 0, 0 },
669 };
670 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
671 index 5f892ad6476e..44aa58ab55d0 100644
672 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
673 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
674 @@ -37,6 +37,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
675 {
676 struct drm_gem_object *gobj;
677 unsigned long size;
678 + int r;
679
680 gobj = drm_gem_object_lookup(p->filp, data->handle);
681 if (gobj == NULL)
682 @@ -48,20 +49,26 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
683 p->uf_entry.tv.shared = true;
684 p->uf_entry.user_pages = NULL;
685
686 - size = amdgpu_bo_size(p->uf_entry.robj);
687 - if (size != PAGE_SIZE || (data->offset + 8) > size)
688 - return -EINVAL;
689 -
690 - *offset = data->offset;
691 -
692 drm_gem_object_put_unlocked(gobj);
693
694 + size = amdgpu_bo_size(p->uf_entry.robj);
695 + if (size != PAGE_SIZE || (data->offset + 8) > size) {
696 + r = -EINVAL;
697 + goto error_unref;
698 + }
699 +
700 if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
701 - amdgpu_bo_unref(&p->uf_entry.robj);
702 - return -EINVAL;
703 + r = -EINVAL;
704 + goto error_unref;
705 }
706
707 + *offset = data->offset;
708 +
709 return 0;
710 +
711 +error_unref:
712 + amdgpu_bo_unref(&p->uf_entry.robj);
713 + return r;
714 }
715
716 static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
717 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
718 index e7fa67063cdc..cb9e1cd456b8 100644
719 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
720 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
721 @@ -1142,7 +1142,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
722 for (count = 0; count < num_se; count++) {
723 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
724 cgs_write_register(hwmgr->device, reg, data);
725 - result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
726 + result = vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
727 result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT);
728 result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT);
729 result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT);
730 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
731 index 7c5bed29ffef..6160a6158cf2 100644
732 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
733 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
734 @@ -412,14 +412,10 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
735 }
736
737 static void
738 -nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
739 +nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
740 {
741 struct nvkm_dp *dp = nvkm_dp(outp);
742
743 - /* Prevent link from being retrained if sink sends an IRQ. */
744 - atomic_set(&dp->lt.done, 0);
745 - ior->dp.nr = 0;
746 -
747 /* Execute DisableLT script from DP Info Table. */
748 nvbios_init(&ior->disp->engine.subdev, dp->info.script[4],
749 init.outp = &dp->outp.info;
750 @@ -428,6 +424,16 @@ nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
751 );
752 }
753
754 +static void
755 +nvkm_dp_release(struct nvkm_outp *outp)
756 +{
757 + struct nvkm_dp *dp = nvkm_dp(outp);
758 +
759 + /* Prevent link from being retrained if sink sends an IRQ. */
760 + atomic_set(&dp->lt.done, 0);
761 + dp->outp.ior->dp.nr = 0;
762 +}
763 +
764 static int
765 nvkm_dp_acquire(struct nvkm_outp *outp)
766 {
767 @@ -576,6 +582,7 @@ nvkm_dp_func = {
768 .fini = nvkm_dp_fini,
769 .acquire = nvkm_dp_acquire,
770 .release = nvkm_dp_release,
771 + .disable = nvkm_dp_disable,
772 };
773
774 static int
775 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
776 index 0c570dbd3021..bc18a96bc61a 100644
777 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
778 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
779 @@ -436,11 +436,11 @@ nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head)
780 nv50_disp_super_ied_off(head, ior, 2);
781
782 /* If we're shutting down the OR's only active head, execute
783 - * the output path's release function.
784 + * the output path's disable function.
785 */
786 if (ior->arm.head == (1 << head->id)) {
787 - if ((outp = ior->arm.outp) && outp->func->release)
788 - outp->func->release(outp, ior);
789 + if ((outp = ior->arm.outp) && outp->func->disable)
790 + outp->func->disable(outp, ior);
791 }
792 }
793
794 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
795 index be9e7f8c3b23..bbba77ff9385 100644
796 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
797 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
798 @@ -93,6 +93,8 @@ nvkm_outp_release(struct nvkm_outp *outp, u8 user)
799 if (ior) {
800 outp->acquired &= ~user;
801 if (!outp->acquired) {
802 + if (outp->func->release && outp->ior)
803 + outp->func->release(outp);
804 outp->ior->asy.outp = NULL;
805 outp->ior = NULL;
806 }
807 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
808 index ea84d7d5741a..97196f802924 100644
809 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
810 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
811 @@ -41,7 +41,8 @@ struct nvkm_outp_func {
812 void (*init)(struct nvkm_outp *);
813 void (*fini)(struct nvkm_outp *);
814 int (*acquire)(struct nvkm_outp *);
815 - void (*release)(struct nvkm_outp *, struct nvkm_ior *);
816 + void (*release)(struct nvkm_outp *);
817 + void (*disable)(struct nvkm_outp *, struct nvkm_ior *);
818 };
819
820 #define OUTP_MSG(o,l,f,a...) do { \
821 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
822 index 1730371933df..be0dd6074b57 100644
823 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
824 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
825 @@ -158,7 +158,8 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
826 }
827
828 /* load and execute some other ucode image (bios therm?) */
829 - return pmu_load(init, 0x01, post, NULL, NULL);
830 + pmu_load(init, 0x01, post, NULL, NULL);
831 + return 0;
832 }
833
834 static const struct nvkm_devinit_func
835 diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
836 index 25b7bd56ae11..1cb41992aaa1 100644
837 --- a/drivers/hid/hid-apple.c
838 +++ b/drivers/hid/hid-apple.c
839 @@ -335,7 +335,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
840 struct hid_field *field, struct hid_usage *usage,
841 unsigned long **bit, int *max)
842 {
843 - if (usage->hid == (HID_UP_CUSTOM | 0x0003)) {
844 + if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
845 + usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
846 /* The fn key on Apple USB keyboards */
847 set_bit(EV_REP, hi->input->evbit);
848 hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
849 @@ -472,6 +473,12 @@ static const struct hid_device_id apple_devices[] = {
850 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
851 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
852 .driver_data = APPLE_HAS_FN },
853 + { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
854 + .driver_data = APPLE_HAS_FN },
855 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
856 + .driver_data = APPLE_HAS_FN },
857 + { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
858 + .driver_data = APPLE_HAS_FN },
859 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
860 .driver_data = APPLE_HAS_FN },
861 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
862 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
863 index 81ee1d026648..3fc8c0d67592 100644
864 --- a/drivers/hid/hid-ids.h
865 +++ b/drivers/hid/hid-ids.h
866 @@ -85,6 +85,7 @@
867 #define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
868
869 #define USB_VENDOR_ID_APPLE 0x05ac
870 +#define BT_VENDOR_ID_APPLE 0x004c
871 #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
872 #define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
873 #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
874 @@ -154,6 +155,7 @@
875 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
876 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257
877 #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI 0x0267
878 +#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI 0x026c
879 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
880 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
881 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
882 @@ -924,6 +926,7 @@
883 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
884 #define USB_DEVICE_ID_SAITEK_PS1000 0x0621
885 #define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb
886 +#define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION 0x0ccd
887 #define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
888 #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
889 #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
890 diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
891 index 39e642686ff0..683861f324e3 100644
892 --- a/drivers/hid/hid-saitek.c
893 +++ b/drivers/hid/hid-saitek.c
894 @@ -183,6 +183,8 @@ static const struct hid_device_id saitek_devices[] = {
895 .driver_data = SAITEK_RELEASE_MODE_RAT7 },
896 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
897 .driver_data = SAITEK_RELEASE_MODE_RAT7 },
898 + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION),
899 + .driver_data = SAITEK_RELEASE_MODE_RAT7 },
900 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9),
901 .driver_data = SAITEK_RELEASE_MODE_RAT7 },
902 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
903 diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
904 index 25363fc571bc..faba542d1b07 100644
905 --- a/drivers/hid/hid-sensor-hub.c
906 +++ b/drivers/hid/hid-sensor-hub.c
907 @@ -579,6 +579,28 @@ void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev)
908 }
909 EXPORT_SYMBOL_GPL(sensor_hub_device_close);
910
911 +static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc,
912 + unsigned int *rsize)
913 +{
914 + /*
915 + * Checks if the report descriptor of Thinkpad Helix 2 has a logical
916 + * minimum for magnetic flux axis greater than the maximum.
917 + */
918 + if (hdev->product == USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA &&
919 + *rsize == 2558 && rdesc[913] == 0x17 && rdesc[914] == 0x40 &&
920 + rdesc[915] == 0x81 && rdesc[916] == 0x08 &&
921 + rdesc[917] == 0x00 && rdesc[918] == 0x27 &&
922 + rdesc[921] == 0x07 && rdesc[922] == 0x00) {
923 + /* Sets negative logical minimum for mag x, y and z */
924 + rdesc[914] = rdesc[935] = rdesc[956] = 0xc0;
925 + rdesc[915] = rdesc[936] = rdesc[957] = 0x7e;
926 + rdesc[916] = rdesc[937] = rdesc[958] = 0xf7;
927 + rdesc[917] = rdesc[938] = rdesc[959] = 0xff;
928 + }
929 +
930 + return rdesc;
931 +}
932 +
933 static int sensor_hub_probe(struct hid_device *hdev,
934 const struct hid_device_id *id)
935 {
936 @@ -742,6 +764,7 @@ static struct hid_driver sensor_hub_driver = {
937 .probe = sensor_hub_probe,
938 .remove = sensor_hub_remove,
939 .raw_event = sensor_hub_raw_event,
940 + .report_fixup = sensor_hub_report_fixup,
941 #ifdef CONFIG_PM
942 .suspend = sensor_hub_suspend,
943 .resume = sensor_hub_resume,
944 diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
945 index f41901f80b64..5449fc59b7f5 100644
946 --- a/drivers/hv/connection.c
947 +++ b/drivers/hv/connection.c
948 @@ -74,6 +74,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
949 __u32 version)
950 {
951 int ret = 0;
952 + unsigned int cur_cpu;
953 struct vmbus_channel_initiate_contact *msg;
954 unsigned long flags;
955
956 @@ -96,9 +97,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
957 * the CPU attempting to connect may not be CPU 0.
958 */
959 if (version >= VERSION_WIN8_1) {
960 - msg->target_vcpu =
961 - hv_cpu_number_to_vp_number(smp_processor_id());
962 - vmbus_connection.connect_cpu = smp_processor_id();
963 + cur_cpu = get_cpu();
964 + msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu);
965 + vmbus_connection.connect_cpu = cur_cpu;
966 + put_cpu();
967 } else {
968 msg->target_vcpu = 0;
969 vmbus_connection.connect_cpu = 0;
970 diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
971 index 9918bdd81619..a403e8579b65 100644
972 --- a/drivers/i2c/busses/i2c-uniphier-f.c
973 +++ b/drivers/i2c/busses/i2c-uniphier-f.c
974 @@ -401,11 +401,8 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap,
975 return ret;
976
977 for (msg = msgs; msg < emsg; msg++) {
978 - /* If next message is read, skip the stop condition */
979 - bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
980 - /* but, force it if I2C_M_STOP is set */
981 - if (msg->flags & I2C_M_STOP)
982 - stop = true;
983 + /* Emit STOP if it is the last message or I2C_M_STOP is set. */
984 + bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
985
986 ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
987 if (ret)
988 diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
989 index bb181b088291..454f914ae66d 100644
990 --- a/drivers/i2c/busses/i2c-uniphier.c
991 +++ b/drivers/i2c/busses/i2c-uniphier.c
992 @@ -248,11 +248,8 @@ static int uniphier_i2c_master_xfer(struct i2c_adapter *adap,
993 return ret;
994
995 for (msg = msgs; msg < emsg; msg++) {
996 - /* If next message is read, skip the stop condition */
997 - bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
998 - /* but, force it if I2C_M_STOP is set */
999 - if (msg->flags & I2C_M_STOP)
1000 - stop = true;
1001 + /* Emit STOP if it is the last message or I2C_M_STOP is set. */
1002 + bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
1003
1004 ret = uniphier_i2c_master_xfer_one(adap, msg, stop);
1005 if (ret)
1006 diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
1007 index d70e2e53d6a7..557214202eff 100644
1008 --- a/drivers/iio/temperature/maxim_thermocouple.c
1009 +++ b/drivers/iio/temperature/maxim_thermocouple.c
1010 @@ -267,7 +267,6 @@ static int maxim_thermocouple_remove(struct spi_device *spi)
1011 static const struct spi_device_id maxim_thermocouple_id[] = {
1012 {"max6675", MAX6675},
1013 {"max31855", MAX31855},
1014 - {"max31856", MAX31855},
1015 {},
1016 };
1017 MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
1018 diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
1019 index a22b992cde38..16423d7ab599 100644
1020 --- a/drivers/infiniband/core/ucma.c
1021 +++ b/drivers/infiniband/core/ucma.c
1022 @@ -124,6 +124,8 @@ static DEFINE_MUTEX(mut);
1023 static DEFINE_IDR(ctx_idr);
1024 static DEFINE_IDR(multicast_idr);
1025
1026 +static const struct file_operations ucma_fops;
1027 +
1028 static inline struct ucma_context *_ucma_find_context(int id,
1029 struct ucma_file *file)
1030 {
1031 @@ -1564,6 +1566,10 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
1032 f = fdget(cmd.fd);
1033 if (!f.file)
1034 return -ENOENT;
1035 + if (f.file->f_op != &ucma_fops) {
1036 + ret = -EINVAL;
1037 + goto file_put;
1038 + }
1039
1040 /* Validate current fd and prevent destruction of id. */
1041 ctx = ucma_get_ctx(f.file->private_data, cmd.id);
1042 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
1043 index 01746e7b90de..9137030423cd 100644
1044 --- a/drivers/iommu/amd_iommu.c
1045 +++ b/drivers/iommu/amd_iommu.c
1046 @@ -3071,7 +3071,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
1047 return 0;
1048
1049 offset_mask = pte_pgsize - 1;
1050 - __pte = *pte & PM_ADDR_MASK;
1051 + __pte = __sme_clr(*pte & PM_ADDR_MASK);
1052
1053 return (__pte & ~offset_mask) | (iova & offset_mask);
1054 }
1055 diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
1056 index 38a2ac24428e..151211b4cb1b 100644
1057 --- a/drivers/md/dm-raid.c
1058 +++ b/drivers/md/dm-raid.c
1059 @@ -3061,6 +3061,11 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1060 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
1061 rs_set_new(rs);
1062 } else if (rs_is_recovering(rs)) {
1063 + /* Rebuild particular devices */
1064 + if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
1065 + set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
1066 + rs_setup_recovery(rs, MaxSector);
1067 + }
1068 /* A recovering raid set may be resized */
1069 ; /* skip setup rs */
1070 } else if (rs_is_reshaping(rs)) {
1071 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
1072 index 36ef284ad086..45ff8fd00248 100644
1073 --- a/drivers/md/dm-thin-metadata.c
1074 +++ b/drivers/md/dm-thin-metadata.c
1075 @@ -188,6 +188,12 @@ struct dm_pool_metadata {
1076 unsigned long flags;
1077 sector_t data_block_size;
1078
1079 + /*
1080 + * We reserve a section of the metadata for commit overhead.
1081 + * All reported space does *not* include this.
1082 + */
1083 + dm_block_t metadata_reserve;
1084 +
1085 /*
1086 * Set if a transaction has to be aborted but the attempt to roll back
1087 * to the previous (good) transaction failed. The only pool metadata
1088 @@ -825,6 +831,20 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
1089 return dm_tm_commit(pmd->tm, sblock);
1090 }
1091
1092 +static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
1093 +{
1094 + int r;
1095 + dm_block_t total;
1096 + dm_block_t max_blocks = 4096; /* 16M */
1097 +
1098 + r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
1099 + if (r) {
1100 + DMERR("could not get size of metadata device");
1101 + pmd->metadata_reserve = max_blocks;
1102 + } else
1103 + pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
1104 +}
1105 +
1106 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
1107 sector_t data_block_size,
1108 bool format_device)
1109 @@ -858,6 +878,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
1110 return ERR_PTR(r);
1111 }
1112
1113 + __set_metadata_reserve(pmd);
1114 +
1115 return pmd;
1116 }
1117
1118 @@ -1829,6 +1851,13 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
1119 down_read(&pmd->root_lock);
1120 if (!pmd->fail_io)
1121 r = dm_sm_get_nr_free(pmd->metadata_sm, result);
1122 +
1123 + if (!r) {
1124 + if (*result < pmd->metadata_reserve)
1125 + *result = 0;
1126 + else
1127 + *result -= pmd->metadata_reserve;
1128 + }
1129 up_read(&pmd->root_lock);
1130
1131 return r;
1132 @@ -1941,8 +1970,11 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
1133 int r = -EINVAL;
1134
1135 down_write(&pmd->root_lock);
1136 - if (!pmd->fail_io)
1137 + if (!pmd->fail_io) {
1138 r = __resize_space_map(pmd->metadata_sm, new_count);
1139 + if (!r)
1140 + __set_metadata_reserve(pmd);
1141 + }
1142 up_write(&pmd->root_lock);
1143
1144 return r;
1145 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1146 index 6cf9ad4e4e16..699c40c7fe60 100644
1147 --- a/drivers/md/dm-thin.c
1148 +++ b/drivers/md/dm-thin.c
1149 @@ -200,7 +200,13 @@ struct dm_thin_new_mapping;
1150 enum pool_mode {
1151 PM_WRITE, /* metadata may be changed */
1152 PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
1153 +
1154 + /*
1155 + * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
1156 + */
1157 + PM_OUT_OF_METADATA_SPACE,
1158 PM_READ_ONLY, /* metadata may not be changed */
1159 +
1160 PM_FAIL, /* all I/O fails */
1161 };
1162
1163 @@ -1382,7 +1388,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1164
1165 static void requeue_bios(struct pool *pool);
1166
1167 -static void check_for_space(struct pool *pool)
1168 +static bool is_read_only_pool_mode(enum pool_mode mode)
1169 +{
1170 + return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
1171 +}
1172 +
1173 +static bool is_read_only(struct pool *pool)
1174 +{
1175 + return is_read_only_pool_mode(get_pool_mode(pool));
1176 +}
1177 +
1178 +static void check_for_metadata_space(struct pool *pool)
1179 +{
1180 + int r;
1181 + const char *ooms_reason = NULL;
1182 + dm_block_t nr_free;
1183 +
1184 + r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
1185 + if (r)
1186 + ooms_reason = "Could not get free metadata blocks";
1187 + else if (!nr_free)
1188 + ooms_reason = "No free metadata blocks";
1189 +
1190 + if (ooms_reason && !is_read_only(pool)) {
1191 + DMERR("%s", ooms_reason);
1192 + set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
1193 + }
1194 +}
1195 +
1196 +static void check_for_data_space(struct pool *pool)
1197 {
1198 int r;
1199 dm_block_t nr_free;
1200 @@ -1408,14 +1442,16 @@ static int commit(struct pool *pool)
1201 {
1202 int r;
1203
1204 - if (get_pool_mode(pool) >= PM_READ_ONLY)
1205 + if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
1206 return -EINVAL;
1207
1208 r = dm_pool_commit_metadata(pool->pmd);
1209 if (r)
1210 metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
1211 - else
1212 - check_for_space(pool);
1213 + else {
1214 + check_for_metadata_space(pool);
1215 + check_for_data_space(pool);
1216 + }
1217
1218 return r;
1219 }
1220 @@ -1481,6 +1517,19 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1221 return r;
1222 }
1223
1224 + r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
1225 + if (r) {
1226 + metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
1227 + return r;
1228 + }
1229 +
1230 + if (!free_blocks) {
1231 + /* Let's commit before we use up the metadata reserve. */
1232 + r = commit(pool);
1233 + if (r)
1234 + return r;
1235 + }
1236 +
1237 return 0;
1238 }
1239
1240 @@ -1512,6 +1561,7 @@ static blk_status_t should_error_unserviceable_bio(struct pool *pool)
1241 case PM_OUT_OF_DATA_SPACE:
1242 return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
1243
1244 + case PM_OUT_OF_METADATA_SPACE:
1245 case PM_READ_ONLY:
1246 case PM_FAIL:
1247 return BLK_STS_IOERR;
1248 @@ -2475,8 +2525,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1249 error_retry_list(pool);
1250 break;
1251
1252 + case PM_OUT_OF_METADATA_SPACE:
1253 case PM_READ_ONLY:
1254 - if (old_mode != new_mode)
1255 + if (!is_read_only_pool_mode(old_mode))
1256 notify_of_pool_mode_change(pool, "read-only");
1257 dm_pool_metadata_read_only(pool->pmd);
1258 pool->process_bio = process_bio_read_only;
1259 @@ -3412,6 +3463,10 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
1260 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
1261 dm_device_name(pool->pool_md),
1262 sb_metadata_dev_size, metadata_dev_size);
1263 +
1264 + if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
1265 + set_pool_mode(pool, PM_WRITE);
1266 +
1267 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
1268 if (r) {
1269 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
1270 @@ -3715,7 +3770,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
1271 struct pool_c *pt = ti->private;
1272 struct pool *pool = pt->pool;
1273
1274 - if (get_pool_mode(pool) >= PM_READ_ONLY) {
1275 + if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
1276 DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
1277 dm_device_name(pool->pool_md));
1278 return -EOPNOTSUPP;
1279 @@ -3789,6 +3844,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
1280 dm_block_t nr_blocks_data;
1281 dm_block_t nr_blocks_metadata;
1282 dm_block_t held_root;
1283 + enum pool_mode mode;
1284 char buf[BDEVNAME_SIZE];
1285 char buf2[BDEVNAME_SIZE];
1286 struct pool_c *pt = ti->private;
1287 @@ -3859,9 +3915,10 @@ static void pool_status(struct dm_target *ti, status_type_t type,
1288 else
1289 DMEMIT("- ");
1290
1291 - if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
1292 + mode = get_pool_mode(pool);
1293 + if (mode == PM_OUT_OF_DATA_SPACE)
1294 DMEMIT("out_of_data_space ");
1295 - else if (pool->pf.mode == PM_READ_ONLY)
1296 + else if (is_read_only_pool_mode(mode))
1297 DMEMIT("ro ");
1298 else
1299 DMEMIT("rw ");
1300 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
1301 index 262a0f0f8fd5..927b60e9d3ca 100644
1302 --- a/drivers/md/raid10.c
1303 +++ b/drivers/md/raid10.c
1304 @@ -4394,11 +4394,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
1305 allow_barrier(conf);
1306 }
1307
1308 + raise_barrier(conf, 0);
1309 read_more:
1310 /* Now schedule reads for blocks from sector_nr to last */
1311 r10_bio = raid10_alloc_init_r10buf(conf);
1312 r10_bio->state = 0;
1313 - raise_barrier(conf, sectors_done != 0);
1314 + raise_barrier(conf, 1);
1315 atomic_set(&r10_bio->remaining, 0);
1316 r10_bio->mddev = mddev;
1317 r10_bio->sector = sector_nr;
1318 @@ -4494,6 +4495,8 @@ read_more:
1319 if (sector_nr <= last)
1320 goto read_more;
1321
1322 + lower_barrier(conf);
1323 +
1324 /* Now that we have done the whole section we can
1325 * update reshape_progress
1326 */
1327 diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
1328 index 284578b0a349..5c908c510c77 100644
1329 --- a/drivers/md/raid5-log.h
1330 +++ b/drivers/md/raid5-log.h
1331 @@ -43,6 +43,11 @@ extern void ppl_write_stripe_run(struct r5conf *conf);
1332 extern void ppl_stripe_write_finished(struct stripe_head *sh);
1333 extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
1334
1335 +static inline bool raid5_has_log(struct r5conf *conf)
1336 +{
1337 + return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
1338 +}
1339 +
1340 static inline bool raid5_has_ppl(struct r5conf *conf)
1341 {
1342 return test_bit(MD_HAS_PPL, &conf->mddev->flags);
1343 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1344 index 5018fb2352c2..dbf51b4c21b3 100644
1345 --- a/drivers/md/raid5.c
1346 +++ b/drivers/md/raid5.c
1347 @@ -736,7 +736,7 @@ static bool stripe_can_batch(struct stripe_head *sh)
1348 {
1349 struct r5conf *conf = sh->raid_conf;
1350
1351 - if (conf->log || raid5_has_ppl(conf))
1352 + if (raid5_has_log(conf) || raid5_has_ppl(conf))
1353 return false;
1354 return test_bit(STRIPE_BATCH_READY, &sh->state) &&
1355 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
1356 @@ -7717,7 +7717,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
1357 sector_t newsize;
1358 struct r5conf *conf = mddev->private;
1359
1360 - if (conf->log || raid5_has_ppl(conf))
1361 + if (raid5_has_log(conf) || raid5_has_ppl(conf))
1362 return -EINVAL;
1363 sectors &= ~((sector_t)conf->chunk_sectors - 1);
1364 newsize = raid5_size(mddev, sectors, mddev->raid_disks);
1365 @@ -7768,7 +7768,7 @@ static int check_reshape(struct mddev *mddev)
1366 {
1367 struct r5conf *conf = mddev->private;
1368
1369 - if (conf->log || raid5_has_ppl(conf))
1370 + if (raid5_has_log(conf) || raid5_has_ppl(conf))
1371 return -EINVAL;
1372 if (mddev->delta_disks == 0 &&
1373 mddev->new_layout == mddev->layout &&
1374 diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
1375 index e3b7a71fcad9..1a4ffc5d3da4 100644
1376 --- a/drivers/net/ethernet/amazon/ena/ena_com.c
1377 +++ b/drivers/net/ethernet/amazon/ena/ena_com.c
1378 @@ -457,7 +457,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
1379 cqe = &admin_queue->cq.entries[head_masked];
1380
1381 /* Go over all the completions */
1382 - while ((cqe->acq_common_descriptor.flags &
1383 + while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
1384 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
1385 /* Do not read the rest of the completion entry before the
1386 * phase bit was validated
1387 @@ -633,7 +633,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
1388 writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
1389
1390 for (i = 0; i < timeout; i++) {
1391 - if (read_resp->req_id == mmio_read->seq_num)
1392 + if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
1393 break;
1394
1395 udelay(1);
1396 @@ -1790,8 +1790,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
1397 aenq_common = &aenq_e->aenq_common_desc;
1398
1399 /* Go over all the events */
1400 - while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
1401 - phase) {
1402 + while ((READ_ONCE(aenq_common->flags) &
1403 + ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
1404 pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
1405 aenq_common->group, aenq_common->syndrom,
1406 (u64)aenq_common->timestamp_low +
1407 diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1408 index 67df5053dc30..60b3ee29d82c 100644
1409 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
1410 +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1411 @@ -456,7 +456,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
1412 return -ENOMEM;
1413 }
1414
1415 - dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
1416 + dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
1417 DMA_FROM_DEVICE);
1418 if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
1419 u64_stats_update_begin(&rx_ring->syncp);
1420 @@ -473,7 +473,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
1421 rx_info->page_offset = 0;
1422 ena_buf = &rx_info->ena_buf;
1423 ena_buf->paddr = dma;
1424 - ena_buf->len = PAGE_SIZE;
1425 + ena_buf->len = ENA_PAGE_SIZE;
1426
1427 return 0;
1428 }
1429 @@ -490,7 +490,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
1430 return;
1431 }
1432
1433 - dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
1434 + dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
1435 DMA_FROM_DEVICE);
1436
1437 __free_page(page);
1438 @@ -910,10 +910,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
1439 do {
1440 dma_unmap_page(rx_ring->dev,
1441 dma_unmap_addr(&rx_info->ena_buf, paddr),
1442 - PAGE_SIZE, DMA_FROM_DEVICE);
1443 + ENA_PAGE_SIZE, DMA_FROM_DEVICE);
1444
1445 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
1446 - rx_info->page_offset, len, PAGE_SIZE);
1447 + rx_info->page_offset, len, ENA_PAGE_SIZE);
1448
1449 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1450 "rx skb updated. len %d. data_len %d\n",
1451 diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
1452 index 29bb5704260b..3404376c28ca 100644
1453 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
1454 +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
1455 @@ -350,4 +350,15 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
1456
1457 int ena_get_sset_count(struct net_device *netdev, int sset);
1458
1459 +/* The ENA buffer length field is 16 bit long. So when PAGE_SIZE == 64kB the
1460 + * driver passes 0.
1461 + * Since the max packet size the ENA handles is ~9kB, limit the buffer length to
1462 + * 16kB.
1463 + */
1464 +#if PAGE_SIZE > SZ_16K
1465 +#define ENA_PAGE_SIZE SZ_16K
1466 +#else
1467 +#define ENA_PAGE_SIZE PAGE_SIZE
1468 +#endif
1469 +
1470 #endif /* !(ENA_H) */
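
The new ENA_PAGE_SIZE cap exists because a 16-bit descriptor length field silently truncates 64 kB to 0. A small standalone sketch of the failure and the clamp (desc_len() is a hypothetical stand-in for writing the hardware descriptor):

#include <stdint.h>
#include <stdio.h>

#define SZ_16K (16 * 1024)

/* Stand-in for the hardware's 16-bit length field: anything >= 64 kB
 * wraps, and exactly 64 kB becomes 0. */
static unsigned desc_len(unsigned buf_len)
{
	return (uint16_t)buf_len;
}

/* The clamp the patch introduces as ENA_PAGE_SIZE. */
static unsigned ena_page_size(unsigned page_size)
{
	return page_size > SZ_16K ? SZ_16K : page_size;
}

int main(void)
{
	unsigned sizes[] = { 4096, 16384, 65536 };

	for (int i = 0; i < 3; i++)
		printf("PAGE_SIZE=%6u -> raw u16=%5u, clamped=%u\n",
		       sizes[i], desc_len(sizes[i]),
		       desc_len(ena_page_size(sizes[i])));
	return 0;
}
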
1471 diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
1472 index dfef4ec167c1..c1787be6a258 100644
1473 --- a/drivers/net/ethernet/cadence/macb_main.c
1474 +++ b/drivers/net/ethernet/cadence/macb_main.c
1475 @@ -642,7 +642,7 @@ static int macb_halt_tx(struct macb *bp)
1476 if (!(status & MACB_BIT(TGO)))
1477 return 0;
1478
1479 - usleep_range(10, 250);
1480 + udelay(250);
1481 } while (time_before(halt_time, timeout));
1482
1483 return -ETIMEDOUT;
1484 diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
1485 index cad52bd331f7..08a750fb60c4 100644
1486 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h
1487 +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
1488 @@ -486,6 +486,8 @@ struct hnae_ae_ops {
1489 u8 *auto_neg, u16 *speed, u8 *duplex);
1490 void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
1491 void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
1492 + bool (*need_adjust_link)(struct hnae_handle *handle,
1493 + int speed, int duplex);
1494 int (*set_loopback)(struct hnae_handle *handle,
1495 enum hnae_loop loop_mode, int en);
1496 void (*get_ring_bdnum_limit)(struct hnae_queue *queue,
1497 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1498 index bd68379d2bea..bf930ab3c2bd 100644
1499 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1500 +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1501 @@ -155,6 +155,41 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
1502 hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
1503 }
1504
1505 +static int hns_ae_wait_flow_down(struct hnae_handle *handle)
1506 +{
1507 + struct dsaf_device *dsaf_dev;
1508 + struct hns_ppe_cb *ppe_cb;
1509 + struct hnae_vf_cb *vf_cb;
1510 + int ret;
1511 + int i;
1512 +
1513 + for (i = 0; i < handle->q_num; i++) {
1514 + ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]);
1515 + if (ret)
1516 + return ret;
1517 + }
1518 +
1519 + ppe_cb = hns_get_ppe_cb(handle);
1520 + ret = hns_ppe_wait_tx_fifo_clean(ppe_cb);
1521 + if (ret)
1522 + return ret;
1523 +
1524 + dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
1525 + if (!dsaf_dev)
1526 + return -EINVAL;
1527 + ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id);
1528 + if (ret)
1529 + return ret;
1530 +
1531 + vf_cb = hns_ae_get_vf_cb(handle);
1532 + ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb);
1533 + if (ret)
1534 + return ret;
1535 +
1536 + mdelay(10);
1537 + return 0;
1538 +}
1539 +
1540 static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
1541 {
1542 int q_num = handle->q_num;
1543 @@ -399,12 +434,41 @@ static int hns_ae_get_mac_info(struct hnae_handle *handle,
1544 return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
1545 }
1546
1547 +static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed,
1548 + int duplex)
1549 +{
1550 + struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
1551 +
1552 + return hns_mac_need_adjust_link(mac_cb, speed, duplex);
1553 +}
1554 +
1555 static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
1556 int duplex)
1557 {
1558 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
1559
1560 - hns_mac_adjust_link(mac_cb, speed, duplex);
1561 + switch (mac_cb->dsaf_dev->dsaf_ver) {
1562 + case AE_VERSION_1:
1563 + hns_mac_adjust_link(mac_cb, speed, duplex);
1564 + break;
1565 +
1566 + case AE_VERSION_2:
1567 + /* the chip needs to drain all packets inside */
1568 + hns_mac_disable(mac_cb, MAC_COMM_MODE_RX);
1569 + if (hns_ae_wait_flow_down(handle)) {
1570 + hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
1571 + break;
1572 + }
1573 +
1574 + hns_mac_adjust_link(mac_cb, speed, duplex);
1575 + hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
1576 + break;
1577 +
1578 + default:
1579 + break;
1580 + }
1581 +
1582 + return;
1583 }
1584
1585 static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
1586 @@ -902,6 +966,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
1587 .get_status = hns_ae_get_link_status,
1588 .get_info = hns_ae_get_mac_info,
1589 .adjust_link = hns_ae_adjust_link,
1590 + .need_adjust_link = hns_ae_need_adjust_link,
1591 .set_loopback = hns_ae_config_loopback,
1592 .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
1593 .get_pauseparam = hns_ae_get_pauseparam,
1594 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
1595 index 74bd260ca02a..8c7bc5cf193c 100644
1596 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
1597 +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
1598 @@ -257,6 +257,16 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
1599 *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B);
1600 }
1601
1602 +static bool hns_gmac_need_adjust_link(void *mac_drv, enum mac_speed speed,
1603 + int duplex)
1604 +{
1605 + struct mac_driver *drv = (struct mac_driver *)mac_drv;
1606 + struct hns_mac_cb *mac_cb = drv->mac_cb;
1607 +
1608 + return (mac_cb->speed != speed) ||
1609 + (mac_cb->half_duplex == duplex);
1610 +}
1611 +
1612 static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
1613 u32 full_duplex)
1614 {
1615 @@ -309,6 +319,30 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en)
1616 hns_gmac_set_uc_match(mac_drv, en);
1617 }
1618
1619 +int hns_gmac_wait_fifo_clean(void *mac_drv)
1620 +{
1621 + struct mac_driver *drv = (struct mac_driver *)mac_drv;
1622 + int wait_cnt;
1623 + u32 val;
1624 +
1625 + wait_cnt = 0;
1626 + while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
1627 + val = dsaf_read_dev(drv, GMAC_FIFO_STATE_REG);
1628 + /* bits 5~0 count packets not yet sent */
1629 + if ((val & 0x3f) == 0)
1630 + break;
1631 + usleep_range(100, 200);
1632 + }
1633 +
1634 + if (wait_cnt >= HNS_MAX_WAIT_CNT) {
1635 + dev_err(drv->dev,
1636 + "hns ge %d fifo was not idle.\n", drv->mac_id);
1637 + return -EBUSY;
1638 + }
1639 +
1640 + return 0;
1641 +}
1642 +
1643 static void hns_gmac_init(void *mac_drv)
1644 {
1645 u32 port;
1646 @@ -690,6 +724,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
1647 mac_drv->mac_disable = hns_gmac_disable;
1648 mac_drv->mac_free = hns_gmac_free;
1649 mac_drv->adjust_link = hns_gmac_adjust_link;
1650 + mac_drv->need_adjust_link = hns_gmac_need_adjust_link;
1651 mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames;
1652 mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length;
1653 mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg;
1654 @@ -717,6 +752,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
1655 mac_drv->get_strings = hns_gmac_get_strings;
1656 mac_drv->update_stats = hns_gmac_update_stats;
1657 mac_drv->set_promiscuous = hns_gmac_set_promisc;
1658 + mac_drv->wait_fifo_clean = hns_gmac_wait_fifo_clean;
1659
1660 return (void *)mac_drv;
1661 }
1662 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
1663 index 8b5cdf490850..5a8dbd72fe45 100644
1664 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
1665 +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
1666 @@ -114,6 +114,26 @@ int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
1667 return 0;
1668 }
1669
1670 +/**
1671 + * hns_mac_need_adjust_link - check whether the mac speed and duplex need changing
1672 + * @mac_cb: mac device
1673 + * @speed: phy device speed
1674 + * @duplex: phy device duplex
1675 + *
1676 + */
1677 +bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
1678 +{
1679 + struct mac_driver *mac_ctrl_drv;
1680 +
1681 + mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac);
1682 +
1683 + if (mac_ctrl_drv->need_adjust_link)
1684 + return mac_ctrl_drv->need_adjust_link(mac_ctrl_drv,
1685 + (enum mac_speed)speed, duplex);
1686 + else
1687 + return true;
1688 +}
1689 +
1690 void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
1691 {
1692 int ret;
1693 @@ -432,6 +452,16 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
1694 return 0;
1695 }
1696
1697 +int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb)
1698 +{
1699 + struct mac_driver *drv = hns_mac_get_drv(mac_cb);
1700 +
1701 + if (drv->wait_fifo_clean)
1702 + return drv->wait_fifo_clean(drv);
1703 +
1704 + return 0;
1705 +}
1706 +
1707 void hns_mac_reset(struct hns_mac_cb *mac_cb)
1708 {
1709 struct mac_driver *drv = hns_mac_get_drv(mac_cb);
1710 @@ -1001,6 +1031,20 @@ static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev)
1711 return DSAF_MAX_PORT_NUM;
1712 }
1713
1714 +void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
1715 +{
1716 + struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
1717 +
1718 + mac_ctrl_drv->mac_enable(mac_cb->priv.mac, mode);
1719 +}
1720 +
1721 +void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
1722 +{
1723 + struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
1724 +
1725 + mac_ctrl_drv->mac_disable(mac_cb->priv.mac, mode);
1726 +}
1727 +
1728 /**
1729 * hns_mac_init - init mac
1730 * @dsaf_dev: dsa fabric device struct pointer
1731 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
1732 index bbc0a98e7ca3..fbc75341bef7 100644
1733 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
1734 +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
1735 @@ -356,6 +356,9 @@ struct mac_driver {
1736 /*adjust mac mode of port,include speed and duplex*/
1737 int (*adjust_link)(void *mac_drv, enum mac_speed speed,
1738 u32 full_duplex);
1739 + /* check whether the link needs adjusting */
1740 + bool (*need_adjust_link)(void *mac_drv, enum mac_speed speed,
1741 + int duplex);
1742 /* config autonegotiation mode of port */
1743 void (*set_an_mode)(void *mac_drv, u8 enable);
1744 /* config loopback mode */
1745 @@ -394,6 +397,7 @@ struct mac_driver {
1746 void (*get_info)(void *mac_drv, struct mac_info *mac_info);
1747
1748 void (*update_stats)(void *mac_drv);
1749 + int (*wait_fifo_clean)(void *mac_drv);
1750
1751 enum mac_mode mac_mode;
1752 u8 mac_id;
1753 @@ -427,6 +431,7 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb,
1754
1755 int hns_mac_init(struct dsaf_device *dsaf_dev);
1756 void mac_adjust_link(struct net_device *net_dev);
1757 +bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
1758 void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status);
1759 int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
1760 int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
1761 @@ -463,5 +468,8 @@ int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
1762 int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
1763 const unsigned char *addr);
1764 int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn);
1765 +void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
1766 +void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
1767 +int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb);
1768
1769 #endif /* _HNS_DSAF_MAC_H */
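
hns_mac_need_adjust_link() and hns_mac_wait_fifo_clean() both use the same optional-callback idiom: if the per-MAC driver supplies the hook, call it; otherwise fall back to the conservative default (always adjust the link, or treat the FIFO as already clean). A compact sketch of the idiom with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-MAC ops table; only some MAC drivers provide the
 * hook, mirroring need_adjust_link/wait_fifo_clean in the hunks. */
struct mac_ops {
	bool (*need_adjust_link)(int speed, int duplex);
};

static bool need_adjust_link(const struct mac_ops *ops, int speed,
			     int duplex)
{
	if (ops->need_adjust_link)
		return ops->need_adjust_link(speed, duplex);
	return true;	/* no hook: conservatively reprogram */
}

static bool gmac_need_adjust(int speed, int duplex)
{
	return speed != 1000 || duplex != 1;
}

int main(void)
{
	struct mac_ops gmac = { .need_adjust_link = gmac_need_adjust };
	struct mac_ops xgmac = { 0 };	/* provides no hook */

	printf("gmac at 1000/full:  %d\n",
	       need_adjust_link(&gmac, 1000, 1));	/* 0: skip */
	printf("xgmac at 1000/full: %d\n",
	       need_adjust_link(&xgmac, 1000, 1));	/* 1: default */
	return 0;
}
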
1770 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
1771 index e0bc79ea3d88..1f056a6b167e 100644
1772 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
1773 +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
1774 @@ -2720,6 +2720,35 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
1775 soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
1776 }
1777
1778 +int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
1779 +{
1780 + u32 val, val_tmp;
1781 + int wait_cnt;
1782 +
1783 + if (port >= DSAF_SERVICE_NW_NUM)
1784 + return 0;
1785 +
1786 + wait_cnt = 0;
1787 + while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
1788 + val = dsaf_read_dev(dsaf_dev, DSAF_VOQ_IN_PKT_NUM_0_REG +
1789 + (port + DSAF_XGE_NUM) * 0x40);
1790 + val_tmp = dsaf_read_dev(dsaf_dev, DSAF_VOQ_OUT_PKT_NUM_0_REG +
1791 + (port + DSAF_XGE_NUM) * 0x40);
1792 + if (val == val_tmp)
1793 + break;
1794 +
1795 + usleep_range(100, 200);
1796 + }
1797 +
1798 + if (wait_cnt >= HNS_MAX_WAIT_CNT) {
1799 + dev_err(dsaf_dev->dev, "hns dsaf clean wait timeout(%u - %u).\n",
1800 + val, val_tmp);
1801 + return -EBUSY;
1802 + }
1803 +
1804 + return 0;
1805 +}
1806 +
1807 /**
1808 * dsaf_probe - probe dsaf dev
1809 * @pdev: dsaf platform device
1810 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
1811 index 4507e8222683..0e1cd99831a6 100644
1812 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
1813 +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
1814 @@ -44,6 +44,8 @@ struct hns_mac_cb;
1815 #define DSAF_ROCE_CREDIT_CHN 8
1816 #define DSAF_ROCE_CHAN_MODE 3
1817
1818 +#define HNS_MAX_WAIT_CNT 10000
1819 +
1820 enum dsaf_roce_port_mode {
1821 DSAF_ROCE_6PORT_MODE,
1822 DSAF_ROCE_4PORT_MODE,
1823 @@ -463,5 +465,6 @@ int hns_dsaf_rm_mac_addr(
1824
1825 int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
1826 u8 mac_id, u8 port_num);
1827 +int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
1828
1829 #endif /* __HNS_DSAF_MAIN_H__ */
1830 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
1831 index 93e71e27401b..a19932aeb9d7 100644
1832 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
1833 +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
1834 @@ -274,6 +274,29 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
1835 dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk);
1836 }
1837
1838 +int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb)
1839 +{
1840 + int wait_cnt;
1841 + u32 val;
1842 +
1843 + wait_cnt = 0;
1844 + while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
1845 + val = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG) & 0x3ffU;
1846 + if (!val)
1847 + break;
1848 +
1849 + usleep_range(100, 200);
1850 + }
1851 +
1852 + if (wait_cnt >= HNS_MAX_WAIT_CNT) {
1853 + dev_err(ppe_cb->dev, "hns ppe tx fifo clean wait timeout, still has %u pkt.\n",
1854 + val);
1855 + return -EBUSY;
1856 + }
1857 +
1858 + return 0;
1859 +}
1860 +
1861 /**
1862 * ppe_init_hw - init ppe
1863 * @ppe_cb: ppe device
1864 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
1865 index 9d8e643e8aa6..f670e63a5a01 100644
1866 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
1867 +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
1868 @@ -100,6 +100,7 @@ struct ppe_common_cb {
1869
1870 };
1871
1872 +int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb);
1873 int hns_ppe_init(struct dsaf_device *dsaf_dev);
1874
1875 void hns_ppe_uninit(struct dsaf_device *dsaf_dev);
1876 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
1877 index e2e28532e4dc..1e43d7a3ca86 100644
1878 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
1879 +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
1880 @@ -66,6 +66,29 @@ void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
1881 "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
1882 }
1883
1884 +int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs)
1885 +{
1886 + u32 head, tail;
1887 + int wait_cnt;
1888 +
1889 + tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL);
1890 + wait_cnt = 0;
1891 + while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
1892 + head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD);
1893 + if (tail == head)
1894 + break;
1895 +
1896 + usleep_range(100, 200);
1897 + }
1898 +
1899 + if (wait_cnt >= HNS_MAX_WAIT_CNT) {
1900 + dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n");
1901 + return -EBUSY;
1902 + }
1903 +
1904 + return 0;
1905 +}
1906 +
1907 /**
1908 *hns_rcb_reset_ring_hw - ring reset
1909 *@q: ring struct pointer
1910 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
1911 index 602816498c8d..2319b772a271 100644
1912 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
1913 +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
1914 @@ -136,6 +136,7 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
1915 void hns_rcb_init_hw(struct ring_pair_cb *ring);
1916 void hns_rcb_reset_ring_hw(struct hnae_queue *q);
1917 void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
1918 +int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs);
1919 u32 hns_rcb_get_rx_coalesced_frames(
1920 struct rcb_common_cb *rcb_common, u32 port_idx);
1921 u32 hns_rcb_get_tx_coalesced_frames(
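
hns_rcb_wait_tx_ring_clean() above, like the PPE, GMAC and DSAF helpers earlier in this series, follows one bounded-polling shape: read a hardware counter up to HNS_MAX_WAIT_CNT times with a short sleep between reads, then give up with -EBUSY. A generic userspace sketch of that shape (the callback stands in for the driver's register reads; names are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_WAIT_CNT 10000

/* Poll a condition up to MAX_WAIT_CNT times, sleeping briefly
 * between attempts; fail like the driver's -EBUSY paths on timeout. */
static int wait_for(bool (*done)(void *), void *arg)
{
	int wait_cnt = 0;

	while (wait_cnt++ < MAX_WAIT_CNT) {
		if (done(arg))
			return 0;
		usleep(150);	/* mirrors usleep_range(100, 200) */
	}
	return -1;		/* timed out */
}

/* Fake "FIFO": drains by one packet per poll. */
static bool fifo_empty(void *arg)
{
	int *pkts = arg;

	if (*pkts > 0)
		(*pkts)--;
	return *pkts == 0;
}

int main(void)
{
	int pkts = 5;

	printf("wait_for() -> %d, pkts left %d\n",
	       wait_for(fifo_empty, &pkts), pkts);
	return 0;
}

The count-plus-sleep bound keeps the worst case around a second or two of waiting instead of hanging the caller indefinitely on a wedged FIFO.
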
1922 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
1923 index 46a52d9bb196..6d20e4eb7402 100644
1924 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
1925 +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
1926 @@ -464,6 +464,7 @@
1927 #define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4
1928 #define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8
1929
1930 +#define GMAC_FIFO_STATE_REG 0x0000UL
1931 #define GMAC_DUPLEX_TYPE_REG 0x0008UL
1932 #define GMAC_FD_FC_TYPE_REG 0x000CUL
1933 #define GMAC_TX_WATER_LINE_REG 0x0010UL
1934 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
1935 index 25a9732afc84..07d6a9cf2c55 100644
1936 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
1937 +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
1938 @@ -1212,11 +1212,26 @@ static void hns_nic_adjust_link(struct net_device *ndev)
1939 struct hnae_handle *h = priv->ae_handle;
1940 int state = 1;
1941
1942 + /* If there is no phy, there is no need to adjust the link */
1943 if (ndev->phydev) {
1944 - h->dev->ops->adjust_link(h, ndev->phydev->speed,
1945 - ndev->phydev->duplex);
1946 - state = ndev->phydev->link;
1947 + /* When the phy link is down, do nothing */
1948 + if (ndev->phydev->link == 0)
1949 + return;
1950 +
1951 + if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
1952 + ndev->phydev->duplex)) {
1953 + /* The Hi161X chip cannot change gmac speed and
1954 + * duplex while passing traffic. Delay 200ms to
1955 + * make sure there is no more data in the chip FIFO.
1956 + */
1957 + netif_carrier_off(ndev);
1958 + msleep(200);
1959 + h->dev->ops->adjust_link(h, ndev->phydev->speed,
1960 + ndev->phydev->duplex);
1961 + netif_carrier_on(ndev);
1962 + }
1963 }
1964 +
1965 state = state && h->dev->ops->get_status(h);
1966
1967 if (state != priv->link) {
1968 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
1969 index 2e14a3ae1d8b..c1e947bb852f 100644
1970 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
1971 +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
1972 @@ -243,7 +243,9 @@ static int hns_nic_set_link_ksettings(struct net_device *net_dev,
1973 }
1974
1975 if (h->dev->ops->adjust_link) {
1976 + netif_carrier_off(net_dev);
1977 h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
1978 + netif_carrier_on(net_dev);
1979 return 0;
1980 }
1981
1982 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1983 index b68d94b49a8a..42183a8b649c 100644
1984 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1985 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1986 @@ -3108,11 +3108,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
1987 return budget;
1988
1989 /* all work done, exit the polling mode */
1990 - napi_complete_done(napi, work_done);
1991 - if (adapter->rx_itr_setting & 1)
1992 - ixgbe_set_itr(q_vector);
1993 - if (!test_bit(__IXGBE_DOWN, &adapter->state))
1994 - ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
1995 + if (likely(napi_complete_done(napi, work_done))) {
1996 + if (adapter->rx_itr_setting & 1)
1997 + ixgbe_set_itr(q_vector);
1998 + if (!test_bit(__IXGBE_DOWN, &adapter->state))
1999 + ixgbe_irq_enable_queues(adapter,
2000 + BIT_ULL(q_vector->v_idx));
2001 + }
2002
2003 return min(work_done, budget - 1);
2004 }
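
The ixgbe hunk matters because napi_complete_done() can return false when polling must continue (for instance while busy polling is in progress); re-arming the queue interrupt in that case risks servicing the queue twice. A toy model of the control flow, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

static bool busy_polling;	/* pretend state owned by the core */

/* Toy stand-in: the real napi_complete_done() returns false when
 * polling must continue, e.g. while busy polling is active. */
static bool napi_complete_done_model(void)
{
	return !busy_polling;
}

static void poll_model(int work_done, int budget)
{
	if (work_done == budget) {
		printf("budget exhausted: stay in polling mode\n");
		return;
	}
	if (napi_complete_done_model())
		printf("completed: safe to re-enable queue interrupts\n");
	else
		printf("not completed: leave interrupts masked\n");
}

int main(void)
{
	busy_polling = false;
	poll_model(3, 64);
	busy_polling = true;
	poll_model(3, 64);
	return 0;
}
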
2005 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
2006 index 9f9c9ff10735..07fda3984e10 100644
2007 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
2008 +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
2009 @@ -388,16 +388,17 @@ void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
2010 }
2011 }
2012
2013 -static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
2014 +static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
2015 {
2016 - return (u16)((dev->pdev->bus->number << 8) |
2017 + return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
2018 + (dev->pdev->bus->number << 8) |
2019 PCI_SLOT(dev->pdev->devfn));
2020 }
2021
2022 /* Must be called with intf_mutex held */
2023 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
2024 {
2025 - u16 pci_id = mlx5_gen_pci_id(dev);
2026 + u32 pci_id = mlx5_gen_pci_id(dev);
2027 struct mlx5_core_dev *res = NULL;
2028 struct mlx5_core_dev *tmp_dev;
2029 struct mlx5_priv *priv;
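
Widening mlx5_gen_pci_id() to u32 makes room for the PCI domain: with only bus and slot packed into 16 bits, two adapters at the same bus/slot in different domains would compare equal. A standalone sketch showing the collision the extra bits avoid (field widths as in the hunk):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Pack domain/bus/slot the way the patched mlx5_gen_pci_id() does;
 * truncating the result to u16 models the old behaviour. */
static uint32_t gen_pci_id32(uint16_t domain, uint8_t bus, uint8_t slot)
{
	return ((uint32_t)domain << 16) | ((uint32_t)bus << 8) | slot;
}

int main(void)
{
	uint32_t a = gen_pci_id32(0, 0x3b, 0x00);	/* domain 0 */
	uint32_t b = gen_pci_id32(1, 0x3b, 0x00);	/* domain 1 */

	printf("u32: a=0x%08" PRIx32 " b=0x%08" PRIx32 " -> %s\n",
	       a, b, a == b ? "collide" : "distinct");
	printf("u16: a=0x%04x b=0x%04x -> %s\n",
	       (uint16_t)a, (uint16_t)b,
	       (uint16_t)a == (uint16_t)b ? "collide" : "distinct");
	return 0;
}
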
2030 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
2031 index 3669005b9294..f7e540eeb877 100644
2032 --- a/drivers/net/ethernet/realtek/r8169.c
2033 +++ b/drivers/net/ethernet/realtek/r8169.c
2034 @@ -760,7 +760,7 @@ struct rtl8169_tc_offsets {
2035 };
2036
2037 enum rtl_flag {
2038 - RTL_FLAG_TASK_ENABLED,
2039 + RTL_FLAG_TASK_ENABLED = 0,
2040 RTL_FLAG_TASK_SLOW_PENDING,
2041 RTL_FLAG_TASK_RESET_PENDING,
2042 RTL_FLAG_TASK_PHY_PENDING,
2043 @@ -7657,7 +7657,8 @@ static int rtl8169_close(struct net_device *dev)
2044 rtl8169_update_counters(dev);
2045
2046 rtl_lock_work(tp);
2047 - clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
2048 + /* Clear all task flags */
2049 + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
2050
2051 rtl8169_down(dev);
2052 rtl_unlock_work(tp);
2053 @@ -7838,7 +7839,9 @@ static void rtl8169_net_suspend(struct net_device *dev)
2054
2055 rtl_lock_work(tp);
2056 napi_disable(&tp->napi);
2057 - clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
2058 + /* Clear all task flags */
2059 + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
2060 +
2061 rtl_unlock_work(tp);
2062
2063 rtl_pll_power_down(tp);
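
Both r8169 hunks replace a single clear_bit() with bitmap_zero() so that every queued task flag is discarded on close and suspend, not just RTL_FLAG_TASK_ENABLED. Userspace stand-ins for the two primitives make the difference visible:

#include <limits.h>
#include <stdio.h>

#define RTL_FLAG_MAX 4

/* Stand-in for clear_bit(): drops exactly one flag. */
static void clear_bit_ul(int nr, unsigned long *map)
{
	map[nr / (sizeof(long) * CHAR_BIT)] &=
		~(1UL << (nr % (sizeof(long) * CHAR_BIT)));
}

/* Stand-in for bitmap_zero(): drops every pending flag. */
static void bitmap_zero_ul(unsigned long *map, int nbits)
{
	for (int i = 0; i * (int)(sizeof(long) * CHAR_BIT) < nbits; i++)
		map[i] = 0;
}

int main(void)
{
	unsigned long flags = 0xfUL;	/* ENABLED + 3 pending tasks */

	clear_bit_ul(0, &flags);
	printf("after clear_bit(ENABLED): 0x%lx (stale work remains)\n",
	       flags);

	flags = 0xfUL;
	bitmap_zero_ul(&flags, RTL_FLAG_MAX);
	printf("after bitmap_zero:        0x%lx\n", flags);
	return 0;
}
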
2064 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
2065 index d686ba10fecc..aafa7aa18fbd 100644
2066 --- a/drivers/net/wireless/mac80211_hwsim.c
2067 +++ b/drivers/net/wireless/mac80211_hwsim.c
2068 @@ -2632,9 +2632,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2069 IEEE80211_VHT_CAP_SHORT_GI_80 |
2070 IEEE80211_VHT_CAP_SHORT_GI_160 |
2071 IEEE80211_VHT_CAP_TXSTBC |
2072 - IEEE80211_VHT_CAP_RXSTBC_1 |
2073 - IEEE80211_VHT_CAP_RXSTBC_2 |
2074 - IEEE80211_VHT_CAP_RXSTBC_3 |
2075 IEEE80211_VHT_CAP_RXSTBC_4 |
2076 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
2077 sband->vht_cap.vht_mcs.rx_mcs_map =
2078 @@ -3124,6 +3121,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
2079 if (info->attrs[HWSIM_ATTR_CHANNELS])
2080 param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
2081
2082 + if (param.channels < 1) {
2083 + GENL_SET_ERR_MSG(info, "must have at least one channel");
2084 + return -EINVAL;
2085 + }
2086 +
2087 if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
2088 GENL_SET_ERR_MSG(info, "too many channels specified");
2089 return -EINVAL;
2090 diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
2091 index 3333d417b248..a70b3d24936d 100644
2092 --- a/drivers/nvme/target/rdma.c
2093 +++ b/drivers/nvme/target/rdma.c
2094 @@ -65,6 +65,7 @@ struct nvmet_rdma_rsp {
2095
2096 struct nvmet_req req;
2097
2098 + bool allocated;
2099 u8 n_rdma;
2100 u32 flags;
2101 u32 invalidate_rkey;
2102 @@ -167,11 +168,19 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
2103 unsigned long flags;
2104
2105 spin_lock_irqsave(&queue->rsps_lock, flags);
2106 - rsp = list_first_entry(&queue->free_rsps,
2107 + rsp = list_first_entry_or_null(&queue->free_rsps,
2108 struct nvmet_rdma_rsp, free_list);
2109 - list_del(&rsp->free_list);
2110 + if (likely(rsp))
2111 + list_del(&rsp->free_list);
2112 spin_unlock_irqrestore(&queue->rsps_lock, flags);
2113
2114 + if (unlikely(!rsp)) {
2115 + rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
2116 + if (unlikely(!rsp))
2117 + return NULL;
2118 + rsp->allocated = true;
2119 + }
2120 +
2121 return rsp;
2122 }
2123
2124 @@ -180,6 +189,11 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
2125 {
2126 unsigned long flags;
2127
2128 + if (rsp->allocated) {
2129 + kfree(rsp);
2130 + return;
2131 + }
2132 +
2133 spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
2134 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
2135 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
2136 @@ -756,6 +770,15 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2137
2138 cmd->queue = queue;
2139 rsp = nvmet_rdma_get_rsp(queue);
2140 + if (unlikely(!rsp)) {
2141 + /*
2142 + * we get here only under memory pressure,
2143 + * silently drop and have the host retry
2144 + * as we can't even fail it.
2145 + */
2146 + nvmet_rdma_post_recv(queue->dev, cmd);
2147 + return;
2148 + }
2149 rsp->queue = queue;
2150 rsp->cmd = cmd;
2151 rsp->flags = 0;
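
The nvmet-rdma change is a classic pool-with-fallback: take a response from the preallocated free list when one is available, fall back to the allocator under memory pressure, and record which path was taken so that put() knows whether to free the object or return it to the list. A self-contained sketch of that shape (single-threaded, so the driver's spinlock is omitted; names illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* 'allocated' records whether the object came from the heap fallback
 * or from the preallocated free list. */
struct rsp {
	bool allocated;
	struct rsp *next;
};

static struct rsp *free_list;

static struct rsp *get_rsp(void)
{
	struct rsp *r = free_list;

	if (r) {
		free_list = r->next;
		return r;		/* pooled: allocated stays false */
	}
	r = calloc(1, sizeof(*r));	/* pressure fallback */
	if (r)
		r->allocated = true;
	return r;			/* may be NULL: caller copes */
}

static void put_rsp(struct rsp *r)
{
	if (r->allocated) {
		free(r);
		return;
	}
	r->next = free_list;
	free_list = r;
}

int main(void)
{
	struct rsp pooled = { 0 };

	free_list = &pooled;
	struct rsp *a = get_rsp();	/* from the pool */
	struct rsp *b = get_rsp();	/* pool empty: heap fallback */

	if (!b)
		return 1;
	printf("a.allocated=%d b.allocated=%d\n", a->allocated,
	       b->allocated);
	put_rsp(a);
	put_rsp(b);
	return 0;
}

Keeping the flag on the object rather than in the queue means put() needs no extra context to pick the right release path.
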
2152 diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
2153 index 0a6afd4b283d..4f2747cd15a6 100644
2154 --- a/drivers/s390/net/qeth_core_main.c
2155 +++ b/drivers/s390/net/qeth_core_main.c
2156 @@ -23,6 +23,7 @@
2157 #include <linux/netdevice.h>
2158 #include <linux/netdev_features.h>
2159 #include <linux/skbuff.h>
2160 +#include <linux/vmalloc.h>
2161
2162 #include <net/iucv/af_iucv.h>
2163 #include <net/dsfield.h>
2164 @@ -4728,7 +4729,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
2165
2166 priv.buffer_len = oat_data.buffer_len;
2167 priv.response_len = 0;
2168 - priv.buffer = kzalloc(oat_data.buffer_len, GFP_KERNEL);
2169 + priv.buffer = vzalloc(oat_data.buffer_len);
2170 if (!priv.buffer) {
2171 rc = -ENOMEM;
2172 goto out;
2173 @@ -4769,7 +4770,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
2174 rc = -EFAULT;
2175
2176 out_free:
2177 - kfree(priv.buffer);
2178 + vfree(priv.buffer);
2179 out:
2180 return rc;
2181 }
2182 diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
2183 index 521293b1f4fa..11ae67842edf 100644
2184 --- a/drivers/s390/net/qeth_l2_main.c
2185 +++ b/drivers/s390/net/qeth_l2_main.c
2186 @@ -484,7 +484,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
2187 default:
2188 dev_kfree_skb_any(skb);
2189 QETH_CARD_TEXT(card, 3, "inbunkno");
2190 - QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
2191 + QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
2192 continue;
2193 }
2194 work_done++;
2195 diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
2196 index 1c62cbbaa66f..cd73172bff47 100644
2197 --- a/drivers/s390/net/qeth_l3_main.c
2198 +++ b/drivers/s390/net/qeth_l3_main.c
2199 @@ -1793,7 +1793,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
2200 default:
2201 dev_kfree_skb_any(skb);
2202 QETH_CARD_TEXT(card, 3, "inbunkno");
2203 - QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
2204 + QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
2205 continue;
2206 }
2207 work_done++;
2208 diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
2209 index 5be0086142ca..ab30db8c36c6 100644
2210 --- a/drivers/scsi/csiostor/csio_hw.c
2211 +++ b/drivers/scsi/csiostor/csio_hw.c
2212 @@ -2010,8 +2010,8 @@ bye:
2213 }
2214
2215 /*
2216 - * Returns -EINVAL if attempts to flash the firmware failed
2217 - * else returns 0,
2218 + * Returns -EINVAL if attempts to flash the firmware failed,
2219 + * -ENOMEM if memory allocation failed else returns 0,
2220 * if flashing was not attempted because the card had the
2221 * latest firmware ECANCELED is returned
2222 */
2223 @@ -2039,6 +2039,13 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
2224 return -EINVAL;
2225 }
2226
2227 + /* allocate memory to read the header of the firmware on the
2228 + * card
2229 + */
2230 + card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
2231 + if (!card_fw)
2232 + return -ENOMEM;
2233 +
2234 if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
2235 fw_bin_file = FW_FNAME_T5;
2236 else
2237 @@ -2052,11 +2059,6 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
2238 fw_size = fw->size;
2239 }
2240
2241 - /* allocate memory to read the header of the firmware on the
2242 - * card
2243 - */
2244 - card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
2245 -
2246 /* upgrade FW logic */
2247 ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
2248 hw->fw_state, reset);
2249 diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
2250 index b8b22ce60ecc..95141066c3fa 100644
2251 --- a/drivers/scsi/qedi/qedi.h
2252 +++ b/drivers/scsi/qedi/qedi.h
2253 @@ -77,6 +77,11 @@ enum qedi_nvm_tgts {
2254 QEDI_NVM_TGT_SEC,
2255 };
2256
2257 +struct qedi_nvm_iscsi_image {
2258 + struct nvm_iscsi_cfg iscsi_cfg;
2259 + u32 crc;
2260 +};
2261 +
2262 struct qedi_uio_ctrl {
2263 /* meta data */
2264 u32 uio_hsi_version;
2265 @@ -294,7 +299,7 @@ struct qedi_ctx {
2266 void *bdq_pbl_list;
2267 dma_addr_t bdq_pbl_list_dma;
2268 u8 bdq_pbl_list_num_entries;
2269 - struct nvm_iscsi_cfg *iscsi_cfg;
2270 + struct qedi_nvm_iscsi_image *iscsi_image;
2271 dma_addr_t nvm_buf_dma;
2272 void __iomem *bdq_primary_prod;
2273 void __iomem *bdq_secondary_prod;
2274 diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
2275 index e7daadc089fc..24b945b555ba 100644
2276 --- a/drivers/scsi/qedi/qedi_main.c
2277 +++ b/drivers/scsi/qedi/qedi_main.c
2278 @@ -1147,23 +1147,26 @@ exit_setup_int:
2279
2280 static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
2281 {
2282 - if (qedi->iscsi_cfg)
2283 + if (qedi->iscsi_image)
2284 dma_free_coherent(&qedi->pdev->dev,
2285 - sizeof(struct nvm_iscsi_cfg),
2286 - qedi->iscsi_cfg, qedi->nvm_buf_dma);
2287 + sizeof(struct qedi_nvm_iscsi_image),
2288 + qedi->iscsi_image, qedi->nvm_buf_dma);
2289 }
2290
2291 static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
2292 {
2293 - qedi->iscsi_cfg = dma_zalloc_coherent(&qedi->pdev->dev,
2294 - sizeof(struct nvm_iscsi_cfg),
2295 - &qedi->nvm_buf_dma, GFP_KERNEL);
2296 - if (!qedi->iscsi_cfg) {
2297 + struct qedi_nvm_iscsi_image nvm_image;
2298 +
2299 + qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev,
2300 + sizeof(nvm_image),
2301 + &qedi->nvm_buf_dma,
2302 + GFP_KERNEL);
2303 + if (!qedi->iscsi_image) {
2304 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
2305 return -ENOMEM;
2306 }
2307 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
2308 - "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_cfg,
2309 + "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_image,
2310 qedi->nvm_buf_dma);
2311
2312 return 0;
2313 @@ -1716,7 +1719,7 @@ qedi_get_nvram_block(struct qedi_ctx *qedi)
2314 struct nvm_iscsi_block *block;
2315
2316 pf = qedi->dev_info.common.abs_pf_id;
2317 - block = &qedi->iscsi_cfg->block[0];
2318 + block = &qedi->iscsi_image->iscsi_cfg.block[0];
2319 for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) {
2320 flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >>
2321 NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET;
2322 @@ -2008,15 +2011,14 @@ static void qedi_boot_release(void *data)
2323 static int qedi_get_boot_info(struct qedi_ctx *qedi)
2324 {
2325 int ret = 1;
2326 - u16 len;
2327 -
2328 - len = sizeof(struct nvm_iscsi_cfg);
2329 + struct qedi_nvm_iscsi_image nvm_image;
2330
2331 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
2332 "Get NVM iSCSI CFG image\n");
2333 ret = qedi_ops->common->nvm_get_image(qedi->cdev,
2334 QED_NVM_IMAGE_ISCSI_CFG,
2335 - (char *)qedi->iscsi_cfg, len);
2336 + (char *)qedi->iscsi_image,
2337 + sizeof(nvm_image));
2338 if (ret)
2339 QEDI_ERR(&qedi->dbg_ctx,
2340 "Could not get NVM image. ret = %d\n", ret);
2341 diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
2342 index 98e27da34f3c..27893d90c4ef 100644
2343 --- a/drivers/target/iscsi/iscsi_target_login.c
2344 +++ b/drivers/target/iscsi/iscsi_target_login.c
2345 @@ -310,11 +310,9 @@ static int iscsi_login_zero_tsih_s1(
2346 return -ENOMEM;
2347 }
2348
2349 - ret = iscsi_login_set_conn_values(sess, conn, pdu->cid);
2350 - if (unlikely(ret)) {
2351 - kfree(sess);
2352 - return ret;
2353 - }
2354 + if (iscsi_login_set_conn_values(sess, conn, pdu->cid))
2355 + goto free_sess;
2356 +
2357 sess->init_task_tag = pdu->itt;
2358 memcpy(&sess->isid, pdu->isid, 6);
2359 sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn);
2360 diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
2361 index 45b57c294d13..401c983ec5f3 100644
2362 --- a/drivers/tty/serial/mvebu-uart.c
2363 +++ b/drivers/tty/serial/mvebu-uart.c
2364 @@ -327,8 +327,10 @@ static void mvebu_uart_set_termios(struct uart_port *port,
2365 if ((termios->c_cflag & CREAD) == 0)
2366 port->ignore_status_mask |= STAT_RX_RDY | STAT_BRK_ERR;
2367
2368 - if (old)
2369 + if (old) {
2370 tty_termios_copy_hw(termios, old);
2371 + termios->c_cflag |= CS8;
2372 + }
2373
2374 baud = uart_get_baud_rate(port, termios, old, 0, 460800);
2375 uart_update_timeout(port, termios->c_cflag, baud);
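
The mvebu-uart hunk forces CS8 back into c_cflag after tty_termios_copy_hw(), since the controller only handles 8-bit characters and the copied settings might carry a smaller CSIZE. The same flags exist in the userspace termios API; the sketch below uses the slightly more defensive clear-then-set idiom rather than the patch's plain OR (which also works on Linux because CS8 sets both CSIZE bits):

#include <stdio.h>
#include <termios.h>

int main(void)
{
	struct termios old = { 0 }, new = { 0 };

	old.c_cflag = CS7 | CREAD;	/* carried-over 7-bit setting */
	new.c_cflag = old.c_cflag;	/* the "copy hw" step */

	/* Force 8-bit characters; clearing CSIZE first keeps the idiom
	 * portable even where CS8 is not the all-bits-set value. */
	new.c_cflag = (new.c_cflag & ~(tcflag_t)CSIZE) | CS8;

	printf("CSIZE is CS8: %s\n",
	       (new.c_cflag & CSIZE) == CS8 ? "yes" : "no");
	return 0;
}
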
2376 diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
2377 index 78d0204e3e20..d17d7052605b 100644
2378 --- a/drivers/usb/gadget/udc/fotg210-udc.c
2379 +++ b/drivers/usb/gadget/udc/fotg210-udc.c
2380 @@ -1066,12 +1066,15 @@ static const struct usb_gadget_ops fotg210_gadget_ops = {
2381 static int fotg210_udc_remove(struct platform_device *pdev)
2382 {
2383 struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
2384 + int i;
2385
2386 usb_del_gadget_udc(&fotg210->gadget);
2387 iounmap(fotg210->reg);
2388 free_irq(platform_get_irq(pdev, 0), fotg210);
2389
2390 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
2391 + for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
2392 + kfree(fotg210->ep[i]);
2393 kfree(fotg210);
2394
2395 return 0;
2396 @@ -1102,7 +1105,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
2397 /* initialize udc */
2398 fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
2399 if (fotg210 == NULL)
2400 - goto err_alloc;
2401 + goto err;
2402
2403 for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
2404 _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
2405 @@ -1114,7 +1117,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
2406 fotg210->reg = ioremap(res->start, resource_size(res));
2407 if (fotg210->reg == NULL) {
2408 pr_err("ioremap error.\n");
2409 - goto err_map;
2410 + goto err_alloc;
2411 }
2412
2413 spin_lock_init(&fotg210->lock);
2414 @@ -1162,7 +1165,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
2415 fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
2416 GFP_KERNEL);
2417 if (fotg210->ep0_req == NULL)
2418 - goto err_req;
2419 + goto err_map;
2420
2421 fotg210_init(fotg210);
2422
2423 @@ -1190,12 +1193,14 @@ err_req:
2424 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
2425
2426 err_map:
2427 - if (fotg210->reg)
2428 - iounmap(fotg210->reg);
2429 + iounmap(fotg210->reg);
2430
2431 err_alloc:
2432 + for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
2433 + kfree(fotg210->ep[i]);
2434 kfree(fotg210);
2435
2436 +err:
2437 return ret;
2438 }
2439
2440 diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
2441 index 0673f286afbd..4f48f5730e12 100644
2442 --- a/drivers/usb/misc/yurex.c
2443 +++ b/drivers/usb/misc/yurex.c
2444 @@ -417,6 +417,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
2445 spin_unlock_irqrestore(&dev->lock, flags);
2446 mutex_unlock(&dev->io_mutex);
2447
2448 + if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
2449 + return -EIO;
2450 +
2451 return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
2452 }
2453
2454 diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
2455 index d4265c8ebb22..b1357aa4bc55 100644
2456 --- a/drivers/xen/cpu_hotplug.c
2457 +++ b/drivers/xen/cpu_hotplug.c
2458 @@ -19,15 +19,16 @@ static void enable_hotplug_cpu(int cpu)
2459
2460 static void disable_hotplug_cpu(int cpu)
2461 {
2462 - if (cpu_online(cpu)) {
2463 - lock_device_hotplug();
2464 + if (!cpu_is_hotpluggable(cpu))
2465 + return;
2466 + lock_device_hotplug();
2467 + if (cpu_online(cpu))
2468 device_offline(get_cpu_device(cpu));
2469 - unlock_device_hotplug();
2470 - }
2471 - if (cpu_present(cpu))
2472 + if (!cpu_online(cpu) && cpu_present(cpu)) {
2473 xen_arch_unregister_cpu(cpu);
2474 -
2475 - set_cpu_present(cpu, false);
2476 + set_cpu_present(cpu, false);
2477 + }
2478 + unlock_device_hotplug();
2479 }
2480
2481 static int vcpu_online(unsigned int cpu)
2482 diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
2483 index 08e4af04d6f2..e6c1934734b7 100644
2484 --- a/drivers/xen/events/events_base.c
2485 +++ b/drivers/xen/events/events_base.c
2486 @@ -138,7 +138,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
2487 clear_evtchn_to_irq_row(row);
2488 }
2489
2490 - evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
2491 + evtchn_to_irq[row][col] = irq;
2492 return 0;
2493 }
2494
2495 diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
2496 index 587d12829925..0444ebdda3f0 100644
2497 --- a/drivers/xen/manage.c
2498 +++ b/drivers/xen/manage.c
2499 @@ -283,9 +283,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
2500 /*
2501 * The Xenstore watch fires directly after registering it and
2502 * after a suspend/resume cycle. So ENOENT is no error but
2503 - * might happen in those cases.
2504 + * might happen in those cases. ERANGE is observed when we get
2505 + * an empty value (''), this happens when we acknowledge the
2506 + * request by writing '\0' below.
2507 */
2508 - if (err != -ENOENT)
2509 + if (err != -ENOENT && err != -ERANGE)
2510 pr_err("Error %d reading sysrq code in control/sysrq\n",
2511 err);
2512 xenbus_transaction_end(xbt, 1);
2513 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
2514 index a39b1f0b0606..a0947f4a3e87 100644
2515 --- a/fs/btrfs/volumes.c
2516 +++ b/fs/btrfs/volumes.c
2517 @@ -4517,7 +4517,12 @@ again:
2518
2519 /* Now btrfs_update_device() will change the on-disk size. */
2520 ret = btrfs_update_device(trans, device);
2521 - btrfs_end_transaction(trans);
2522 + if (ret < 0) {
2523 + btrfs_abort_transaction(trans, ret);
2524 + btrfs_end_transaction(trans);
2525 + } else {
2526 + ret = btrfs_commit_transaction(trans);
2527 + }
2528 done:
2529 btrfs_free_path(path);
2530 if (ret) {
2531 diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
2532 index b380e0871372..a2b2355e7f01 100644
2533 --- a/fs/cifs/cifs_unicode.c
2534 +++ b/fs/cifs/cifs_unicode.c
2535 @@ -105,9 +105,6 @@ convert_sfm_char(const __u16 src_char, char *target)
2536 case SFM_LESSTHAN:
2537 *target = '<';
2538 break;
2539 - case SFM_SLASH:
2540 - *target = '\\';
2541 - break;
2542 case SFM_SPACE:
2543 *target = ' ';
2544 break;
2545 diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
2546 index b5a436583469..2e936f94f102 100644
2547 --- a/fs/cifs/cifssmb.c
2548 +++ b/fs/cifs/cifssmb.c
2549 @@ -589,10 +589,15 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
2550 }
2551
2552 count = 0;
2553 + /*
2554 + * We know that all the name entries in the protocols array
2555 + * are short (< 16 bytes anyway) and are NUL terminated.
2556 + */
2557 for (i = 0; i < CIFS_NUM_PROT; i++) {
2558 - strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
2559 - count += strlen(protocols[i].name) + 1;
2560 - /* null at end of source and target buffers anyway */
2561 + size_t len = strlen(protocols[i].name) + 1;
2562 +
2563 + memcpy(pSMB->DialectsArray+count, protocols[i].name, len);
2564 + count += len;
2565 }
2566 inc_rfc1001_len(pSMB, count);
2567 pSMB->ByteCount = cpu_to_le16(count);
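
The cifssmb.c hunk swaps strncpy() with a fixed length of 16 for a memcpy() of exactly strlen() + 1 bytes: every dialect name is short and NUL-terminated, so the fixed-size copy could read past the end of the shorter source strings. A sketch of the packing loop with illustrative dialect names (the real table entries carry protocol framing bytes in front of strings like these):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *protocols[] = { "NT LM 0.12", "SMB 2.002", "SMB 2.???" };
	char buf[64];
	size_t count = 0;

	for (size_t i = 0; i < sizeof(protocols) / sizeof(*protocols); i++) {
		size_t len = strlen(protocols[i]) + 1;	/* include NUL */

		/* Copy exactly what exists instead of a fixed 16 bytes,
		 * which could read past the end of the source string. */
		memcpy(buf + count, protocols[i], len);
		count += len;
	}
	printf("packed %zu bytes of dialect strings\n", count);
	return 0;
}
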
2568 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
2569 index 460084a8eac5..bcab30d4a6c7 100644
2570 --- a/fs/cifs/misc.c
2571 +++ b/fs/cifs/misc.c
2572 @@ -398,9 +398,17 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
2573 (struct smb_com_transaction_change_notify_rsp *)buf;
2574 struct file_notify_information *pnotify;
2575 __u32 data_offset = 0;
2576 + size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
2577 +
2578 if (get_bcc(buf) > sizeof(struct file_notify_information)) {
2579 data_offset = le32_to_cpu(pSMBr->DataOffset);
2580
2581 + if (data_offset >
2582 + len - sizeof(struct file_notify_information)) {
2583 + cifs_dbg(FYI, "invalid data_offset %u\n",
2584 + data_offset);
2585 + return true;
2586 + }
2587 pnotify = (struct file_notify_information *)
2588 ((char *)&pSMBr->hdr.Protocol + data_offset);
2589 cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
2590 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2591 index 759cbbf7b1af..4e5b05263e4a 100644
2592 --- a/fs/cifs/smb2ops.c
2593 +++ b/fs/cifs/smb2ops.c
2594 @@ -1239,7 +1239,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
2595 }
2596
2597 srch_inf->entries_in_buffer = 0;
2598 - srch_inf->index_of_last_entry = 0;
2599 + srch_inf->index_of_last_entry = 2;
2600
2601 rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
2602 fid->volatile_fid, 0, srch_inf);
2603 diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
2604 index 3e04279446e8..44e7d180ebec 100644
2605 --- a/fs/ocfs2/dlm/dlmmaster.c
2606 +++ b/fs/ocfs2/dlm/dlmmaster.c
2607 @@ -589,9 +589,9 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
2608
2609 res->last_used = 0;
2610
2611 - spin_lock(&dlm->spinlock);
2612 + spin_lock(&dlm->track_lock);
2613 list_add_tail(&res->tracking, &dlm->tracking_list);
2614 - spin_unlock(&dlm->spinlock);
2615 + spin_unlock(&dlm->track_lock);
2616
2617 memset(res->lvb, 0, DLM_LVB_LEN);
2618 memset(res->refmap, 0, sizeof(res->refmap));
2619 diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
2620 index 8a10506db993..d9468de3c951 100644
2621 --- a/fs/overlayfs/namei.c
2622 +++ b/fs/overlayfs/namei.c
2623 @@ -519,7 +519,7 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
2624 index = NULL;
2625 goto out;
2626 }
2627 - pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
2628 + pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%.*s, err=%i);\n"
2629 "overlayfs: mount with '-o index=off' to disable inodes index.\n",
2630 d_inode(origin)->i_ino, name.len, name.name,
2631 err);
2632 diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
2633 index d9a0edd4e57e..e1e743005583 100644
2634 --- a/fs/overlayfs/overlayfs.h
2635 +++ b/fs/overlayfs/overlayfs.h
2636 @@ -136,8 +136,8 @@ static inline int ovl_do_setxattr(struct dentry *dentry, const char *name,
2637 const void *value, size_t size, int flags)
2638 {
2639 int err = vfs_setxattr(dentry, name, value, size, flags);
2640 - pr_debug("setxattr(%pd2, \"%s\", \"%*s\", 0x%x) = %i\n",
2641 - dentry, name, (int) size, (char *) value, flags, err);
2642 + pr_debug("setxattr(%pd2, \"%s\", \"%*pE\", %zu, 0x%x) = %i\n",
2643 + dentry, name, min((int)size, 48), value, size, flags, err);
2644 return err;
2645 }
2646
2647 diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
2648 index f60ce2e04df0..afdc2533ce74 100644
2649 --- a/fs/overlayfs/util.c
2650 +++ b/fs/overlayfs/util.c
2651 @@ -438,7 +438,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
2652 struct dentry *upperdentry = ovl_dentry_upper(dentry);
2653 struct dentry *index = NULL;
2654 struct inode *inode;
2655 - struct qstr name;
2656 + struct qstr name = { };
2657 int err;
2658
2659 err = ovl_get_index_name(lowerdentry, &name);
2660 @@ -477,6 +477,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
2661 goto fail;
2662
2663 out:
2664 + kfree(name.name);
2665 dput(index);
2666 return;
2667
2668 diff --git a/fs/proc/base.c b/fs/proc/base.c
2669 index c5c42f3e33d1..9063738ff1f0 100644
2670 --- a/fs/proc/base.c
2671 +++ b/fs/proc/base.c
2672 @@ -431,6 +431,20 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
2673 int err;
2674 int i;
2675
2676 + /*
2677 + * The ability to racily run the kernel stack unwinder on a running task
2678 + * and then observe the unwinder output is scary; while it is useful for
2679 + * debugging kernel issues, it can also allow an attacker to leak kernel
2680 + * stack contents.
2681 + * Doing this in a manner that is at least safe from races would require
2682 + * some work to ensure that the remote task can not be scheduled; and
2683 + * even then, this would still expose the unwinder as local attack
2684 + * surface.
2685 + * Therefore, this interface is restricted to root.
2686 + */
2687 + if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
2688 + return -EACCES;
2689 +
2690 entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
2691 if (!entries)
2692 return -ENOMEM;
2693 diff --git a/fs/xattr.c b/fs/xattr.c
2694 index be2ce57cd6ad..50029811fbe3 100644
2695 --- a/fs/xattr.c
2696 +++ b/fs/xattr.c
2697 @@ -951,17 +951,19 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
2698 int err = 0;
2699
2700 #ifdef CONFIG_FS_POSIX_ACL
2701 - if (inode->i_acl) {
2702 - err = xattr_list_one(&buffer, &remaining_size,
2703 - XATTR_NAME_POSIX_ACL_ACCESS);
2704 - if (err)
2705 - return err;
2706 - }
2707 - if (inode->i_default_acl) {
2708 - err = xattr_list_one(&buffer, &remaining_size,
2709 - XATTR_NAME_POSIX_ACL_DEFAULT);
2710 - if (err)
2711 - return err;
2712 + if (IS_POSIXACL(inode)) {
2713 + if (inode->i_acl) {
2714 + err = xattr_list_one(&buffer, &remaining_size,
2715 + XATTR_NAME_POSIX_ACL_ACCESS);
2716 + if (err)
2717 + return err;
2718 + }
2719 + if (inode->i_default_acl) {
2720 + err = xattr_list_one(&buffer, &remaining_size,
2721 + XATTR_NAME_POSIX_ACL_DEFAULT);
2722 + if (err)
2723 + return err;
2724 + }
2725 }
2726 #endif
2727
2728 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
2729 index 450e2cd31ed6..a0ffc62e7677 100644
2730 --- a/kernel/bpf/verifier.c
2731 +++ b/kernel/bpf/verifier.c
2732 @@ -2076,6 +2076,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2733 u64 umin_val, umax_val;
2734 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
2735
2736 + if (insn_bitness == 32) {
2737 + /* Relevant for 32-bit RSH: Information can propagate towards
2738 + * LSB, so it isn't sufficient to only truncate the output to
2739 + * 32 bits.
2740 + */
2741 + coerce_reg_to_size(dst_reg, 4);
2742 + coerce_reg_to_size(&src_reg, 4);
2743 + }
2744 +
2745 smin_val = src_reg.smin_value;
2746 smax_val = src_reg.smax_value;
2747 umin_val = src_reg.umin_value;
2748 @@ -2295,7 +2304,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2749 if (BPF_CLASS(insn->code) != BPF_ALU64) {
2750 /* 32-bit ALU ops are (32,32)->32 */
2751 coerce_reg_to_size(dst_reg, 4);
2752 - coerce_reg_to_size(&src_reg, 4);
2753 }
2754
2755 __reg_deduce_bounds(dst_reg);
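
The verifier hunk coerces the inputs of a 32-bit ALU op to 32 bits before tracking bounds, not just the output, because a right shift moves information toward the LSB: bits above bit 31 land inside the low word if the operand is truncated only afterwards. Two lines of plain C demonstrate the difference:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t reg = 0x1ffffffffULL;		/* bit 32 set */

	/* Wrong model: 64-bit shift, then truncate the output. */
	uint32_t wrong = (uint32_t)(reg >> 1);

	/* BPF 32-bit semantics: truncate the input, then shift. */
	uint32_t right = (uint32_t)reg >> 1;

	printf("wrong=0x%08" PRIx32 " right=0x%08" PRIx32 "\n",
	       wrong, right);		/* ffffffff vs 7fffffff */
	return 0;
}
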
2756 diff --git a/mm/madvise.c b/mm/madvise.c
2757 index 751e97aa2210..576b753be428 100644
2758 --- a/mm/madvise.c
2759 +++ b/mm/madvise.c
2760 @@ -96,7 +96,7 @@ static long madvise_behavior(struct vm_area_struct *vma,
2761 new_flags |= VM_DONTDUMP;
2762 break;
2763 case MADV_DODUMP:
2764 - if (new_flags & VM_SPECIAL) {
2765 + if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
2766 error = -EINVAL;
2767 goto out;
2768 }
2769 diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
2770 index e9c6aa3ed05b..3d0d12fbd8dd 100644
2771 --- a/net/mac80211/ibss.c
2772 +++ b/net/mac80211/ibss.c
2773 @@ -947,8 +947,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
2774 if (len < IEEE80211_DEAUTH_FRAME_LEN)
2775 return;
2776
2777 - ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
2778 - mgmt->sa, mgmt->da, mgmt->bssid, reason);
2779 + ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
2780 + ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason);
2781 sta_info_destroy_addr(sdata, mgmt->sa);
2782 }
2783
2784 @@ -966,9 +966,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
2785 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
2786 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
2787
2788 - ibss_dbg(sdata,
2789 - "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
2790 - mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
2791 + ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
2792 + ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n",
2793 + mgmt->bssid, auth_transaction);
2794
2795 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
2796 return;
2797 @@ -1175,10 +1175,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
2798 rx_timestamp = drv_get_tsf(local, sdata);
2799 }
2800
2801 - ibss_dbg(sdata,
2802 - "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
2803 + ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n",
2804 mgmt->sa, mgmt->bssid,
2805 - (unsigned long long)rx_timestamp,
2806 + (unsigned long long)rx_timestamp);
2807 + ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n",
2808 (unsigned long long)beacon_timestamp,
2809 (unsigned long long)(rx_timestamp - beacon_timestamp),
2810 jiffies);
2811 @@ -1537,9 +1537,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
2812
2813 tx_last_beacon = drv_tx_last_beacon(local);
2814
2815 - ibss_dbg(sdata,
2816 - "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
2817 - mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
2818 + ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
2819 + ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n",
2820 + mgmt->bssid, tx_last_beacon);
2821
2822 if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
2823 return;
2824 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
2825 index 8aa1f5b6a051..8a51f94ec1ce 100644
2826 --- a/net/mac80211/main.c
2827 +++ b/net/mac80211/main.c
2828 @@ -255,8 +255,27 @@ static void ieee80211_restart_work(struct work_struct *work)
2829
2830 flush_work(&local->radar_detected_work);
2831 rtnl_lock();
2832 - list_for_each_entry(sdata, &local->interfaces, list)
2833 + list_for_each_entry(sdata, &local->interfaces, list) {
2834 + /*
2835 + * XXX: there may be more work for other vif types and even
2836 + * for station mode: a good thing would be to run most of
2837 + * the iface type's dependent _stop (ieee80211_mg_stop,
2838 + * ieee80211_ibss_stop) etc...
2839 + * For now, fix only the specific bug that was seen: race
2840 + * between csa_connection_drop_work and us.
2841 + */
2842 + if (sdata->vif.type == NL80211_IFTYPE_STATION) {
2843 + /*
2844 + * This worker is scheduled from the iface worker that
2845 + * runs on mac80211's workqueue, so we can't be
2846 + * scheduling this worker after the cancel right here.
2847 + * The exception is ieee80211_chswitch_done.
2848 + * Then we can have a race...
2849 + */
2850 + cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
2851 + }
2852 flush_delayed_work(&sdata->dec_tailroom_needed_wk);
2853 + }
2854 ieee80211_scan_cancel(local);
2855
2856 /* make sure any new ROC will consider local->in_reconfig */
2857 @@ -467,10 +486,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = {
2858 cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
2859 IEEE80211_VHT_CAP_SHORT_GI_80 |
2860 IEEE80211_VHT_CAP_SHORT_GI_160 |
2861 - IEEE80211_VHT_CAP_RXSTBC_1 |
2862 - IEEE80211_VHT_CAP_RXSTBC_2 |
2863 - IEEE80211_VHT_CAP_RXSTBC_3 |
2864 - IEEE80211_VHT_CAP_RXSTBC_4 |
2865 + IEEE80211_VHT_CAP_RXSTBC_MASK |
2866 IEEE80211_VHT_CAP_TXSTBC |
2867 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2868 IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2869 @@ -1171,6 +1187,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
2870 #if IS_ENABLED(CONFIG_IPV6)
2871 unregister_inet6addr_notifier(&local->ifa6_notifier);
2872 #endif
2873 + ieee80211_txq_teardown_flows(local);
2874
2875 rtnl_lock();
2876
2877 @@ -1199,7 +1216,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
2878 skb_queue_purge(&local->skb_queue);
2879 skb_queue_purge(&local->skb_queue_unreliable);
2880 skb_queue_purge(&local->skb_queue_tdls_chsw);
2881 - ieee80211_txq_teardown_flows(local);
2882
2883 destroy_workqueue(local->workqueue);
2884 wiphy_unregister(local->hw.wiphy);
2885 diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
2886 index d6d3f316de4c..055ea36ff27b 100644
2887 --- a/net/mac80211/mesh_hwmp.c
2888 +++ b/net/mac80211/mesh_hwmp.c
2889 @@ -572,6 +572,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
2890 forward = false;
2891 reply = true;
2892 target_metric = 0;
2893 +
2894 + if (SN_GT(target_sn, ifmsh->sn))
2895 + ifmsh->sn = target_sn;
2896 +
2897 if (time_after(jiffies, ifmsh->last_sn_update +
2898 net_traversal_jiffies(sdata)) ||
2899 time_before(jiffies, ifmsh->last_sn_update)) {
2900 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
2901 index 052dbd4fa366..328ac10084e4 100644
2902 --- a/net/mac80211/mlme.c
2903 +++ b/net/mac80211/mlme.c
2904 @@ -988,6 +988,10 @@ static void ieee80211_chswitch_work(struct work_struct *work)
2905 */
2906
2907 if (sdata->reserved_chanctx) {
2908 + struct ieee80211_supported_band *sband = NULL;
2909 + struct sta_info *mgd_sta = NULL;
2910 + enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
2911 +
2912 /*
2913 * with multi-vif csa driver may call ieee80211_csa_finish()
2914 * many times while waiting for other interfaces to use their
2915 @@ -996,6 +1000,48 @@ static void ieee80211_chswitch_work(struct work_struct *work)
2916 if (sdata->reserved_ready)
2917 goto out;
2918
2919 + if (sdata->vif.bss_conf.chandef.width !=
2920 + sdata->csa_chandef.width) {
2921 + /*
2922 + * For managed interface, we need to also update the AP
2923 + * station bandwidth and align the rate scale algorithm
2924 + * on the bandwidth change. Here we only consider the
2925 + * bandwidth of the new channel definition (as channel
2926 + * switch flow does not have the full HT/VHT/HE
2927 + * information), assuming that if additional changes are
2928 + * required they would be done as part of the processing
2929 + * of the next beacon from the AP.
2930 + */
2931 + switch (sdata->csa_chandef.width) {
2932 + case NL80211_CHAN_WIDTH_20_NOHT:
2933 + case NL80211_CHAN_WIDTH_20:
2934 + default:
2935 + bw = IEEE80211_STA_RX_BW_20;
2936 + break;
2937 + case NL80211_CHAN_WIDTH_40:
2938 + bw = IEEE80211_STA_RX_BW_40;
2939 + break;
2940 + case NL80211_CHAN_WIDTH_80:
2941 + bw = IEEE80211_STA_RX_BW_80;
2942 + break;
2943 + case NL80211_CHAN_WIDTH_80P80:
2944 + case NL80211_CHAN_WIDTH_160:
2945 + bw = IEEE80211_STA_RX_BW_160;
2946 + break;
2947 + }
2948 +
2949 + mgd_sta = sta_info_get(sdata, ifmgd->bssid);
2950 + sband =
2951 + local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
2952 + }
2953 +
2954 + if (sdata->vif.bss_conf.chandef.width >
2955 + sdata->csa_chandef.width) {
2956 + mgd_sta->sta.bandwidth = bw;
2957 + rate_control_rate_update(local, sband, mgd_sta,
2958 + IEEE80211_RC_BW_CHANGED);
2959 + }
2960 +
2961 ret = ieee80211_vif_use_reserved_context(sdata);
2962 if (ret) {
2963 sdata_info(sdata,
2964 @@ -1006,6 +1052,13 @@ static void ieee80211_chswitch_work(struct work_struct *work)
2965 goto out;
2966 }
2967
2968 + if (sdata->vif.bss_conf.chandef.width <
2969 + sdata->csa_chandef.width) {
2970 + mgd_sta->sta.bandwidth = bw;
2971 + rate_control_rate_update(local, sband, mgd_sta,
2972 + IEEE80211_RC_BW_CHANGED);
2973 + }
2974 +
2975 goto out;
2976 }
2977
2978 @@ -1227,6 +1280,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
2979 cbss->beacon_interval));
2980 return;
2981 drop_connection:
2982 + /*
2983 + * This is just so that the disconnect flow will know that
2984 + * we were trying to switch channel and failed. In case the
2985 + * mode is 1 (we are not allowed to Tx), we will know not to
2986 + * send a deauthentication frame. Those two fields will be
2987 + * reset when the disconnection worker runs.
2988 + */
2989 + sdata->vif.csa_active = true;
2990 + sdata->csa_block_tx = csa_ie.mode;
2991 +
2992 ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
2993 mutex_unlock(&local->chanctx_mtx);
2994 mutex_unlock(&local->mtx);
2995 @@ -2397,6 +2460,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
2996 struct ieee80211_local *local = sdata->local;
2997 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2998 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
2999 + bool tx;
3000
3001 sdata_lock(sdata);
3002 if (!ifmgd->associated) {
3003 @@ -2404,6 +2468,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
3004 return;
3005 }
3006
3007 + tx = !sdata->csa_block_tx;
3008 +
3009 /* AP is probably out of range (or not reachable for another reason) so
3010 * remove the bss struct for that AP.
3011 */
3012 @@ -2411,7 +2477,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
3013
3014 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
3015 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
3016 - true, frame_buf);
3017 + tx, frame_buf);
3018 mutex_lock(&local->mtx);
3019 sdata->vif.csa_active = false;
3020 ifmgd->csa_waiting_bcn = false;
3021 @@ -2422,7 +2488,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
3022 }
3023 mutex_unlock(&local->mtx);
3024
3025 - ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
3026 + ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
3027 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
3028
3029 sdata_unlock(sdata);
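
[Editor's note] The mlme.c change above is deliberately asymmetric: when the CSA target channel is narrower, the AP station's bandwidth is dropped before the reserved channel context is applied; when it is wider, the bandwidth is raised only afterwards. Either way rate control never advertises a bandwidth the currently programmed channel cannot carry. A compressed, runnable sketch of that ordering (all names hypothetical, standing in for the sta_info/chanctx calls):

    #include <stdio.h>

    static int chan_width;   /* width currently programmed in hardware */
    static int sta_bw;       /* bandwidth rate control believes the STA has */

    static void update_sta_bandwidth(int bw)  { sta_bw = bw; }
    static void switch_channel_context(int w) { chan_width = w; }

    /* Invariant the patch preserves: sta_bw <= chan_width at all times. */
    static void chswitch(int new_width)
    {
        if (new_width < chan_width)
            update_sta_bandwidth(new_width);  /* shrink first ... */

        switch_channel_context(new_width);    /* ... then reprogram */

        if (sta_bw < new_width)
            update_sta_bandwidth(new_width);  /* widen only afterwards */
    }

    int main(void)
    {
        chan_width = 80; sta_bw = 80;
        chswitch(20);    /* narrowing: STA update precedes the switch */
        printf("width=%d bw=%d\n", chan_width, sta_bw);
        chswitch(160);   /* widening: STA update follows the switch */
        printf("width=%d bw=%d\n", chan_width, sta_bw);
        return 0;
    }
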
3030 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
3031 index ccb65f18df5d..d8fddd88bf46 100644
3032 --- a/net/mac80211/tx.c
3033 +++ b/net/mac80211/tx.c
3034 @@ -3022,27 +3022,18 @@ void ieee80211_clear_fast_xmit(struct sta_info *sta)
3035 }
3036
3037 static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local,
3038 - struct sk_buff *skb, int headroom,
3039 - int *subframe_len)
3040 + struct sk_buff *skb, int headroom)
3041 {
3042 - int amsdu_len = *subframe_len + sizeof(struct ethhdr);
3043 - int padding = (4 - amsdu_len) & 3;
3044 -
3045 - if (skb_headroom(skb) < headroom || skb_tailroom(skb) < padding) {
3046 + if (skb_headroom(skb) < headroom) {
3047 I802_DEBUG_INC(local->tx_expand_skb_head);
3048
3049 - if (pskb_expand_head(skb, headroom, padding, GFP_ATOMIC)) {
3050 + if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
3051 wiphy_debug(local->hw.wiphy,
3052 "failed to reallocate TX buffer\n");
3053 return false;
3054 }
3055 }
3056
3057 - if (padding) {
3058 - *subframe_len += padding;
3059 - skb_put_zero(skb, padding);
3060 - }
3061 -
3062 return true;
3063 }
3064
3065 @@ -3066,8 +3057,7 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
3066 if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
3067 return true;
3068
3069 - if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr),
3070 - &subframe_len))
3071 + if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr)))
3072 return false;
3073
3074 data = skb_push(skb, sizeof(*amsdu_hdr));
3075 @@ -3133,7 +3123,8 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
3076 void *data;
3077 bool ret = false;
3078 unsigned int orig_len;
3079 - int n = 1, nfrags;
3080 + int n = 2, nfrags, pad = 0;
3081 + u16 hdrlen;
3082
3083 if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
3084 return false;
3085 @@ -3166,9 +3157,6 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
3086 if (skb->len + head->len > max_amsdu_len)
3087 goto out;
3088
3089 - if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
3090 - goto out;
3091 -
3092 nfrags = 1 + skb_shinfo(skb)->nr_frags;
3093 nfrags += 1 + skb_shinfo(head)->nr_frags;
3094 frag_tail = &skb_shinfo(head)->frag_list;
3095 @@ -3184,10 +3172,24 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
3096 if (max_frags && nfrags > max_frags)
3097 goto out;
3098
3099 - if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2,
3100 - &subframe_len))
3101 + if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
3102 goto out;
3103
3104 + /*
3105 + * Pad out the previous subframe to a multiple of 4 by adding the
3106 + * padding to the next one, that's being added. Note that head->len
3107 + * is the length of the full A-MSDU, but that works since each time
3108 + * we add a new subframe we pad out the previous one to a multiple
3109 + * of 4 and thus it no longer matters in the next round.
3110 + */
3111 + hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header);
3112 + if ((head->len - hdrlen) & 3)
3113 + pad = 4 - ((head->len - hdrlen) & 3);
3114 +
3115 + if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) +
3116 + 2 + pad))
3117 + goto out_recalc;
3118 +
3119 ret = true;
3120 data = skb_push(skb, ETH_ALEN + 2);
3121 memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN);
3122 @@ -3197,15 +3199,19 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
3123 memcpy(data, &len, 2);
3124 memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header));
3125
3126 + memset(skb_push(skb, pad), 0, pad);
3127 +
3128 head->len += skb->len;
3129 head->data_len += skb->len;
3130 *frag_tail = skb;
3131
3132 - flow->backlog += head->len - orig_len;
3133 - tin->backlog_bytes += head->len - orig_len;
3134 -
3135 - fq_recalc_backlog(fq, tin, flow);
3136 +out_recalc:
3137 + if (head->len != orig_len) {
3138 + flow->backlog += head->len - orig_len;
3139 + tin->backlog_bytes += head->len - orig_len;
3140
3141 + fq_recalc_backlog(fq, tin, flow);
3142 + }
3143 out:
3144 spin_unlock_bh(&fq->lock);
3145
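
[Editor's note] The padding comment in the tx.c hunk is easiest to see with concrete numbers: each A-MSDU subframe must start on a 4-byte boundary, so the previous subframe is padded out and the pad bytes are prepended to the frame being appended. A worked check of the arithmetic, using hypothetical lengths (the 34-byte header and 1510-byte payload are example values, not taken from the patch):

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical: 26-byte 802.11 QoS header plus the 8-byte
         * RFC 1042 header already counted in fast_tx->hdr_len. */
        unsigned int hdr_len  = 26 + 8;     /* fast_tx->hdr_len */
        unsigned int head_len = 34 + 1510;  /* header + first subframe */

        unsigned int hdrlen = hdr_len - 8;          /* strip rfc1042 part */
        unsigned int rem = (head_len - hdrlen) & 3; /* bytes past boundary */
        unsigned int pad = rem ? 4 - rem : 0;

        /* 1510 + 8 = 1518 subframe bytes; 1518 & 3 == 2, so 2 pad
         * bytes make the next subframe start 4-byte aligned. */
        printf("pad = %u\n", pad);
        return 0;
    }
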
3146 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
3147 index 742aacb317e5..3ae365f92bff 100644
3148 --- a/net/netfilter/nf_tables_api.c
3149 +++ b/net/netfilter/nf_tables_api.c
3150 @@ -4250,6 +4250,7 @@ static int nft_flush_set(const struct nft_ctx *ctx,
3151 }
3152 set->ndeact++;
3153
3154 + nft_set_elem_deactivate(ctx->net, set, elem);
3155 nft_trans_elem_set(trans) = set;
3156 nft_trans_elem(trans) = *elem;
3157 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
3158 diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
3159 index 57ef175dfbfa..504d5f730f4e 100644
3160 --- a/net/netfilter/xt_cluster.c
3161 +++ b/net/netfilter/xt_cluster.c
3162 @@ -133,6 +133,7 @@ xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
3163 static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
3164 {
3165 struct xt_cluster_match_info *info = par->matchinfo;
3166 + int ret;
3167
3168 if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
3169 pr_info("you have exceeded the maximum "
3170 @@ -145,7 +146,17 @@ static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
3171 "higher than the total number of nodes\n");
3172 return -EDOM;
3173 }
3174 - return 0;
3175 +
3176 + ret = nf_ct_netns_get(par->net, par->family);
3177 + if (ret < 0)
3178 + pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
3179 + par->family);
3180 + return ret;
3181 +}
3182 +
3183 +static void xt_cluster_mt_destroy(const struct xt_mtdtor_param *par)
3184 +{
3185 + nf_ct_netns_put(par->net, par->family);
3186 }
3187
3188 static struct xt_match xt_cluster_match __read_mostly = {
3189 @@ -154,6 +165,7 @@ static struct xt_match xt_cluster_match __read_mostly = {
3190 .match = xt_cluster_mt,
3191 .checkentry = xt_cluster_mt_checkentry,
3192 .matchsize = sizeof(struct xt_cluster_match_info),
3193 + .destroy = xt_cluster_mt_destroy,
3194 .me = THIS_MODULE,
3195 };
3196
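
[Editor's note] The xt_cluster fix restores a balance: the conntrack netns reference taken by nf_ct_netns_get() in checkentry is now released by a matching nf_ct_netns_put() in the new destroy hook, so inserting and removing a rule leaves the refcount unchanged. The same acquire-in-checkentry / release-in-destroy shape, sketched generically (everything here is hypothetical illustration, not kernel API):

    #include <stdio.h>

    static int refcount;    /* stands in for the conntrack netns refcount */

    static int my_checkentry(void)
    {
        refcount++;                 /* e.g. nf_ct_netns_get() */
        return 0;
    }

    static void my_destroy(void)
    {
        refcount--;                 /* e.g. nf_ct_netns_put() */
    }

    int main(void)
    {
        if (my_checkentry() == 0)   /* rule inserted */
            my_destroy();           /* rule removed: balanced again */
        printf("refs=%d\n", refcount);
        return 0;
    }
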
3197 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
3198 index 753f3e73c498..3de415bca391 100644
3199 --- a/net/wireless/nl80211.c
3200 +++ b/net/wireless/nl80211.c
3201 @@ -11679,6 +11679,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info)
3202 return -EOPNOTSUPP;
3203
3204 if (!info->attrs[NL80211_ATTR_MDID] ||
3205 + !info->attrs[NL80211_ATTR_IE] ||
3206 !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
3207 return -EINVAL;
3208
3209 diff --git a/net/wireless/util.c b/net/wireless/util.c
3210 index c1238d582fd1..ca3361a3e750 100644
3211 --- a/net/wireless/util.c
3212 +++ b/net/wireless/util.c
3213 @@ -1449,7 +1449,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
3214 u8 *op_class)
3215 {
3216 u8 vht_opclass;
3217 - u16 freq = chandef->center_freq1;
3218 + u32 freq = chandef->center_freq1;
3219
3220 if (freq >= 2412 && freq <= 2472) {
3221 if (chandef->width > NL80211_CHAN_WIDTH_40)
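
[Editor's note] Widening freq from u16 to u32 matters because chandef->center_freq1 is a u32 holding MHz values; anything above 65535 MHz (as in the 60 GHz band) was silently truncated by the old u16 declaration before the range checks ever ran. A two-variable demonstration of the truncation, using 69120 MHz as an example 60 GHz-band center frequency:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t center_freq1 = 69120;   /* 60 GHz-band value, in MHz */
        uint16_t freq16 = center_freq1;  /* old code: truncates to 3584 */
        uint32_t freq32 = center_freq1;  /* fixed code: keeps 69120 */

        printf("u16: %u, u32: %u\n", freq16, freq32);
        return 0;
    }
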
3222 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3223 index dcc9e6551b51..fe5c741fcc6a 100644
3224 --- a/sound/pci/hda/patch_realtek.c
3225 +++ b/sound/pci/hda/patch_realtek.c
3226 @@ -6288,6 +6288,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3227 SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
3228 SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
3229 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
3230 + SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
3231 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
3232 SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
3233 SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
3234 diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
3235 index 785f4e95148c..7a1039c15e7d 100644
3236 --- a/tools/hv/hv_fcopy_daemon.c
3237 +++ b/tools/hv/hv_fcopy_daemon.c
3238 @@ -233,6 +233,7 @@ int main(int argc, char *argv[])
3239 break;
3240
3241 default:
3242 + error = HV_E_FAIL;
3243 syslog(LOG_ERR, "Unknown operation: %d",
3244 buffer.hdr.operation);
3245
3246 diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
3247 index 32283d88701a..c0d653d36c0f 100755
3248 --- a/tools/kvm/kvm_stat/kvm_stat
3249 +++ b/tools/kvm/kvm_stat/kvm_stat
3250 @@ -724,13 +724,20 @@ class DebugfsProvider(Provider):
3251 if len(vms) == 0:
3252 self.do_read = False
3253
3254 - self.paths = filter(lambda x: "{}-".format(pid) in x, vms)
3255 + self.paths = list(filter(lambda x: "{}-".format(pid) in x, vms))
3256
3257 else:
3258 self.paths = []
3259 self.do_read = True
3260 self.reset()
3261
3262 + def _verify_paths(self):
3263 + """Remove invalid paths"""
3264 + for path in self.paths:
3265 + if not os.path.exists(os.path.join(PATH_DEBUGFS_KVM, path)):
3266 + self.paths.remove(path)
3267 + continue
3268 +
3269 def read(self, reset=0, by_guest=0):
3270 """Returns a dict with format:'file name / field -> current value'.
3271
3272 @@ -745,6 +752,7 @@ class DebugfsProvider(Provider):
3273 # If no debugfs filtering support is available, then don't read.
3274 if not self.do_read:
3275 return results
3276 + self._verify_paths()
3277
3278 paths = self.paths
3279 if self._pid == 0:
3280 @@ -1119,10 +1127,10 @@ class Tui(object):
3281 (x, term_width) = self.screen.getmaxyx()
3282 row = 2
3283 for line in text:
3284 - start = (term_width - len(line)) / 2
3285 + start = (term_width - len(line)) // 2
3286 self.screen.addstr(row, start, line)
3287 row += 1
3288 - self.screen.addstr(row + 1, (term_width - len(hint)) / 2, hint,
3289 + self.screen.addstr(row + 1, (term_width - len(hint)) // 2, hint,
3290 curses.A_STANDOUT)
3291 self.screen.getkey()
3292
3293 diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
3294 index 20e7d74d86cd..10a44e946f77 100644
3295 --- a/tools/perf/arch/powerpc/util/sym-handling.c
3296 +++ b/tools/perf/arch/powerpc/util/sym-handling.c
3297 @@ -22,15 +22,16 @@ bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
3298
3299 #endif
3300
3301 -#if !defined(_CALL_ELF) || _CALL_ELF != 2
3302 int arch__choose_best_symbol(struct symbol *syma,
3303 struct symbol *symb __maybe_unused)
3304 {
3305 char *sym = syma->name;
3306
3307 +#if !defined(_CALL_ELF) || _CALL_ELF != 2
3308 /* Skip over any initial dot */
3309 if (*sym == '.')
3310 sym++;
3311 +#endif
3312
3313 /* Avoid "SyS" kernel syscall aliases */
3314 if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
3315 @@ -41,6 +42,7 @@ int arch__choose_best_symbol(struct symbol *syma,
3316 return SYMBOL_A;
3317 }
3318
3319 +#if !defined(_CALL_ELF) || _CALL_ELF != 2
3320 /* Allow matching against dot variants */
3321 int arch__compare_symbol_names(const char *namea, const char *nameb)
3322 {
3323 diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
3324 index 2227ee92d8e2..44c2f62b47a3 100644
3325 --- a/tools/perf/util/evsel.c
3326 +++ b/tools/perf/util/evsel.c
3327 @@ -259,8 +259,9 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
3328 {
3329 struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
3330
3331 - if (evsel != NULL)
3332 - perf_evsel__init(evsel, attr, idx);
3333 + if (!evsel)
3334 + return NULL;
3335 + perf_evsel__init(evsel, attr, idx);
3336
3337 if (perf_evsel__is_bpf_output(evsel)) {
3338 evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
3339 diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
3340 index e7d60d05596d..8f3b7ef221f2 100644
3341 --- a/tools/perf/util/trace-event-info.c
3342 +++ b/tools/perf/util/trace-event-info.c
3343 @@ -379,7 +379,7 @@ out:
3344
3345 static int record_saved_cmdline(void)
3346 {
3347 - unsigned int size;
3348 + unsigned long long size;
3349 char *path;
3350 struct stat st;
3351 int ret, err = 0;
3352 diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
3353 index 1512086c8cb8..7a1b20ec5216 100644
3354 --- a/tools/power/x86/turbostat/turbostat.c
3355 +++ b/tools/power/x86/turbostat/turbostat.c
3356 @@ -1485,7 +1485,7 @@ int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp)
3357 if (get_msr(cpu, mp->msr_num, counterp))
3358 return -1;
3359 } else {
3360 - char path[128];
3361 + char path[128 + PATH_BYTES];
3362
3363 if (mp->flags & SYSFS_PERCPU) {
3364 sprintf(path, "/sys/devices/system/cpu/cpu%d/%s",
3365 diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
3366 index e92903fc7113..6d5bcbaf6193 100644
3367 --- a/tools/vm/page-types.c
3368 +++ b/tools/vm/page-types.c
3369 @@ -155,12 +155,6 @@ static const char * const page_flag_names[] = {
3370 };
3371
3372
3373 -static const char * const debugfs_known_mountpoints[] = {
3374 - "/sys/kernel/debug",
3375 - "/debug",
3376 - 0,
3377 -};
3378 -
3379 /*
3380 * data structures
3381 */
3382 diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
3383 index b0b7ef6d0de1..3fe093254385 100644
3384 --- a/tools/vm/slabinfo.c
3385 +++ b/tools/vm/slabinfo.c
3386 @@ -30,8 +30,8 @@ struct slabinfo {
3387 int alias;
3388 int refs;
3389 int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
3390 - int hwcache_align, object_size, objs_per_slab;
3391 - int sanity_checks, slab_size, store_user, trace;
3392 + unsigned int hwcache_align, object_size, objs_per_slab;
3393 + unsigned int sanity_checks, slab_size, store_user, trace;
3394 int order, poison, reclaim_account, red_zone;
3395 unsigned long partial, objects, slabs, objects_partial, objects_total;
3396 unsigned long alloc_fastpath, alloc_slowpath;