Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0231-4.9.132-all-fixes.patch



Revision 3233
Thu Oct 18 08:37:18 2018 UTC by niro
File size: 65172 byte(s)
-linux-4.9.132
1 diff --git a/Makefile b/Makefile
2 index 73c4e9a8c127..a46c9788ca67 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 131
9 +SUBLEVEL = 132
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
14 index 54b54da6384c..49112f76710c 100644
15 --- a/arch/arc/include/asm/atomic.h
16 +++ b/arch/arc/include/asm/atomic.h
17 @@ -84,7 +84,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
18 "1: llock %[orig], [%[ctr]] \n" \
19 " " #asm_op " %[val], %[orig], %[i] \n" \
20 " scond %[val], [%[ctr]] \n" \
21 - " \n" \
22 + " bnz 1b \n" \
23 : [val] "=&r" (val), \
24 [orig] "=&r" (orig) \
25 : [ctr] "r" (&v->counter), \
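
The fix above restores the branch that retries the LLOCK/SCOND sequence when the conditional store fails. A minimal userspace sketch of the same retry idea, using C11 atomics rather than ARC assembly (the helper name is illustrative):

/* ll_sc_retry.c - sketch of an LL/SC-style retry loop using C11 atomics.
 * The kernel fix restores the "bnz 1b" branch so a failed SCOND retries;
 * compare_exchange_weak plays the same role here.
 */
#include <stdatomic.h>
#include <stdio.h>

static int atomic_fetch_add_retry(atomic_int *ctr, int i)
{
    int orig = atomic_load_explicit(ctr, memory_order_relaxed);
    int val;

    do {
        val = orig + i;                      /* the "#asm_op" step        */
    } while (!atomic_compare_exchange_weak(  /* retry if the store failed */
                 ctr, &orig, val));
    return orig;
}

int main(void)
{
    atomic_int counter = 0;

    atomic_fetch_add_retry(&counter, 5);
    atomic_fetch_add_retry(&counter, 7);
    printf("counter = %d\n", atomic_load(&counter));  /* 12 */
    return 0;
}
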
26 diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
27 index 1b5e0e843c3a..7e2b3e360086 100644
28 --- a/arch/arm64/include/asm/jump_label.h
29 +++ b/arch/arm64/include/asm/jump_label.h
30 @@ -28,7 +28,7 @@
31
32 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
33 {
34 - asm goto("1: nop\n\t"
35 + asm_volatile_goto("1: nop\n\t"
36 ".pushsection __jump_table, \"aw\"\n\t"
37 ".align 3\n\t"
38 ".quad 1b, %l[l_yes], %c0\n\t"
39 @@ -42,7 +42,7 @@ l_yes:
40
41 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
42 {
43 - asm goto("1: b %l[l_yes]\n\t"
44 + asm_volatile_goto("1: b %l[l_yes]\n\t"
45 ".pushsection __jump_table, \"aw\"\n\t"
46 ".align 3\n\t"
47 ".quad 1b, %l[l_yes], %c0\n\t"
48 diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
49 index 5e4a59b3ec1b..2691a1857d20 100644
50 --- a/arch/hexagon/include/asm/bitops.h
51 +++ b/arch/hexagon/include/asm/bitops.h
52 @@ -211,7 +211,7 @@ static inline long ffz(int x)
53 * This is defined the same way as ffs.
54 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
55 */
56 -static inline long fls(int x)
57 +static inline int fls(int x)
58 {
59 int r;
60
61 @@ -232,7 +232,7 @@ static inline long fls(int x)
62 * the libc and compiler builtin ffs routines, therefore
63 * differs in spirit from the above ffz (man ffs).
64 */
65 -static inline long ffs(int x)
66 +static inline int ffs(int x)
67 {
68 int r;
69
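
The hexagon prototypes are aligned with the generic bitops contract spelled out in the comments above: both ffs() and fls() return int, with ffs(0) == 0, fls(0) == 0 and fls(0x80000000) == 32. A small sketch of that contract using GCC builtins rather than the hexagon assembly:

/* fls_ffs_demo.c - sketch of the generic ffs()/fls() semantics that the
 * hexagon prototypes now match: both return int, and the zero cases are
 * defined as ffs(0) == 0, fls(0) == 0.
 */
#include <stdio.h>

static int my_ffs(int x)    /* 1-based index of the least significant set bit */
{
    return __builtin_ffs(x);
}

static int my_fls(int x)    /* 1-based index of the most significant set bit */
{
    return x ? 32 - __builtin_clz((unsigned int)x) : 0;
}

int main(void)
{
    printf("ffs(0)=%d fls(0)=%d\n", my_ffs(0), my_fls(0));      /* 0 0  */
    printf("ffs(1)=%d fls(1)=%d\n", my_ffs(1), my_fls(1));      /* 1 1  */
    printf("fls(0x80000000)=%d\n", my_fls((int)0x80000000));    /* 32   */
    return 0;
}
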
70 diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
71 index b9017785fb71..0e2be48dbf07 100644
72 --- a/arch/hexagon/kernel/dma.c
73 +++ b/arch/hexagon/kernel/dma.c
74 @@ -68,7 +68,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
75 panic("Can't create %s() memory pool!", __func__);
76 else
77 gen_pool_add(coherent_pool,
78 - pfn_to_virt(max_low_pfn),
79 + (unsigned long)pfn_to_virt(max_low_pfn),
80 hexagon_coherent_pool_size, -1);
81 }
82
83 diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
84 index 05f09ae82587..915e89fcd946 100644
85 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
86 +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
87 @@ -314,7 +314,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
88 unsigned long pp, key;
89 unsigned long v, gr;
90 __be64 *hptep;
91 - int index;
92 + long int index;
93 int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
94
95 /* Get SLB entry */
96 diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
97 index 2c3c7abf678b..10c1a5c448e5 100644
98 --- a/arch/x86/events/intel/lbr.c
99 +++ b/arch/x86/events/intel/lbr.c
100 @@ -1195,4 +1195,8 @@ void intel_pmu_lbr_init_knl(void)
101
102 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
103 x86_pmu.lbr_sel_map = snb_lbr_sel_map;
104 +
105 + /* Knights Landing does have MISPREDICT bit */
106 + if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
107 + x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
108 }
109 diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
110 index 625ee50fd78b..decaed448ebb 100644
111 --- a/drivers/crypto/mxs-dcp.c
112 +++ b/drivers/crypto/mxs-dcp.c
113 @@ -63,7 +63,7 @@ struct dcp {
114 struct dcp_coherent_block *coh;
115
116 struct completion completion[DCP_MAX_CHANS];
117 - struct mutex mutex[DCP_MAX_CHANS];
118 + spinlock_t lock[DCP_MAX_CHANS];
119 struct task_struct *thread[DCP_MAX_CHANS];
120 struct crypto_queue queue[DCP_MAX_CHANS];
121 };
122 @@ -349,13 +349,20 @@ static int dcp_chan_thread_aes(void *data)
123
124 int ret;
125
126 - do {
127 - __set_current_state(TASK_INTERRUPTIBLE);
128 + while (!kthread_should_stop()) {
129 + set_current_state(TASK_INTERRUPTIBLE);
130
131 - mutex_lock(&sdcp->mutex[chan]);
132 + spin_lock(&sdcp->lock[chan]);
133 backlog = crypto_get_backlog(&sdcp->queue[chan]);
134 arq = crypto_dequeue_request(&sdcp->queue[chan]);
135 - mutex_unlock(&sdcp->mutex[chan]);
136 + spin_unlock(&sdcp->lock[chan]);
137 +
138 + if (!backlog && !arq) {
139 + schedule();
140 + continue;
141 + }
142 +
143 + set_current_state(TASK_RUNNING);
144
145 if (backlog)
146 backlog->complete(backlog, -EINPROGRESS);
147 @@ -363,11 +370,8 @@ static int dcp_chan_thread_aes(void *data)
148 if (arq) {
149 ret = mxs_dcp_aes_block_crypt(arq);
150 arq->complete(arq, ret);
151 - continue;
152 }
153 -
154 - schedule();
155 - } while (!kthread_should_stop());
156 + }
157
158 return 0;
159 }
160 @@ -409,9 +413,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
161 rctx->ecb = ecb;
162 actx->chan = DCP_CHAN_CRYPTO;
163
164 - mutex_lock(&sdcp->mutex[actx->chan]);
165 + spin_lock(&sdcp->lock[actx->chan]);
166 ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
167 - mutex_unlock(&sdcp->mutex[actx->chan]);
168 + spin_unlock(&sdcp->lock[actx->chan]);
169
170 wake_up_process(sdcp->thread[actx->chan]);
171
172 @@ -640,13 +644,20 @@ static int dcp_chan_thread_sha(void *data)
173 struct ahash_request *req;
174 int ret, fini;
175
176 - do {
177 - __set_current_state(TASK_INTERRUPTIBLE);
178 + while (!kthread_should_stop()) {
179 + set_current_state(TASK_INTERRUPTIBLE);
180
181 - mutex_lock(&sdcp->mutex[chan]);
182 + spin_lock(&sdcp->lock[chan]);
183 backlog = crypto_get_backlog(&sdcp->queue[chan]);
184 arq = crypto_dequeue_request(&sdcp->queue[chan]);
185 - mutex_unlock(&sdcp->mutex[chan]);
186 + spin_unlock(&sdcp->lock[chan]);
187 +
188 + if (!backlog && !arq) {
189 + schedule();
190 + continue;
191 + }
192 +
193 + set_current_state(TASK_RUNNING);
194
195 if (backlog)
196 backlog->complete(backlog, -EINPROGRESS);
197 @@ -658,12 +669,8 @@ static int dcp_chan_thread_sha(void *data)
198 ret = dcp_sha_req_to_buf(arq);
199 fini = rctx->fini;
200 arq->complete(arq, ret);
201 - if (!fini)
202 - continue;
203 }
204 -
205 - schedule();
206 - } while (!kthread_should_stop());
207 + }
208
209 return 0;
210 }
211 @@ -721,9 +728,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
212 rctx->init = 1;
213 }
214
215 - mutex_lock(&sdcp->mutex[actx->chan]);
216 + spin_lock(&sdcp->lock[actx->chan]);
217 ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
218 - mutex_unlock(&sdcp->mutex[actx->chan]);
219 + spin_unlock(&sdcp->lock[actx->chan]);
220
221 wake_up_process(sdcp->thread[actx->chan]);
222 mutex_unlock(&actx->mutex);
223 @@ -979,7 +986,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
224 platform_set_drvdata(pdev, sdcp);
225
226 for (i = 0; i < DCP_MAX_CHANS; i++) {
227 - mutex_init(&sdcp->mutex[i]);
228 + spin_lock_init(&sdcp->lock[i]);
229 init_completion(&sdcp->completion[i]);
230 crypto_init_queue(&sdcp->queue[i], 50);
231 }
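
The mxs-dcp rework above swaps the per-channel mutex for a spinlock and makes the worker sleep only when its queue is really empty. A rough userspace analog of that shape, with a POSIX spinlock guarding the queue and a semaphore standing in for wake_up_process(); the names and the exit handshake are invented for the sketch:

/* dcp_worker_sketch.c - rough analog of the reworked DCP channel thread:
 * the queue is touched only inside a short spinlock-protected section,
 * and the worker sleeps only when it found the queue empty.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

#define QLEN 8

static pthread_spinlock_t lock;
static sem_t kick;               /* plays the role of wake_up_process() */
static int queue[QLEN];
static int head, tail;
static int done;                 /* set before the final wakeup         */

static void *worker(void *arg)
{
    (void)arg;
    for (;;) {
        int req = -1;

        sem_wait(&kick);                    /* sleep until there is work */
        pthread_spin_lock(&lock);           /* short critical section    */
        if (head != tail)
            req = queue[head++ % QLEN];
        pthread_spin_unlock(&lock);

        if (req < 0) {
            if (done)
                return NULL;
            continue;
        }
        printf("processed request %d\n", req);
    }
}

int main(void)
{
    pthread_t thr;

    pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
    sem_init(&kick, 0, 0);
    pthread_create(&thr, NULL, worker, NULL);

    for (int i = 0; i < 4; i++) {           /* enqueue, like the aes/sha paths */
        pthread_spin_lock(&lock);
        queue[tail++ % QLEN] = i;
        pthread_spin_unlock(&lock);
        sem_post(&kick);
    }

    done = 1;
    sem_post(&kick);                        /* final wakeup so the worker exits */
    pthread_join(thr, NULL);
    return 0;
}
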
232 diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
233 index 640c3fc870fd..ad9d6fbc2b8a 100644
234 --- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
235 +++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
236 @@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
237 struct adf_hw_device_data *hw_data;
238 char name[ADF_DEVICE_NAME_LENGTH];
239 unsigned int i, bar_nr;
240 - int ret, bar_mask;
241 + unsigned long bar_mask;
242 + int ret;
243
244 switch (ent->device) {
245 case ADF_C3XXX_PCI_DEVICE_ID:
246 @@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
247 /* Find and map all the device's BARS */
248 i = 0;
249 bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
250 - for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
251 - ADF_PCI_MAX_BARS * 2) {
252 + for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
253 struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
254
255 bar->base_addr = pci_resource_start(pdev, bar_nr);
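
This and the following QAT probe fixes give bar_mask the unsigned long type that for_each_set_bit() expects, instead of casting the address of an int, which reads past the variable on 64-bit builds. A minimal sketch of walking the set bits of a properly typed mask, with __builtin_ctzl standing in for the kernel iterator:

/* bar_mask_walk.c - sketch of why bar_mask became unsigned long: the set-bit
 * iterator walks unsigned long words, so the mask must really be one.
 */
#include <stdio.h>

int main(void)
{
    unsigned long bar_mask = 0x15;      /* BARs 0, 2 and 4 have memory resources */

    for (unsigned long m = bar_mask; m; m &= m - 1) {
        int bar_nr = __builtin_ctzl(m); /* index of the lowest set bit */
        printf("mapping BAR %d\n", bar_nr);
    }
    return 0;
}
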
256 diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
257 index 949d77b79fbe..0dd8d2dc2ec1 100644
258 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
259 +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
260 @@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
261 struct adf_hw_device_data *hw_data;
262 char name[ADF_DEVICE_NAME_LENGTH];
263 unsigned int i, bar_nr;
264 - int ret, bar_mask;
265 + unsigned long bar_mask;
266 + int ret;
267
268 switch (ent->device) {
269 case ADF_C3XXXIOV_PCI_DEVICE_ID:
270 @@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
271 /* Find and map all the device's BARS */
272 i = 0;
273 bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
274 - for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
275 - ADF_PCI_MAX_BARS * 2) {
276 + for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
277 struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
278
279 bar->base_addr = pci_resource_start(pdev, bar_nr);
280 diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
281 index 5b2d78a5b5aa..dcdb94cd7163 100644
282 --- a/drivers/crypto/qat/qat_c62x/adf_drv.c
283 +++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
284 @@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
285 struct adf_hw_device_data *hw_data;
286 char name[ADF_DEVICE_NAME_LENGTH];
287 unsigned int i, bar_nr;
288 - int ret, bar_mask;
289 + unsigned long bar_mask;
290 + int ret;
291
292 switch (ent->device) {
293 case ADF_C62X_PCI_DEVICE_ID:
294 @@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
295 /* Find and map all the device's BARS */
296 i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
297 bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
298 - for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
299 - ADF_PCI_MAX_BARS * 2) {
300 + for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
301 struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
302
303 bar->base_addr = pci_resource_start(pdev, bar_nr);
304 diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
305 index 7540ce13b0d0..cd9e63468b18 100644
306 --- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
307 +++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
308 @@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
309 struct adf_hw_device_data *hw_data;
310 char name[ADF_DEVICE_NAME_LENGTH];
311 unsigned int i, bar_nr;
312 - int ret, bar_mask;
313 + unsigned long bar_mask;
314 + int ret;
315
316 switch (ent->device) {
317 case ADF_C62XIOV_PCI_DEVICE_ID:
318 @@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
319 /* Find and map all the device's BARS */
320 i = 0;
321 bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
322 - for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
323 - ADF_PCI_MAX_BARS * 2) {
324 + for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
325 struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
326
327 bar->base_addr = pci_resource_start(pdev, bar_nr);
328 diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
329 index 4d2de2838451..3417443f08a2 100644
330 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
331 +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
332 @@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
333 struct adf_hw_device_data *hw_data;
334 char name[ADF_DEVICE_NAME_LENGTH];
335 unsigned int i, bar_nr;
336 - int ret, bar_mask;
337 + unsigned long bar_mask;
338 + int ret;
339
340 switch (ent->device) {
341 case ADF_DH895XCC_PCI_DEVICE_ID:
342 @@ -237,8 +238,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
343 /* Find and map all the device's BARS */
344 i = 0;
345 bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
346 - for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
347 - ADF_PCI_MAX_BARS * 2) {
348 + for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
349 struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
350
351 bar->base_addr = pci_resource_start(pdev, bar_nr);
352 diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
353 index 60df98632fa2..15de9cbed3bf 100644
354 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
355 +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
356 @@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
357 struct adf_hw_device_data *hw_data;
358 char name[ADF_DEVICE_NAME_LENGTH];
359 unsigned int i, bar_nr;
360 - int ret, bar_mask;
361 + unsigned long bar_mask;
362 + int ret;
363
364 switch (ent->device) {
365 case ADF_DH895XCCIOV_PCI_DEVICE_ID:
366 @@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
367 /* Find and map all the device's BARS */
368 i = 0;
369 bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
370 - for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
371 - ADF_PCI_MAX_BARS * 2) {
372 + for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
373 struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
374
375 bar->base_addr = pci_resource_start(pdev, bar_nr);
376 diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
377 index c0f718b12317..c85407abc201 100644
378 --- a/drivers/gpio/gpio-adp5588.c
379 +++ b/drivers/gpio/gpio-adp5588.c
380 @@ -41,6 +41,8 @@ struct adp5588_gpio {
381 uint8_t int_en[3];
382 uint8_t irq_mask[3];
383 uint8_t irq_stat[3];
384 + uint8_t int_input_en[3];
385 + uint8_t int_lvl_cached[3];
386 };
387
388 static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
389 @@ -173,12 +175,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
390 struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
391 int i;
392
393 - for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
394 + for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
395 + if (dev->int_input_en[i]) {
396 + mutex_lock(&dev->lock);
397 + dev->dir[i] &= ~dev->int_input_en[i];
398 + dev->int_input_en[i] = 0;
399 + adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
400 + dev->dir[i]);
401 + mutex_unlock(&dev->lock);
402 + }
403 +
404 + if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
405 + dev->int_lvl_cached[i] = dev->int_lvl[i];
406 + adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
407 + dev->int_lvl[i]);
408 + }
409 +
410 if (dev->int_en[i] ^ dev->irq_mask[i]) {
411 dev->int_en[i] = dev->irq_mask[i];
412 adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
413 dev->int_en[i]);
414 }
415 + }
416
417 mutex_unlock(&dev->irq_lock);
418 }
419 @@ -221,9 +239,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
420 else
421 return -EINVAL;
422
423 - adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
424 - adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
425 - dev->int_lvl[bank]);
426 + dev->int_input_en[bank] |= bit;
427
428 return 0;
429 }
430 diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
431 index 193f15d50bba..aac84329c759 100644
432 --- a/drivers/gpio/gpiolib-of.c
433 +++ b/drivers/gpio/gpiolib-of.c
434 @@ -31,6 +31,7 @@ static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
435 struct of_phandle_args *gpiospec = data;
436
437 return chip->gpiodev->dev.of_node == gpiospec->np &&
438 + chip->of_xlate &&
439 chip->of_xlate(chip, gpiospec, NULL) >= 0;
440 }
441
442 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
443 index dd0076497463..2ec402ae14de 100644
444 --- a/drivers/gpio/gpiolib.c
445 +++ b/drivers/gpio/gpiolib.c
446 @@ -471,7 +471,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
447 if (ret)
448 goto out_free_descs;
449 lh->descs[i] = desc;
450 - count = i;
451 + count = i + 1;
452
453 if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
454 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
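
The gpiolib fix sets count to i + 1 so the error path also releases the descriptor obtained in the current iteration. A small standalone sketch of that unwind pattern, with the failure point simulated:

/* acquire_count_sketch.c - sketch of the off-by-one fixed in
 * linehandle_create(): track count = i + 1 after each successful
 * acquisition so the error path releases everything obtained so far.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_DESCS 4

static int acquire(int i, void **out)
{
    if (i == 2)                    /* simulate a failure on the third item */
        return -1;
    *out = malloc(16);
    return *out ? 0 : -1;
}

int main(void)
{
    void *descs[NR_DESCS] = { 0 };
    int count = 0;

    for (int i = 0; i < NR_DESCS; i++) {
        if (acquire(i, &descs[i]))
            goto out_free;
        count = i + 1;             /* not "count = i": this one is held too */
    }
    printf("acquired all %d descriptors\n", count);

out_free:
    for (int i = 0; i < count; i++)
        free(descs[i]);
    printf("released %d descriptor(s)\n", count);
    return 0;
}
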
455 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
456 index a410c0db8a08..6a1b81e2b727 100644
457 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
458 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
459 @@ -161,7 +161,8 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
460 }
461
462 /* load and execute some other ucode image (bios therm?) */
463 - return pmu_load(init, 0x01, post, NULL, NULL);
464 + pmu_load(init, 0x01, post, NULL, NULL);
465 + return 0;
466 }
467
468 static const struct nvkm_devinit_func
469 diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
470 index 2e046082210f..65a0c79f212e 100644
471 --- a/drivers/hid/hid-apple.c
472 +++ b/drivers/hid/hid-apple.c
473 @@ -333,7 +333,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
474 struct hid_field *field, struct hid_usage *usage,
475 unsigned long **bit, int *max)
476 {
477 - if (usage->hid == (HID_UP_CUSTOM | 0x0003)) {
478 + if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
479 + usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
480 /* The fn key on Apple USB keyboards */
481 set_bit(EV_REP, hi->input->evbit);
482 hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
483 @@ -476,6 +477,12 @@ static const struct hid_device_id apple_devices[] = {
484 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
485 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
486 .driver_data = APPLE_HAS_FN },
487 + { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
488 + .driver_data = APPLE_HAS_FN },
489 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
490 + .driver_data = APPLE_HAS_FN },
491 + { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
492 + .driver_data = APPLE_HAS_FN },
493 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
494 .driver_data = APPLE_HAS_FN },
495 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
496 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
497 index de64cd33590a..8913f357e78f 100644
498 --- a/drivers/hid/hid-ids.h
499 +++ b/drivers/hid/hid-ids.h
500 @@ -83,6 +83,7 @@
501 #define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
502
503 #define USB_VENDOR_ID_APPLE 0x05ac
504 +#define BT_VENDOR_ID_APPLE 0x004c
505 #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
506 #define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
507 #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
508 @@ -152,6 +153,7 @@
509 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
510 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257
511 #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI 0x0267
512 +#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI 0x026c
513 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
514 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
515 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
516 @@ -888,6 +890,7 @@
517 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
518 #define USB_DEVICE_ID_SAITEK_PS1000 0x0621
519 #define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb
520 +#define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION 0x0ccd
521 #define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
522 #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
523 #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
524 diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
525 index 39e642686ff0..683861f324e3 100644
526 --- a/drivers/hid/hid-saitek.c
527 +++ b/drivers/hid/hid-saitek.c
528 @@ -183,6 +183,8 @@ static const struct hid_device_id saitek_devices[] = {
529 .driver_data = SAITEK_RELEASE_MODE_RAT7 },
530 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
531 .driver_data = SAITEK_RELEASE_MODE_RAT7 },
532 + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION),
533 + .driver_data = SAITEK_RELEASE_MODE_RAT7 },
534 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9),
535 .driver_data = SAITEK_RELEASE_MODE_RAT7 },
536 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
537 diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
538 index db9105e52c79..0da4991dd356 100644
539 --- a/drivers/i2c/busses/i2c-uniphier-f.c
540 +++ b/drivers/i2c/busses/i2c-uniphier-f.c
541 @@ -400,11 +400,8 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap,
542 return ret;
543
544 for (msg = msgs; msg < emsg; msg++) {
545 - /* If next message is read, skip the stop condition */
546 - bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
547 - /* but, force it if I2C_M_STOP is set */
548 - if (msg->flags & I2C_M_STOP)
549 - stop = true;
550 + /* Emit STOP if it is the last message or I2C_M_STOP is set. */
551 + bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
552
553 ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
554 if (ret)
555 diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
556 index 56e92af46ddc..fdfcee9230e4 100644
557 --- a/drivers/i2c/busses/i2c-uniphier.c
558 +++ b/drivers/i2c/busses/i2c-uniphier.c
559 @@ -247,11 +247,8 @@ static int uniphier_i2c_master_xfer(struct i2c_adapter *adap,
560 return ret;
561
562 for (msg = msgs; msg < emsg; msg++) {
563 - /* If next message is read, skip the stop condition */
564 - bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
565 - /* but, force it if I2C_M_STOP is set */
566 - if (msg->flags & I2C_M_STOP)
567 - stop = true;
568 + /* Emit STOP if it is the last message or I2C_M_STOP is set. */
569 + bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
570
571 ret = uniphier_i2c_master_xfer_one(adap, msg, stop);
572 if (ret)
573 diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
574 index 3bef6d4ffe6f..0fd0d82f80d2 100644
575 --- a/drivers/infiniband/core/ucma.c
576 +++ b/drivers/infiniband/core/ucma.c
577 @@ -124,6 +124,8 @@ static DEFINE_MUTEX(mut);
578 static DEFINE_IDR(ctx_idr);
579 static DEFINE_IDR(multicast_idr);
580
581 +static const struct file_operations ucma_fops;
582 +
583 static inline struct ucma_context *_ucma_find_context(int id,
584 struct ucma_file *file)
585 {
586 @@ -1545,6 +1547,10 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
587 f = fdget(cmd.fd);
588 if (!f.file)
589 return -ENOENT;
590 + if (f.file->f_op != &ucma_fops) {
591 + ret = -EINVAL;
592 + goto file_put;
593 + }
594
595 /* Validate current fd and prevent destruction of id. */
596 ctx = ucma_get_ctx(f.file->private_data, cmd.id);
597 diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
598 index ee75e3510be6..3f389b267e04 100644
599 --- a/drivers/md/dm-raid.c
600 +++ b/drivers/md/dm-raid.c
601 @@ -2880,6 +2880,11 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
602 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
603 rs_set_new(rs);
604 } else if (rs_is_recovering(rs)) {
605 + /* Rebuild particular devices */
606 + if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
607 + set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
608 + rs_setup_recovery(rs, MaxSector);
609 + }
610 /* A recovering raid set may be resized */
611 ; /* skip setup rs */
612 } else if (rs_is_reshaping(rs)) {
613 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
614 index e976f4f39334..149fbac97cb6 100644
615 --- a/drivers/md/dm-thin-metadata.c
616 +++ b/drivers/md/dm-thin-metadata.c
617 @@ -189,6 +189,12 @@ struct dm_pool_metadata {
618 unsigned long flags;
619 sector_t data_block_size;
620
621 + /*
622 + * We reserve a section of the metadata for commit overhead.
623 + * All reported space does *not* include this.
624 + */
625 + dm_block_t metadata_reserve;
626 +
627 /*
628 * Set if a transaction has to be aborted but the attempt to roll back
629 * to the previous (good) transaction failed. The only pool metadata
630 @@ -827,6 +833,20 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
631 return dm_tm_commit(pmd->tm, sblock);
632 }
633
634 +static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
635 +{
636 + int r;
637 + dm_block_t total;
638 + dm_block_t max_blocks = 4096; /* 16M */
639 +
640 + r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
641 + if (r) {
642 + DMERR("could not get size of metadata device");
643 + pmd->metadata_reserve = max_blocks;
644 + } else
645 + pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
646 +}
647 +
648 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
649 sector_t data_block_size,
650 bool format_device)
651 @@ -860,6 +880,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
652 return ERR_PTR(r);
653 }
654
655 + __set_metadata_reserve(pmd);
656 +
657 return pmd;
658 }
659
660 @@ -1831,6 +1853,13 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
661 down_read(&pmd->root_lock);
662 if (!pmd->fail_io)
663 r = dm_sm_get_nr_free(pmd->metadata_sm, result);
664 +
665 + if (!r) {
666 + if (*result < pmd->metadata_reserve)
667 + *result = 0;
668 + else
669 + *result -= pmd->metadata_reserve;
670 + }
671 up_read(&pmd->root_lock);
672
673 return r;
674 @@ -1943,8 +1972,11 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
675 int r = -EINVAL;
676
677 down_write(&pmd->root_lock);
678 - if (!pmd->fail_io)
679 + if (!pmd->fail_io) {
680 r = __resize_space_map(pmd->metadata_sm, new_count);
681 + if (!r)
682 + __set_metadata_reserve(pmd);
683 + }
684 up_write(&pmd->root_lock);
685
686 return r;
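
The metadata reserve added above is min(4096 blocks, a tenth of the metadata device), and the reported free count has that reserve subtracted. A short sketch of just the arithmetic, with made-up block counts:

/* metadata_reserve_sketch.c - sketch of the dm-thin-metadata reserve:
 * hold back min(4096, total/10) blocks for commit overhead and report
 * free space with the reserve already subtracted.
 */
#include <inttypes.h>
#include <stdio.h>

static uint64_t metadata_reserve(uint64_t total_blocks)
{
    uint64_t max_blocks = 4096;            /* 16M with 4k metadata blocks */
    uint64_t tenth = total_blocks / 10;

    return tenth < max_blocks ? tenth : max_blocks;
}

static uint64_t reported_free(uint64_t raw_free, uint64_t reserve)
{
    return raw_free < reserve ? 0 : raw_free - reserve;
}

int main(void)
{
    uint64_t total = 20000, raw_free = 3000;
    uint64_t reserve = metadata_reserve(total);

    printf("reserve = %" PRIu64 " blocks\n", reserve);         /* 2000 */
    printf("reported free = %" PRIu64 " blocks\n",
           reported_free(raw_free, reserve));                  /* 1000 */
    return 0;
}
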
687 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
688 index a952ad890f32..81309d7836c5 100644
689 --- a/drivers/md/dm-thin.c
690 +++ b/drivers/md/dm-thin.c
691 @@ -200,7 +200,13 @@ struct dm_thin_new_mapping;
692 enum pool_mode {
693 PM_WRITE, /* metadata may be changed */
694 PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
695 +
696 + /*
697 + * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
698 + */
699 + PM_OUT_OF_METADATA_SPACE,
700 PM_READ_ONLY, /* metadata may not be changed */
701 +
702 PM_FAIL, /* all I/O fails */
703 };
704
705 @@ -1386,7 +1392,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
706
707 static void requeue_bios(struct pool *pool);
708
709 -static void check_for_space(struct pool *pool)
710 +static bool is_read_only_pool_mode(enum pool_mode mode)
711 +{
712 + return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
713 +}
714 +
715 +static bool is_read_only(struct pool *pool)
716 +{
717 + return is_read_only_pool_mode(get_pool_mode(pool));
718 +}
719 +
720 +static void check_for_metadata_space(struct pool *pool)
721 +{
722 + int r;
723 + const char *ooms_reason = NULL;
724 + dm_block_t nr_free;
725 +
726 + r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
727 + if (r)
728 + ooms_reason = "Could not get free metadata blocks";
729 + else if (!nr_free)
730 + ooms_reason = "No free metadata blocks";
731 +
732 + if (ooms_reason && !is_read_only(pool)) {
733 + DMERR("%s", ooms_reason);
734 + set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
735 + }
736 +}
737 +
738 +static void check_for_data_space(struct pool *pool)
739 {
740 int r;
741 dm_block_t nr_free;
742 @@ -1412,14 +1446,16 @@ static int commit(struct pool *pool)
743 {
744 int r;
745
746 - if (get_pool_mode(pool) >= PM_READ_ONLY)
747 + if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
748 return -EINVAL;
749
750 r = dm_pool_commit_metadata(pool->pmd);
751 if (r)
752 metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
753 - else
754 - check_for_space(pool);
755 + else {
756 + check_for_metadata_space(pool);
757 + check_for_data_space(pool);
758 + }
759
760 return r;
761 }
762 @@ -1485,6 +1521,19 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
763 return r;
764 }
765
766 + r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
767 + if (r) {
768 + metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
769 + return r;
770 + }
771 +
772 + if (!free_blocks) {
773 + /* Let's commit before we use up the metadata reserve. */
774 + r = commit(pool);
775 + if (r)
776 + return r;
777 + }
778 +
779 return 0;
780 }
781
782 @@ -1516,6 +1565,7 @@ static int should_error_unserviceable_bio(struct pool *pool)
783 case PM_OUT_OF_DATA_SPACE:
784 return pool->pf.error_if_no_space ? -ENOSPC : 0;
785
786 + case PM_OUT_OF_METADATA_SPACE:
787 case PM_READ_ONLY:
788 case PM_FAIL:
789 return -EIO;
790 @@ -2479,8 +2529,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
791 error_retry_list(pool);
792 break;
793
794 + case PM_OUT_OF_METADATA_SPACE:
795 case PM_READ_ONLY:
796 - if (old_mode != new_mode)
797 + if (!is_read_only_pool_mode(old_mode))
798 notify_of_pool_mode_change(pool, "read-only");
799 dm_pool_metadata_read_only(pool->pmd);
800 pool->process_bio = process_bio_read_only;
801 @@ -3418,6 +3469,10 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
802 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
803 dm_device_name(pool->pool_md),
804 sb_metadata_dev_size, metadata_dev_size);
805 +
806 + if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
807 + set_pool_mode(pool, PM_WRITE);
808 +
809 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
810 if (r) {
811 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
812 @@ -3721,7 +3776,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
813 struct pool_c *pt = ti->private;
814 struct pool *pool = pt->pool;
815
816 - if (get_pool_mode(pool) >= PM_READ_ONLY) {
817 + if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
818 DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
819 dm_device_name(pool->pool_md));
820 return -EOPNOTSUPP;
821 @@ -3795,6 +3850,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
822 dm_block_t nr_blocks_data;
823 dm_block_t nr_blocks_metadata;
824 dm_block_t held_root;
825 + enum pool_mode mode;
826 char buf[BDEVNAME_SIZE];
827 char buf2[BDEVNAME_SIZE];
828 struct pool_c *pt = ti->private;
829 @@ -3865,9 +3921,10 @@ static void pool_status(struct dm_target *ti, status_type_t type,
830 else
831 DMEMIT("- ");
832
833 - if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
834 + mode = get_pool_mode(pool);
835 + if (mode == PM_OUT_OF_DATA_SPACE)
836 DMEMIT("out_of_data_space ");
837 - else if (pool->pf.mode == PM_READ_ONLY)
838 + else if (is_read_only_pool_mode(mode))
839 DMEMIT("ro ");
840 else
841 DMEMIT("rw ");
842 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
843 index 6da66c3acd46..b3046063402c 100644
844 --- a/drivers/md/raid10.c
845 +++ b/drivers/md/raid10.c
846 @@ -4381,11 +4381,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
847 allow_barrier(conf);
848 }
849
850 + raise_barrier(conf, 0);
851 read_more:
852 /* Now schedule reads for blocks from sector_nr to last */
853 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
854 r10_bio->state = 0;
855 - raise_barrier(conf, sectors_done != 0);
856 + raise_barrier(conf, 1);
857 atomic_set(&r10_bio->remaining, 0);
858 r10_bio->mddev = mddev;
859 r10_bio->sector = sector_nr;
860 @@ -4492,6 +4493,8 @@ bio_full:
861 if (sector_nr <= last)
862 goto read_more;
863
864 + lower_barrier(conf);
865 +
866 /* Now that we have done the whole section we can
867 * update reshape_progress
868 */
869 diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
870 index 0d9ce08ee3a9..1d92e034febc 100644
871 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
872 +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
873 @@ -422,7 +422,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
874 return -ENOMEM;
875 }
876
877 - dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
878 + dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
879 DMA_FROM_DEVICE);
880 if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
881 u64_stats_update_begin(&rx_ring->syncp);
882 @@ -439,7 +439,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
883 rx_info->page_offset = 0;
884 ena_buf = &rx_info->ena_buf;
885 ena_buf->paddr = dma;
886 - ena_buf->len = PAGE_SIZE;
887 + ena_buf->len = ENA_PAGE_SIZE;
888
889 return 0;
890 }
891 @@ -456,7 +456,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
892 return;
893 }
894
895 - dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
896 + dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
897 DMA_FROM_DEVICE);
898
899 __free_page(page);
900 @@ -849,10 +849,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
901 do {
902 dma_unmap_page(rx_ring->dev,
903 dma_unmap_addr(&rx_info->ena_buf, paddr),
904 - PAGE_SIZE, DMA_FROM_DEVICE);
905 + ENA_PAGE_SIZE, DMA_FROM_DEVICE);
906
907 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
908 - rx_info->page_offset, len, PAGE_SIZE);
909 + rx_info->page_offset, len, ENA_PAGE_SIZE);
910
911 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
912 "rx skb updated. len %d. data_len %d\n",
913 diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
914 index c5eaf7616939..008f2d594d40 100644
915 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
916 +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
917 @@ -321,4 +321,15 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
918
919 int ena_get_sset_count(struct net_device *netdev, int sset);
920
921 +/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the
922 + * driver passas 0.
923 + * Since the max packet size the ENA handles is ~9kB limit the buffer length to
924 + * 16kB.
925 + */
926 +#if PAGE_SIZE > SZ_16K
927 +#define ENA_PAGE_SIZE SZ_16K
928 +#else
929 +#define ENA_PAGE_SIZE PAGE_SIZE
930 +#endif
931 +
932 #endif /* !(ENA_H) */
933 diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
934 index ec09fcece711..2e1585635083 100644
935 --- a/drivers/net/ethernet/cadence/macb.c
936 +++ b/drivers/net/ethernet/cadence/macb.c
937 @@ -517,7 +517,7 @@ static int macb_halt_tx(struct macb *bp)
938 if (!(status & MACB_BIT(TGO)))
939 return 0;
940
941 - usleep_range(10, 250);
942 + udelay(250);
943 } while (time_before(halt_time, timeout));
944
945 return -ETIMEDOUT;
946 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
947 index 6be0cae44e9b..4cd163390dcc 100644
948 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
949 +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
950 @@ -243,7 +243,9 @@ static int hns_nic_set_link_ksettings(struct net_device *net_dev,
951 }
952
953 if (h->dev->ops->adjust_link) {
954 + netif_carrier_off(net_dev);
955 h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
956 + netif_carrier_on(net_dev);
957 return 0;
958 }
959
960 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
961 index a9dbc28f6b97..524fff2b3dc6 100644
962 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
963 +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
964 @@ -288,16 +288,17 @@ void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
965 }
966 }
967
968 -static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
969 +static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
970 {
971 - return (u16)((dev->pdev->bus->number << 8) |
972 + return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
973 + (dev->pdev->bus->number << 8) |
974 PCI_SLOT(dev->pdev->devfn));
975 }
976
977 /* Must be called with intf_mutex held */
978 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
979 {
980 - u16 pci_id = mlx5_gen_pci_id(dev);
981 + u32 pci_id = mlx5_gen_pci_id(dev);
982 struct mlx5_core_dev *res = NULL;
983 struct mlx5_core_dev *tmp_dev;
984 struct mlx5_priv *priv;
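
mlx5_gen_pci_id() now folds the PCI domain into a 32-bit identifier, since bus number plus slot alone can collide across domains. A small sketch of that composition with illustrative values:

/* pci_phys_id_sketch.c - sketch of the widened mlx5_gen_pci_id(): two
 * functions with the same bus/slot in different PCI domains no longer
 * compare equal once the domain is part of the id.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t gen_pci_id(uint16_t domain, uint8_t bus, uint8_t devfn)
{
    return ((uint32_t)domain << 16) |
           ((uint32_t)bus << 8)     |
           ((uint32_t)(devfn >> 3) & 0x1f);   /* PCI_SLOT(): slot = devfn >> 3 */
}

int main(void)
{
    /* same bus/slot, different domains: distinct with the 32-bit id */
    printf("domain 0: 0x%08x\n", gen_pci_id(0, 0x3b, 0x00));
    printf("domain 1: 0x%08x\n", gen_pci_id(1, 0x3b, 0x00));
    return 0;
}
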
985 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
986 index f65e8cd6d144..20f5c0cabc89 100644
987 --- a/drivers/net/ethernet/realtek/r8169.c
988 +++ b/drivers/net/ethernet/realtek/r8169.c
989 @@ -760,7 +760,7 @@ struct rtl8169_tc_offsets {
990 };
991
992 enum rtl_flag {
993 - RTL_FLAG_TASK_ENABLED,
994 + RTL_FLAG_TASK_ENABLED = 0,
995 RTL_FLAG_TASK_SLOW_PENDING,
996 RTL_FLAG_TASK_RESET_PENDING,
997 RTL_FLAG_TASK_PHY_PENDING,
998 @@ -7637,7 +7637,8 @@ static int rtl8169_close(struct net_device *dev)
999 rtl8169_update_counters(dev);
1000
1001 rtl_lock_work(tp);
1002 - clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
1003 + /* Clear all task flags */
1004 + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
1005
1006 rtl8169_down(dev);
1007 rtl_unlock_work(tp);
1008 @@ -7820,7 +7821,9 @@ static void rtl8169_net_suspend(struct net_device *dev)
1009
1010 rtl_lock_work(tp);
1011 napi_disable(&tp->napi);
1012 - clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
1013 + /* Clear all task flags */
1014 + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
1015 +
1016 rtl_unlock_work(tp);
1017
1018 rtl_pll_power_down(tp);
1019 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
1020 index 95e96419b4cf..4bb36dc73433 100644
1021 --- a/drivers/net/wireless/mac80211_hwsim.c
1022 +++ b/drivers/net/wireless/mac80211_hwsim.c
1023 @@ -2569,9 +2569,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
1024 IEEE80211_VHT_CAP_SHORT_GI_80 |
1025 IEEE80211_VHT_CAP_SHORT_GI_160 |
1026 IEEE80211_VHT_CAP_TXSTBC |
1027 - IEEE80211_VHT_CAP_RXSTBC_1 |
1028 - IEEE80211_VHT_CAP_RXSTBC_2 |
1029 - IEEE80211_VHT_CAP_RXSTBC_3 |
1030 IEEE80211_VHT_CAP_RXSTBC_4 |
1031 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
1032 sband->vht_cap.vht_mcs.rx_mcs_map =
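
The RXSTBC_1..RXSTBC_4 constants dropped here (and in the mac80211 hunk further down) are encodings of a single 3-bit field, not independent flags, so OR-ing them advertises a reserved value. A sketch of that bug class; the constants mirror the ieee80211.h definitions for illustration:

/* rxstbc_field.c - sketch of a multi-bit capability field: RXSTBC_n are
 * values of one 3-bit field at shift 8, so OR-ing them produces the bogus
 * field value 7 instead of "up to 4 spatial streams".
 */
#include <stdio.h>

#define VHT_CAP_RXSTBC_SHIFT 8
#define VHT_CAP_RXSTBC_MASK  (7u << VHT_CAP_RXSTBC_SHIFT)
#define VHT_CAP_RXSTBC_1     (1u << VHT_CAP_RXSTBC_SHIFT)
#define VHT_CAP_RXSTBC_2     (2u << VHT_CAP_RXSTBC_SHIFT)
#define VHT_CAP_RXSTBC_3     (3u << VHT_CAP_RXSTBC_SHIFT)
#define VHT_CAP_RXSTBC_4     (4u << VHT_CAP_RXSTBC_SHIFT)

static unsigned int rxstbc_streams(unsigned int cap)
{
    return (cap & VHT_CAP_RXSTBC_MASK) >> VHT_CAP_RXSTBC_SHIFT;
}

int main(void)
{
    unsigned int wrong = VHT_CAP_RXSTBC_1 | VHT_CAP_RXSTBC_2 |
                         VHT_CAP_RXSTBC_3 | VHT_CAP_RXSTBC_4;
    unsigned int right = VHT_CAP_RXSTBC_4;

    printf("OR of all values -> field %u (reserved)\n", rxstbc_streams(wrong));
    printf("RXSTBC_4 alone   -> field %u (up to 4 streams)\n", rxstbc_streams(right));
    return 0;
}
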
1033 diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
1034 index 53bd32550867..2dfd877974d7 100644
1035 --- a/drivers/nvme/target/rdma.c
1036 +++ b/drivers/nvme/target/rdma.c
1037 @@ -65,6 +65,7 @@ struct nvmet_rdma_rsp {
1038
1039 struct nvmet_req req;
1040
1041 + bool allocated;
1042 u8 n_rdma;
1043 u32 flags;
1044 u32 invalidate_rkey;
1045 @@ -167,11 +168,19 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
1046 unsigned long flags;
1047
1048 spin_lock_irqsave(&queue->rsps_lock, flags);
1049 - rsp = list_first_entry(&queue->free_rsps,
1050 + rsp = list_first_entry_or_null(&queue->free_rsps,
1051 struct nvmet_rdma_rsp, free_list);
1052 - list_del(&rsp->free_list);
1053 + if (likely(rsp))
1054 + list_del(&rsp->free_list);
1055 spin_unlock_irqrestore(&queue->rsps_lock, flags);
1056
1057 + if (unlikely(!rsp)) {
1058 + rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
1059 + if (unlikely(!rsp))
1060 + return NULL;
1061 + rsp->allocated = true;
1062 + }
1063 +
1064 return rsp;
1065 }
1066
1067 @@ -180,6 +189,11 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
1068 {
1069 unsigned long flags;
1070
1071 + if (rsp->allocated) {
1072 + kfree(rsp);
1073 + return;
1074 + }
1075 +
1076 spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
1077 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
1078 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
1079 @@ -755,6 +769,15 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1080
1081 cmd->queue = queue;
1082 rsp = nvmet_rdma_get_rsp(queue);
1083 + if (unlikely(!rsp)) {
1084 + /*
1085 + * we get here only under memory pressure,
1086 + * silently drop and have the host retry
1087 + * as we can't even fail it.
1088 + */
1089 + nvmet_rdma_post_recv(queue->dev, cmd);
1090 + return;
1091 + }
1092 rsp->queue = queue;
1093 rsp->cmd = cmd;
1094 rsp->flags = 0;
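
The nvmet-rdma change falls back to the heap when the preallocated response list is exhausted and tags such responses so the put path frees them instead of re-listing them. A minimal userspace sketch of that pool-with-fallback pattern; the pool size and names are arbitrary:

/* rsp_pool_fallback.c - sketch of a fixed pool with heap fallback: get()
 * prefers the free list, falls back to the allocator under pressure, and
 * put() consults an "allocated" flag to decide between free and re-list.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct rsp {
    bool allocated;              /* true if it came from the heap */
    struct rsp *next;            /* free-list link                */
};

#define POOL_SIZE 2
static struct rsp pool[POOL_SIZE];
static struct rsp *free_list;

static void pool_init(void)
{
    for (int i = 0; i < POOL_SIZE; i++) {
        pool[i].next = free_list;
        free_list = &pool[i];
    }
}

static struct rsp *get_rsp(void)
{
    struct rsp *rsp = free_list;

    if (rsp) {
        free_list = rsp->next;
        return rsp;
    }
    rsp = calloc(1, sizeof(*rsp));   /* pool exhausted: heap fallback */
    if (rsp)
        rsp->allocated = true;
    return rsp;
}

static void put_rsp(struct rsp *rsp)
{
    if (rsp->allocated) {
        free(rsp);
        return;
    }
    rsp->next = free_list;
    free_list = rsp;
}

int main(void)
{
    pool_init();

    struct rsp *a = get_rsp();
    struct rsp *b = get_rsp();
    struct rsp *c = get_rsp();       /* third request overflows the pool */
    if (!a || !b || !c)
        return 1;

    printf("a from pool: %d, b from pool: %d, c heap fallback: %d\n",
           !a->allocated, !b->allocated, c->allocated);

    put_rsp(c);
    put_rsp(b);
    put_rsp(a);
    return 0;
}
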
1095 diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
1096 index 258a72869f57..a5e603062ee0 100644
1097 --- a/drivers/s390/net/qeth_core_main.c
1098 +++ b/drivers/s390/net/qeth_core_main.c
1099 @@ -23,6 +23,7 @@
1100 #include <linux/netdevice.h>
1101 #include <linux/netdev_features.h>
1102 #include <linux/skbuff.h>
1103 +#include <linux/vmalloc.h>
1104
1105 #include <net/iucv/af_iucv.h>
1106 #include <net/dsfield.h>
1107 @@ -4715,7 +4716,7 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
1108
1109 priv.buffer_len = oat_data.buffer_len;
1110 priv.response_len = 0;
1111 - priv.buffer = kzalloc(oat_data.buffer_len, GFP_KERNEL);
1112 + priv.buffer = vzalloc(oat_data.buffer_len);
1113 if (!priv.buffer) {
1114 rc = -ENOMEM;
1115 goto out;
1116 @@ -4756,7 +4757,7 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
1117 rc = -EFAULT;
1118
1119 out_free:
1120 - kfree(priv.buffer);
1121 + vfree(priv.buffer);
1122 out:
1123 return rc;
1124 }
1125 diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
1126 index e94e9579914e..58404e69aa4b 100644
1127 --- a/drivers/s390/net/qeth_l2_main.c
1128 +++ b/drivers/s390/net/qeth_l2_main.c
1129 @@ -491,7 +491,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
1130 default:
1131 dev_kfree_skb_any(skb);
1132 QETH_CARD_TEXT(card, 3, "inbunkno");
1133 - QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
1134 + QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
1135 continue;
1136 }
1137 work_done++;
1138 diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
1139 index 4ca161bdc696..efefe075557f 100644
1140 --- a/drivers/s390/net/qeth_l3_main.c
1141 +++ b/drivers/s390/net/qeth_l3_main.c
1142 @@ -1836,7 +1836,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
1143 default:
1144 dev_kfree_skb_any(skb);
1145 QETH_CARD_TEXT(card, 3, "inbunkno");
1146 - QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
1147 + QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
1148 continue;
1149 }
1150 work_done++;
1151 diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
1152 index 45b57c294d13..401c983ec5f3 100644
1153 --- a/drivers/tty/serial/mvebu-uart.c
1154 +++ b/drivers/tty/serial/mvebu-uart.c
1155 @@ -327,8 +327,10 @@ static void mvebu_uart_set_termios(struct uart_port *port,
1156 if ((termios->c_cflag & CREAD) == 0)
1157 port->ignore_status_mask |= STAT_RX_RDY | STAT_BRK_ERR;
1158
1159 - if (old)
1160 + if (old) {
1161 tty_termios_copy_hw(termios, old);
1162 + termios->c_cflag |= CS8;
1163 + }
1164
1165 baud = uart_get_baud_rate(port, termios, old, 0, 460800);
1166 uart_update_timeout(port, termios->c_cflag, baud);
1167 diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
1168 index 6ba122cc7490..95df2b3bb6a1 100644
1169 --- a/drivers/usb/gadget/udc/fotg210-udc.c
1170 +++ b/drivers/usb/gadget/udc/fotg210-udc.c
1171 @@ -1066,12 +1066,15 @@ static struct usb_gadget_ops fotg210_gadget_ops = {
1172 static int fotg210_udc_remove(struct platform_device *pdev)
1173 {
1174 struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
1175 + int i;
1176
1177 usb_del_gadget_udc(&fotg210->gadget);
1178 iounmap(fotg210->reg);
1179 free_irq(platform_get_irq(pdev, 0), fotg210);
1180
1181 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
1182 + for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
1183 + kfree(fotg210->ep[i]);
1184 kfree(fotg210);
1185
1186 return 0;
1187 @@ -1102,7 +1105,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
1188 /* initialize udc */
1189 fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
1190 if (fotg210 == NULL)
1191 - goto err_alloc;
1192 + goto err;
1193
1194 for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
1195 _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
1196 @@ -1114,7 +1117,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
1197 fotg210->reg = ioremap(res->start, resource_size(res));
1198 if (fotg210->reg == NULL) {
1199 pr_err("ioremap error.\n");
1200 - goto err_map;
1201 + goto err_alloc;
1202 }
1203
1204 spin_lock_init(&fotg210->lock);
1205 @@ -1162,7 +1165,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
1206 fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
1207 GFP_KERNEL);
1208 if (fotg210->ep0_req == NULL)
1209 - goto err_req;
1210 + goto err_map;
1211
1212 fotg210_init(fotg210);
1213
1214 @@ -1190,12 +1193,14 @@ err_req:
1215 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
1216
1217 err_map:
1218 - if (fotg210->reg)
1219 - iounmap(fotg210->reg);
1220 + iounmap(fotg210->reg);
1221
1222 err_alloc:
1223 + for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
1224 + kfree(fotg210->ep[i]);
1225 kfree(fotg210);
1226
1227 +err:
1228 return ret;
1229 }
1230
1231 diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
1232 index e36c6c6452cd..1e672343bcd6 100644
1233 --- a/drivers/usb/misc/yurex.c
1234 +++ b/drivers/usb/misc/yurex.c
1235 @@ -423,6 +423,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
1236 spin_unlock_irqrestore(&dev->lock, flags);
1237 mutex_unlock(&dev->io_mutex);
1238
1239 + if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
1240 + return -EIO;
1241 +
1242 return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
1243 }
1244
1245 diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
1246 index 5676aefdf2bc..f4e59c445964 100644
1247 --- a/drivers/xen/cpu_hotplug.c
1248 +++ b/drivers/xen/cpu_hotplug.c
1249 @@ -18,15 +18,16 @@ static void enable_hotplug_cpu(int cpu)
1250
1251 static void disable_hotplug_cpu(int cpu)
1252 {
1253 - if (cpu_online(cpu)) {
1254 - lock_device_hotplug();
1255 + if (!cpu_is_hotpluggable(cpu))
1256 + return;
1257 + lock_device_hotplug();
1258 + if (cpu_online(cpu))
1259 device_offline(get_cpu_device(cpu));
1260 - unlock_device_hotplug();
1261 - }
1262 - if (cpu_present(cpu))
1263 + if (!cpu_online(cpu) && cpu_present(cpu)) {
1264 xen_arch_unregister_cpu(cpu);
1265 -
1266 - set_cpu_present(cpu, false);
1267 + set_cpu_present(cpu, false);
1268 + }
1269 + unlock_device_hotplug();
1270 }
1271
1272 static int vcpu_online(unsigned int cpu)
1273 diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
1274 index 1435d8c58ea0..4b0cc9d0ca53 100644
1275 --- a/drivers/xen/events/events_base.c
1276 +++ b/drivers/xen/events/events_base.c
1277 @@ -139,7 +139,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
1278 clear_evtchn_to_irq_row(row);
1279 }
1280
1281 - evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
1282 + evtchn_to_irq[row][col] = irq;
1283 return 0;
1284 }
1285
1286 diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
1287 index 7abaaa5f0f67..abd49bc7c460 100644
1288 --- a/drivers/xen/manage.c
1289 +++ b/drivers/xen/manage.c
1290 @@ -282,9 +282,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
1291 /*
1292 * The Xenstore watch fires directly after registering it and
1293 * after a suspend/resume cycle. So ENOENT is no error but
1294 - * might happen in those cases.
1295 + * might happen in those cases. ERANGE is observed when we get
1296 + * an empty value (''), this happens when we acknowledge the
1297 + * request by writing '\0' below.
1298 */
1299 - if (err != -ENOENT)
1300 + if (err != -ENOENT && err != -ERANGE)
1301 pr_err("Error %d reading sysrq code in control/sysrq\n",
1302 err);
1303 xenbus_transaction_end(xbt, 1);
1304 diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
1305 index a0b3e7d1be48..211ac472cb9d 100644
1306 --- a/fs/cifs/cifs_unicode.c
1307 +++ b/fs/cifs/cifs_unicode.c
1308 @@ -101,9 +101,6 @@ convert_sfm_char(const __u16 src_char, char *target)
1309 case SFM_LESSTHAN:
1310 *target = '<';
1311 break;
1312 - case SFM_SLASH:
1313 - *target = '\\';
1314 - break;
1315 case SFM_SPACE:
1316 *target = ' ';
1317 break;
1318 diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
1319 index 8407b07428a6..741b83c59a30 100644
1320 --- a/fs/cifs/cifssmb.c
1321 +++ b/fs/cifs/cifssmb.c
1322 @@ -577,10 +577,15 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
1323 }
1324
1325 count = 0;
1326 + /*
1327 + * We know that all the name entries in the protocols array
1328 + * are short (< 16 bytes anyway) and are NUL terminated.
1329 + */
1330 for (i = 0; i < CIFS_NUM_PROT; i++) {
1331 - strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
1332 - count += strlen(protocols[i].name) + 1;
1333 - /* null at end of source and target buffers anyway */
1334 + size_t len = strlen(protocols[i].name) + 1;
1335 +
1336 + memcpy(pSMB->DialectsArray+count, protocols[i].name, len);
1337 + count += len;
1338 }
1339 inc_rfc1001_len(pSMB, count);
1340 pSMB->ByteCount = cpu_to_le16(count);
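
The negotiate fix copies each dialect name with memcpy() over exactly strlen()+1 bytes and accumulates the running count, instead of a fixed 16-byte strncpy(). A small sketch of that packing; the dialect strings here are illustrative:

/* dialect_pack_sketch.c - sketch of packing NUL-terminated names into one
 * buffer: copy strlen()+1 bytes per name and keep a running byte count.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *protocols[] = { "NT LM 0.12", "SMB 2.002", "SMB 2.???" };
    char dialects[64];
    size_t count = 0;

    for (size_t i = 0; i < sizeof(protocols) / sizeof(protocols[0]); i++) {
        size_t len = strlen(protocols[i]) + 1;   /* include the NUL */

        if (count + len > sizeof(dialects))
            break;                               /* would overflow the buffer */
        memcpy(dialects + count, protocols[i], len);
        count += len;
    }
    printf("packed %zu dialect bytes\n", count); /* this becomes ByteCount */
    return 0;
}
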
1341 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
1342 index 323d8e34abde..50559a80acf8 100644
1343 --- a/fs/cifs/misc.c
1344 +++ b/fs/cifs/misc.c
1345 @@ -406,9 +406,17 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
1346 (struct smb_com_transaction_change_notify_rsp *)buf;
1347 struct file_notify_information *pnotify;
1348 __u32 data_offset = 0;
1349 + size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
1350 +
1351 if (get_bcc(buf) > sizeof(struct file_notify_information)) {
1352 data_offset = le32_to_cpu(pSMBr->DataOffset);
1353
1354 + if (data_offset >
1355 + len - sizeof(struct file_notify_information)) {
1356 + cifs_dbg(FYI, "invalid data_offset %u\n",
1357 + data_offset);
1358 + return true;
1359 + }
1360 pnotify = (struct file_notify_information *)
1361 ((char *)&pSMBr->hdr.Protocol + data_offset);
1362 cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
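
The check added above rejects a DataOffset that would place the notify structure beyond the data actually read from the wire. A standalone sketch of validating an untrusted offset before dereferencing; the struct layout and offsets are invented:

/* notify_offset_check.c - sketch of bounds-checking an offset taken from a
 * network packet: the offset must leave room for the structure it claims
 * to point at, otherwise the packet is rejected.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct file_notify_info {       /* stand-in for file_notify_information */
    uint32_t next_entry_offset;
    uint32_t action;
};

static int parse_notify(const unsigned char *buf, size_t len, uint32_t data_offset)
{
    struct file_notify_info info;

    if (len < sizeof(info) || data_offset > len - sizeof(info))
        return -1;                          /* invalid data_offset: drop it */

    memcpy(&info, buf + data_offset, sizeof(info));
    printf("notify action: 0x%x\n", info.action);
    return 0;
}

int main(void)
{
    unsigned char pkt[64] = { 0 };

    pkt[36] = 0x02;                         /* action field of a struct at offset 32 */
    if (parse_notify(pkt, sizeof(pkt), 32))
        printf("rejected\n");
    if (parse_notify(pkt, sizeof(pkt), 60)) /* claims data past the end */
        printf("rejected\n");
    return 0;
}
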
1363 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
1364 index 68622f1e706b..08c1c86c2ad9 100644
1365 --- a/fs/cifs/smb2ops.c
1366 +++ b/fs/cifs/smb2ops.c
1367 @@ -989,7 +989,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
1368 }
1369
1370 srch_inf->entries_in_buffer = 0;
1371 - srch_inf->index_of_last_entry = 0;
1372 + srch_inf->index_of_last_entry = 2;
1373
1374 rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
1375 fid->volatile_fid, 0, srch_inf);
1376 diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
1377 index 3f828a187049..0cc30a56c3e6 100644
1378 --- a/fs/ocfs2/dlm/dlmmaster.c
1379 +++ b/fs/ocfs2/dlm/dlmmaster.c
1380 @@ -589,9 +589,9 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
1381
1382 res->last_used = 0;
1383
1384 - spin_lock(&dlm->spinlock);
1385 + spin_lock(&dlm->track_lock);
1386 list_add_tail(&res->tracking, &dlm->tracking_list);
1387 - spin_unlock(&dlm->spinlock);
1388 + spin_unlock(&dlm->track_lock);
1389
1390 memset(res->lvb, 0, DLM_LVB_LEN);
1391 memset(res->refmap, 0, sizeof(res->refmap));
1392 diff --git a/fs/proc/base.c b/fs/proc/base.c
1393 index 591bf2b1ab66..79702d405ba7 100644
1394 --- a/fs/proc/base.c
1395 +++ b/fs/proc/base.c
1396 @@ -454,6 +454,20 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
1397 int err;
1398 int i;
1399
1400 + /*
1401 + * The ability to racily run the kernel stack unwinder on a running task
1402 + * and then observe the unwinder output is scary; while it is useful for
1403 + * debugging kernel issues, it can also allow an attacker to leak kernel
1404 + * stack contents.
1405 + * Doing this in a manner that is at least safe from races would require
1406 + * some work to ensure that the remote task can not be scheduled; and
1407 + * even then, this would still expose the unwinder as local attack
1408 + * surface.
1409 + * Therefore, this interface is restricted to root.
1410 + */
1411 + if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
1412 + return -EACCES;
1413 +
1414 entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
1415 if (!entries)
1416 return -ENOMEM;
1417 diff --git a/fs/xattr.c b/fs/xattr.c
1418 index 093998872329..2f6423182301 100644
1419 --- a/fs/xattr.c
1420 +++ b/fs/xattr.c
1421 @@ -953,17 +953,19 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
1422 int err = 0;
1423
1424 #ifdef CONFIG_FS_POSIX_ACL
1425 - if (inode->i_acl) {
1426 - err = xattr_list_one(&buffer, &remaining_size,
1427 - XATTR_NAME_POSIX_ACL_ACCESS);
1428 - if (err)
1429 - return err;
1430 - }
1431 - if (inode->i_default_acl) {
1432 - err = xattr_list_one(&buffer, &remaining_size,
1433 - XATTR_NAME_POSIX_ACL_DEFAULT);
1434 - if (err)
1435 - return err;
1436 + if (IS_POSIXACL(inode)) {
1437 + if (inode->i_acl) {
1438 + err = xattr_list_one(&buffer, &remaining_size,
1439 + XATTR_NAME_POSIX_ACL_ACCESS);
1440 + if (err)
1441 + return err;
1442 + }
1443 + if (inode->i_default_acl) {
1444 + err = xattr_list_one(&buffer, &remaining_size,
1445 + XATTR_NAME_POSIX_ACL_DEFAULT);
1446 + if (err)
1447 + return err;
1448 + }
1449 }
1450 #endif
1451
1452 diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
1453 index c2a0f0072274..734377ad42e9 100644
1454 --- a/include/linux/jiffies.h
1455 +++ b/include/linux/jiffies.h
1456 @@ -292,6 +292,8 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
1457 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
1458 }
1459
1460 +extern u64 jiffies64_to_nsecs(u64 j);
1461 +
1462 extern unsigned long __msecs_to_jiffies(const unsigned int m);
1463 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
1464 /*
1465 diff --git a/kernel/time/time.c b/kernel/time/time.c
1466 index 39468651a064..a5b6d98ea7b1 100644
1467 --- a/kernel/time/time.c
1468 +++ b/kernel/time/time.c
1469 @@ -704,6 +704,16 @@ u64 nsec_to_clock_t(u64 x)
1470 #endif
1471 }
1472
1473 +u64 jiffies64_to_nsecs(u64 j)
1474 +{
1475 +#if !(NSEC_PER_SEC % HZ)
1476 + return (NSEC_PER_SEC / HZ) * j;
1477 +# else
1478 + return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN);
1479 +#endif
1480 +}
1481 +EXPORT_SYMBOL(jiffies64_to_nsecs);
1482 +
1483 /**
1484 * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
1485 *
1486 diff --git a/kernel/time/timeconst.bc b/kernel/time/timeconst.bc
1487 index c48688904f9f..f83bbb81600b 100644
1488 --- a/kernel/time/timeconst.bc
1489 +++ b/kernel/time/timeconst.bc
1490 @@ -98,6 +98,12 @@ define timeconst(hz) {
1491 print "#define HZ_TO_USEC_DEN\t\t", hz/cd, "\n"
1492 print "#define USEC_TO_HZ_NUM\t\t", hz/cd, "\n"
1493 print "#define USEC_TO_HZ_DEN\t\t", 1000000/cd, "\n"
1494 +
1495 + cd=gcd(hz,1000000000)
1496 + print "#define HZ_TO_NSEC_NUM\t\t", 1000000000/cd, "\n"
1497 + print "#define HZ_TO_NSEC_DEN\t\t", hz/cd, "\n"
1498 + print "#define NSEC_TO_HZ_NUM\t\t", hz/cd, "\n"
1499 + print "#define NSEC_TO_HZ_DEN\t\t", 1000000000/cd, "\n"
1500 print "\n"
1501
1502 print "#endif /* KERNEL_TIMECONST_H */\n"
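
jiffies64_to_nsecs() multiplies by HZ_TO_NSEC_NUM and divides by HZ_TO_NSEC_DEN, where timeconst.bc reduces NSEC_PER_SEC/HZ by their gcd at build time. A sketch that derives the same constants at run time, for an HZ that does not divide 10^9 evenly:

/* jiffies_to_nsecs_sketch.c - sketch of the HZ_TO_NSEC_NUM/DEN constants
 * and the jiffies64_to_nsecs() computation; the kernel bakes the reduced
 * fraction in at build time, this derives it at run time.
 */
#include <inttypes.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ           300ULL          /* an HZ where NSEC_PER_SEC % HZ != 0 */

static uint64_t gcd64(uint64_t a, uint64_t b)
{
    while (b) {
        uint64_t t = a % b;
        a = b;
        b = t;
    }
    return a;
}

int main(void)
{
    uint64_t cd  = gcd64(HZ, NSEC_PER_SEC);
    uint64_t num = NSEC_PER_SEC / cd;    /* HZ_TO_NSEC_NUM */
    uint64_t den = HZ / cd;              /* HZ_TO_NSEC_DEN */
    uint64_t j   = 12345;                /* some jiffies64 value */

    printf("HZ_TO_NSEC_NUM=%" PRIu64 " HZ_TO_NSEC_DEN=%" PRIu64 "\n", num, den);
    printf("jiffies64_to_nsecs(%" PRIu64 ") = %" PRIu64 " ns\n",
           j, j * num / den);
    return 0;
}
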
1503 diff --git a/mm/madvise.c b/mm/madvise.c
1504 index a49afe08698b..4a01c4bd786c 100644
1505 --- a/mm/madvise.c
1506 +++ b/mm/madvise.c
1507 @@ -81,7 +81,7 @@ static long madvise_behavior(struct vm_area_struct *vma,
1508 new_flags |= VM_DONTDUMP;
1509 break;
1510 case MADV_DODUMP:
1511 - if (new_flags & VM_SPECIAL) {
1512 + if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
1513 error = -EINVAL;
1514 goto out;
1515 }
1516 diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
1517 index a5acaf1efaab..0c0695eb2609 100644
1518 --- a/net/mac80211/ibss.c
1519 +++ b/net/mac80211/ibss.c
1520 @@ -948,8 +948,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
1521 if (len < IEEE80211_DEAUTH_FRAME_LEN)
1522 return;
1523
1524 - ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
1525 - mgmt->sa, mgmt->da, mgmt->bssid, reason);
1526 + ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
1527 + ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason);
1528 sta_info_destroy_addr(sdata, mgmt->sa);
1529 }
1530
1531 @@ -967,9 +967,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
1532 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
1533 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
1534
1535 - ibss_dbg(sdata,
1536 - "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
1537 - mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
1538 + ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
1539 + ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n",
1540 + mgmt->bssid, auth_transaction);
1541
1542 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
1543 return;
1544 @@ -1176,10 +1176,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1545 rx_timestamp = drv_get_tsf(local, sdata);
1546 }
1547
1548 - ibss_dbg(sdata,
1549 - "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
1550 + ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n",
1551 mgmt->sa, mgmt->bssid,
1552 - (unsigned long long)rx_timestamp,
1553 + (unsigned long long)rx_timestamp);
1554 + ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n",
1555 (unsigned long long)beacon_timestamp,
1556 (unsigned long long)(rx_timestamp - beacon_timestamp),
1557 jiffies);
1558 @@ -1538,9 +1538,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
1559
1560 tx_last_beacon = drv_tx_last_beacon(local);
1561
1562 - ibss_dbg(sdata,
1563 - "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
1564 - mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
1565 + ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
1566 + ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n",
1567 + mgmt->bssid, tx_last_beacon);
1568
1569 if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
1570 return;
1571 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
1572 index 2bb6899854d4..e3bbfb20ae82 100644
1573 --- a/net/mac80211/main.c
1574 +++ b/net/mac80211/main.c
1575 @@ -254,8 +254,27 @@ static void ieee80211_restart_work(struct work_struct *work)
1576 "%s called with hardware scan in progress\n", __func__);
1577
1578 rtnl_lock();
1579 - list_for_each_entry(sdata, &local->interfaces, list)
1580 + list_for_each_entry(sdata, &local->interfaces, list) {
1581 + /*
1582 + * XXX: there may be more work for other vif types and even
1583 + * for station mode: a good thing would be to run most of
1584 + * the iface type's dependent _stop (ieee80211_mg_stop,
1585 + * ieee80211_ibss_stop) etc...
1586 + * For now, fix only the specific bug that was seen: race
1587 + * between csa_connection_drop_work and us.
1588 + */
1589 + if (sdata->vif.type == NL80211_IFTYPE_STATION) {
1590 + /*
1591 + * This worker is scheduled from the iface worker that
1592 + * runs on mac80211's workqueue, so we can't be
1593 + * scheduling this worker after the cancel right here.
1594 + * The exception is ieee80211_chswitch_done.
1595 + * Then we can have a race...
1596 + */
1597 + cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
1598 + }
1599 flush_delayed_work(&sdata->dec_tailroom_needed_wk);
1600 + }
1601 ieee80211_scan_cancel(local);
1602
1603 /* make sure any new ROC will consider local->in_reconfig */
1604 @@ -466,10 +485,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = {
1605 cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
1606 IEEE80211_VHT_CAP_SHORT_GI_80 |
1607 IEEE80211_VHT_CAP_SHORT_GI_160 |
1608 - IEEE80211_VHT_CAP_RXSTBC_1 |
1609 - IEEE80211_VHT_CAP_RXSTBC_2 |
1610 - IEEE80211_VHT_CAP_RXSTBC_3 |
1611 - IEEE80211_VHT_CAP_RXSTBC_4 |
1612 + IEEE80211_VHT_CAP_RXSTBC_MASK |
1613 IEEE80211_VHT_CAP_TXSTBC |
1614 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
1615 IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
1616 @@ -1164,6 +1180,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1617 #if IS_ENABLED(CONFIG_IPV6)
1618 unregister_inet6addr_notifier(&local->ifa6_notifier);
1619 #endif
1620 + ieee80211_txq_teardown_flows(local);
1621
1622 rtnl_lock();
1623
1624 @@ -1191,7 +1208,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1625 skb_queue_purge(&local->skb_queue);
1626 skb_queue_purge(&local->skb_queue_unreliable);
1627 skb_queue_purge(&local->skb_queue_tdls_chsw);
1628 - ieee80211_txq_teardown_flows(local);
1629
1630 destroy_workqueue(local->workqueue);
1631 wiphy_unregister(local->hw.wiphy);
1632 diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
1633 index fed598a202c8..b0acb2961e80 100644
1634 --- a/net/mac80211/mesh_hwmp.c
1635 +++ b/net/mac80211/mesh_hwmp.c
1636 @@ -563,6 +563,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
1637 forward = false;
1638 reply = true;
1639 target_metric = 0;
1640 +
1641 + if (SN_GT(target_sn, ifmsh->sn))
1642 + ifmsh->sn = target_sn;
1643 +
1644 if (time_after(jiffies, ifmsh->last_sn_update +
1645 net_traversal_jiffies(sdata)) ||
1646 time_before(jiffies, ifmsh->last_sn_update)) {
1647 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
1648 index e6f42d12222e..39451c84c785 100644
1649 --- a/net/mac80211/mlme.c
1650 +++ b/net/mac80211/mlme.c
1651 @@ -989,6 +989,10 @@ static void ieee80211_chswitch_work(struct work_struct *work)
1652 */
1653
1654 if (sdata->reserved_chanctx) {
1655 + struct ieee80211_supported_band *sband = NULL;
1656 + struct sta_info *mgd_sta = NULL;
1657 + enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
1658 +
1659 /*
1660 * with multi-vif csa driver may call ieee80211_csa_finish()
1661 * many times while waiting for other interfaces to use their
1662 @@ -997,6 +1001,48 @@ static void ieee80211_chswitch_work(struct work_struct *work)
1663 if (sdata->reserved_ready)
1664 goto out;
1665
1666 + if (sdata->vif.bss_conf.chandef.width !=
1667 + sdata->csa_chandef.width) {
1668 + /*
1669 + * For managed interface, we need to also update the AP
1670 + * station bandwidth and align the rate scale algorithm
1671 + * on the bandwidth change. Here we only consider the
1672 + * bandwidth of the new channel definition (as channel
1673 + * switch flow does not have the full HT/VHT/HE
1674 + * information), assuming that if additional changes are
1675 + * required they would be done as part of the processing
1676 + * of the next beacon from the AP.
1677 + */
1678 + switch (sdata->csa_chandef.width) {
1679 + case NL80211_CHAN_WIDTH_20_NOHT:
1680 + case NL80211_CHAN_WIDTH_20:
1681 + default:
1682 + bw = IEEE80211_STA_RX_BW_20;
1683 + break;
1684 + case NL80211_CHAN_WIDTH_40:
1685 + bw = IEEE80211_STA_RX_BW_40;
1686 + break;
1687 + case NL80211_CHAN_WIDTH_80:
1688 + bw = IEEE80211_STA_RX_BW_80;
1689 + break;
1690 + case NL80211_CHAN_WIDTH_80P80:
1691 + case NL80211_CHAN_WIDTH_160:
1692 + bw = IEEE80211_STA_RX_BW_160;
1693 + break;
1694 + }
1695 +
1696 + mgd_sta = sta_info_get(sdata, ifmgd->bssid);
1697 + sband =
1698 + local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
1699 + }
1700 +
1701 + if (sdata->vif.bss_conf.chandef.width >
1702 + sdata->csa_chandef.width) {
1703 + mgd_sta->sta.bandwidth = bw;
1704 + rate_control_rate_update(local, sband, mgd_sta,
1705 + IEEE80211_RC_BW_CHANGED);
1706 + }
1707 +
1708 ret = ieee80211_vif_use_reserved_context(sdata);
1709 if (ret) {
1710 sdata_info(sdata,
1711 @@ -1007,6 +1053,13 @@ static void ieee80211_chswitch_work(struct work_struct *work)
1712 goto out;
1713 }
1714
1715 + if (sdata->vif.bss_conf.chandef.width <
1716 + sdata->csa_chandef.width) {
1717 + mgd_sta->sta.bandwidth = bw;
1718 + rate_control_rate_update(local, sband, mgd_sta,
1719 + IEEE80211_RC_BW_CHANGED);
1720 + }
1721 +
1722 goto out;
1723 }
1724
1725 @@ -1229,6 +1282,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1726 cbss->beacon_interval));
1727 return;
1728 drop_connection:
1729 + /*
1730 + * This is just so that the disconnect flow will know that
1731 + * we were trying to switch channel and failed. In case the
1732 + * mode is 1 (we are not allowed to Tx), we will know not to
1733 + * send a deauthentication frame. Those two fields will be
1734 + * reset when the disconnection worker runs.
1735 + */
1736 + sdata->vif.csa_active = true;
1737 + sdata->csa_block_tx = csa_ie.mode;
1738 +
1739 ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
1740 mutex_unlock(&local->chanctx_mtx);
1741 mutex_unlock(&local->mtx);
1742 @@ -2401,6 +2464,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
1743 struct ieee80211_local *local = sdata->local;
1744 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1745 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
1746 + bool tx;
1747
1748 sdata_lock(sdata);
1749 if (!ifmgd->associated) {
1750 @@ -2408,6 +2472,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
1751 return;
1752 }
1753
1754 + tx = !sdata->csa_block_tx;
1755 +
1756 /* AP is probably out of range (or not reachable for another reason) so
1757 * remove the bss struct for that AP.
1758 */
1759 @@ -2415,7 +2481,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
1760
1761 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
1762 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
1763 - true, frame_buf);
1764 + tx, frame_buf);
1765 mutex_lock(&local->mtx);
1766 sdata->vif.csa_active = false;
1767 ifmgd->csa_waiting_bcn = false;
1768 @@ -2426,7 +2492,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
1769 }
1770 mutex_unlock(&local->mtx);
1771
1772 - ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
1773 + ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
1774 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
1775
1776 sdata_unlock(sdata);
1777 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
1778 index 6afac189d20f..0e91ec49d3da 100644
1779 --- a/net/wireless/nl80211.c
1780 +++ b/net/wireless/nl80211.c
1781 @@ -11148,6 +11148,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info)
1782 return -EOPNOTSUPP;
1783
1784 if (!info->attrs[NL80211_ATTR_MDID] ||
1785 + !info->attrs[NL80211_ATTR_IE] ||
1786 !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
1787 return -EINVAL;
1788
1789 diff --git a/net/wireless/util.c b/net/wireless/util.c
1790 index bb54d9db82df..3b9a81998014 100644
1791 --- a/net/wireless/util.c
1792 +++ b/net/wireless/util.c
1793 @@ -1432,7 +1432,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
1794 u8 *op_class)
1795 {
1796 u8 vht_opclass;
1797 - u16 freq = chandef->center_freq1;
1798 + u32 freq = chandef->center_freq1;
1799
1800 if (freq >= 2412 && freq <= 2472) {
1801 if (chandef->width > NL80211_CHAN_WIDTH_40)
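Note: cfg80211 stores center_freq1 as a 32-bit value, so reading it into a u16 local would silently truncate any value above 65535; widening the local to u32 matches the struct field and avoids that narrowing. A short illustration of the truncation, for illustration only (70000 is a made-up value, not a real channel frequency):

/* Illustration of unsigned narrowing only; the value is made up. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t freq32 = 70000;		/* above UINT16_MAX */
	uint16_t freq16 = (uint16_t)freq32;	/* wraps to 70000 - 65536 = 4464 */

	printf("u32 %u -> u16 %u\n", (unsigned)freq32, (unsigned)freq16);
	return 0;
}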
1802 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1803 index f03a1430a3cb..ca2945711dbe 100644
1804 --- a/sound/pci/hda/patch_realtek.c
1805 +++ b/sound/pci/hda/patch_realtek.c
1806 @@ -5698,6 +5698,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
1807 SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
1808 SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
1809 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
1810 + SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
1811 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
1812 SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
1813 SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
1814 diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
1815 index de477a3dc968..01a288c79dc5 100644
1816 --- a/tools/perf/arch/powerpc/util/sym-handling.c
1817 +++ b/tools/perf/arch/powerpc/util/sym-handling.c
1818 @@ -21,15 +21,16 @@ bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
1819
1820 #endif
1821
1822 -#if !defined(_CALL_ELF) || _CALL_ELF != 2
1823 int arch__choose_best_symbol(struct symbol *syma,
1824 struct symbol *symb __maybe_unused)
1825 {
1826 char *sym = syma->name;
1827
1828 +#if !defined(_CALL_ELF) || _CALL_ELF != 2
1829 /* Skip over any initial dot */
1830 if (*sym == '.')
1831 sym++;
1832 +#endif
1833
1834 /* Avoid "SyS" kernel syscall aliases */
1835 if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
1836 @@ -40,6 +41,7 @@ int arch__choose_best_symbol(struct symbol *syma,
1837 return SYMBOL_A;
1838 }
1839
1840 +#if !defined(_CALL_ELF) || _CALL_ELF != 2
1841 /* Allow matching against dot variants */
1842 int arch__compare_symbol_names(const char *namea, const char *nameb)
1843 {
1844 diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
1845 index f55d10854565..3be8c489884e 100644
1846 --- a/tools/perf/util/evsel.c
1847 +++ b/tools/perf/util/evsel.c
1848 @@ -241,8 +241,9 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
1849 {
1850 struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
1851
1852 - if (evsel != NULL)
1853 - perf_evsel__init(evsel, attr, idx);
1854 + if (!evsel)
1855 + return NULL;
1856 + perf_evsel__init(evsel, attr, idx);
1857
1858 if (perf_evsel__is_bpf_output(evsel)) {
1859 evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
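Note: the change above turns a conditional initialisation into an early return; the perf_evsel__is_bpf_output(evsel) check a few lines below dereferences evsel unconditionally, so a failed zalloc() would previously have led to a NULL pointer dereference. A generic sketch of the early-return pattern, for illustration only (struct widget and its fields are made up):

/* Generic early-return sketch; the widget type is made up. */
#include <stdlib.h>

struct widget {
	int flags;
};

static struct widget *widget_new(void)
{
	struct widget *w = calloc(1, sizeof(*w));

	if (!w)
		return NULL;	/* bail out before anything dereferences w */

	w->flags = 1;		/* from here on w is known to be valid */
	return w;
}

int main(void)
{
	free(widget_new());
	return 0;
}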
1860 diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
1861 index e92903fc7113..6d5bcbaf6193 100644
1862 --- a/tools/vm/page-types.c
1863 +++ b/tools/vm/page-types.c
1864 @@ -155,12 +155,6 @@ static const char * const page_flag_names[] = {
1865 };
1866
1867
1868 -static const char * const debugfs_known_mountpoints[] = {
1869 - "/sys/kernel/debug",
1870 - "/debug",
1871 - 0,
1872 -};
1873 -
1874 /*
1875 * data structures
1876 */
1877 diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
1878 index b9d34b37c017..6975ec43913b 100644
1879 --- a/tools/vm/slabinfo.c
1880 +++ b/tools/vm/slabinfo.c
1881 @@ -29,8 +29,8 @@ struct slabinfo {
1882 int alias;
1883 int refs;
1884 int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
1885 - int hwcache_align, object_size, objs_per_slab;
1886 - int sanity_checks, slab_size, store_user, trace;
1887 + unsigned int hwcache_align, object_size, objs_per_slab;
1888 + unsigned int sanity_checks, slab_size, store_user, trace;
1889 int order, poison, reclaim_account, red_zone;
1890 unsigned long partial, objects, slabs, objects_partial, objects_total;
1891 unsigned long alloc_fastpath, alloc_slowpath;