Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0171-4.19.72-all-fixes.patch

Parent Directory | Revision Log


Revision 3468 - (show annotations) (download)
Tue Oct 29 10:31:29 2019 UTC (4 years, 6 months ago) by niro
File size: 68167 byte(s)
-linux-4.19.72
1 diff --git a/Makefile b/Makefile
2 index f6c9d5757470..ef80b1dfb753 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 19
9 -SUBLEVEL = 71
10 +SUBLEVEL = 72
11 EXTRAVERSION =
12 NAME = "People's Front"
13
14 diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
15 index f8debf7aeb4c..76e1edf5bf12 100644
16 --- a/arch/x86/boot/compressed/pgtable_64.c
17 +++ b/arch/x86/boot/compressed/pgtable_64.c
18 @@ -73,6 +73,8 @@ static unsigned long find_trampoline_placement(void)
19
20 /* Find the first usable memory region under bios_start. */
21 for (i = boot_params->e820_entries - 1; i >= 0; i--) {
22 + unsigned long new = bios_start;
23 +
24 entry = &boot_params->e820_table[i];
25
26 /* Skip all entries above bios_start. */
27 @@ -85,15 +87,20 @@ static unsigned long find_trampoline_placement(void)
28
29 /* Adjust bios_start to the end of the entry if needed. */
30 if (bios_start > entry->addr + entry->size)
31 - bios_start = entry->addr + entry->size;
32 + new = entry->addr + entry->size;
33
34 /* Keep bios_start page-aligned. */
35 - bios_start = round_down(bios_start, PAGE_SIZE);
36 + new = round_down(new, PAGE_SIZE);
37
38 /* Skip the entry if it's too small. */
39 - if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr)
40 + if (new - TRAMPOLINE_32BIT_SIZE < entry->addr)
41 continue;
42
43 + /* Protect against underflow. */
44 + if (new - TRAMPOLINE_32BIT_SIZE > bios_start)
45 + break;
46 +
47 + bios_start = new;
48 break;
49 }
50
51 diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
52 index d3983fdf1012..8fa49cf1211d 100644
53 --- a/arch/x86/include/asm/bootparam_utils.h
54 +++ b/arch/x86/include/asm/bootparam_utils.h
55 @@ -71,6 +71,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
56 BOOT_PARAM_PRESERVE(eddbuf_entries),
57 BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
58 BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
59 + BOOT_PARAM_PRESERVE(secure_boot),
60 BOOT_PARAM_PRESERVE(hdr),
61 BOOT_PARAM_PRESERVE(e820_table),
62 BOOT_PARAM_PRESERVE(eddbuf),
63 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
64 index 90be3a1506d3..b316bd61a6ac 100644
65 --- a/arch/x86/kernel/apic/apic.c
66 +++ b/arch/x86/kernel/apic/apic.c
67 @@ -1140,10 +1140,6 @@ void clear_local_APIC(void)
68 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
69 v = apic_read(APIC_LVT1);
70 apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
71 - if (!x2apic_enabled()) {
72 - v = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
73 - apic_write(APIC_LDR, v);
74 - }
75 if (maxlvt >= 4) {
76 v = apic_read(APIC_LVTPC);
77 apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
78 diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
79 index ec9e03a6b778..9e70f7c7e565 100644
80 --- a/drivers/bluetooth/btqca.c
81 +++ b/drivers/bluetooth/btqca.c
82 @@ -363,6 +363,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
83 return err;
84 }
85
86 + /* Give the controller some time to get ready to receive the NVM */
87 + msleep(10);
88 +
89 /* Download NVM configuration */
90 config.type = TLV_TYPE_NVM;
91 if (soc_type == QCA_WCN3990)
92 diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
93 index fd83046d8376..f6389479fccb 100644
94 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
95 +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
96 @@ -220,6 +220,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
97 struct mtk_drm_private *private = drm->dev_private;
98 struct platform_device *pdev;
99 struct device_node *np;
100 + struct device *dma_dev;
101 int ret;
102
103 if (!iommu_present(&platform_bus_type))
104 @@ -282,7 +283,29 @@ static int mtk_drm_kms_init(struct drm_device *drm)
105 goto err_component_unbind;
106 }
107
108 - private->dma_dev = &pdev->dev;
109 + dma_dev = &pdev->dev;
110 + private->dma_dev = dma_dev;
111 +
112 + /*
113 + * Configure the DMA segment size to make sure we get contiguous IOVA
114 + * when importing PRIME buffers.
115 + */
116 + if (!dma_dev->dma_parms) {
117 + private->dma_parms_allocated = true;
118 + dma_dev->dma_parms =
119 + devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
120 + GFP_KERNEL);
121 + }
122 + if (!dma_dev->dma_parms) {
123 + ret = -ENOMEM;
124 + goto err_component_unbind;
125 + }
126 +
127 + ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
128 + if (ret) {
129 + dev_err(dma_dev, "Failed to set DMA segment size\n");
130 + goto err_unset_dma_parms;
131 + }
132
133 /*
134 * We don't use the drm_irq_install() helpers provided by the DRM
135 @@ -292,13 +315,16 @@ static int mtk_drm_kms_init(struct drm_device *drm)
136 drm->irq_enabled = true;
137 ret = drm_vblank_init(drm, MAX_CRTC);
138 if (ret < 0)
139 - goto err_component_unbind;
140 + goto err_unset_dma_parms;
141
142 drm_kms_helper_poll_init(drm);
143 drm_mode_config_reset(drm);
144
145 return 0;
146
147 +err_unset_dma_parms:
148 + if (private->dma_parms_allocated)
149 + dma_dev->dma_parms = NULL;
150 err_component_unbind:
151 component_unbind_all(drm->dev, drm);
152 err_config_cleanup:
153 @@ -309,9 +335,14 @@ err_config_cleanup:
154
155 static void mtk_drm_kms_deinit(struct drm_device *drm)
156 {
157 + struct mtk_drm_private *private = drm->dev_private;
158 +
159 drm_kms_helper_poll_fini(drm);
160 drm_atomic_helper_shutdown(drm);
161
162 + if (private->dma_parms_allocated)
163 + private->dma_dev->dma_parms = NULL;
164 +
165 component_unbind_all(drm->dev, drm);
166 drm_mode_config_cleanup(drm);
167 }
168 @@ -327,6 +358,18 @@ static const struct file_operations mtk_drm_fops = {
169 .compat_ioctl = drm_compat_ioctl,
170 };
171
172 +/*
173 + * We need to override this because the device used to import the memory is
174 + * not dev->dev, as drm_gem_prime_import() expects.
175 + */
176 +struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
177 + struct dma_buf *dma_buf)
178 +{
179 + struct mtk_drm_private *private = dev->dev_private;
180 +
181 + return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev);
182 +}
183 +
184 static struct drm_driver mtk_drm_driver = {
185 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
186 DRIVER_ATOMIC,
187 @@ -338,7 +381,7 @@ static struct drm_driver mtk_drm_driver = {
188 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
189 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
190 .gem_prime_export = drm_gem_prime_export,
191 - .gem_prime_import = drm_gem_prime_import,
192 + .gem_prime_import = mtk_drm_gem_prime_import,
193 .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
194 .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
195 .gem_prime_mmap = mtk_drm_gem_mmap_buf,
196 diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
197 index ecc00ca3221d..8fa60d46f860 100644
198 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
199 +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
200 @@ -59,6 +59,8 @@ struct mtk_drm_private {
201 } commit;
202
203 struct drm_atomic_state *suspend_state;
204 +
205 + bool dma_parms_allocated;
206 };
207
208 extern struct platform_driver mtk_ddp_driver;
209 diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
210 index 271f31461da4..6f65f5257236 100644
211 --- a/drivers/hid/hid-cp2112.c
212 +++ b/drivers/hid/hid-cp2112.c
213 @@ -1160,8 +1160,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
214
215 INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
216
217 - cp2112_gpio_direction_input(gc, d->hwirq);
218 -
219 if (!dev->gpio_poll) {
220 dev->gpio_poll = true;
221 schedule_delayed_work(&dev->gpio_poll_worker, 0);
222 @@ -1209,6 +1207,12 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
223 return PTR_ERR(dev->desc[pin]);
224 }
225
226 + ret = cp2112_gpio_direction_input(&dev->gc, pin);
227 + if (ret < 0) {
228 + dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n");
229 + goto err_desc;
230 + }
231 +
232 ret = gpiochip_lock_as_irq(&dev->gc, pin);
233 if (ret) {
234 dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
235 diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
236 index 7eaff4dcbfd7..5bc811b7e6cf 100644
237 --- a/drivers/infiniband/hw/hfi1/fault.c
238 +++ b/drivers/infiniband/hw/hfi1/fault.c
239 @@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
240 if (!data)
241 return -ENOMEM;
242 copy = min(len, datalen - 1);
243 - if (copy_from_user(data, buf, copy))
244 - return -EFAULT;
245 + if (copy_from_user(data, buf, copy)) {
246 + ret = -EFAULT;
247 + goto free_data;
248 + }
249
250 ret = debugfs_file_get(file->f_path.dentry);
251 if (unlikely(ret))
252 - return ret;
253 + goto free_data;
254 ptr = data;
255 token = ptr;
256 for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
257 @@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
258 ret = len;
259
260 debugfs_file_put(file->f_path.dentry);
261 +free_data:
262 kfree(data);
263 return ret;
264 }
265 @@ -214,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
266 return -ENOMEM;
267 ret = debugfs_file_get(file->f_path.dentry);
268 if (unlikely(ret))
269 - return ret;
270 + goto free_data;
271 bit = find_first_bit(fault->opcodes, bitsize);
272 while (bit < bitsize) {
273 zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
274 @@ -232,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
275 data[size - 1] = '\n';
276 data[size] = '\0';
277 ret = simple_read_from_buffer(buf, len, pos, data, size);
278 +free_data:
279 kfree(data);
280 return ret;
281 }
282 diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
283 index e5466d786bb1..5aaa2a6c431b 100644
284 --- a/drivers/infiniband/hw/mlx4/mad.c
285 +++ b/drivers/infiniband/hw/mlx4/mad.c
286 @@ -1668,8 +1668,6 @@ tx_err:
287 tx_buf_size, DMA_TO_DEVICE);
288 kfree(tun_qp->tx_ring[i].buf.addr);
289 }
290 - kfree(tun_qp->tx_ring);
291 - tun_qp->tx_ring = NULL;
292 i = MLX4_NUM_TUNNEL_BUFS;
293 err:
294 while (i > 0) {
295 @@ -1678,6 +1676,8 @@ err:
296 rx_buf_size, DMA_FROM_DEVICE);
297 kfree(tun_qp->ring[i].addr);
298 }
299 + kfree(tun_qp->tx_ring);
300 + tun_qp->tx_ring = NULL;
301 kfree(tun_qp->ring);
302 tun_qp->ring = NULL;
303 return -ENOMEM;
304 diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
305 index a8b9be3e28db..7d0a5ccf5775 100644
306 --- a/drivers/input/serio/hyperv-keyboard.c
307 +++ b/drivers/input/serio/hyperv-keyboard.c
308 @@ -245,40 +245,17 @@ static void hv_kbd_handle_received_packet(struct hv_device *hv_dev,
309
310 static void hv_kbd_on_channel_callback(void *context)
311 {
312 + struct vmpacket_descriptor *desc;
313 struct hv_device *hv_dev = context;
314 - void *buffer;
315 - int bufferlen = 0x100; /* Start with sensible size */
316 u32 bytes_recvd;
317 u64 req_id;
318 - int error;
319
320 - buffer = kmalloc(bufferlen, GFP_ATOMIC);
321 - if (!buffer)
322 - return;
323 -
324 - while (1) {
325 - error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen,
326 - &bytes_recvd, &req_id);
327 - switch (error) {
328 - case 0:
329 - if (bytes_recvd == 0) {
330 - kfree(buffer);
331 - return;
332 - }
333 -
334 - hv_kbd_handle_received_packet(hv_dev, buffer,
335 - bytes_recvd, req_id);
336 - break;
337 + foreach_vmbus_pkt(desc, hv_dev->channel) {
338 + bytes_recvd = desc->len8 * 8;
339 + req_id = desc->trans_id;
340
341 - case -ENOBUFS:
342 - kfree(buffer);
343 - /* Handle large packet */
344 - bufferlen = bytes_recvd;
345 - buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
346 - if (!buffer)
347 - return;
348 - break;
349 - }
350 + hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd,
351 + req_id);
352 }
353 }
354
355 diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
356 index 6aeb1045c302..1ab40c97403b 100644
357 --- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
358 +++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
359 @@ -10,7 +10,7 @@
360
361 #include "cavium_ptp.h"
362
363 -#define DRV_NAME "Cavium PTP Driver"
364 +#define DRV_NAME "cavium_ptp"
365
366 #define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C
367 #define PCI_DEVICE_ID_CAVIUM_RST 0xA00E
368 diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
369 index 8f746e1348d4..3deb3c07681f 100644
370 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
371 +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
372 @@ -238,8 +238,10 @@ int octeon_setup_iq(struct octeon_device *oct,
373 }
374
375 oct->num_iqs++;
376 - if (oct->fn_list.enable_io_queues(oct))
377 + if (oct->fn_list.enable_io_queues(oct)) {
378 + octeon_delete_instr_queue(oct, iq_no);
379 return 1;
380 + }
381
382 return 0;
383 }
384 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
385 index 0f72f9c4ec74..b429b726b987 100644
386 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
387 +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
388 @@ -3276,8 +3276,10 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
389 return -ENOMEM;
390
391 err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
392 - if (err)
393 + if (err) {
394 + kvfree(t);
395 return err;
396 + }
397
398 bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
399 kvfree(t);
400 diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
401 index f70cb4d3c684..40ad1e503255 100644
402 --- a/drivers/net/ethernet/ibm/ibmveth.c
403 +++ b/drivers/net/ethernet/ibm/ibmveth.c
404 @@ -1618,7 +1618,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
405 struct net_device *netdev;
406 struct ibmveth_adapter *adapter;
407 unsigned char *mac_addr_p;
408 - unsigned int *mcastFilterSize_p;
409 + __be32 *mcastFilterSize_p;
410 long ret;
411 unsigned long ret_attr;
412
413 @@ -1640,8 +1640,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
414 return -EINVAL;
415 }
416
417 - mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
418 - VETH_MCAST_FILTER_SIZE, NULL);
419 + mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
420 + VETH_MCAST_FILTER_SIZE,
421 + NULL);
422 if (!mcastFilterSize_p) {
423 dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
424 "attribute\n");
425 @@ -1658,7 +1659,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
426
427 adapter->vdev = dev;
428 adapter->netdev = netdev;
429 - adapter->mcastFilterSize = *mcastFilterSize_p;
430 + adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
431 adapter->pool_config = 0;
432
433 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
434 diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
435 index 0ae43d27cdcf..255de7d68cd3 100644
436 --- a/drivers/net/ethernet/ibm/ibmvnic.c
437 +++ b/drivers/net/ethernet/ibm/ibmvnic.c
438 @@ -1586,6 +1586,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
439 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
440 (u64)tx_buff->indir_dma,
441 (u64)num_entries);
442 + dma_unmap_single(dev, tx_buff->indir_dma,
443 + sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
444 } else {
445 tx_buff->num_entries = num_entries;
446 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
447 @@ -2747,7 +2749,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
448 union sub_crq *next;
449 int index;
450 int i, j;
451 - u8 *first;
452
453 restart_loop:
454 while (pending_scrq(adapter, scrq)) {
455 @@ -2777,14 +2778,6 @@ restart_loop:
456
457 txbuff->data_dma[j] = 0;
458 }
459 - /* if sub_crq was sent indirectly */
460 - first = &txbuff->indir_arr[0].generic.first;
461 - if (*first == IBMVNIC_CRQ_CMD) {
462 - dma_unmap_single(dev, txbuff->indir_dma,
463 - sizeof(txbuff->indir_arr),
464 - DMA_TO_DEVICE);
465 - *first = 0;
466 - }
467
468 if (txbuff->last_frag) {
469 dev_kfree_skb_any(txbuff->skb);
470 diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
471 index b2d2ec8c11e2..6789eed78ff7 100644
472 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
473 +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
474 @@ -3922,7 +3922,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
475 * setup (if available). */
476 status = myri10ge_request_irq(mgp);
477 if (status != 0)
478 - goto abort_with_firmware;
479 + goto abort_with_slices;
480 myri10ge_free_irq(mgp);
481
482 /* Save configuration space to be restored if the
483 diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
484 index 5f092bbd0514..5462d2e8a1b7 100644
485 --- a/drivers/net/ethernet/renesas/ravb_main.c
486 +++ b/drivers/net/ethernet/renesas/ravb_main.c
487 @@ -1,7 +1,7 @@
488 // SPDX-License-Identifier: GPL-2.0
489 /* Renesas Ethernet AVB device driver
490 *
491 - * Copyright (C) 2014-2015 Renesas Electronics Corporation
492 + * Copyright (C) 2014-2019 Renesas Electronics Corporation
493 * Copyright (C) 2015 Renesas Solutions Corp.
494 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
495 *
496 @@ -514,7 +514,10 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
497 kfree(ts_skb);
498 if (tag == tfa_tag) {
499 skb_tstamp_tx(skb, &shhwtstamps);
500 + dev_consume_skb_any(skb);
501 break;
502 + } else {
503 + dev_kfree_skb_any(skb);
504 }
505 }
506 ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
507 @@ -1556,7 +1559,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
508 DMA_TO_DEVICE);
509 goto unmap;
510 }
511 - ts_skb->skb = skb;
512 + ts_skb->skb = skb_get(skb);
513 ts_skb->tag = priv->ts_skb_tag++;
514 priv->ts_skb_tag &= 0x3ff;
515 list_add_tail(&ts_skb->list, &priv->ts_skb_list);
516 @@ -1685,6 +1688,7 @@ static int ravb_close(struct net_device *ndev)
517 /* Clear the timestamp list */
518 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
519 list_del(&ts_skb->list);
520 + kfree_skb(ts_skb->skb);
521 kfree(ts_skb);
522 }
523
524 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
525 index 3b174eae77c1..f45df6df6932 100644
526 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
527 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
528 @@ -1203,10 +1203,8 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
529 int ret;
530 struct device *dev = &bsp_priv->pdev->dev;
531
532 - if (!ldo) {
533 - dev_err(dev, "no regulator found\n");
534 - return -1;
535 - }
536 + if (!ldo)
537 + return 0;
538
539 if (enable) {
540 ret = regulator_enable(ldo);
541 diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
542 index cce9c9ed46aa..9146068979d2 100644
543 --- a/drivers/net/ethernet/toshiba/tc35815.c
544 +++ b/drivers/net/ethernet/toshiba/tc35815.c
545 @@ -1497,7 +1497,7 @@ tc35815_rx(struct net_device *dev, int limit)
546 pci_unmap_single(lp->pci_dev,
547 lp->rx_skbs[cur_bd].skb_dma,
548 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
549 - if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
550 + if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
551 memmove(skb->data, skb->data - NET_IP_ALIGN,
552 pkt_len);
553 data = skb_put(skb, pkt_len);
554 diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
555 index edcd1e60b30d..f076050c8ad3 100644
556 --- a/drivers/net/ethernet/tundra/tsi108_eth.c
557 +++ b/drivers/net/ethernet/tundra/tsi108_eth.c
558 @@ -383,9 +383,10 @@ tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
559 static void tsi108_stat_carry(struct net_device *dev)
560 {
561 struct tsi108_prv_data *data = netdev_priv(dev);
562 + unsigned long flags;
563 u32 carry1, carry2;
564
565 - spin_lock_irq(&data->misclock);
566 + spin_lock_irqsave(&data->misclock, flags);
567
568 carry1 = TSI_READ(TSI108_STAT_CARRY1);
569 carry2 = TSI_READ(TSI108_STAT_CARRY2);
570 @@ -453,7 +454,7 @@ static void tsi108_stat_carry(struct net_device *dev)
571 TSI108_STAT_TXPAUSEDROP_CARRY,
572 &data->tx_pause_drop);
573
574 - spin_unlock_irq(&data->misclock);
575 + spin_unlock_irqrestore(&data->misclock, flags);
576 }
577
578 /* Read a stat counter atomically with respect to carries.
579 diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
580 index cc60ef9634db..6f6c0dbd91fc 100644
581 --- a/drivers/net/hyperv/netvsc_drv.c
582 +++ b/drivers/net/hyperv/netvsc_drv.c
583 @@ -1248,12 +1248,15 @@ static void netvsc_get_stats64(struct net_device *net,
584 struct rtnl_link_stats64 *t)
585 {
586 struct net_device_context *ndev_ctx = netdev_priv(net);
587 - struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
588 + struct netvsc_device *nvdev;
589 struct netvsc_vf_pcpu_stats vf_tot;
590 int i;
591
592 + rcu_read_lock();
593 +
594 + nvdev = rcu_dereference(ndev_ctx->nvdev);
595 if (!nvdev)
596 - return;
597 + goto out;
598
599 netdev_stats_to_stats64(t, &net->stats);
600
601 @@ -1292,6 +1295,8 @@ static void netvsc_get_stats64(struct net_device *net,
602 t->rx_packets += packets;
603 t->multicast += multicast;
604 }
605 +out:
606 + rcu_read_unlock();
607 }
608
609 static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
610 diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
611 index 947bea81d924..dfbdea22fbad 100644
612 --- a/drivers/net/usb/cx82310_eth.c
613 +++ b/drivers/net/usb/cx82310_eth.c
614 @@ -175,7 +175,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
615 }
616 if (!timeout) {
617 dev_err(&udev->dev, "firmware not ready in time\n");
618 - return -ETIMEDOUT;
619 + ret = -ETIMEDOUT;
620 + goto err;
621 }
622
623 /* enable ethernet mode (?) */
624 diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
625 index bd2ba3659028..0cc6993c279a 100644
626 --- a/drivers/net/usb/kalmia.c
627 +++ b/drivers/net/usb/kalmia.c
628 @@ -117,16 +117,16 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
629 status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1),
630 usb_buf, 24);
631 if (status != 0)
632 - return status;
633 + goto out;
634
635 memcpy(usb_buf, init_msg_2, 12);
636 status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2),
637 usb_buf, 28);
638 if (status != 0)
639 - return status;
640 + goto out;
641
642 memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
643 -
644 +out:
645 kfree(usb_buf);
646 return status;
647 }
648 diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
649 index 8d140495da79..e20266bd209e 100644
650 --- a/drivers/net/usb/lan78xx.c
651 +++ b/drivers/net/usb/lan78xx.c
652 @@ -3799,7 +3799,7 @@ static int lan78xx_probe(struct usb_interface *intf,
653 ret = register_netdev(netdev);
654 if (ret != 0) {
655 netif_err(dev, probe, netdev, "couldn't register the device\n");
656 - goto out3;
657 + goto out4;
658 }
659
660 usb_set_intfdata(intf, dev);
661 @@ -3814,12 +3814,14 @@ static int lan78xx_probe(struct usb_interface *intf,
662
663 ret = lan78xx_phy_init(dev);
664 if (ret < 0)
665 - goto out4;
666 + goto out5;
667
668 return 0;
669
670 -out4:
671 +out5:
672 unregister_netdev(netdev);
673 +out4:
674 + usb_free_urb(dev->urb_intr);
675 out3:
676 lan78xx_unbind(dev, intf);
677 out2:
678 diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
679 index e9fc168bb734..489cba9b284d 100644
680 --- a/drivers/net/wimax/i2400m/fw.c
681 +++ b/drivers/net/wimax/i2400m/fw.c
682 @@ -351,13 +351,15 @@ int i2400m_barker_db_init(const char *_options)
683 }
684 result = i2400m_barker_db_add(barker);
685 if (result < 0)
686 - goto error_add;
687 + goto error_parse_add;
688 }
689 kfree(options_orig);
690 }
691 return 0;
692
693 +error_parse_add:
694 error_parse:
695 + kfree(options_orig);
696 error_add:
697 kfree(i2400m_barker_db);
698 return result;
699 diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
700 index 05d6371c7f38..f57feb8fdea4 100644
701 --- a/drivers/nvme/host/multipath.c
702 +++ b/drivers/nvme/host/multipath.c
703 @@ -323,6 +323,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
704 "failed to create id group.\n");
705 }
706
707 + synchronize_srcu(&ns->head->srcu);
708 kblockd_schedule_work(&ns->head->requeue_work);
709 }
710
711 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
712 index f8f4d3ea67f3..15d493f30810 100644
713 --- a/drivers/scsi/qla2xxx/qla_attr.c
714 +++ b/drivers/scsi/qla2xxx/qla_attr.c
715 @@ -2191,6 +2191,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
716 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
717 vha->gnl.ldma);
718
719 + vha->gnl.l = NULL;
720 +
721 vfree(vha->scan.l);
722
723 if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
724 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
725 index 42b8f0d3e580..02fa81f122c2 100644
726 --- a/drivers/scsi/qla2xxx/qla_os.c
727 +++ b/drivers/scsi/qla2xxx/qla_os.c
728 @@ -3395,6 +3395,12 @@ skip_dpc:
729 return 0;
730
731 probe_failed:
732 + if (base_vha->gnl.l) {
733 + dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
734 + base_vha->gnl.l, base_vha->gnl.ldma);
735 + base_vha->gnl.l = NULL;
736 + }
737 +
738 if (base_vha->timer_active)
739 qla2x00_stop_timer(base_vha);
740 base_vha->flags.online = 0;
741 @@ -3624,7 +3630,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
742 if (!atomic_read(&pdev->enable_cnt)) {
743 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
744 base_vha->gnl.l, base_vha->gnl.ldma);
745 -
746 + base_vha->gnl.l = NULL;
747 scsi_host_put(base_vha->host);
748 kfree(ha);
749 pci_set_drvdata(pdev, NULL);
750 @@ -3663,6 +3669,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
751 dma_free_coherent(&ha->pdev->dev,
752 base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
753
754 + base_vha->gnl.l = NULL;
755 +
756 vfree(base_vha->scan.l);
757
758 if (IS_QLAFX00(ha))
759 @@ -4602,6 +4610,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
760 "Alloc failed for scan database.\n");
761 dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
762 vha->gnl.l, vha->gnl.ldma);
763 + vha->gnl.l = NULL;
764 scsi_remove_host(vha->host);
765 return NULL;
766 }
767 diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
768 index 3094d818cf06..12c1fa5b06c5 100644
769 --- a/drivers/spi/spi-bcm2835aux.c
770 +++ b/drivers/spi/spi-bcm2835aux.c
771 @@ -178,24 +178,14 @@ static void bcm2835aux_spi_reset_hw(struct bcm2835aux_spi *bs)
772 BCM2835_AUX_SPI_CNTL0_CLEARFIFO);
773 }
774
775 -static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
776 +static void bcm2835aux_spi_transfer_helper(struct bcm2835aux_spi *bs)
777 {
778 - struct spi_master *master = dev_id;
779 - struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
780 - irqreturn_t ret = IRQ_NONE;
781 -
782 - /* IRQ may be shared, so return if our interrupts are disabled */
783 - if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
784 - (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
785 - return ret;
786 + u32 stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
787
788 /* check if we have data to read */
789 - while (bs->rx_len &&
790 - (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
791 - BCM2835_AUX_SPI_STAT_RX_EMPTY))) {
792 + for (; bs->rx_len && (stat & BCM2835_AUX_SPI_STAT_RX_LVL);
793 + stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT))
794 bcm2835aux_rd_fifo(bs);
795 - ret = IRQ_HANDLED;
796 - }
797
798 /* check if we have data to write */
799 while (bs->tx_len &&
800 @@ -203,16 +193,21 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
801 (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
802 BCM2835_AUX_SPI_STAT_TX_FULL))) {
803 bcm2835aux_wr_fifo(bs);
804 - ret = IRQ_HANDLED;
805 }
806 +}
807
808 - /* and check if we have reached "done" */
809 - while (bs->rx_len &&
810 - (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
811 - BCM2835_AUX_SPI_STAT_BUSY))) {
812 - bcm2835aux_rd_fifo(bs);
813 - ret = IRQ_HANDLED;
814 - }
815 +static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
816 +{
817 + struct spi_master *master = dev_id;
818 + struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
819 +
820 + /* IRQ may be shared, so return if our interrupts are disabled */
821 + if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
822 + (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
823 + return IRQ_NONE;
824 +
825 + /* do common fifo handling */
826 + bcm2835aux_spi_transfer_helper(bs);
827
828 if (!bs->tx_len) {
829 /* disable tx fifo empty interrupt */
830 @@ -226,8 +221,7 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
831 complete(&master->xfer_completion);
832 }
833
834 - /* and return */
835 - return ret;
836 + return IRQ_HANDLED;
837 }
838
839 static int __bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
840 @@ -273,7 +267,6 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
841 {
842 struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
843 unsigned long timeout;
844 - u32 stat;
845
846 /* configure spi */
847 bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
848 @@ -284,24 +277,9 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
849
850 /* loop until finished the transfer */
851 while (bs->rx_len) {
852 - /* read status */
853 - stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
854 -
855 - /* fill in tx fifo with remaining data */
856 - if ((bs->tx_len) && (!(stat & BCM2835_AUX_SPI_STAT_TX_FULL))) {
857 - bcm2835aux_wr_fifo(bs);
858 - continue;
859 - }
860
861 - /* read data from fifo for both cases */
862 - if (!(stat & BCM2835_AUX_SPI_STAT_RX_EMPTY)) {
863 - bcm2835aux_rd_fifo(bs);
864 - continue;
865 - }
866 - if (!(stat & BCM2835_AUX_SPI_STAT_BUSY)) {
867 - bcm2835aux_rd_fifo(bs);
868 - continue;
869 - }
870 + /* do common fifo handling */
871 + bcm2835aux_spi_transfer_helper(bs);
872
873 /* there is still data pending to read check the timeout */
874 if (bs->rx_len && time_after(jiffies, timeout)) {
875 diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
876 index c46efa47d68a..7159e8363b83 100644
877 --- a/drivers/target/target_core_user.c
878 +++ b/drivers/target/target_core_user.c
879 @@ -1143,14 +1143,16 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
880 struct se_cmd *se_cmd = cmd->se_cmd;
881 struct tcmu_dev *udev = cmd->tcmu_dev;
882 bool read_len_valid = false;
883 - uint32_t read_len = se_cmd->data_length;
884 + uint32_t read_len;
885
886 /*
887 * cmd has been completed already from timeout, just reclaim
888 * data area space and free cmd
889 */
890 - if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
891 + if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
892 + WARN_ON_ONCE(se_cmd);
893 goto out;
894 + }
895
896 list_del_init(&cmd->queue_entry);
897
898 @@ -1163,6 +1165,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
899 goto done;
900 }
901
902 + read_len = se_cmd->data_length;
903 if (se_cmd->data_direction == DMA_FROM_DEVICE &&
904 (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
905 read_len_valid = true;
906 @@ -1318,6 +1321,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
907 */
908 scsi_status = SAM_STAT_CHECK_CONDITION;
909 list_del_init(&cmd->queue_entry);
910 + cmd->se_cmd = NULL;
911 } else {
912 list_del_init(&cmd->queue_entry);
913 idr_remove(&udev->commands, id);
914 @@ -2036,6 +2040,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
915
916 idr_remove(&udev->commands, i);
917 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
918 + WARN_ON(!cmd->se_cmd);
919 list_del_init(&cmd->queue_entry);
920 if (err_level == 1) {
921 /*
922 diff --git a/fs/afs/cell.c b/fs/afs/cell.c
923 index 6127f0fcd62c..ee07162d35c7 100644
924 --- a/fs/afs/cell.c
925 +++ b/fs/afs/cell.c
926 @@ -76,6 +76,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
927 cell = rcu_dereference_raw(net->ws_cell);
928 if (cell) {
929 afs_get_cell(cell);
930 + ret = 0;
931 break;
932 }
933 ret = -EDESTADDRREQ;
934 @@ -110,6 +111,9 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
935
936 done_seqretry(&net->cells_lock, seq);
937
938 + if (ret != 0 && cell)
939 + afs_put_cell(net, cell);
940 +
941 return ret == 0 ? cell : ERR_PTR(ret);
942 }
943
944 diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
945 index a11fa0b6b34d..db547af01b59 100644
946 --- a/fs/ceph/caps.c
947 +++ b/fs/ceph/caps.c
948 @@ -1280,6 +1280,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
949 {
950 struct ceph_inode_info *ci = cap->ci;
951 struct inode *inode = &ci->vfs_inode;
952 + struct ceph_buffer *old_blob = NULL;
953 struct cap_msg_args arg;
954 int held, revoking;
955 int wake = 0;
956 @@ -1344,7 +1345,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
957 ci->i_requested_max_size = arg.max_size;
958
959 if (flushing & CEPH_CAP_XATTR_EXCL) {
960 - __ceph_build_xattrs_blob(ci);
961 + old_blob = __ceph_build_xattrs_blob(ci);
962 arg.xattr_version = ci->i_xattrs.version;
963 arg.xattr_buf = ci->i_xattrs.blob;
964 } else {
965 @@ -1379,6 +1380,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
966
967 spin_unlock(&ci->i_ceph_lock);
968
969 + ceph_buffer_put(old_blob);
970 +
971 ret = send_cap_msg(&arg);
972 if (ret < 0) {
973 dout("error sending cap msg, must requeue %p\n", inode);
974 diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
975 index 3e518c2ae2bf..11f19432a74c 100644
976 --- a/fs/ceph/inode.c
977 +++ b/fs/ceph/inode.c
978 @@ -742,6 +742,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
979 int issued, new_issued, info_caps;
980 struct timespec64 mtime, atime, ctime;
981 struct ceph_buffer *xattr_blob = NULL;
982 + struct ceph_buffer *old_blob = NULL;
983 struct ceph_string *pool_ns = NULL;
984 struct ceph_cap *new_cap = NULL;
985 int err = 0;
986 @@ -878,7 +879,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
987 if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
988 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
989 if (ci->i_xattrs.blob)
990 - ceph_buffer_put(ci->i_xattrs.blob);
991 + old_blob = ci->i_xattrs.blob;
992 ci->i_xattrs.blob = xattr_blob;
993 if (xattr_blob)
994 memcpy(ci->i_xattrs.blob->vec.iov_base,
995 @@ -1017,8 +1018,8 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
996 out:
997 if (new_cap)
998 ceph_put_cap(mdsc, new_cap);
999 - if (xattr_blob)
1000 - ceph_buffer_put(xattr_blob);
1001 + ceph_buffer_put(old_blob);
1002 + ceph_buffer_put(xattr_blob);
1003 ceph_put_string(pool_ns);
1004 return err;
1005 }
1006 diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
1007 index 1f46b02f7314..5cf7b5f4db94 100644
1008 --- a/fs/ceph/snap.c
1009 +++ b/fs/ceph/snap.c
1010 @@ -460,6 +460,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
1011 struct inode *inode = &ci->vfs_inode;
1012 struct ceph_cap_snap *capsnap;
1013 struct ceph_snap_context *old_snapc, *new_snapc;
1014 + struct ceph_buffer *old_blob = NULL;
1015 int used, dirty;
1016
1017 capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
1018 @@ -536,7 +537,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
1019 capsnap->gid = inode->i_gid;
1020
1021 if (dirty & CEPH_CAP_XATTR_EXCL) {
1022 - __ceph_build_xattrs_blob(ci);
1023 + old_blob = __ceph_build_xattrs_blob(ci);
1024 capsnap->xattr_blob =
1025 ceph_buffer_get(ci->i_xattrs.blob);
1026 capsnap->xattr_version = ci->i_xattrs.version;
1027 @@ -579,6 +580,7 @@ update_snapc:
1028 }
1029 spin_unlock(&ci->i_ceph_lock);
1030
1031 + ceph_buffer_put(old_blob);
1032 kfree(capsnap);
1033 ceph_put_snap_context(old_snapc);
1034 }
1035 diff --git a/fs/ceph/super.h b/fs/ceph/super.h
1036 index d8579a56e5dc..018019309790 100644
1037 --- a/fs/ceph/super.h
1038 +++ b/fs/ceph/super.h
1039 @@ -896,7 +896,7 @@ extern int ceph_getattr(const struct path *path, struct kstat *stat,
1040 int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
1041 ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
1042 extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
1043 -extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci);
1044 +extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
1045 extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
1046 extern void __init ceph_xattr_init(void);
1047 extern void ceph_xattr_exit(void);
1048 diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
1049 index 0a2d4898ee16..5e4f3f833e85 100644
1050 --- a/fs/ceph/xattr.c
1051 +++ b/fs/ceph/xattr.c
1052 @@ -734,12 +734,15 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
1053
1054 /*
1055 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
1056 - * and swap into place.
1057 + * and swap into place. It returns the old i_xattrs.blob (or NULL) so
1058 + * that it can be freed by the caller as the i_ceph_lock is likely to be
1059 + * held.
1060 */
1061 -void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
1062 +struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
1063 {
1064 struct rb_node *p;
1065 struct ceph_inode_xattr *xattr = NULL;
1066 + struct ceph_buffer *old_blob = NULL;
1067 void *dest;
1068
1069 dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
1070 @@ -770,12 +773,14 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
1071 dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
1072
1073 if (ci->i_xattrs.blob)
1074 - ceph_buffer_put(ci->i_xattrs.blob);
1075 + old_blob = ci->i_xattrs.blob;
1076 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
1077 ci->i_xattrs.prealloc_blob = NULL;
1078 ci->i_xattrs.dirty = false;
1079 ci->i_xattrs.version++;
1080 }
1081 +
1082 + return old_blob;
1083 }
1084
1085 static inline int __get_request_mask(struct inode *in) {
1086 @@ -1011,6 +1016,7 @@ int __ceph_setxattr(struct inode *inode, const char *name,
1087 struct ceph_inode_info *ci = ceph_inode(inode);
1088 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1089 struct ceph_cap_flush *prealloc_cf = NULL;
1090 + struct ceph_buffer *old_blob = NULL;
1091 int issued;
1092 int err;
1093 int dirty = 0;
1094 @@ -1084,13 +1090,15 @@ retry:
1095 struct ceph_buffer *blob;
1096
1097 spin_unlock(&ci->i_ceph_lock);
1098 - dout(" preaallocating new blob size=%d\n", required_blob_size);
1099 + ceph_buffer_put(old_blob); /* Shouldn't be required */
1100 + dout(" pre-allocating new blob size=%d\n", required_blob_size);
1101 blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
1102 if (!blob)
1103 goto do_sync_unlocked;
1104 spin_lock(&ci->i_ceph_lock);
1105 + /* prealloc_blob can't be released while holding i_ceph_lock */
1106 if (ci->i_xattrs.prealloc_blob)
1107 - ceph_buffer_put(ci->i_xattrs.prealloc_blob);
1108 + old_blob = ci->i_xattrs.prealloc_blob;
1109 ci->i_xattrs.prealloc_blob = blob;
1110 goto retry;
1111 }
1112 @@ -1106,6 +1114,7 @@ retry:
1113 }
1114
1115 spin_unlock(&ci->i_ceph_lock);
1116 + ceph_buffer_put(old_blob);
1117 if (lock_snap_rwsem)
1118 up_read(&mdsc->snap_rwsem);
1119 if (dirty)
1120 diff --git a/fs/read_write.c b/fs/read_write.c
1121 index 85fd7a8ee29e..5fb5ee5b8cd7 100644
1122 --- a/fs/read_write.c
1123 +++ b/fs/read_write.c
1124 @@ -1888,10 +1888,7 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
1125 }
1126 EXPORT_SYMBOL(vfs_clone_file_range);
1127
1128 -/*
1129 - * Read a page's worth of file data into the page cache. Return the page
1130 - * locked.
1131 - */
1132 +/* Read a page's worth of file data into the page cache. */
1133 static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
1134 {
1135 struct address_space *mapping;
1136 @@ -1907,10 +1904,32 @@ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
1137 put_page(page);
1138 return ERR_PTR(-EIO);
1139 }
1140 - lock_page(page);
1141 return page;
1142 }
1143
1144 +/*
1145 + * Lock two pages, ensuring that we lock in offset order if the pages are from
1146 + * the same file.
1147 + */
1148 +static void vfs_lock_two_pages(struct page *page1, struct page *page2)
1149 +{
1150 + /* Always lock in order of increasing index. */
1151 + if (page1->index > page2->index)
1152 + swap(page1, page2);
1153 +
1154 + lock_page(page1);
1155 + if (page1 != page2)
1156 + lock_page(page2);
1157 +}
1158 +
1159 +/* Unlock two pages, being careful not to unlock the same page twice. */
1160 +static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
1161 +{
1162 + unlock_page(page1);
1163 + if (page1 != page2)
1164 + unlock_page(page2);
1165 +}
1166 +
1167 /*
1168 * Compare extents of two files to see if they are the same.
1169 * Caller must have locked both inodes to prevent write races.
1170 @@ -1948,10 +1967,24 @@ int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
1171 dest_page = vfs_dedupe_get_page(dest, destoff);
1172 if (IS_ERR(dest_page)) {
1173 error = PTR_ERR(dest_page);
1174 - unlock_page(src_page);
1175 put_page(src_page);
1176 goto out_error;
1177 }
1178 +
1179 + vfs_lock_two_pages(src_page, dest_page);
1180 +
1181 + /*
1182 + * Now that we've locked both pages, make sure they're still
1183 + * mapped to the file data we're interested in. If not,
1184 + * someone is invalidating pages on us and we lose.
1185 + */
1186 + if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
1187 + src_page->mapping != src->i_mapping ||
1188 + dest_page->mapping != dest->i_mapping) {
1189 + same = false;
1190 + goto unlock;
1191 + }
1192 +
1193 src_addr = kmap_atomic(src_page);
1194 dest_addr = kmap_atomic(dest_page);
1195
1196 @@ -1963,8 +1996,8 @@ int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
1197
1198 kunmap_atomic(dest_addr);
1199 kunmap_atomic(src_addr);
1200 - unlock_page(dest_page);
1201 - unlock_page(src_page);
1202 +unlock:
1203 + vfs_unlock_two_pages(src_page, dest_page);
1204 put_page(dest_page);
1205 put_page(src_page);
1206
1207 diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
1208 index 5e58bb29b1a3..11cdc7c60480 100644
1209 --- a/include/linux/ceph/buffer.h
1210 +++ b/include/linux/ceph/buffer.h
1211 @@ -30,7 +30,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
1212
1213 static inline void ceph_buffer_put(struct ceph_buffer *b)
1214 {
1215 - kref_put(&b->kref, ceph_buffer_release);
1216 + if (b)
1217 + kref_put(&b->kref, ceph_buffer_release);
1218 }
1219
1220 extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
1221 diff --git a/include/linux/gpio.h b/include/linux/gpio.h
1222 index 39745b8bdd65..b3115d1a7d49 100644
1223 --- a/include/linux/gpio.h
1224 +++ b/include/linux/gpio.h
1225 @@ -240,30 +240,6 @@ static inline int irq_to_gpio(unsigned irq)
1226 return -EINVAL;
1227 }
1228
1229 -static inline int
1230 -gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
1231 - unsigned int gpio_offset, unsigned int pin_offset,
1232 - unsigned int npins)
1233 -{
1234 - WARN_ON(1);
1235 - return -EINVAL;
1236 -}
1237 -
1238 -static inline int
1239 -gpiochip_add_pingroup_range(struct gpio_chip *chip,
1240 - struct pinctrl_dev *pctldev,
1241 - unsigned int gpio_offset, const char *pin_group)
1242 -{
1243 - WARN_ON(1);
1244 - return -EINVAL;
1245 -}
1246 -
1247 -static inline void
1248 -gpiochip_remove_pin_ranges(struct gpio_chip *chip)
1249 -{
1250 - WARN_ON(1);
1251 -}
1252 -
1253 static inline int devm_gpio_request(struct device *dev, unsigned gpio,
1254 const char *label)
1255 {
1256 diff --git a/include/net/act_api.h b/include/net/act_api.h
1257 index 970303448c90..0c82d7ea6ee1 100644
1258 --- a/include/net/act_api.h
1259 +++ b/include/net/act_api.h
1260 @@ -15,6 +15,7 @@
1261 struct tcf_idrinfo {
1262 spinlock_t lock;
1263 struct idr action_idr;
1264 + struct net *net;
1265 };
1266
1267 struct tc_action_ops;
1268 @@ -107,7 +108,7 @@ struct tc_action_net {
1269 };
1270
1271 static inline
1272 -int tc_action_net_init(struct tc_action_net *tn,
1273 +int tc_action_net_init(struct net *net, struct tc_action_net *tn,
1274 const struct tc_action_ops *ops)
1275 {
1276 int err = 0;
1277 @@ -116,6 +117,7 @@ int tc_action_net_init(struct tc_action_net *tn,
1278 if (!tn->idrinfo)
1279 return -ENOMEM;
1280 tn->ops = ops;
1281 + tn->idrinfo->net = net;
1282 spin_lock_init(&tn->idrinfo->lock);
1283 idr_init(&tn->idrinfo->action_idr);
1284 return err;
1285 diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
1286 index f2be5d041ba3..7685cbda9f28 100644
1287 --- a/include/net/netfilter/nf_tables.h
1288 +++ b/include/net/netfilter/nf_tables.h
1289 @@ -418,8 +418,7 @@ struct nft_set {
1290 unsigned char *udata;
1291 /* runtime data below here */
1292 const struct nft_set_ops *ops ____cacheline_aligned;
1293 - u16 flags:13,
1294 - bound:1,
1295 + u16 flags:14,
1296 genmask:2;
1297 u8 klen;
1298 u8 dlen;
1299 @@ -1337,12 +1336,15 @@ struct nft_trans_rule {
1300 struct nft_trans_set {
1301 struct nft_set *set;
1302 u32 set_id;
1303 + bool bound;
1304 };
1305
1306 #define nft_trans_set(trans) \
1307 (((struct nft_trans_set *)trans->data)->set)
1308 #define nft_trans_set_id(trans) \
1309 (((struct nft_trans_set *)trans->data)->set_id)
1310 +#define nft_trans_set_bound(trans) \
1311 + (((struct nft_trans_set *)trans->data)->bound)
1312
1313 struct nft_trans_chain {
1314 bool update;
1315 @@ -1373,12 +1375,15 @@ struct nft_trans_table {
1316 struct nft_trans_elem {
1317 struct nft_set *set;
1318 struct nft_set_elem elem;
1319 + bool bound;
1320 };
1321
1322 #define nft_trans_elem_set(trans) \
1323 (((struct nft_trans_elem *)trans->data)->set)
1324 #define nft_trans_elem(trans) \
1325 (((struct nft_trans_elem *)trans->data)->elem)
1326 +#define nft_trans_elem_set_bound(trans) \
1327 + (((struct nft_trans_elem *)trans->data)->bound)
1328
1329 struct nft_trans_obj {
1330 struct nft_object *obj;
1331 diff --git a/include/net/psample.h b/include/net/psample.h
1332 index 9b80f814ab04..94cb37a7bf75 100644
1333 --- a/include/net/psample.h
1334 +++ b/include/net/psample.h
1335 @@ -12,6 +12,7 @@ struct psample_group {
1336 u32 group_num;
1337 u32 refcount;
1338 u32 seq;
1339 + struct rcu_head rcu;
1340 };
1341
1342 struct psample_group *psample_group_get(struct net *net, u32 group_num);
1343 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
1344 index 29ff6635d259..714d63f60460 100644
1345 --- a/kernel/kprobes.c
1346 +++ b/kernel/kprobes.c
1347 @@ -483,6 +483,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
1348 */
1349 static void do_optimize_kprobes(void)
1350 {
1351 + lockdep_assert_held(&text_mutex);
1352 /*
1353 * The optimization/unoptimization refers online_cpus via
1354 * stop_machine() and cpu-hotplug modifies online_cpus.
1355 @@ -500,9 +501,7 @@ static void do_optimize_kprobes(void)
1356 list_empty(&optimizing_list))
1357 return;
1358
1359 - mutex_lock(&text_mutex);
1360 arch_optimize_kprobes(&optimizing_list);
1361 - mutex_unlock(&text_mutex);
1362 }
1363
1364 /*
1365 @@ -513,6 +512,7 @@ static void do_unoptimize_kprobes(void)
1366 {
1367 struct optimized_kprobe *op, *tmp;
1368
1369 + lockdep_assert_held(&text_mutex);
1370 /* See comment in do_optimize_kprobes() */
1371 lockdep_assert_cpus_held();
1372
1373 @@ -520,7 +520,6 @@ static void do_unoptimize_kprobes(void)
1374 if (list_empty(&unoptimizing_list))
1375 return;
1376
1377 - mutex_lock(&text_mutex);
1378 arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
1379 /* Loop free_list for disarming */
1380 list_for_each_entry_safe(op, tmp, &freeing_list, list) {
1381 @@ -537,7 +536,6 @@ static void do_unoptimize_kprobes(void)
1382 } else
1383 list_del_init(&op->list);
1384 }
1385 - mutex_unlock(&text_mutex);
1386 }
1387
1388 /* Reclaim all kprobes on the free_list */
1389 @@ -563,6 +561,7 @@ static void kprobe_optimizer(struct work_struct *work)
1390 {
1391 mutex_lock(&kprobe_mutex);
1392 cpus_read_lock();
1393 + mutex_lock(&text_mutex);
1394 /* Lock modules while optimizing kprobes */
1395 mutex_lock(&module_mutex);
1396
1397 @@ -590,6 +589,7 @@ static void kprobe_optimizer(struct work_struct *work)
1398 do_free_cleaned_kprobes();
1399
1400 mutex_unlock(&module_mutex);
1401 + mutex_unlock(&text_mutex);
1402 cpus_read_unlock();
1403 mutex_unlock(&kprobe_mutex);
1404
1405 diff --git a/net/core/netpoll.c b/net/core/netpoll.c
1406 index 3ae899805f8b..a581cf101cd9 100644
1407 --- a/net/core/netpoll.c
1408 +++ b/net/core/netpoll.c
1409 @@ -122,7 +122,7 @@ static void queue_process(struct work_struct *work)
1410 txq = netdev_get_tx_queue(dev, q_index);
1411 HARD_TX_LOCK(dev, txq, smp_processor_id());
1412 if (netif_xmit_frozen_or_stopped(txq) ||
1413 - netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
1414 + !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
1415 skb_queue_head(&npinfo->txq, skb);
1416 HARD_TX_UNLOCK(dev, txq);
1417 local_irq_restore(flags);
1418 @@ -335,7 +335,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
1419
1420 HARD_TX_UNLOCK(dev, txq);
1421
1422 - if (status == NETDEV_TX_OK)
1423 + if (dev_xmit_complete(status))
1424 break;
1425
1426 }
1427 @@ -352,7 +352,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
1428
1429 }
1430
1431 - if (status != NETDEV_TX_OK) {
1432 + if (!dev_xmit_complete(status)) {
1433 skb_queue_tail(&npinfo->txq, skb);
1434 schedule_delayed_work(&npinfo->tx_work,0);
1435 }
1436 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
1437 index b7ef367fe6a1..611ba174265c 100644
1438 --- a/net/ipv4/tcp.c
1439 +++ b/net/ipv4/tcp.c
1440 @@ -934,6 +934,22 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
1441 return mss_now;
1442 }
1443
1444 +/* In some cases, both sendpage() and sendmsg() could have added
1445 + * an skb to the write queue, but failed adding payload on it.
1446 + * We need to remove it to consume less memory, but more
1447 + * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
1448 + * users.
1449 + */
1450 +static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
1451 +{
1452 + if (skb && !skb->len) {
1453 + tcp_unlink_write_queue(skb, sk);
1454 + if (tcp_write_queue_empty(sk))
1455 + tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
1456 + sk_wmem_free_skb(sk, skb);
1457 + }
1458 +}
1459 +
1460 ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
1461 size_t size, int flags)
1462 {
1463 @@ -1056,6 +1072,7 @@ out:
1464 return copied;
1465
1466 do_error:
1467 + tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk));
1468 if (copied)
1469 goto out;
1470 out_err:
1471 @@ -1409,17 +1426,11 @@ out_nopush:
1472 sock_zerocopy_put(uarg);
1473 return copied + copied_syn;
1474
1475 +do_error:
1476 + skb = tcp_write_queue_tail(sk);
1477 do_fault:
1478 - if (!skb->len) {
1479 - tcp_unlink_write_queue(skb, sk);
1480 - /* It is the one place in all of TCP, except connection
1481 - * reset, where we can be unlinking the send_head.
1482 - */
1483 - tcp_check_send_head(sk, skb);
1484 - sk_wmem_free_skb(sk, skb);
1485 - }
1486 + tcp_remove_empty_skb(sk, skb);
1487
1488 -do_error:
1489 if (copied + copied_syn)
1490 goto out;
1491 out_err:
1492 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
1493 index 88c7e821fd11..2697e4397e46 100644
1494 --- a/net/ipv4/tcp_output.c
1495 +++ b/net/ipv4/tcp_output.c
1496 @@ -2046,7 +2046,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
1497 if (len <= skb->len)
1498 break;
1499
1500 - if (unlikely(TCP_SKB_CB(skb)->eor))
1501 + if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
1502 return false;
1503
1504 len -= skb->len;
1505 @@ -2162,6 +2162,7 @@ static int tcp_mtu_probe(struct sock *sk)
1506 * we need to propagate it to the new skb.
1507 */
1508 TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
1509 + tcp_skb_collapse_tstamp(nskb, skb);
1510 tcp_unlink_write_queue(skb, sk);
1511 sk_wmem_free_skb(sk, skb);
1512 } else {
1513 diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
1514 index dbab62e3f0d7..2d80e913b82f 100644
1515 --- a/net/ipv6/mcast.c
1516 +++ b/net/ipv6/mcast.c
1517 @@ -791,14 +791,15 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
1518 if (pmc) {
1519 im->idev = pmc->idev;
1520 if (im->mca_sfmode == MCAST_INCLUDE) {
1521 - im->mca_tomb = pmc->mca_tomb;
1522 - im->mca_sources = pmc->mca_sources;
1523 + swap(im->mca_tomb, pmc->mca_tomb);
1524 + swap(im->mca_sources, pmc->mca_sources);
1525 for (psf = im->mca_sources; psf; psf = psf->sf_next)
1526 psf->sf_crcount = idev->mc_qrv;
1527 } else {
1528 im->mca_crcount = idev->mc_qrv;
1529 }
1530 in6_dev_put(pmc->idev);
1531 + ip6_mc_clear_src(pmc);
1532 kfree(pmc);
1533 }
1534 spin_unlock_bh(&im->mca_lock);
1535 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
1536 index 29ff59dd99ac..2145581d7b3d 100644
1537 --- a/net/netfilter/nf_tables_api.c
1538 +++ b/net/netfilter/nf_tables_api.c
1539 @@ -121,9 +121,14 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
1540 return;
1541
1542 list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
1543 - if (trans->msg_type == NFT_MSG_NEWSET &&
1544 - nft_trans_set(trans) == set) {
1545 - set->bound = true;
1546 + switch (trans->msg_type) {
1547 + case NFT_MSG_NEWSET:
1548 + if (nft_trans_set(trans) == set)
1549 + nft_trans_set_bound(trans) = true;
1550 + break;
1551 + case NFT_MSG_NEWSETELEM:
1552 + if (nft_trans_elem_set(trans) == set)
1553 + nft_trans_elem_set_bound(trans) = true;
1554 break;
1555 }
1556 }
1557 @@ -6656,7 +6661,7 @@ static int __nf_tables_abort(struct net *net)
1558 break;
1559 case NFT_MSG_NEWSET:
1560 trans->ctx.table->use--;
1561 - if (nft_trans_set(trans)->bound) {
1562 + if (nft_trans_set_bound(trans)) {
1563 nft_trans_destroy(trans);
1564 break;
1565 }
1566 @@ -6668,7 +6673,7 @@ static int __nf_tables_abort(struct net *net)
1567 nft_trans_destroy(trans);
1568 break;
1569 case NFT_MSG_NEWSETELEM:
1570 - if (nft_trans_elem_set(trans)->bound) {
1571 + if (nft_trans_elem_set_bound(trans)) {
1572 nft_trans_destroy(trans);
1573 break;
1574 }
1575 diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
1576 index 6e0c26025ab1..69decbe2c988 100644
1577 --- a/net/netfilter/nft_flow_offload.c
1578 +++ b/net/netfilter/nft_flow_offload.c
1579 @@ -71,11 +71,11 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
1580 {
1581 struct nft_flow_offload *priv = nft_expr_priv(expr);
1582 struct nf_flowtable *flowtable = &priv->flowtable->data;
1583 + struct tcphdr _tcph, *tcph = NULL;
1584 enum ip_conntrack_info ctinfo;
1585 struct nf_flow_route route;
1586 struct flow_offload *flow;
1587 enum ip_conntrack_dir dir;
1588 - bool is_tcp = false;
1589 struct nf_conn *ct;
1590 int ret;
1591
1592 @@ -88,7 +88,10 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
1593
1594 switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
1595 case IPPROTO_TCP:
1596 - is_tcp = true;
1597 + tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff,
1598 + sizeof(_tcph), &_tcph);
1599 + if (unlikely(!tcph || tcph->fin || tcph->rst))
1600 + goto out;
1601 break;
1602 case IPPROTO_UDP:
1603 break;
1604 @@ -115,7 +118,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
1605 if (!flow)
1606 goto err_flow_alloc;
1607
1608 - if (is_tcp) {
1609 + if (tcph) {
1610 ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
1611 ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
1612 }
1613 diff --git a/net/psample/psample.c b/net/psample/psample.c
1614 index 64f95624f219..4cea353221da 100644
1615 --- a/net/psample/psample.c
1616 +++ b/net/psample/psample.c
1617 @@ -156,7 +156,7 @@ static void psample_group_destroy(struct psample_group *group)
1618 {
1619 psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
1620 list_del(&group->list);
1621 - kfree(group);
1622 + kfree_rcu(group, rcu);
1623 }
1624
1625 static struct psample_group *
1626 diff --git a/net/rds/recv.c b/net/rds/recv.c
1627 index 504cd6bcc54c..c0b945516cdb 100644
1628 --- a/net/rds/recv.c
1629 +++ b/net/rds/recv.c
1630 @@ -1,5 +1,5 @@
1631 /*
1632 - * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
1633 + * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
1634 *
1635 * This software is available to you under a choice of one of two
1636 * licenses. You may choose to be licensed under the terms of the GNU
1637 @@ -803,6 +803,7 @@ void rds6_inc_info_copy(struct rds_incoming *inc,
1638
1639 minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence);
1640 minfo6.len = be32_to_cpu(inc->i_hdr.h_len);
1641 + minfo6.tos = 0;
1642
1643 if (flip) {
1644 minfo6.laddr = *daddr;
1645 @@ -816,6 +817,8 @@ void rds6_inc_info_copy(struct rds_incoming *inc,
1646 minfo6.fport = inc->i_hdr.h_dport;
1647 }
1648
1649 + minfo6.flags = 0;
1650 +
1651 rds_info_copy(iter, &minfo6, sizeof(minfo6));
1652 }
1653 #endif
1654 diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
1655 index 20fae5ca87fa..800846d77a56 100644
1656 --- a/net/sched/act_bpf.c
1657 +++ b/net/sched/act_bpf.c
1658 @@ -413,7 +413,7 @@ static __net_init int bpf_init_net(struct net *net)
1659 {
1660 struct tc_action_net *tn = net_generic(net, bpf_net_id);
1661
1662 - return tc_action_net_init(tn, &act_bpf_ops);
1663 + return tc_action_net_init(net, tn, &act_bpf_ops);
1664 }
1665
1666 static void __net_exit bpf_exit_net(struct list_head *net_list)
1667 diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
1668 index 605436747978..538dedd84e21 100644
1669 --- a/net/sched/act_connmark.c
1670 +++ b/net/sched/act_connmark.c
1671 @@ -215,7 +215,7 @@ static __net_init int connmark_init_net(struct net *net)
1672 {
1673 struct tc_action_net *tn = net_generic(net, connmark_net_id);
1674
1675 - return tc_action_net_init(tn, &act_connmark_ops);
1676 + return tc_action_net_init(net, tn, &act_connmark_ops);
1677 }
1678
1679 static void __net_exit connmark_exit_net(struct list_head *net_list)
1680 diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
1681 index 40437197e053..1e269441065a 100644
1682 --- a/net/sched/act_csum.c
1683 +++ b/net/sched/act_csum.c
1684 @@ -678,7 +678,7 @@ static __net_init int csum_init_net(struct net *net)
1685 {
1686 struct tc_action_net *tn = net_generic(net, csum_net_id);
1687
1688 - return tc_action_net_init(tn, &act_csum_ops);
1689 + return tc_action_net_init(net, tn, &act_csum_ops);
1690 }
1691
1692 static void __net_exit csum_exit_net(struct list_head *net_list)
1693 diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
1694 index 72d3347bdd41..dfef9621375e 100644
1695 --- a/net/sched/act_gact.c
1696 +++ b/net/sched/act_gact.c
1697 @@ -263,7 +263,7 @@ static __net_init int gact_init_net(struct net *net)
1698 {
1699 struct tc_action_net *tn = net_generic(net, gact_net_id);
1700
1701 - return tc_action_net_init(tn, &act_gact_ops);
1702 + return tc_action_net_init(net, tn, &act_gact_ops);
1703 }
1704
1705 static void __net_exit gact_exit_net(struct list_head *net_list)
1706 diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
1707 index 24047e0e5db0..bac353bea02f 100644
1708 --- a/net/sched/act_ife.c
1709 +++ b/net/sched/act_ife.c
1710 @@ -887,7 +887,7 @@ static __net_init int ife_init_net(struct net *net)
1711 {
1712 struct tc_action_net *tn = net_generic(net, ife_net_id);
1713
1714 - return tc_action_net_init(tn, &act_ife_ops);
1715 + return tc_action_net_init(net, tn, &act_ife_ops);
1716 }
1717
1718 static void __net_exit ife_exit_net(struct list_head *net_list)
1719 diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
1720 index 334f3a057671..01d3669ef498 100644
1721 --- a/net/sched/act_ipt.c
1722 +++ b/net/sched/act_ipt.c
1723 @@ -65,12 +65,13 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
1724 return 0;
1725 }
1726
1727 -static void ipt_destroy_target(struct xt_entry_target *t)
1728 +static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
1729 {
1730 struct xt_tgdtor_param par = {
1731 .target = t->u.kernel.target,
1732 .targinfo = t->data,
1733 .family = NFPROTO_IPV4,
1734 + .net = net,
1735 };
1736 if (par.target->destroy != NULL)
1737 par.target->destroy(&par);
1738 @@ -82,7 +83,7 @@ static void tcf_ipt_release(struct tc_action *a)
1739 struct tcf_ipt *ipt = to_ipt(a);
1740
1741 if (ipt->tcfi_t) {
1742 - ipt_destroy_target(ipt->tcfi_t);
1743 + ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net);
1744 kfree(ipt->tcfi_t);
1745 }
1746 kfree(ipt->tcfi_tname);
1747 @@ -182,7 +183,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
1748
1749 spin_lock_bh(&ipt->tcf_lock);
1750 if (ret != ACT_P_CREATED) {
1751 - ipt_destroy_target(ipt->tcfi_t);
1752 + ipt_destroy_target(ipt->tcfi_t, net);
1753 kfree(ipt->tcfi_tname);
1754 kfree(ipt->tcfi_t);
1755 }
1756 @@ -353,7 +354,7 @@ static __net_init int ipt_init_net(struct net *net)
1757 {
1758 struct tc_action_net *tn = net_generic(net, ipt_net_id);
1759
1760 - return tc_action_net_init(tn, &act_ipt_ops);
1761 + return tc_action_net_init(net, tn, &act_ipt_ops);
1762 }
1763
1764 static void __net_exit ipt_exit_net(struct list_head *net_list)
1765 @@ -403,7 +404,7 @@ static __net_init int xt_init_net(struct net *net)
1766 {
1767 struct tc_action_net *tn = net_generic(net, xt_net_id);
1768
1769 - return tc_action_net_init(tn, &act_xt_ops);
1770 + return tc_action_net_init(net, tn, &act_xt_ops);
1771 }
1772
1773 static void __net_exit xt_exit_net(struct list_head *net_list)
1774 diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
1775 index 548614bd9366..399e3beae6cf 100644
1776 --- a/net/sched/act_mirred.c
1777 +++ b/net/sched/act_mirred.c
1778 @@ -419,7 +419,7 @@ static __net_init int mirred_init_net(struct net *net)
1779 {
1780 struct tc_action_net *tn = net_generic(net, mirred_net_id);
1781
1782 - return tc_action_net_init(tn, &act_mirred_ops);
1783 + return tc_action_net_init(net, tn, &act_mirred_ops);
1784 }
1785
1786 static void __net_exit mirred_exit_net(struct list_head *net_list)
1787 diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
1788 index 619828920b97..d1b47a1b145c 100644
1789 --- a/net/sched/act_nat.c
1790 +++ b/net/sched/act_nat.c
1791 @@ -317,7 +317,7 @@ static __net_init int nat_init_net(struct net *net)
1792 {
1793 struct tc_action_net *tn = net_generic(net, nat_net_id);
1794
1795 - return tc_action_net_init(tn, &act_nat_ops);
1796 + return tc_action_net_init(net, tn, &act_nat_ops);
1797 }
1798
1799 static void __net_exit nat_exit_net(struct list_head *net_list)
1800 diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
1801 index 82d258b2a75a..33c0cc5ef229 100644
1802 --- a/net/sched/act_pedit.c
1803 +++ b/net/sched/act_pedit.c
1804 @@ -488,7 +488,7 @@ static __net_init int pedit_init_net(struct net *net)
1805 {
1806 struct tc_action_net *tn = net_generic(net, pedit_net_id);
1807
1808 - return tc_action_net_init(tn, &act_pedit_ops);
1809 + return tc_action_net_init(net, tn, &act_pedit_ops);
1810 }
1811
1812 static void __net_exit pedit_exit_net(struct list_head *net_list)
1813 diff --git a/net/sched/act_police.c b/net/sched/act_police.c
1814 index 997c34db1491..4db25959e156 100644
1815 --- a/net/sched/act_police.c
1816 +++ b/net/sched/act_police.c
1817 @@ -342,7 +342,7 @@ static __net_init int police_init_net(struct net *net)
1818 {
1819 struct tc_action_net *tn = net_generic(net, police_net_id);
1820
1821 - return tc_action_net_init(tn, &act_police_ops);
1822 + return tc_action_net_init(net, tn, &act_police_ops);
1823 }
1824
1825 static void __net_exit police_exit_net(struct list_head *net_list)
1826 diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
1827 index ac37654ca292..98635311a5a0 100644
1828 --- a/net/sched/act_sample.c
1829 +++ b/net/sched/act_sample.c
1830 @@ -99,7 +99,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
1831 s->tcf_action = parm->action;
1832 s->rate = rate;
1833 s->psample_group_num = psample_group_num;
1834 - RCU_INIT_POINTER(s->psample_group, psample_group);
1835 + rcu_swap_protected(s->psample_group, psample_group,
1836 + lockdep_is_held(&s->tcf_lock));
1837
1838 if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
1839 s->truncate = true;
1840 @@ -107,6 +108,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
1841 }
1842 spin_unlock_bh(&s->tcf_lock);
1843
1844 + if (psample_group)
1845 + psample_group_put(psample_group);
1846 if (ret == ACT_P_CREATED)
1847 tcf_idr_insert(tn, *a);
1848 return ret;
1849 @@ -255,7 +258,7 @@ static __net_init int sample_init_net(struct net *net)
1850 {
1851 struct tc_action_net *tn = net_generic(net, sample_net_id);
1852
1853 - return tc_action_net_init(tn, &act_sample_ops);
1854 + return tc_action_net_init(net, tn, &act_sample_ops);
1855 }
1856
1857 static void __net_exit sample_exit_net(struct list_head *net_list)
1858 diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
1859 index 658efae71a09..b418ef62e0a4 100644
1860 --- a/net/sched/act_simple.c
1861 +++ b/net/sched/act_simple.c
1862 @@ -215,7 +215,7 @@ static __net_init int simp_init_net(struct net *net)
1863 {
1864 struct tc_action_net *tn = net_generic(net, simp_net_id);
1865
1866 - return tc_action_net_init(tn, &act_simp_ops);
1867 + return tc_action_net_init(net, tn, &act_simp_ops);
1868 }
1869
1870 static void __net_exit simp_exit_net(struct list_head *net_list)
1871 diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
1872 index 7709710a41f7..a80179c1075f 100644
1873 --- a/net/sched/act_skbedit.c
1874 +++ b/net/sched/act_skbedit.c
1875 @@ -316,7 +316,7 @@ static __net_init int skbedit_init_net(struct net *net)
1876 {
1877 struct tc_action_net *tn = net_generic(net, skbedit_net_id);
1878
1879 - return tc_action_net_init(tn, &act_skbedit_ops);
1880 + return tc_action_net_init(net, tn, &act_skbedit_ops);
1881 }
1882
1883 static void __net_exit skbedit_exit_net(struct list_head *net_list)
1884 diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
1885 index 3038493d18ca..21d195296121 100644
1886 --- a/net/sched/act_skbmod.c
1887 +++ b/net/sched/act_skbmod.c
1888 @@ -277,7 +277,7 @@ static __net_init int skbmod_init_net(struct net *net)
1889 {
1890 struct tc_action_net *tn = net_generic(net, skbmod_net_id);
1891
1892 - return tc_action_net_init(tn, &act_skbmod_ops);
1893 + return tc_action_net_init(net, tn, &act_skbmod_ops);
1894 }
1895
1896 static void __net_exit skbmod_exit_net(struct list_head *net_list)
1897 diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
1898 index 66bfe57e74ae..43309ff2b5dc 100644
1899 --- a/net/sched/act_tunnel_key.c
1900 +++ b/net/sched/act_tunnel_key.c
1901 @@ -579,7 +579,7 @@ static __net_init int tunnel_key_init_net(struct net *net)
1902 {
1903 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
1904
1905 - return tc_action_net_init(tn, &act_tunnel_key_ops);
1906 + return tc_action_net_init(net, tn, &act_tunnel_key_ops);
1907 }
1908
1909 static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
1910 diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
1911 index da993edd2e40..41528b966440 100644
1912 --- a/net/sched/act_vlan.c
1913 +++ b/net/sched/act_vlan.c
1914 @@ -324,7 +324,7 @@ static __net_init int vlan_init_net(struct net *net)
1915 {
1916 struct tc_action_net *tn = net_generic(net, vlan_net_id);
1917
1918 - return tc_action_net_init(tn, &act_vlan_ops);
1919 + return tc_action_net_init(net, tn, &act_vlan_ops);
1920 }
1921
1922 static void __net_exit vlan_exit_net(struct list_head *net_list)
1923 diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
1924 index fcaf00621102..be7aebff0c1e 100644
1925 --- a/tools/bpf/bpftool/common.c
1926 +++ b/tools/bpf/bpftool/common.c
1927 @@ -238,7 +238,7 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
1928
1929 fd = get_fd_by_id(id);
1930 if (fd < 0) {
1931 - p_err("can't get prog by id (%u): %s", id, strerror(errno));
1932 + p_err("can't open object by id (%u): %s", id, strerror(errno));
1933 return -1;
1934 }
1935
1936 diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
1937 index 0ce50c319cfd..ef8a82f29f02 100644
1938 --- a/tools/hv/hv_kvp_daemon.c
1939 +++ b/tools/hv/hv_kvp_daemon.c
1940 @@ -809,7 +809,7 @@ kvp_get_ip_info(int family, char *if_name, int op,
1941 int sn_offset = 0;
1942 int error = 0;
1943 char *buffer;
1944 - struct hv_kvp_ipaddr_value *ip_buffer;
1945 + struct hv_kvp_ipaddr_value *ip_buffer = NULL;
1946 char cidr_mask[5]; /* /xyz */
1947 int weight;
1948 int i;
1949 diff --git a/tools/testing/selftests/kvm/lib/x86.c b/tools/testing/selftests/kvm/lib/x86.c
1950 index a3122f1949a8..4d35eba73dc9 100644
1951 --- a/tools/testing/selftests/kvm/lib/x86.c
1952 +++ b/tools/testing/selftests/kvm/lib/x86.c
1953 @@ -809,9 +809,11 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
1954 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
1955 r);
1956
1957 - r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
1958 - TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
1959 - r);
1960 + if (kvm_check_cap(KVM_CAP_XCRS)) {
1961 + r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
1962 + TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
1963 + r);
1964 + }
1965
1966 r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
1967 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
1968 @@ -858,9 +860,11 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
1969 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
1970 r);
1971
1972 - r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
1973 - TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
1974 - r);
1975 + if (kvm_check_cap(KVM_CAP_XCRS)) {
1976 + r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
1977 + TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
1978 + r);
1979 + }
1980
1981 r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
1982 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
1983 diff --git a/tools/testing/selftests/kvm/platform_info_test.c b/tools/testing/selftests/kvm/platform_info_test.c
1984 index 3764e7121265..65db510dddc3 100644
1985 --- a/tools/testing/selftests/kvm/platform_info_test.c
1986 +++ b/tools/testing/selftests/kvm/platform_info_test.c
1987 @@ -100,8 +100,8 @@ int main(int argc, char *argv[])
1988 msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
1989 vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
1990 msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
1991 - test_msr_platform_info_disabled(vm);
1992 test_msr_platform_info_enabled(vm);
1993 + test_msr_platform_info_disabled(vm);
1994 vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
1995
1996 kvm_vm_free(vm);
1997 diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
1998 index 08443a15e6be..3caee91bca08 100644
1999 --- a/virt/kvm/arm/mmio.c
2000 +++ b/virt/kvm/arm/mmio.c
2001 @@ -98,6 +98,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
2002 unsigned int len;
2003 int mask;
2004
2005 + /* Detect an already handled MMIO return */
2006 + if (unlikely(!vcpu->mmio_needed))
2007 + return 0;
2008 +
2009 + vcpu->mmio_needed = 0;
2010 +
2011 if (!run->mmio.is_write) {
2012 len = run->mmio.len;
2013 if (len > sizeof(unsigned long))
2014 @@ -200,6 +206,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
2015 run->mmio.is_write = is_write;
2016 run->mmio.phys_addr = fault_ipa;
2017 run->mmio.len = len;
2018 + vcpu->mmio_needed = 1;
2019
2020 if (!ret) {
2021 /* We handled the access successfully in the kernel. */
2022 diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
2023 index 8196e4f8731f..cd75df25fe14 100644
2024 --- a/virt/kvm/arm/vgic/vgic-init.c
2025 +++ b/virt/kvm/arm/vgic/vgic-init.c
2026 @@ -19,6 +19,7 @@
2027 #include <linux/cpu.h>
2028 #include <linux/kvm_host.h>
2029 #include <kvm/arm_vgic.h>
2030 +#include <asm/kvm_emulate.h>
2031 #include <asm/kvm_mmu.h>
2032 #include "vgic.h"
2033
2034 @@ -175,12 +176,18 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
2035 irq->vcpu = NULL;
2036 irq->target_vcpu = vcpu0;
2037 kref_init(&irq->refcount);
2038 - if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
2039 + switch (dist->vgic_model) {
2040 + case KVM_DEV_TYPE_ARM_VGIC_V2:
2041 irq->targets = 0;
2042 irq->group = 0;
2043 - } else {
2044 + break;
2045 + case KVM_DEV_TYPE_ARM_VGIC_V3:
2046 irq->mpidr = 0;
2047 irq->group = 1;
2048 + break;
2049 + default:
2050 + kfree(dist->spis);
2051 + return -EINVAL;
2052 }
2053 }
2054 return 0;
2055 @@ -220,7 +227,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
2056 irq->intid = i;
2057 irq->vcpu = NULL;
2058 irq->target_vcpu = vcpu;
2059 - irq->targets = 1U << vcpu->vcpu_id;
2060 kref_init(&irq->refcount);
2061 if (vgic_irq_is_sgi(i)) {
2062 /* SGIs */
2063 @@ -230,11 +236,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
2064 /* PPIs */
2065 irq->config = VGIC_CONFIG_LEVEL;
2066 }
2067 -
2068 - if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
2069 - irq->group = 1;
2070 - else
2071 - irq->group = 0;
2072 }
2073
2074 if (!irqchip_in_kernel(vcpu->kvm))
2075 @@ -297,10 +298,19 @@ int vgic_init(struct kvm *kvm)
2076
2077 for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
2078 struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
2079 - if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
2080 + switch (dist->vgic_model) {
2081 + case KVM_DEV_TYPE_ARM_VGIC_V3:
2082 irq->group = 1;
2083 - else
2084 + irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
2085 + break;
2086 + case KVM_DEV_TYPE_ARM_VGIC_V2:
2087 irq->group = 0;
2088 + irq->targets = 1U << idx;
2089 + break;
2090 + default:
2091 + ret = -EINVAL;
2092 + goto out;
2093 + }
2094 }
2095 }
2096