Contents of /trunk/kernel-alx/patches-5.4/0237-5.4.138-all-fixes.patch
Parent Directory | Revision Log
Revision 3637 -
(show annotations)
(download)
Mon Oct 24 12:40:44 2022 UTC (23 months ago) by niro
File size: 45143 byte(s)
-add missing
1 | diff --git a/Makefile b/Makefile |
2 | index 7cd8862d854ed..5a9d6caef82a0 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 5 |
8 | PATCHLEVEL = 4 |
9 | -SUBLEVEL = 137 |
10 | +SUBLEVEL = 138 |
11 | EXTRAVERSION = |
12 | NAME = Kleptomaniac Octopus |
13 | |
14 | diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c |
15 | index ce71235c8b81f..16b744646ea94 100644 |
16 | --- a/arch/powerpc/platforms/pseries/setup.c |
17 | +++ b/arch/powerpc/platforms/pseries/setup.c |
18 | @@ -75,7 +75,7 @@ |
19 | #include "../../../../drivers/pci/pci.h" |
20 | |
21 | DEFINE_STATIC_KEY_FALSE(shared_processor); |
22 | -EXPORT_SYMBOL_GPL(shared_processor); |
23 | +EXPORT_SYMBOL(shared_processor); |
24 | |
25 | int CMO_PrPSP = -1; |
26 | int CMO_SecPSP = -1; |
27 | diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h |
28 | index 6e81788a30c12..0eaca7a130c9f 100644 |
29 | --- a/arch/x86/include/asm/proto.h |
30 | +++ b/arch/x86/include/asm/proto.h |
31 | @@ -4,6 +4,8 @@ |
32 | |
33 | #include <asm/ldt.h> |
34 | |
35 | +struct task_struct; |
36 | + |
37 | /* misc architecture specific prototypes */ |
38 | |
39 | void syscall_init(void); |
40 | diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c |
41 | index 24a6905d60ee2..642031b896f64 100644 |
42 | --- a/arch/x86/kvm/ioapic.c |
43 | +++ b/arch/x86/kvm/ioapic.c |
44 | @@ -91,7 +91,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, |
45 | static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic) |
46 | { |
47 | ioapic->rtc_status.pending_eoi = 0; |
48 | - bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID); |
49 | + bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID + 1); |
50 | } |
51 | |
52 | static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic); |
53 | diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h |
54 | index ea1a4e0297dae..283f1f489bcac 100644 |
55 | --- a/arch/x86/kvm/ioapic.h |
56 | +++ b/arch/x86/kvm/ioapic.h |
57 | @@ -43,13 +43,13 @@ struct kvm_vcpu; |
58 | |
59 | struct dest_map { |
60 | /* vcpu bitmap where IRQ has been sent */ |
61 | - DECLARE_BITMAP(map, KVM_MAX_VCPU_ID); |
62 | + DECLARE_BITMAP(map, KVM_MAX_VCPU_ID + 1); |
63 | |
64 | /* |
65 | * Vector sent to a given vcpu, only valid when |
66 | * the vcpu's bit in map is set |
67 | */ |
68 | - u8 vectors[KVM_MAX_VCPU_ID]; |
69 | + u8 vectors[KVM_MAX_VCPU_ID + 1]; |
70 | }; |
71 | |
72 | |
73 | diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c |
74 | index 55c57b703ea3c..48ca9a844f06b 100644 |
75 | --- a/drivers/acpi/resource.c |
76 | +++ b/drivers/acpi/resource.c |
77 | @@ -430,13 +430,6 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, |
78 | } |
79 | } |
80 | |
81 | -static bool irq_is_legacy(struct acpi_resource_irq *irq) |
82 | -{ |
83 | - return irq->triggering == ACPI_EDGE_SENSITIVE && |
84 | - irq->polarity == ACPI_ACTIVE_HIGH && |
85 | - irq->shareable == ACPI_EXCLUSIVE; |
86 | -} |
87 | - |
88 | /** |
89 | * acpi_dev_resource_interrupt - Extract ACPI interrupt resource information. |
90 | * @ares: Input ACPI resource object. |
91 | @@ -475,7 +468,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, |
92 | } |
93 | acpi_dev_get_irqresource(res, irq->interrupts[index], |
94 | irq->triggering, irq->polarity, |
95 | - irq->shareable, irq_is_legacy(irq)); |
96 | + irq->shareable, true); |
97 | break; |
98 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: |
99 | ext_irq = &ares->data.extended_irq; |
100 | diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c |
101 | index b21cf764afc0d..d5425bc1ad61a 100644 |
102 | --- a/drivers/hid/wacom_wac.c |
103 | +++ b/drivers/hid/wacom_wac.c |
104 | @@ -3829,7 +3829,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, |
105 | wacom_wac->shared->touch->product == 0xF6) { |
106 | input_dev->evbit[0] |= BIT_MASK(EV_SW); |
107 | __set_bit(SW_MUTE_DEVICE, input_dev->swbit); |
108 | - wacom_wac->shared->has_mute_touch_switch = true; |
109 | + wacom_wac->has_mute_touch_switch = true; |
110 | } |
111 | /* fall through */ |
112 | |
113 | diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c |
114 | index 73d48c3b8ded3..7d2315c8cacb1 100644 |
115 | --- a/drivers/net/can/spi/hi311x.c |
116 | +++ b/drivers/net/can/spi/hi311x.c |
117 | @@ -218,7 +218,7 @@ static int hi3110_spi_trans(struct spi_device *spi, int len) |
118 | return ret; |
119 | } |
120 | |
121 | -static u8 hi3110_cmd(struct spi_device *spi, u8 command) |
122 | +static int hi3110_cmd(struct spi_device *spi, u8 command) |
123 | { |
124 | struct hi3110_priv *priv = spi_get_drvdata(spi); |
125 | |
126 | diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c |
127 | index db9f15f17610b..249d2fba28c7f 100644 |
128 | --- a/drivers/net/can/usb/ems_usb.c |
129 | +++ b/drivers/net/can/usb/ems_usb.c |
130 | @@ -255,6 +255,8 @@ struct ems_usb { |
131 | unsigned int free_slots; /* remember number of available slots */ |
132 | |
133 | struct ems_cpc_msg active_params; /* active controller parameters */ |
134 | + void *rxbuf[MAX_RX_URBS]; |
135 | + dma_addr_t rxbuf_dma[MAX_RX_URBS]; |
136 | }; |
137 | |
138 | static void ems_usb_read_interrupt_callback(struct urb *urb) |
139 | @@ -587,6 +589,7 @@ static int ems_usb_start(struct ems_usb *dev) |
140 | for (i = 0; i < MAX_RX_URBS; i++) { |
141 | struct urb *urb = NULL; |
142 | u8 *buf = NULL; |
143 | + dma_addr_t buf_dma; |
144 | |
145 | /* create a URB, and a buffer for it */ |
146 | urb = usb_alloc_urb(0, GFP_KERNEL); |
147 | @@ -596,7 +599,7 @@ static int ems_usb_start(struct ems_usb *dev) |
148 | } |
149 | |
150 | buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL, |
151 | - &urb->transfer_dma); |
152 | + &buf_dma); |
153 | if (!buf) { |
154 | netdev_err(netdev, "No memory left for USB buffer\n"); |
155 | usb_free_urb(urb); |
156 | @@ -604,6 +607,8 @@ static int ems_usb_start(struct ems_usb *dev) |
157 | break; |
158 | } |
159 | |
160 | + urb->transfer_dma = buf_dma; |
161 | + |
162 | usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2), |
163 | buf, RX_BUFFER_SIZE, |
164 | ems_usb_read_bulk_callback, dev); |
165 | @@ -619,6 +624,9 @@ static int ems_usb_start(struct ems_usb *dev) |
166 | break; |
167 | } |
168 | |
169 | + dev->rxbuf[i] = buf; |
170 | + dev->rxbuf_dma[i] = buf_dma; |
171 | + |
172 | /* Drop reference, USB core will take care of freeing it */ |
173 | usb_free_urb(urb); |
174 | } |
175 | @@ -684,6 +692,10 @@ static void unlink_all_urbs(struct ems_usb *dev) |
176 | |
177 | usb_kill_anchored_urbs(&dev->rx_submitted); |
178 | |
179 | + for (i = 0; i < MAX_RX_URBS; ++i) |
180 | + usb_free_coherent(dev->udev, RX_BUFFER_SIZE, |
181 | + dev->rxbuf[i], dev->rxbuf_dma[i]); |
182 | + |
183 | usb_kill_anchored_urbs(&dev->tx_submitted); |
184 | atomic_set(&dev->active_tx_urbs, 0); |
185 | |
186 | diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c |
187 | index b5d7ed21d7d9e..485e20e0dec2c 100644 |
188 | --- a/drivers/net/can/usb/esd_usb2.c |
189 | +++ b/drivers/net/can/usb/esd_usb2.c |
190 | @@ -195,6 +195,8 @@ struct esd_usb2 { |
191 | int net_count; |
192 | u32 version; |
193 | int rxinitdone; |
194 | + void *rxbuf[MAX_RX_URBS]; |
195 | + dma_addr_t rxbuf_dma[MAX_RX_URBS]; |
196 | }; |
197 | |
198 | struct esd_usb2_net_priv { |
199 | @@ -544,6 +546,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) |
200 | for (i = 0; i < MAX_RX_URBS; i++) { |
201 | struct urb *urb = NULL; |
202 | u8 *buf = NULL; |
203 | + dma_addr_t buf_dma; |
204 | |
205 | /* create a URB, and a buffer for it */ |
206 | urb = usb_alloc_urb(0, GFP_KERNEL); |
207 | @@ -553,7 +556,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) |
208 | } |
209 | |
210 | buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL, |
211 | - &urb->transfer_dma); |
212 | + &buf_dma); |
213 | if (!buf) { |
214 | dev_warn(dev->udev->dev.parent, |
215 | "No memory left for USB buffer\n"); |
216 | @@ -561,6 +564,8 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) |
217 | goto freeurb; |
218 | } |
219 | |
220 | + urb->transfer_dma = buf_dma; |
221 | + |
222 | usb_fill_bulk_urb(urb, dev->udev, |
223 | usb_rcvbulkpipe(dev->udev, 1), |
224 | buf, RX_BUFFER_SIZE, |
225 | @@ -573,8 +578,12 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) |
226 | usb_unanchor_urb(urb); |
227 | usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, |
228 | urb->transfer_dma); |
229 | + goto freeurb; |
230 | } |
231 | |
232 | + dev->rxbuf[i] = buf; |
233 | + dev->rxbuf_dma[i] = buf_dma; |
234 | + |
235 | freeurb: |
236 | /* Drop reference, USB core will take care of freeing it */ |
237 | usb_free_urb(urb); |
238 | @@ -662,6 +671,11 @@ static void unlink_all_urbs(struct esd_usb2 *dev) |
239 | int i, j; |
240 | |
241 | usb_kill_anchored_urbs(&dev->rx_submitted); |
242 | + |
243 | + for (i = 0; i < MAX_RX_URBS; ++i) |
244 | + usb_free_coherent(dev->udev, RX_BUFFER_SIZE, |
245 | + dev->rxbuf[i], dev->rxbuf_dma[i]); |
246 | + |
247 | for (i = 0; i < dev->net_count; i++) { |
248 | priv = dev->nets[i]; |
249 | if (priv) { |
250 | diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c |
251 | index c5ed73d45623d..41eee6f0491c6 100644 |
252 | --- a/drivers/net/can/usb/mcba_usb.c |
253 | +++ b/drivers/net/can/usb/mcba_usb.c |
254 | @@ -653,6 +653,8 @@ static int mcba_usb_start(struct mcba_priv *priv) |
255 | break; |
256 | } |
257 | |
258 | + urb->transfer_dma = buf_dma; |
259 | + |
260 | usb_fill_bulk_urb(urb, priv->udev, |
261 | usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN), |
262 | buf, MCBA_USB_RX_BUFF_SIZE, |
263 | diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c |
264 | index 8fa224b28218b..c43e98bb6e2d7 100644 |
265 | --- a/drivers/net/can/usb/usb_8dev.c |
266 | +++ b/drivers/net/can/usb/usb_8dev.c |
267 | @@ -137,7 +137,8 @@ struct usb_8dev_priv { |
268 | u8 *cmd_msg_buffer; |
269 | |
270 | struct mutex usb_8dev_cmd_lock; |
271 | - |
272 | + void *rxbuf[MAX_RX_URBS]; |
273 | + dma_addr_t rxbuf_dma[MAX_RX_URBS]; |
274 | }; |
275 | |
276 | /* tx frame */ |
277 | @@ -733,6 +734,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) |
278 | for (i = 0; i < MAX_RX_URBS; i++) { |
279 | struct urb *urb = NULL; |
280 | u8 *buf; |
281 | + dma_addr_t buf_dma; |
282 | |
283 | /* create a URB, and a buffer for it */ |
284 | urb = usb_alloc_urb(0, GFP_KERNEL); |
285 | @@ -742,7 +744,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) |
286 | } |
287 | |
288 | buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL, |
289 | - &urb->transfer_dma); |
290 | + &buf_dma); |
291 | if (!buf) { |
292 | netdev_err(netdev, "No memory left for USB buffer\n"); |
293 | usb_free_urb(urb); |
294 | @@ -750,6 +752,8 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) |
295 | break; |
296 | } |
297 | |
298 | + urb->transfer_dma = buf_dma; |
299 | + |
300 | usb_fill_bulk_urb(urb, priv->udev, |
301 | usb_rcvbulkpipe(priv->udev, |
302 | USB_8DEV_ENDP_DATA_RX), |
303 | @@ -767,6 +771,9 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) |
304 | break; |
305 | } |
306 | |
307 | + priv->rxbuf[i] = buf; |
308 | + priv->rxbuf_dma[i] = buf_dma; |
309 | + |
310 | /* Drop reference, USB core will take care of freeing it */ |
311 | usb_free_urb(urb); |
312 | } |
313 | @@ -836,6 +843,10 @@ static void unlink_all_urbs(struct usb_8dev_priv *priv) |
314 | |
315 | usb_kill_anchored_urbs(&priv->rx_submitted); |
316 | |
317 | + for (i = 0; i < MAX_RX_URBS; ++i) |
318 | + usb_free_coherent(priv->udev, RX_BUFFER_SIZE, |
319 | + priv->rxbuf[i], priv->rxbuf_dma[i]); |
320 | + |
321 | usb_kill_anchored_urbs(&priv->tx_submitted); |
322 | atomic_set(&priv->active_tx_urbs, 0); |
323 | |
324 | diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c |
325 | index 70cb2d689c2cb..79bdd2a79dbd3 100644 |
326 | --- a/drivers/net/ethernet/dec/tulip/winbond-840.c |
327 | +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c |
328 | @@ -367,7 +367,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) |
329 | int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; |
330 | void __iomem *ioaddr; |
331 | |
332 | - i = pci_enable_device(pdev); |
333 | + i = pcim_enable_device(pdev); |
334 | if (i) return i; |
335 | |
336 | pci_set_master(pdev); |
337 | @@ -389,7 +389,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) |
338 | |
339 | ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size); |
340 | if (!ioaddr) |
341 | - goto err_out_free_res; |
342 | + goto err_out_netdev; |
343 | |
344 | for (i = 0; i < 3; i++) |
345 | ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i)); |
346 | @@ -468,8 +468,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) |
347 | |
348 | err_out_cleardev: |
349 | pci_iounmap(pdev, ioaddr); |
350 | -err_out_free_res: |
351 | - pci_release_regions(pdev); |
352 | err_out_netdev: |
353 | free_netdev (dev); |
354 | return -ENODEV; |
355 | @@ -1535,7 +1533,6 @@ static void w840_remove1(struct pci_dev *pdev) |
356 | if (dev) { |
357 | struct netdev_private *np = netdev_priv(dev); |
358 | unregister_netdev(dev); |
359 | - pci_release_regions(pdev); |
360 | pci_iounmap(pdev, np->base_addr); |
361 | free_netdev(dev); |
362 | } |
363 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c |
364 | index 2288a3855e522..2cc4f63b1e910 100644 |
365 | --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c |
366 | +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c |
367 | @@ -977,7 +977,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, |
368 | default: |
369 | /* if we got here and link is up something bad is afoot */ |
370 | netdev_info(netdev, |
371 | - "WARNING: Link is up but PHY type 0x%x is not recognized.\n", |
372 | + "WARNING: Link is up but PHY type 0x%x is not recognized, or incorrect cable is in use\n", |
373 | hw_link_info->phy_type); |
374 | } |
375 | |
376 | @@ -5087,6 +5087,10 @@ flags_complete: |
377 | dev_warn(&pf->pdev->dev, |
378 | "Device configuration forbids SW from starting the LLDP agent.\n"); |
379 | return -EINVAL; |
380 | + case I40E_AQ_RC_EAGAIN: |
381 | + dev_warn(&pf->pdev->dev, |
382 | + "Stop FW LLDP agent command is still being processed, please try again in a second.\n"); |
383 | + return -EBUSY; |
384 | default: |
385 | dev_warn(&pf->pdev->dev, |
386 | "Starting FW LLDP agent failed: error: %s, %s\n", |
387 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c |
388 | index 4f4ec1f166ef4..21ab7d2caddf5 100644 |
389 | --- a/drivers/net/ethernet/intel/i40e/i40e_main.c |
390 | +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c |
391 | @@ -4403,11 +4403,10 @@ int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, |
392 | } |
393 | |
394 | /** |
395 | - * i40e_vsi_control_tx - Start or stop a VSI's rings |
396 | + * i40e_vsi_enable_tx - Start a VSI's rings |
397 | * @vsi: the VSI being configured |
398 | - * @enable: start or stop the rings |
399 | **/ |
400 | -static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) |
401 | +static int i40e_vsi_enable_tx(struct i40e_vsi *vsi) |
402 | { |
403 | struct i40e_pf *pf = vsi->back; |
404 | int i, pf_q, ret = 0; |
405 | @@ -4416,7 +4415,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) |
406 | for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { |
407 | ret = i40e_control_wait_tx_q(vsi->seid, pf, |
408 | pf_q, |
409 | - false /*is xdp*/, enable); |
410 | + false /*is xdp*/, true); |
411 | if (ret) |
412 | break; |
413 | |
414 | @@ -4425,7 +4424,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) |
415 | |
416 | ret = i40e_control_wait_tx_q(vsi->seid, pf, |
417 | pf_q + vsi->alloc_queue_pairs, |
418 | - true /*is xdp*/, enable); |
419 | + true /*is xdp*/, true); |
420 | if (ret) |
421 | break; |
422 | } |
423 | @@ -4523,32 +4522,25 @@ int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable) |
424 | } |
425 | |
426 | /** |
427 | - * i40e_vsi_control_rx - Start or stop a VSI's rings |
428 | + * i40e_vsi_enable_rx - Start a VSI's rings |
429 | * @vsi: the VSI being configured |
430 | - * @enable: start or stop the rings |
431 | **/ |
432 | -static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) |
433 | +static int i40e_vsi_enable_rx(struct i40e_vsi *vsi) |
434 | { |
435 | struct i40e_pf *pf = vsi->back; |
436 | int i, pf_q, ret = 0; |
437 | |
438 | pf_q = vsi->base_queue; |
439 | for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { |
440 | - ret = i40e_control_wait_rx_q(pf, pf_q, enable); |
441 | + ret = i40e_control_wait_rx_q(pf, pf_q, true); |
442 | if (ret) { |
443 | dev_info(&pf->pdev->dev, |
444 | - "VSI seid %d Rx ring %d %sable timeout\n", |
445 | - vsi->seid, pf_q, (enable ? "en" : "dis")); |
446 | + "VSI seid %d Rx ring %d enable timeout\n", |
447 | + vsi->seid, pf_q); |
448 | break; |
449 | } |
450 | } |
451 | |
452 | - /* Due to HW errata, on Rx disable only, the register can indicate done |
453 | - * before it really is. Needs 50ms to be sure |
454 | - */ |
455 | - if (!enable) |
456 | - mdelay(50); |
457 | - |
458 | return ret; |
459 | } |
460 | |
461 | @@ -4561,29 +4553,47 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi) |
462 | int ret = 0; |
463 | |
464 | /* do rx first for enable and last for disable */ |
465 | - ret = i40e_vsi_control_rx(vsi, true); |
466 | + ret = i40e_vsi_enable_rx(vsi); |
467 | if (ret) |
468 | return ret; |
469 | - ret = i40e_vsi_control_tx(vsi, true); |
470 | + ret = i40e_vsi_enable_tx(vsi); |
471 | |
472 | return ret; |
473 | } |
474 | |
475 | +#define I40E_DISABLE_TX_GAP_MSEC 50 |
476 | + |
477 | /** |
478 | * i40e_vsi_stop_rings - Stop a VSI's rings |
479 | * @vsi: the VSI being configured |
480 | **/ |
481 | void i40e_vsi_stop_rings(struct i40e_vsi *vsi) |
482 | { |
483 | + struct i40e_pf *pf = vsi->back; |
484 | + int pf_q, err, q_end; |
485 | + |
486 | /* When port TX is suspended, don't wait */ |
487 | if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state)) |
488 | return i40e_vsi_stop_rings_no_wait(vsi); |
489 | |
490 | - /* do rx first for enable and last for disable |
491 | - * Ignore return value, we need to shutdown whatever we can |
492 | - */ |
493 | - i40e_vsi_control_tx(vsi, false); |
494 | - i40e_vsi_control_rx(vsi, false); |
495 | + q_end = vsi->base_queue + vsi->num_queue_pairs; |
496 | + for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) |
497 | + i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false); |
498 | + |
499 | + for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) { |
500 | + err = i40e_control_wait_rx_q(pf, pf_q, false); |
501 | + if (err) |
502 | + dev_info(&pf->pdev->dev, |
503 | + "VSI seid %d Rx ring %d dissable timeout\n", |
504 | + vsi->seid, pf_q); |
505 | + } |
506 | + |
507 | + msleep(I40E_DISABLE_TX_GAP_MSEC); |
508 | + pf_q = vsi->base_queue; |
509 | + for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) |
510 | + wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0); |
511 | + |
512 | + i40e_vsi_wait_queues_disabled(vsi); |
513 | } |
514 | |
515 | /** |
516 | @@ -6868,6 +6878,8 @@ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi, |
517 | } |
518 | if (vsi->num_queue_pairs < |
519 | (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) { |
520 | + dev_err(&vsi->back->pdev->dev, |
521 | + "Failed to create traffic channel, insufficient number of queues.\n"); |
522 | return -EINVAL; |
523 | } |
524 | if (sum_max_rate > i40e_get_link_speed(vsi)) { |
525 | @@ -12883,6 +12895,7 @@ static const struct net_device_ops i40e_netdev_ops = { |
526 | .ndo_poll_controller = i40e_netpoll, |
527 | #endif |
528 | .ndo_setup_tc = __i40e_setup_tc, |
529 | + .ndo_select_queue = i40e_lan_select_queue, |
530 | .ndo_set_features = i40e_set_features, |
531 | .ndo_set_vf_mac = i40e_ndo_set_vf_mac, |
532 | .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, |
533 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c |
534 | index 68a2fcf4c0bf5..8e38c547b53f9 100644 |
535 | --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c |
536 | +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c |
537 | @@ -3521,6 +3521,56 @@ dma_error: |
538 | return -1; |
539 | } |
540 | |
541 | +static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev, |
542 | + const struct sk_buff *skb, |
543 | + u16 num_tx_queues) |
544 | +{ |
545 | + u32 jhash_initval_salt = 0xd631614b; |
546 | + u32 hash; |
547 | + |
548 | + if (skb->sk && skb->sk->sk_hash) |
549 | + hash = skb->sk->sk_hash; |
550 | + else |
551 | + hash = (__force u16)skb->protocol ^ skb->hash; |
552 | + |
553 | + hash = jhash_1word(hash, jhash_initval_salt); |
554 | + |
555 | + return (u16)(((u64)hash * num_tx_queues) >> 32); |
556 | +} |
557 | + |
558 | +u16 i40e_lan_select_queue(struct net_device *netdev, |
559 | + struct sk_buff *skb, |
560 | + struct net_device __always_unused *sb_dev) |
561 | +{ |
562 | + struct i40e_netdev_priv *np = netdev_priv(netdev); |
563 | + struct i40e_vsi *vsi = np->vsi; |
564 | + struct i40e_hw *hw; |
565 | + u16 qoffset; |
566 | + u16 qcount; |
567 | + u8 tclass; |
568 | + u16 hash; |
569 | + u8 prio; |
570 | + |
571 | + /* is DCB enabled at all? */ |
572 | + if (vsi->tc_config.numtc == 1) |
573 | + return i40e_swdcb_skb_tx_hash(netdev, skb, |
574 | + netdev->real_num_tx_queues); |
575 | + |
576 | + prio = skb->priority; |
577 | + hw = &vsi->back->hw; |
578 | + tclass = hw->local_dcbx_config.etscfg.prioritytable[prio]; |
579 | + /* sanity check */ |
580 | + if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass)))) |
581 | + tclass = 0; |
582 | + |
583 | + /* select a queue assigned for the given TC */ |
584 | + qcount = vsi->tc_config.tc_info[tclass].qcount; |
585 | + hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount); |
586 | + |
587 | + qoffset = vsi->tc_config.tc_info[tclass].qoffset; |
588 | + return qoffset + hash; |
589 | +} |
590 | + |
591 | /** |
592 | * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring |
593 | * @xdp: data to transmit |
594 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h |
595 | index 36d37f31a287e..ba4ce80efeb06 100644 |
596 | --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h |
597 | +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h |
598 | @@ -481,6 +481,8 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring) |
599 | |
600 | bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); |
601 | netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); |
602 | +u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb, |
603 | + struct net_device *sb_dev); |
604 | void i40e_clean_tx_ring(struct i40e_ring *tx_ring); |
605 | void i40e_clean_rx_ring(struct i40e_ring *rx_ring); |
606 | int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring); |
607 | diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c |
608 | index cb4664a040ab5..35882d6ded8b3 100644 |
609 | --- a/drivers/net/ethernet/mellanox/mlx4/main.c |
610 | +++ b/drivers/net/ethernet/mellanox/mlx4/main.c |
611 | @@ -3526,6 +3526,7 @@ slave_start: |
612 | |
613 | if (!SRIOV_VALID_STATE(dev->flags)) { |
614 | mlx4_err(dev, "Invalid SRIOV state\n"); |
615 | + err = -EINVAL; |
616 | goto err_close; |
617 | } |
618 | } |
619 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c |
620 | index 9d26463f3fa5d..5abc15a92cfaa 100644 |
621 | --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c |
622 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c |
623 | @@ -444,12 +444,32 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv, |
624 | static |
625 | struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex) |
626 | { |
627 | + struct mlx5_core_dev *mdev; |
628 | struct net_device *netdev; |
629 | struct mlx5e_priv *priv; |
630 | |
631 | - netdev = __dev_get_by_index(net, ifindex); |
632 | + netdev = dev_get_by_index(net, ifindex); |
633 | + if (!netdev) |
634 | + return ERR_PTR(-ENODEV); |
635 | + |
636 | priv = netdev_priv(netdev); |
637 | - return priv->mdev; |
638 | + mdev = priv->mdev; |
639 | + dev_put(netdev); |
640 | + |
641 | + /* Mirred tc action holds a refcount on the ifindex net_device (see |
642 | + * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev |
643 | + * after dev_put(netdev), while we're in the context of adding a tc flow. |
644 | + * |
645 | + * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then |
646 | + * stored in a hairpin object, which exists until all flows, that refer to it, get |
647 | + * removed. |
648 | + * |
649 | + * On the other hand, after a hairpin object has been created, the peer net_device may |
650 | + * be removed/unbound while there are still some hairpin flows that are using it. This |
651 | + * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to |
652 | + * NETDEV_UNREGISTER event of the peer net_device. |
653 | + */ |
654 | + return mdev; |
655 | } |
656 | |
657 | static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp) |
658 | @@ -648,6 +668,10 @@ mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params |
659 | |
660 | func_mdev = priv->mdev; |
661 | peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex); |
662 | + if (IS_ERR(peer_mdev)) { |
663 | + err = PTR_ERR(peer_mdev); |
664 | + goto create_pair_err; |
665 | + } |
666 | |
667 | pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params); |
668 | if (IS_ERR(pair)) { |
669 | @@ -786,6 +810,11 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, |
670 | int err; |
671 | |
672 | peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex); |
673 | + if (IS_ERR(peer_mdev)) { |
674 | + NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device"); |
675 | + return PTR_ERR(peer_mdev); |
676 | + } |
677 | + |
678 | if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) { |
679 | NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported"); |
680 | return -EOPNOTSUPP; |
681 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c |
682 | index 11e12761b0a6e..739bf5dc5a252 100644 |
683 | --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c |
684 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c |
685 | @@ -968,17 +968,19 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev, |
686 | static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, |
687 | struct fs_prio *prio) |
688 | { |
689 | - struct mlx5_flow_table *next_ft; |
690 | + struct mlx5_flow_table *next_ft, *first_ft; |
691 | int err = 0; |
692 | |
693 | /* Connect_prev_fts and update_root_ft_create are mutually exclusive */ |
694 | |
695 | - if (list_empty(&prio->node.children)) { |
696 | + first_ft = list_first_entry_or_null(&prio->node.children, |
697 | + struct mlx5_flow_table, node.list); |
698 | + if (!first_ft || first_ft->level > ft->level) { |
699 | err = connect_prev_fts(dev, ft, prio); |
700 | if (err) |
701 | return err; |
702 | |
703 | - next_ft = find_next_chained_ft(prio); |
704 | + next_ft = first_ft ? first_ft : find_next_chained_ft(prio); |
705 | err = connect_fwd_rules(dev, ft, next_ft); |
706 | if (err) |
707 | return err; |
708 | @@ -2026,7 +2028,7 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft) |
709 | node.list) == ft)) |
710 | return 0; |
711 | |
712 | - next_ft = find_next_chained_ft(prio); |
713 | + next_ft = find_next_ft(ft); |
714 | err = connect_fwd_rules(dev, next_ft, ft); |
715 | if (err) |
716 | return err; |
717 | diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c |
718 | index 44fe2adf0ee0c..30990d7a8e40e 100644 |
719 | --- a/drivers/net/ethernet/sis/sis900.c |
720 | +++ b/drivers/net/ethernet/sis/sis900.c |
721 | @@ -443,7 +443,7 @@ static int sis900_probe(struct pci_dev *pci_dev, |
722 | #endif |
723 | |
724 | /* setup various bits in PCI command register */ |
725 | - ret = pci_enable_device(pci_dev); |
726 | + ret = pcim_enable_device(pci_dev); |
727 | if(ret) return ret; |
728 | |
729 | i = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); |
730 | @@ -469,7 +469,7 @@ static int sis900_probe(struct pci_dev *pci_dev, |
731 | ioaddr = pci_iomap(pci_dev, 0, 0); |
732 | if (!ioaddr) { |
733 | ret = -ENOMEM; |
734 | - goto err_out_cleardev; |
735 | + goto err_out; |
736 | } |
737 | |
738 | sis_priv = netdev_priv(net_dev); |
739 | @@ -579,8 +579,6 @@ err_unmap_tx: |
740 | sis_priv->tx_ring_dma); |
741 | err_out_unmap: |
742 | pci_iounmap(pci_dev, ioaddr); |
743 | -err_out_cleardev: |
744 | - pci_release_regions(pci_dev); |
745 | err_out: |
746 | free_netdev(net_dev); |
747 | return ret; |
748 | @@ -2489,7 +2487,6 @@ static void sis900_remove(struct pci_dev *pci_dev) |
749 | sis_priv->tx_ring_dma); |
750 | pci_iounmap(pci_dev, sis_priv->ioaddr); |
751 | free_netdev(net_dev); |
752 | - pci_release_regions(pci_dev); |
753 | } |
754 | |
755 | #ifdef CONFIG_PM |
756 | diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c |
757 | index 595f560937a71..70b9a7bfe4ec6 100644 |
758 | --- a/drivers/net/ethernet/sun/niu.c |
759 | +++ b/drivers/net/ethernet/sun/niu.c |
760 | @@ -8191,8 +8191,9 @@ static int niu_pci_vpd_fetch(struct niu *np, u32 start) |
761 | err = niu_pci_vpd_scan_props(np, here, end); |
762 | if (err < 0) |
763 | return err; |
764 | + /* ret == 1 is not an error */ |
765 | if (err == 1) |
766 | - return -EINVAL; |
767 | + return 0; |
768 | } |
769 | return 0; |
770 | } |
771 | diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c |
772 | index a9864fcdfba6b..dd27c85190d34 100644 |
773 | --- a/drivers/nfc/nfcsim.c |
774 | +++ b/drivers/nfc/nfcsim.c |
775 | @@ -192,8 +192,7 @@ static void nfcsim_recv_wq(struct work_struct *work) |
776 | |
777 | if (!IS_ERR(skb)) |
778 | dev_kfree_skb(skb); |
779 | - |
780 | - skb = ERR_PTR(-ENODEV); |
781 | + return; |
782 | } |
783 | |
784 | dev->cb(dev->nfc_digital_dev, dev->arg, skb); |
785 | diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c |
786 | index d3a0419e42f28..5a2483e125a3f 100644 |
787 | --- a/drivers/pci/controller/pci-mvebu.c |
788 | +++ b/drivers/pci/controller/pci-mvebu.c |
789 | @@ -105,6 +105,7 @@ struct mvebu_pcie_port { |
790 | struct mvebu_pcie_window memwin; |
791 | struct mvebu_pcie_window iowin; |
792 | u32 saved_pcie_stat; |
793 | + struct resource regs; |
794 | }; |
795 | |
796 | static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg) |
797 | @@ -149,7 +150,9 @@ static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr) |
798 | |
799 | /* |
800 | * Setup PCIE BARs and Address Decode Wins: |
801 | - * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks |
802 | + * BAR[0] -> internal registers (needed for MSI) |
803 | + * BAR[1] -> covers all DRAM banks |
804 | + * BAR[2] -> Disabled |
805 | * WIN[0-3] -> DRAM bank[0-3] |
806 | */ |
807 | static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) |
808 | @@ -203,6 +206,12 @@ static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) |
809 | mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1)); |
810 | mvebu_writel(port, ((size - 1) & 0xffff0000) | 1, |
811 | PCIE_BAR_CTRL_OFF(1)); |
812 | + |
813 | + /* |
814 | + * Point BAR[0] to the device's internal registers. |
815 | + */ |
816 | + mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0)); |
817 | + mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0)); |
818 | } |
819 | |
820 | static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) |
821 | @@ -708,14 +717,13 @@ static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev, |
822 | struct device_node *np, |
823 | struct mvebu_pcie_port *port) |
824 | { |
825 | - struct resource regs; |
826 | int ret = 0; |
827 | |
828 | - ret = of_address_to_resource(np, 0, ®s); |
829 | + ret = of_address_to_resource(np, 0, &port->regs); |
830 | if (ret) |
831 | return ERR_PTR(ret); |
832 | |
833 | - return devm_ioremap_resource(&pdev->dev, ®s); |
834 | + return devm_ioremap_resource(&pdev->dev, &port->regs); |
835 | } |
836 | |
837 | #define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03) |
838 | diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c |
839 | index b05b361e20623..28f78e4f2c87a 100644 |
840 | --- a/fs/btrfs/compression.c |
841 | +++ b/fs/btrfs/compression.c |
842 | @@ -273,7 +273,7 @@ static void end_compressed_bio_write(struct bio *bio) |
843 | cb->compressed_pages[0]->mapping = cb->inode->i_mapping; |
844 | btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0], |
845 | cb->start, cb->start + cb->len - 1, |
846 | - bio->bi_status == BLK_STS_OK); |
847 | + !cb->errors); |
848 | cb->compressed_pages[0]->mapping = NULL; |
849 | |
850 | end_compressed_writeback(inode, cb); |
851 | diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c |
852 | index db3b17bca11f5..3e3529c600cb7 100644 |
853 | --- a/fs/btrfs/volumes.c |
854 | +++ b/fs/btrfs/volumes.c |
855 | @@ -1266,6 +1266,7 @@ again: |
856 | if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { |
857 | list_del_init(&device->dev_alloc_list); |
858 | clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); |
859 | + fs_devices->rw_devices--; |
860 | } |
861 | list_del_init(&device->dev_list); |
862 | fs_devices->num_devices--; |
863 | diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c |
864 | index c2852d7cc14d4..58d4546a208e6 100644 |
865 | --- a/fs/ocfs2/file.c |
866 | +++ b/fs/ocfs2/file.c |
867 | @@ -1529,6 +1529,45 @@ static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start, |
868 | } |
869 | } |
870 | |
871 | +/* |
872 | + * zero out partial blocks of one cluster. |
873 | + * |
874 | + * start: file offset where zero starts, will be made upper block aligned. |
875 | + * len: it will be trimmed to the end of current cluster if "start + len" |
876 | + * is bigger than it. |
877 | + */ |
878 | +static int ocfs2_zeroout_partial_cluster(struct inode *inode, |
879 | + u64 start, u64 len) |
880 | +{ |
881 | + int ret; |
882 | + u64 start_block, end_block, nr_blocks; |
883 | + u64 p_block, offset; |
884 | + u32 cluster, p_cluster, nr_clusters; |
885 | + struct super_block *sb = inode->i_sb; |
886 | + u64 end = ocfs2_align_bytes_to_clusters(sb, start); |
887 | + |
888 | + if (start + len < end) |
889 | + end = start + len; |
890 | + |
891 | + start_block = ocfs2_blocks_for_bytes(sb, start); |
892 | + end_block = ocfs2_blocks_for_bytes(sb, end); |
893 | + nr_blocks = end_block - start_block; |
894 | + if (!nr_blocks) |
895 | + return 0; |
896 | + |
897 | + cluster = ocfs2_bytes_to_clusters(sb, start); |
898 | + ret = ocfs2_get_clusters(inode, cluster, &p_cluster, |
899 | + &nr_clusters, NULL); |
900 | + if (ret) |
901 | + return ret; |
902 | + if (!p_cluster) |
903 | + return 0; |
904 | + |
905 | + offset = start_block - ocfs2_clusters_to_blocks(sb, cluster); |
906 | + p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset; |
907 | + return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS); |
908 | +} |
909 | + |
910 | static int ocfs2_zero_partial_clusters(struct inode *inode, |
911 | u64 start, u64 len) |
912 | { |
913 | @@ -1538,6 +1577,7 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, |
914 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
915 | unsigned int csize = osb->s_clustersize; |
916 | handle_t *handle; |
917 | + loff_t isize = i_size_read(inode); |
918 | |
919 | /* |
920 | * The "start" and "end" values are NOT necessarily part of |
921 | @@ -1558,6 +1598,26 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, |
922 | if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0) |
923 | goto out; |
924 | |
925 | + /* No page cache for EOF blocks, issue zero out to disk. */ |
926 | + if (end > isize) { |
927 | + /* |
928 | + * zeroout eof blocks in last cluster starting from |
929 | + * "isize" even "start" > "isize" because it is |
930 | + * complicated to zeroout just at "start" as "start" |
931 | + * may be not aligned with block size, buffer write |
932 | + * would be required to do that, but out of eof buffer |
933 | + * write is not supported. |
934 | + */ |
935 | + ret = ocfs2_zeroout_partial_cluster(inode, isize, |
936 | + end - isize); |
937 | + if (ret) { |
938 | + mlog_errno(ret); |
939 | + goto out; |
940 | + } |
941 | + if (start >= isize) |
942 | + goto out; |
943 | + end = isize; |
944 | + } |
945 | handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); |
946 | if (IS_ERR(handle)) { |
947 | ret = PTR_ERR(handle); |
948 | @@ -1855,45 +1915,6 @@ out: |
949 | return ret; |
950 | } |
951 | |
952 | -/* |
953 | - * zero out partial blocks of one cluster. |
954 | - * |
955 | - * start: file offset where zero starts, will be made upper block aligned. |
956 | - * len: it will be trimmed to the end of current cluster if "start + len" |
957 | - * is bigger than it. |
958 | - */ |
959 | -static int ocfs2_zeroout_partial_cluster(struct inode *inode, |
960 | - u64 start, u64 len) |
961 | -{ |
962 | - int ret; |
963 | - u64 start_block, end_block, nr_blocks; |
964 | - u64 p_block, offset; |
965 | - u32 cluster, p_cluster, nr_clusters; |
966 | - struct super_block *sb = inode->i_sb; |
967 | - u64 end = ocfs2_align_bytes_to_clusters(sb, start); |
968 | - |
969 | - if (start + len < end) |
970 | - end = start + len; |
971 | - |
972 | - start_block = ocfs2_blocks_for_bytes(sb, start); |
973 | - end_block = ocfs2_blocks_for_bytes(sb, end); |
974 | - nr_blocks = end_block - start_block; |
975 | - if (!nr_blocks) |
976 | - return 0; |
977 | - |
978 | - cluster = ocfs2_bytes_to_clusters(sb, start); |
979 | - ret = ocfs2_get_clusters(inode, cluster, &p_cluster, |
980 | - &nr_clusters, NULL); |
981 | - if (ret) |
982 | - return ret; |
983 | - if (!p_cluster) |
984 | - return 0; |
985 | - |
986 | - offset = start_block - ocfs2_clusters_to_blocks(sb, cluster); |
987 | - p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset; |
988 | - return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS); |
989 | -} |
990 | - |
991 | /* |
992 | * Parts of this function taken from xfs_change_file_space() |
993 | */ |
994 | @@ -1935,7 +1956,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, |
995 | goto out_inode_unlock; |
996 | } |
997 | |
998 | - orig_isize = i_size_read(inode); |
999 | switch (sr->l_whence) { |
1000 | case 0: /*SEEK_SET*/ |
1001 | break; |
1002 | @@ -1943,7 +1963,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, |
1003 | sr->l_start += f_pos; |
1004 | break; |
1005 | case 2: /*SEEK_END*/ |
1006 | - sr->l_start += orig_isize; |
1007 | + sr->l_start += i_size_read(inode); |
1008 | break; |
1009 | default: |
1010 | ret = -EINVAL; |
1011 | @@ -1998,6 +2018,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, |
1012 | ret = -EINVAL; |
1013 | } |
1014 | |
1015 | + orig_isize = i_size_read(inode); |
1016 | /* zeroout eof blocks in the cluster. */ |
1017 | if (!ret && change_size && orig_isize < size) { |
1018 | ret = ocfs2_zeroout_partial_cluster(inode, orig_isize, |
1019 | diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h |
1020 | index c0f0a13ed8183..49aa79c7b278a 100644 |
1021 | --- a/include/net/llc_pdu.h |
1022 | +++ b/include/net/llc_pdu.h |
1023 | @@ -15,9 +15,11 @@ |
1024 | #include <linux/if_ether.h> |
1025 | |
1026 | /* Lengths of frame formats */ |
1027 | -#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */ |
1028 | -#define LLC_PDU_LEN_S 4 |
1029 | -#define LLC_PDU_LEN_U 3 /* header and 1 control byte */ |
1030 | +#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */ |
1031 | +#define LLC_PDU_LEN_S 4 |
1032 | +#define LLC_PDU_LEN_U 3 /* header and 1 control byte */ |
1033 | +/* header and 1 control byte and XID info */ |
1034 | +#define LLC_PDU_LEN_U_XID (LLC_PDU_LEN_U + sizeof(struct llc_xid_info)) |
1035 | /* Known SAP addresses */ |
1036 | #define LLC_GLOBAL_SAP 0xFF |
1037 | #define LLC_NULL_SAP 0x00 /* not network-layer visible */ |
1038 | @@ -50,9 +52,10 @@ |
1039 | #define LLC_PDU_TYPE_U_MASK 0x03 /* 8-bit control field */ |
1040 | #define LLC_PDU_TYPE_MASK 0x03 |
1041 | |
1042 | -#define LLC_PDU_TYPE_I 0 /* first bit */ |
1043 | -#define LLC_PDU_TYPE_S 1 /* first two bits */ |
1044 | -#define LLC_PDU_TYPE_U 3 /* first two bits */ |
1045 | +#define LLC_PDU_TYPE_I 0 /* first bit */ |
1046 | +#define LLC_PDU_TYPE_S 1 /* first two bits */ |
1047 | +#define LLC_PDU_TYPE_U 3 /* first two bits */ |
1048 | +#define LLC_PDU_TYPE_U_XID 4 /* private type for detecting XID commands */ |
1049 | |
1050 | #define LLC_PDU_TYPE_IS_I(pdu) \ |
1051 | ((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 1 : 0) |
1052 | @@ -230,9 +233,18 @@ static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) |
1053 | static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type, |
1054 | u8 ssap, u8 dsap, u8 cr) |
1055 | { |
1056 | - const int hlen = type == LLC_PDU_TYPE_U ? 3 : 4; |
1057 | + int hlen = 4; /* default value for I and S types */ |
1058 | struct llc_pdu_un *pdu; |
1059 | |
1060 | + switch (type) { |
1061 | + case LLC_PDU_TYPE_U: |
1062 | + hlen = 3; |
1063 | + break; |
1064 | + case LLC_PDU_TYPE_U_XID: |
1065 | + hlen = 6; |
1066 | + break; |
1067 | + } |
1068 | + |
1069 | skb_push(skb, hlen); |
1070 | skb_reset_network_header(skb); |
1071 | pdu = llc_pdu_un_hdr(skb); |
1072 | @@ -374,7 +386,10 @@ static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb, |
1073 | xid_info->fmt_id = LLC_XID_FMT_ID; /* 0x81 */ |
1074 | xid_info->type = svcs_supported; |
1075 | xid_info->rw = rx_window << 1; /* size of receive window */ |
1076 | - skb_put(skb, sizeof(struct llc_xid_info)); |
1077 | + |
1078 | + /* no need to push/put since llc_pdu_header_init() has already |
1079 | + * pushed 3 + 3 bytes |
1080 | + */ |
1081 | } |
1082 | |
1083 | /** |
1084 | diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c |
1085 | index 5252bbd1617c6..6571895228f01 100644 |
1086 | --- a/net/can/j1939/transport.c |
1087 | +++ b/net/can/j1939/transport.c |
1088 | @@ -1075,11 +1075,16 @@ static bool j1939_session_deactivate_locked(struct j1939_session *session) |
1089 | |
1090 | static bool j1939_session_deactivate(struct j1939_session *session) |
1091 | { |
1092 | + struct j1939_priv *priv = session->priv; |
1093 | bool active; |
1094 | |
1095 | - j1939_session_list_lock(session->priv); |
1096 | + j1939_session_list_lock(priv); |
1097 | + /* This function should be called with a session ref-count of at |
1098 | + * least 2. |
1099 | + */ |
1100 | + WARN_ON_ONCE(kref_read(&session->kref) < 2); |
1101 | active = j1939_session_deactivate_locked(session); |
1102 | - j1939_session_list_unlock(session->priv); |
1103 | + j1939_session_list_unlock(priv); |
1104 | |
1105 | return active; |
1106 | } |
1107 | @@ -1869,7 +1874,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session, |
1108 | if (!session->transmission) |
1109 | j1939_tp_schedule_txtimer(session, 0); |
1110 | } else { |
1111 | - j1939_tp_set_rxtimeout(session, 250); |
1112 | + j1939_tp_set_rxtimeout(session, 750); |
1113 | } |
1114 | session->last_cmd = 0xff; |
1115 | consume_skb(se_skb); |
1116 | diff --git a/net/can/raw.c b/net/can/raw.c |
1117 | index c968034ed275b..bb837019d1724 100644 |
1118 | --- a/net/can/raw.c |
1119 | +++ b/net/can/raw.c |
1120 | @@ -548,10 +548,18 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, |
1121 | return -EFAULT; |
1122 | } |
1123 | |
1124 | + rtnl_lock(); |
1125 | lock_sock(sk); |
1126 | |
1127 | - if (ro->bound && ro->ifindex) |
1128 | + if (ro->bound && ro->ifindex) { |
1129 | dev = dev_get_by_index(sock_net(sk), ro->ifindex); |
1130 | + if (!dev) { |
1131 | + if (count > 1) |
1132 | + kfree(filter); |
1133 | + err = -ENODEV; |
1134 | + goto out_fil; |
1135 | + } |
1136 | + } |
1137 | |
1138 | if (ro->bound) { |
1139 | /* (try to) register the new filters */ |
1140 | @@ -590,6 +598,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, |
1141 | dev_put(dev); |
1142 | |
1143 | release_sock(sk); |
1144 | + rtnl_unlock(); |
1145 | |
1146 | break; |
1147 | |
1148 | @@ -602,10 +611,16 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, |
1149 | |
1150 | err_mask &= CAN_ERR_MASK; |
1151 | |
1152 | + rtnl_lock(); |
1153 | lock_sock(sk); |
1154 | |
1155 | - if (ro->bound && ro->ifindex) |
1156 | + if (ro->bound && ro->ifindex) { |
1157 | dev = dev_get_by_index(sock_net(sk), ro->ifindex); |
1158 | + if (!dev) { |
1159 | + err = -ENODEV; |
1160 | + goto out_err; |
1161 | + } |
1162 | + } |
1163 | |
1164 | /* remove current error mask */ |
1165 | if (ro->bound) { |
1166 | @@ -629,6 +644,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, |
1167 | dev_put(dev); |
1168 | |
1169 | release_sock(sk); |
1170 | + rtnl_unlock(); |
1171 | |
1172 | break; |
1173 | |
1174 | diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c |
1175 | index eb381a24a8f8d..38d3095ef9793 100644 |
1176 | --- a/net/ipv4/ip_tunnel.c |
1177 | +++ b/net/ipv4/ip_tunnel.c |
1178 | @@ -391,7 +391,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, |
1179 | tunnel->i_seqno = ntohl(tpi->seq) + 1; |
1180 | } |
1181 | |
1182 | - skb_reset_network_header(skb); |
1183 | + skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0); |
1184 | |
1185 | err = IP_ECN_decapsulate(iph, skb); |
1186 | if (unlikely(err)) { |
1187 | diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c |
1188 | index fa0f3c1543ba5..0b3adf7594ffe 100644 |
1189 | --- a/net/llc/af_llc.c |
1190 | +++ b/net/llc/af_llc.c |
1191 | @@ -98,8 +98,16 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr) |
1192 | { |
1193 | u8 rc = LLC_PDU_LEN_U; |
1194 | |
1195 | - if (addr->sllc_test || addr->sllc_xid) |
1196 | + if (addr->sllc_test) |
1197 | rc = LLC_PDU_LEN_U; |
1198 | + else if (addr->sllc_xid) |
1199 | + /* We need to expand header to sizeof(struct llc_xid_info) |
1200 | + * since llc_pdu_init_as_xid_cmd() sets 4,5,6 bytes of LLC header |
1201 | + * as XID PDU. In llc_ui_sendmsg() we reserved header size and then |
1202 | + * filled all other space with user data. If we won't reserve this |
1203 | + * bytes, llc_pdu_init_as_xid_cmd() will overwrite user data |
1204 | + */ |
1205 | + rc = LLC_PDU_LEN_U_XID; |
1206 | else if (sk->sk_type == SOCK_STREAM) |
1207 | rc = LLC_PDU_LEN_I; |
1208 | return rc; |
1209 | diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c |
1210 | index 7ae4cc684d3ab..9fa3342c7a829 100644 |
1211 | --- a/net/llc/llc_s_ac.c |
1212 | +++ b/net/llc/llc_s_ac.c |
1213 | @@ -79,7 +79,7 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb) |
1214 | struct llc_sap_state_ev *ev = llc_sap_ev(skb); |
1215 | int rc; |
1216 | |
1217 | - llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap, |
1218 | + llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap, |
1219 | ev->daddr.lsap, LLC_PDU_CMD); |
1220 | llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0); |
1221 | rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); |
1222 | diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c |
1223 | index 9a40312b1f161..4a988ce4264cb 100644 |
1224 | --- a/net/netfilter/nf_conntrack_core.c |
1225 | +++ b/net/netfilter/nf_conntrack_core.c |
1226 | @@ -660,8 +660,13 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report) |
1227 | return false; |
1228 | |
1229 | tstamp = nf_conn_tstamp_find(ct); |
1230 | - if (tstamp && tstamp->stop == 0) |
1231 | + if (tstamp) { |
1232 | + s32 timeout = ct->timeout - nfct_time_stamp; |
1233 | + |
1234 | tstamp->stop = ktime_get_real_ns(); |
1235 | + if (timeout < 0) |
1236 | + tstamp->stop -= jiffies_to_nsecs(-timeout); |
1237 | + } |
1238 | |
1239 | if (nf_conntrack_event_report(IPCT_DESTROY, ct, |
1240 | portid, report) < 0) { |
1241 | diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c |
1242 | index 243e8107f456f..17c0f75dfcdb7 100644 |
1243 | --- a/net/netfilter/nft_nat.c |
1244 | +++ b/net/netfilter/nft_nat.c |
1245 | @@ -147,7 +147,9 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, |
1246 | alen = FIELD_SIZEOF(struct nf_nat_range, min_addr.ip6); |
1247 | break; |
1248 | default: |
1249 | - return -EAFNOSUPPORT; |
1250 | + if (tb[NFTA_NAT_REG_ADDR_MIN]) |
1251 | + return -EAFNOSUPPORT; |
1252 | + break; |
1253 | } |
1254 | priv->family = family; |
1255 | |
1256 | diff --git a/net/sched/act_api.c b/net/sched/act_api.c |
1257 | index 17e5cd9ebd89f..75132d0ca8870 100644 |
1258 | --- a/net/sched/act_api.c |
1259 | +++ b/net/sched/act_api.c |
1260 | @@ -231,6 +231,8 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb, |
1261 | index++; |
1262 | if (index < s_i) |
1263 | continue; |
1264 | + if (IS_ERR(p)) |
1265 | + continue; |
1266 | |
1267 | if (jiffy_since && |
1268 | time_after(jiffy_since, |
1269 | diff --git a/net/sctp/input.c b/net/sctp/input.c |
1270 | index ab84ebf1af4a6..db4f917aafd90 100644 |
1271 | --- a/net/sctp/input.c |
1272 | +++ b/net/sctp/input.c |
1273 | @@ -1175,7 +1175,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup( |
1274 | if (unlikely(!af)) |
1275 | return NULL; |
1276 | |
1277 | - if (af->from_addr_param(&paddr, param, peer_port, 0)) |
1278 | + if (!af->from_addr_param(&paddr, param, peer_port, 0)) |
1279 | return NULL; |
1280 | |
1281 | return __sctp_lookup_association(net, laddr, &paddr, transportp); |
1282 | diff --git a/net/tipc/socket.c b/net/tipc/socket.c |
1283 | index cdade990fe445..a5922ce9109cf 100644 |
1284 | --- a/net/tipc/socket.c |
1285 | +++ b/net/tipc/socket.c |
1286 | @@ -2501,7 +2501,7 @@ static int tipc_listen(struct socket *sock, int len) |
1287 | static int tipc_wait_for_accept(struct socket *sock, long timeo) |
1288 | { |
1289 | struct sock *sk = sock->sk; |
1290 | - DEFINE_WAIT(wait); |
1291 | + DEFINE_WAIT_FUNC(wait, woken_wake_function); |
1292 | int err; |
1293 | |
1294 | /* True wake-one mechanism for incoming connections: only |
1295 | @@ -2510,12 +2510,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo) |
1296 | * anymore, the common case will execute the loop only once. |
1297 | */ |
1298 | for (;;) { |
1299 | - prepare_to_wait_exclusive(sk_sleep(sk), &wait, |
1300 | - TASK_INTERRUPTIBLE); |
1301 | if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { |
1302 | + add_wait_queue(sk_sleep(sk), &wait); |
1303 | release_sock(sk); |
1304 | - timeo = schedule_timeout(timeo); |
1305 | + timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); |
1306 | lock_sock(sk); |
1307 | + remove_wait_queue(sk_sleep(sk), &wait); |
1308 | } |
1309 | err = 0; |
1310 | if (!skb_queue_empty(&sk->sk_receive_queue)) |
1311 | @@ -2527,7 +2527,6 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo) |
1312 | if (signal_pending(current)) |
1313 | break; |
1314 | } |
1315 | - finish_wait(sk_sleep(sk), &wait); |
1316 | return err; |
1317 | } |
1318 | |
1319 | diff --git a/net/wireless/scan.c b/net/wireless/scan.c |
1320 | index 83297832744ac..1580535d53f86 100644 |
1321 | --- a/net/wireless/scan.c |
1322 | +++ b/net/wireless/scan.c |
1323 | @@ -1250,16 +1250,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, |
1324 | * be grouped with this beacon for updates ... |
1325 | */ |
1326 | if (!cfg80211_combine_bsses(rdev, new)) { |
1327 | - kfree(new); |
1328 | + bss_ref_put(rdev, new); |
1329 | goto drop; |
1330 | } |
1331 | } |
1332 | |
1333 | if (rdev->bss_entries >= bss_entries_limit && |
1334 | !cfg80211_bss_expire_oldest(rdev)) { |
1335 | - if (!list_empty(&new->hidden_list)) |
1336 | - list_del(&new->hidden_list); |
1337 | - kfree(new); |
1338 | + bss_ref_put(rdev, new); |
1339 | goto drop; |
1340 | } |
1341 | |
1342 | diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c |
1343 | index 1ae5c51a70359..571e99c908a0e 100644 |
1344 | --- a/tools/perf/util/map.c |
1345 | +++ b/tools/perf/util/map.c |
1346 | @@ -214,8 +214,6 @@ struct map *map__new(struct machine *machine, u64 start, u64 len, |
1347 | if (!(prot & PROT_EXEC)) |
1348 | dso__set_loaded(dso); |
1349 | } |
1350 | - |
1351 | - nsinfo__put(dso->nsinfo); |
1352 | dso->nsinfo = nsi; |
1353 | dso__put(dso); |
1354 | } |
1355 | diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c |
1356 | index b2287e7d3ba4a..862c0708f58df 100644 |
1357 | --- a/virt/kvm/kvm_main.c |
1358 | +++ b/virt/kvm/kvm_main.c |
1359 | @@ -3570,6 +3570,16 @@ struct compat_kvm_dirty_log { |
1360 | }; |
1361 | }; |
1362 | |
1363 | +struct compat_kvm_clear_dirty_log { |
1364 | + __u32 slot; |
1365 | + __u32 num_pages; |
1366 | + __u64 first_page; |
1367 | + union { |
1368 | + compat_uptr_t dirty_bitmap; /* one bit per page */ |
1369 | + __u64 padding2; |
1370 | + }; |
1371 | +}; |
1372 | + |
1373 | static long kvm_vm_compat_ioctl(struct file *filp, |
1374 | unsigned int ioctl, unsigned long arg) |
1375 | { |
1376 | @@ -3579,6 +3589,24 @@ static long kvm_vm_compat_ioctl(struct file *filp, |
1377 | if (kvm->mm != current->mm) |
1378 | return -EIO; |
1379 | switch (ioctl) { |
1380 | +#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT |
1381 | + case KVM_CLEAR_DIRTY_LOG: { |
1382 | + struct compat_kvm_clear_dirty_log compat_log; |
1383 | + struct kvm_clear_dirty_log log; |
1384 | + |
1385 | + if (copy_from_user(&compat_log, (void __user *)arg, |
1386 | + sizeof(compat_log))) |
1387 | + return -EFAULT; |
1388 | + log.slot = compat_log.slot; |
1389 | + log.num_pages = compat_log.num_pages; |
1390 | + log.first_page = compat_log.first_page; |
1391 | + log.padding2 = compat_log.padding2; |
1392 | + log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); |
1393 | + |
1394 | + r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); |
1395 | + break; |
1396 | + } |
1397 | +#endif |
1398 | case KVM_GET_DIRTY_LOG: { |
1399 | struct compat_kvm_dirty_log compat_log; |
1400 | struct kvm_dirty_log log; |