Contents of /trunk/kernel-alx/patches-5.4/0264-5.4.165-all-fixes.patch
Parent Directory | Revision Log
Revision 3635 -
(show annotations)
(download)
Mon Oct 24 12:34:12 2022 UTC (23 months ago) by niro
File size: 160879 byte(s)
Mon Oct 24 12:34:12 2022 UTC (23 months ago) by niro
File size: 160879 byte(s)
-sync kernel patches
1 | diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml |
2 | index f70f18ff821f5..8f3e9c774b74a 100644 |
3 | --- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml |
4 | +++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml |
5 | @@ -87,6 +87,14 @@ properties: |
6 | compensate for the board being designed with the lanes |
7 | swapped. |
8 | |
9 | + enet-phy-lane-no-swap: |
10 | + $ref: /schemas/types.yaml#/definitions/flag |
11 | + description: |
12 | + If set, indicates that PHY will disable swap of the |
13 | + TX/RX lanes. This property allows the PHY to work correcly after |
14 | + e.g. wrong bootstrap configuration caused by issues in PCB |
15 | + layout design. |
16 | + |
17 | eee-broken-100tx: |
18 | $ref: /schemas/types.yaml#definitions/flag |
19 | description: |
20 | diff --git a/Makefile b/Makefile |
21 | index a87162756d61d..4a7a89de832b5 100644 |
22 | --- a/Makefile |
23 | +++ b/Makefile |
24 | @@ -1,7 +1,7 @@ |
25 | # SPDX-License-Identifier: GPL-2.0 |
26 | VERSION = 5 |
27 | PATCHLEVEL = 4 |
28 | -SUBLEVEL = 164 |
29 | +SUBLEVEL = 165 |
30 | EXTRAVERSION = |
31 | NAME = Kleptomaniac Octopus |
32 | |
33 | diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig |
34 | index c2a3ec3dd8506..c6c71592f6e46 100644 |
35 | --- a/arch/x86/Kconfig |
36 | +++ b/arch/x86/Kconfig |
37 | @@ -1990,6 +1990,7 @@ config EFI |
38 | depends on ACPI |
39 | select UCS2_STRING |
40 | select EFI_RUNTIME_WRAPPERS |
41 | + select ARCH_USE_MEMREMAP_PROT |
42 | ---help--- |
43 | This enables the kernel to use EFI runtime services that are |
44 | available (such as the EFI variable services). |
45 | diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c |
46 | index aefe845dff596..6ca88fbc009cd 100644 |
47 | --- a/arch/x86/platform/efi/quirks.c |
48 | +++ b/arch/x86/platform/efi/quirks.c |
49 | @@ -279,7 +279,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) |
50 | return; |
51 | } |
52 | |
53 | - new = early_memremap(new_phys, new_size); |
54 | + new = early_memremap_prot(new_phys, new_size, |
55 | + pgprot_val(pgprot_encrypted(FIXMAP_PAGE_NORMAL))); |
56 | if (!new) { |
57 | pr_err("Failed to map new boot services memmap\n"); |
58 | return; |
59 | diff --git a/block/ioprio.c b/block/ioprio.c |
60 | index 77bcab11dce57..d70980d85c551 100644 |
61 | --- a/block/ioprio.c |
62 | +++ b/block/ioprio.c |
63 | @@ -207,6 +207,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who) |
64 | pgrp = task_pgrp(current); |
65 | else |
66 | pgrp = find_vpid(who); |
67 | + read_lock(&tasklist_lock); |
68 | do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { |
69 | tmpio = get_task_ioprio(p); |
70 | if (tmpio < 0) |
71 | @@ -216,6 +217,8 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who) |
72 | else |
73 | ret = ioprio_best(ret, tmpio); |
74 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); |
75 | + read_unlock(&tasklist_lock); |
76 | + |
77 | break; |
78 | case IOPRIO_WHO_USER: |
79 | uid = make_kuid(current_user_ns(), who); |
80 | diff --git a/drivers/android/binder.c b/drivers/android/binder.c |
81 | index 1cdc7426bd033..0512af0f04646 100644 |
82 | --- a/drivers/android/binder.c |
83 | +++ b/drivers/android/binder.c |
84 | @@ -4788,23 +4788,20 @@ static int binder_thread_release(struct binder_proc *proc, |
85 | __release(&t->lock); |
86 | |
87 | /* |
88 | - * If this thread used poll, make sure we remove the waitqueue |
89 | - * from any epoll data structures holding it with POLLFREE. |
90 | - * waitqueue_active() is safe to use here because we're holding |
91 | - * the inner lock. |
92 | + * If this thread used poll, make sure we remove the waitqueue from any |
93 | + * poll data structures holding it. |
94 | */ |
95 | - if ((thread->looper & BINDER_LOOPER_STATE_POLL) && |
96 | - waitqueue_active(&thread->wait)) { |
97 | - wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE); |
98 | - } |
99 | + if (thread->looper & BINDER_LOOPER_STATE_POLL) |
100 | + wake_up_pollfree(&thread->wait); |
101 | |
102 | binder_inner_proc_unlock(thread->proc); |
103 | |
104 | /* |
105 | - * This is needed to avoid races between wake_up_poll() above and |
106 | - * and ep_remove_waitqueue() called for other reasons (eg the epoll file |
107 | - * descriptor being closed); ep_remove_waitqueue() holds an RCU read |
108 | - * lock, so we can be sure it's done after calling synchronize_rcu(). |
109 | + * This is needed to avoid races between wake_up_pollfree() above and |
110 | + * someone else removing the last entry from the queue for other reasons |
111 | + * (e.g. ep_remove_wait_queue() being called due to an epoll file |
112 | + * descriptor being closed). Such other users hold an RCU read lock, so |
113 | + * we can be sure they're done after we call synchronize_rcu(). |
114 | */ |
115 | if (thread->looper & BINDER_LOOPER_STATE_POLL) |
116 | synchronize_rcu(); |
117 | diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c |
118 | index bed433fd9c700..92fb1f5b240e8 100644 |
119 | --- a/drivers/ata/libata-core.c |
120 | +++ b/drivers/ata/libata-core.c |
121 | @@ -4437,6 +4437,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { |
122 | { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA }, |
123 | /* Odd clown on sil3726/4726 PMPs */ |
124 | { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, |
125 | + /* Similar story with ASMedia 1092 */ |
126 | + { "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE }, |
127 | |
128 | /* Weird ATAPI devices */ |
129 | { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, |
130 | diff --git a/drivers/clk/qcom/clk-regmap-mux.c b/drivers/clk/qcom/clk-regmap-mux.c |
131 | index b2d00b4519634..45d9cca28064f 100644 |
132 | --- a/drivers/clk/qcom/clk-regmap-mux.c |
133 | +++ b/drivers/clk/qcom/clk-regmap-mux.c |
134 | @@ -28,7 +28,7 @@ static u8 mux_get_parent(struct clk_hw *hw) |
135 | val &= mask; |
136 | |
137 | if (mux->parent_map) |
138 | - return qcom_find_src_index(hw, mux->parent_map, val); |
139 | + return qcom_find_cfg_index(hw, mux->parent_map, val); |
140 | |
141 | return val; |
142 | } |
143 | diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c |
144 | index bdeacebbf0e47..f52ccb62c00b7 100644 |
145 | --- a/drivers/clk/qcom/common.c |
146 | +++ b/drivers/clk/qcom/common.c |
147 | @@ -69,6 +69,18 @@ int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src) |
148 | } |
149 | EXPORT_SYMBOL_GPL(qcom_find_src_index); |
150 | |
151 | +int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map, u8 cfg) |
152 | +{ |
153 | + int i, num_parents = clk_hw_get_num_parents(hw); |
154 | + |
155 | + for (i = 0; i < num_parents; i++) |
156 | + if (cfg == map[i].cfg) |
157 | + return i; |
158 | + |
159 | + return -ENOENT; |
160 | +} |
161 | +EXPORT_SYMBOL_GPL(qcom_find_cfg_index); |
162 | + |
163 | struct regmap * |
164 | qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc) |
165 | { |
166 | diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h |
167 | index bb39a7e106d8a..9c8f7b798d9fc 100644 |
168 | --- a/drivers/clk/qcom/common.h |
169 | +++ b/drivers/clk/qcom/common.h |
170 | @@ -49,6 +49,8 @@ extern void |
171 | qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count); |
172 | extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, |
173 | u8 src); |
174 | +extern int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map, |
175 | + u8 cfg); |
176 | |
177 | extern int qcom_cc_register_board_clk(struct device *dev, const char *path, |
178 | const char *name, unsigned long rate); |
179 | diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c |
180 | index 8fdb271354061..e558381c4c96e 100644 |
181 | --- a/drivers/gpu/drm/drm_syncobj.c |
182 | +++ b/drivers/gpu/drm/drm_syncobj.c |
183 | @@ -329,8 +329,17 @@ int drm_syncobj_find_fence(struct drm_file *file_private, |
184 | |
185 | if (*fence) { |
186 | ret = dma_fence_chain_find_seqno(fence, point); |
187 | - if (!ret) |
188 | + if (!ret) { |
189 | + /* If the requested seqno is already signaled |
190 | + * drm_syncobj_find_fence may return a NULL |
191 | + * fence. To make sure the recipient gets |
192 | + * signalled, use a new fence instead. |
193 | + */ |
194 | + if (!*fence) |
195 | + *fence = dma_fence_get_stub(); |
196 | + |
197 | goto out; |
198 | + } |
199 | dma_fence_put(*fence); |
200 | } else { |
201 | ret = -EINVAL; |
202 | diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig |
203 | index 1ecb5124421c0..0cfcfb116a03d 100644 |
204 | --- a/drivers/hid/Kconfig |
205 | +++ b/drivers/hid/Kconfig |
206 | @@ -206,14 +206,14 @@ config HID_CHERRY |
207 | |
208 | config HID_CHICONY |
209 | tristate "Chicony devices" |
210 | - depends on HID |
211 | + depends on USB_HID |
212 | default !EXPERT |
213 | ---help--- |
214 | Support for Chicony Tactical pad and special keys on Chicony keyboards. |
215 | |
216 | config HID_CORSAIR |
217 | tristate "Corsair devices" |
218 | - depends on HID && USB && LEDS_CLASS |
219 | + depends on USB_HID && LEDS_CLASS |
220 | ---help--- |
221 | Support for Corsair devices that are not fully compliant with the |
222 | HID standard. |
223 | @@ -244,7 +244,7 @@ config HID_MACALLY |
224 | |
225 | config HID_PRODIKEYS |
226 | tristate "Prodikeys PC-MIDI Keyboard support" |
227 | - depends on HID && SND |
228 | + depends on USB_HID && SND |
229 | select SND_RAWMIDI |
230 | ---help--- |
231 | Support for Prodikeys PC-MIDI Keyboard device support. |
232 | @@ -524,7 +524,7 @@ config HID_LENOVO |
233 | |
234 | config HID_LOGITECH |
235 | tristate "Logitech devices" |
236 | - depends on HID |
237 | + depends on USB_HID |
238 | default !EXPERT |
239 | ---help--- |
240 | Support for Logitech devices that are not fully compliant with HID standard. |
241 | @@ -871,7 +871,7 @@ config HID_SAITEK |
242 | |
243 | config HID_SAMSUNG |
244 | tristate "Samsung InfraRed remote control or keyboards" |
245 | - depends on HID |
246 | + depends on USB_HID |
247 | ---help--- |
248 | Support for Samsung InfraRed remote control or keyboards. |
249 | |
250 | diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c |
251 | index e6e4c841fb06f..7f84ed0afdfef 100644 |
252 | --- a/drivers/hid/hid-asus.c |
253 | +++ b/drivers/hid/hid-asus.c |
254 | @@ -849,7 +849,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id) |
255 | if (drvdata->quirks & QUIRK_IS_MULTITOUCH) |
256 | drvdata->tp = &asus_i2c_tp; |
257 | |
258 | - if (drvdata->quirks & QUIRK_T100_KEYBOARD) { |
259 | + if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && hid_is_usb(hdev)) { |
260 | struct usb_interface *intf = to_usb_interface(hdev->dev.parent); |
261 | |
262 | if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) { |
263 | diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c |
264 | index db6da21ade063..74ad8bf98bfd5 100644 |
265 | --- a/drivers/hid/hid-bigbenff.c |
266 | +++ b/drivers/hid/hid-bigbenff.c |
267 | @@ -191,7 +191,7 @@ static void bigben_worker(struct work_struct *work) |
268 | struct bigben_device, worker); |
269 | struct hid_field *report_field = bigben->report->field[0]; |
270 | |
271 | - if (bigben->removed) |
272 | + if (bigben->removed || !report_field) |
273 | return; |
274 | |
275 | if (bigben->work_led) { |
276 | diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c |
277 | index 3f0ed6a952234..e19e2b5973396 100644 |
278 | --- a/drivers/hid/hid-chicony.c |
279 | +++ b/drivers/hid/hid-chicony.c |
280 | @@ -58,8 +58,12 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi, |
281 | static __u8 *ch_switch12_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
282 | unsigned int *rsize) |
283 | { |
284 | - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); |
285 | - |
286 | + struct usb_interface *intf; |
287 | + |
288 | + if (!hid_is_usb(hdev)) |
289 | + return rdesc; |
290 | + |
291 | + intf = to_usb_interface(hdev->dev.parent); |
292 | if (intf->cur_altsetting->desc.bInterfaceNumber == 1) { |
293 | /* Change usage maximum and logical maximum from 0x7fff to |
294 | * 0x2fff, so they don't exceed HID_MAX_USAGES */ |
295 | diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c |
296 | index 902a60e249ed2..8c895c820b672 100644 |
297 | --- a/drivers/hid/hid-corsair.c |
298 | +++ b/drivers/hid/hid-corsair.c |
299 | @@ -553,7 +553,12 @@ static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id) |
300 | int ret; |
301 | unsigned long quirks = id->driver_data; |
302 | struct corsair_drvdata *drvdata; |
303 | - struct usb_interface *usbif = to_usb_interface(dev->dev.parent); |
304 | + struct usb_interface *usbif; |
305 | + |
306 | + if (!hid_is_usb(dev)) |
307 | + return -EINVAL; |
308 | + |
309 | + usbif = to_usb_interface(dev->dev.parent); |
310 | |
311 | drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata), |
312 | GFP_KERNEL); |
313 | diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c |
314 | index dae193749d443..0e8f424025fea 100644 |
315 | --- a/drivers/hid/hid-elan.c |
316 | +++ b/drivers/hid/hid-elan.c |
317 | @@ -50,7 +50,7 @@ struct elan_drvdata { |
318 | |
319 | static int is_not_elan_touchpad(struct hid_device *hdev) |
320 | { |
321 | - if (hdev->bus == BUS_USB) { |
322 | + if (hid_is_usb(hdev)) { |
323 | struct usb_interface *intf = to_usb_interface(hdev->dev.parent); |
324 | |
325 | return (intf->altsetting->desc.bInterfaceNumber != |
326 | diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c |
327 | index 0d22713a38742..2876cb6a7dcab 100644 |
328 | --- a/drivers/hid/hid-elo.c |
329 | +++ b/drivers/hid/hid-elo.c |
330 | @@ -229,6 +229,9 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id) |
331 | struct elo_priv *priv; |
332 | int ret; |
333 | |
334 | + if (!hid_is_usb(hdev)) |
335 | + return -EINVAL; |
336 | + |
337 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
338 | if (!priv) |
339 | return -ENOMEM; |
340 | diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c |
341 | index 505ed76a830e8..b6947d7573473 100644 |
342 | --- a/drivers/hid/hid-google-hammer.c |
343 | +++ b/drivers/hid/hid-google-hammer.c |
344 | @@ -469,6 +469,8 @@ static int hammer_probe(struct hid_device *hdev, |
345 | static const struct hid_device_id hammer_devices[] = { |
346 | { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, |
347 | USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) }, |
348 | + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, |
349 | + USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) }, |
350 | { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, |
351 | USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) }, |
352 | { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, |
353 | diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c |
354 | index 0a38e8e9bc783..403506b9697e7 100644 |
355 | --- a/drivers/hid/hid-holtek-kbd.c |
356 | +++ b/drivers/hid/hid-holtek-kbd.c |
357 | @@ -140,12 +140,17 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type, |
358 | static int holtek_kbd_probe(struct hid_device *hdev, |
359 | const struct hid_device_id *id) |
360 | { |
361 | - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); |
362 | - int ret = hid_parse(hdev); |
363 | + struct usb_interface *intf; |
364 | + int ret; |
365 | + |
366 | + if (!hid_is_usb(hdev)) |
367 | + return -EINVAL; |
368 | |
369 | + ret = hid_parse(hdev); |
370 | if (!ret) |
371 | ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); |
372 | |
373 | + intf = to_usb_interface(hdev->dev.parent); |
374 | if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) { |
375 | struct hid_input *hidinput; |
376 | list_for_each_entry(hidinput, &hdev->inputs, list) { |
377 | diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c |
378 | index 195b735b001d0..b7172c48ef9f0 100644 |
379 | --- a/drivers/hid/hid-holtek-mouse.c |
380 | +++ b/drivers/hid/hid-holtek-mouse.c |
381 | @@ -62,6 +62,14 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
382 | return rdesc; |
383 | } |
384 | |
385 | +static int holtek_mouse_probe(struct hid_device *hdev, |
386 | + const struct hid_device_id *id) |
387 | +{ |
388 | + if (!hid_is_usb(hdev)) |
389 | + return -EINVAL; |
390 | + return 0; |
391 | +} |
392 | + |
393 | static const struct hid_device_id holtek_mouse_devices[] = { |
394 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, |
395 | USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, |
396 | @@ -83,6 +91,7 @@ static struct hid_driver holtek_mouse_driver = { |
397 | .name = "holtek_mouse", |
398 | .id_table = holtek_mouse_devices, |
399 | .report_fixup = holtek_mouse_report_fixup, |
400 | + .probe = holtek_mouse_probe, |
401 | }; |
402 | |
403 | module_hid_driver(holtek_mouse_driver); |
404 | diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h |
405 | index 6ed6158d4f732..26758ea844c92 100644 |
406 | --- a/drivers/hid/hid-ids.h |
407 | +++ b/drivers/hid/hid-ids.h |
408 | @@ -489,6 +489,7 @@ |
409 | #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d |
410 | #define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044 |
411 | #define USB_DEVICE_ID_GOOGLE_DON 0x5050 |
412 | +#define USB_DEVICE_ID_GOOGLE_EEL 0x5057 |
413 | |
414 | #define USB_VENDOR_ID_GOTOP 0x08f2 |
415 | #define USB_DEVICE_ID_SUPER_Q2 0x007f |
416 | @@ -858,6 +859,7 @@ |
417 | #define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7 |
418 | #define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9 |
419 | #define USB_DEVICE_ID_MS_POWER_COVER 0x07da |
420 | +#define USB_DEVICE_ID_MS_SURFACE3_COVER 0x07de |
421 | #define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd |
422 | #define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb |
423 | #define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS 0x02e0 |
424 | diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c |
425 | index 0dc7cdfc56f77..2c7e7c089bf99 100644 |
426 | --- a/drivers/hid/hid-lg.c |
427 | +++ b/drivers/hid/hid-lg.c |
428 | @@ -769,12 +769,18 @@ static int lg_raw_event(struct hid_device *hdev, struct hid_report *report, |
429 | |
430 | static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) |
431 | { |
432 | - struct usb_interface *iface = to_usb_interface(hdev->dev.parent); |
433 | - __u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber; |
434 | + struct usb_interface *iface; |
435 | + __u8 iface_num; |
436 | unsigned int connect_mask = HID_CONNECT_DEFAULT; |
437 | struct lg_drv_data *drv_data; |
438 | int ret; |
439 | |
440 | + if (!hid_is_usb(hdev)) |
441 | + return -EINVAL; |
442 | + |
443 | + iface = to_usb_interface(hdev->dev.parent); |
444 | + iface_num = iface->cur_altsetting->desc.bInterfaceNumber; |
445 | + |
446 | /* G29 only work with the 1st interface */ |
447 | if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) && |
448 | (iface_num != 0)) { |
449 | diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c |
450 | index b499ac37dc7b0..4267e2f2e70f4 100644 |
451 | --- a/drivers/hid/hid-logitech-dj.c |
452 | +++ b/drivers/hid/hid-logitech-dj.c |
453 | @@ -1686,7 +1686,7 @@ static int logi_dj_probe(struct hid_device *hdev, |
454 | case recvr_type_27mhz: no_dj_interfaces = 2; break; |
455 | case recvr_type_bluetooth: no_dj_interfaces = 2; break; |
456 | } |
457 | - if (hid_is_using_ll_driver(hdev, &usb_hid_driver)) { |
458 | + if (hid_is_usb(hdev)) { |
459 | intf = to_usb_interface(hdev->dev.parent); |
460 | if (intf && intf->altsetting->desc.bInterfaceNumber >= |
461 | no_dj_interfaces) { |
462 | diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c |
463 | index 2666af02d5c1a..e4e9471d0f1e9 100644 |
464 | --- a/drivers/hid/hid-prodikeys.c |
465 | +++ b/drivers/hid/hid-prodikeys.c |
466 | @@ -798,12 +798,18 @@ static int pk_raw_event(struct hid_device *hdev, struct hid_report *report, |
467 | static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id) |
468 | { |
469 | int ret; |
470 | - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); |
471 | - unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber; |
472 | + struct usb_interface *intf; |
473 | + unsigned short ifnum; |
474 | unsigned long quirks = id->driver_data; |
475 | struct pk_device *pk; |
476 | struct pcmidi_snd *pm = NULL; |
477 | |
478 | + if (!hid_is_usb(hdev)) |
479 | + return -EINVAL; |
480 | + |
481 | + intf = to_usb_interface(hdev->dev.parent); |
482 | + ifnum = intf->cur_altsetting->desc.bInterfaceNumber; |
483 | + |
484 | pk = kzalloc(sizeof(*pk), GFP_KERNEL); |
485 | if (pk == NULL) { |
486 | hid_err(hdev, "can't alloc descriptor\n"); |
487 | diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c |
488 | index efc9d0d281709..8a0132c54c312 100644 |
489 | --- a/drivers/hid/hid-quirks.c |
490 | +++ b/drivers/hid/hid-quirks.c |
491 | @@ -124,6 +124,7 @@ static const struct hid_device_id hid_quirks[] = { |
492 | { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT }, |
493 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL }, |
494 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS }, |
495 | + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE3_COVER), HID_QUIRK_NO_INIT_REPORTS }, |
496 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS }, |
497 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS }, |
498 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), HID_QUIRK_NO_INIT_REPORTS }, |
499 | diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c |
500 | index ffcd444ae2ba6..4b18e1a4fc7ac 100644 |
501 | --- a/drivers/hid/hid-roccat-arvo.c |
502 | +++ b/drivers/hid/hid-roccat-arvo.c |
503 | @@ -344,6 +344,9 @@ static int arvo_probe(struct hid_device *hdev, |
504 | { |
505 | int retval; |
506 | |
507 | + if (!hid_is_usb(hdev)) |
508 | + return -EINVAL; |
509 | + |
510 | retval = hid_parse(hdev); |
511 | if (retval) { |
512 | hid_err(hdev, "parse failed\n"); |
513 | diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c |
514 | index ce5f22519956a..e95d59cd8d075 100644 |
515 | --- a/drivers/hid/hid-roccat-isku.c |
516 | +++ b/drivers/hid/hid-roccat-isku.c |
517 | @@ -324,6 +324,9 @@ static int isku_probe(struct hid_device *hdev, |
518 | { |
519 | int retval; |
520 | |
521 | + if (!hid_is_usb(hdev)) |
522 | + return -EINVAL; |
523 | + |
524 | retval = hid_parse(hdev); |
525 | if (retval) { |
526 | hid_err(hdev, "parse failed\n"); |
527 | diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c |
528 | index 509b9bb1362cb..6cf59b5e5484b 100644 |
529 | --- a/drivers/hid/hid-roccat-kone.c |
530 | +++ b/drivers/hid/hid-roccat-kone.c |
531 | @@ -749,6 +749,9 @@ static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id) |
532 | { |
533 | int retval; |
534 | |
535 | + if (!hid_is_usb(hdev)) |
536 | + return -EINVAL; |
537 | + |
538 | retval = hid_parse(hdev); |
539 | if (retval) { |
540 | hid_err(hdev, "parse failed\n"); |
541 | diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c |
542 | index 0316edf8c5bb4..1896c69ea512f 100644 |
543 | --- a/drivers/hid/hid-roccat-koneplus.c |
544 | +++ b/drivers/hid/hid-roccat-koneplus.c |
545 | @@ -431,6 +431,9 @@ static int koneplus_probe(struct hid_device *hdev, |
546 | { |
547 | int retval; |
548 | |
549 | + if (!hid_is_usb(hdev)) |
550 | + return -EINVAL; |
551 | + |
552 | retval = hid_parse(hdev); |
553 | if (retval) { |
554 | hid_err(hdev, "parse failed\n"); |
555 | diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c |
556 | index 5248b3c7cf785..cf8eeb33a1257 100644 |
557 | --- a/drivers/hid/hid-roccat-konepure.c |
558 | +++ b/drivers/hid/hid-roccat-konepure.c |
559 | @@ -133,6 +133,9 @@ static int konepure_probe(struct hid_device *hdev, |
560 | { |
561 | int retval; |
562 | |
563 | + if (!hid_is_usb(hdev)) |
564 | + return -EINVAL; |
565 | + |
566 | retval = hid_parse(hdev); |
567 | if (retval) { |
568 | hid_err(hdev, "parse failed\n"); |
569 | diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c |
570 | index 9600128815705..6fb9b9563769d 100644 |
571 | --- a/drivers/hid/hid-roccat-kovaplus.c |
572 | +++ b/drivers/hid/hid-roccat-kovaplus.c |
573 | @@ -501,6 +501,9 @@ static int kovaplus_probe(struct hid_device *hdev, |
574 | { |
575 | int retval; |
576 | |
577 | + if (!hid_is_usb(hdev)) |
578 | + return -EINVAL; |
579 | + |
580 | retval = hid_parse(hdev); |
581 | if (retval) { |
582 | hid_err(hdev, "parse failed\n"); |
583 | diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c |
584 | index 4a88a76d5c622..d5ddf0d68346b 100644 |
585 | --- a/drivers/hid/hid-roccat-lua.c |
586 | +++ b/drivers/hid/hid-roccat-lua.c |
587 | @@ -160,6 +160,9 @@ static int lua_probe(struct hid_device *hdev, |
588 | { |
589 | int retval; |
590 | |
591 | + if (!hid_is_usb(hdev)) |
592 | + return -EINVAL; |
593 | + |
594 | retval = hid_parse(hdev); |
595 | if (retval) { |
596 | hid_err(hdev, "parse failed\n"); |
597 | diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c |
598 | index 989927defe8db..4fcc8e7d276f2 100644 |
599 | --- a/drivers/hid/hid-roccat-pyra.c |
600 | +++ b/drivers/hid/hid-roccat-pyra.c |
601 | @@ -449,6 +449,9 @@ static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id) |
602 | { |
603 | int retval; |
604 | |
605 | + if (!hid_is_usb(hdev)) |
606 | + return -EINVAL; |
607 | + |
608 | retval = hid_parse(hdev); |
609 | if (retval) { |
610 | hid_err(hdev, "parse failed\n"); |
611 | diff --git a/drivers/hid/hid-roccat-ryos.c b/drivers/hid/hid-roccat-ryos.c |
612 | index 3956a6c9c5217..5bf1971a2b14d 100644 |
613 | --- a/drivers/hid/hid-roccat-ryos.c |
614 | +++ b/drivers/hid/hid-roccat-ryos.c |
615 | @@ -141,6 +141,9 @@ static int ryos_probe(struct hid_device *hdev, |
616 | { |
617 | int retval; |
618 | |
619 | + if (!hid_is_usb(hdev)) |
620 | + return -EINVAL; |
621 | + |
622 | retval = hid_parse(hdev); |
623 | if (retval) { |
624 | hid_err(hdev, "parse failed\n"); |
625 | diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c |
626 | index 818701f7a0281..a784bb4ee6512 100644 |
627 | --- a/drivers/hid/hid-roccat-savu.c |
628 | +++ b/drivers/hid/hid-roccat-savu.c |
629 | @@ -113,6 +113,9 @@ static int savu_probe(struct hid_device *hdev, |
630 | { |
631 | int retval; |
632 | |
633 | + if (!hid_is_usb(hdev)) |
634 | + return -EINVAL; |
635 | + |
636 | retval = hid_parse(hdev); |
637 | if (retval) { |
638 | hid_err(hdev, "parse failed\n"); |
639 | diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c |
640 | index 2e1c31156eca0..cf5992e970940 100644 |
641 | --- a/drivers/hid/hid-samsung.c |
642 | +++ b/drivers/hid/hid-samsung.c |
643 | @@ -152,6 +152,9 @@ static int samsung_probe(struct hid_device *hdev, |
644 | int ret; |
645 | unsigned int cmask = HID_CONNECT_DEFAULT; |
646 | |
647 | + if (!hid_is_usb(hdev)) |
648 | + return -EINVAL; |
649 | + |
650 | ret = hid_parse(hdev); |
651 | if (ret) { |
652 | hid_err(hdev, "parse failed\n"); |
653 | diff --git a/drivers/hid/hid-u2fzero.c b/drivers/hid/hid-u2fzero.c |
654 | index 67ae2b18e33ac..ac3fd870673d2 100644 |
655 | --- a/drivers/hid/hid-u2fzero.c |
656 | +++ b/drivers/hid/hid-u2fzero.c |
657 | @@ -290,7 +290,7 @@ static int u2fzero_probe(struct hid_device *hdev, |
658 | unsigned int minor; |
659 | int ret; |
660 | |
661 | - if (!hid_is_using_ll_driver(hdev, &usb_hid_driver)) |
662 | + if (!hid_is_usb(hdev)) |
663 | return -EINVAL; |
664 | |
665 | dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL); |
666 | diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c |
667 | index 8e9c9e646cb7d..4edb241957040 100644 |
668 | --- a/drivers/hid/hid-uclogic-core.c |
669 | +++ b/drivers/hid/hid-uclogic-core.c |
670 | @@ -164,6 +164,9 @@ static int uclogic_probe(struct hid_device *hdev, |
671 | struct uclogic_drvdata *drvdata = NULL; |
672 | bool params_initialized = false; |
673 | |
674 | + if (!hid_is_usb(hdev)) |
675 | + return -EINVAL; |
676 | + |
677 | /* |
678 | * libinput requires the pad interface to be on a different node |
679 | * than the pen, so use QUIRK_MULTI_INPUT for all tablets. |
680 | diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c |
681 | index e80c812f44a77..ed4ede52b017f 100644 |
682 | --- a/drivers/hid/hid-uclogic-params.c |
683 | +++ b/drivers/hid/hid-uclogic-params.c |
684 | @@ -841,8 +841,7 @@ int uclogic_params_init(struct uclogic_params *params, |
685 | struct uclogic_params p = {0, }; |
686 | |
687 | /* Check arguments */ |
688 | - if (params == NULL || hdev == NULL || |
689 | - !hid_is_using_ll_driver(hdev, &usb_hid_driver)) { |
690 | + if (params == NULL || hdev == NULL || !hid_is_usb(hdev)) { |
691 | rc = -EINVAL; |
692 | goto cleanup; |
693 | } |
694 | diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c |
695 | index 73dafa60080f1..329bb1a46f90e 100644 |
696 | --- a/drivers/hid/wacom_sys.c |
697 | +++ b/drivers/hid/wacom_sys.c |
698 | @@ -726,7 +726,7 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev, |
699 | * Skip the query for this type and modify defaults based on |
700 | * interface number. |
701 | */ |
702 | - if (features->type == WIRELESS) { |
703 | + if (features->type == WIRELESS && intf) { |
704 | if (intf->cur_altsetting->desc.bInterfaceNumber == 0) |
705 | features->device_type = WACOM_DEVICETYPE_WL_MONITOR; |
706 | else |
707 | @@ -2217,7 +2217,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix) |
708 | if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) { |
709 | char *product_name = wacom->hdev->name; |
710 | |
711 | - if (hid_is_using_ll_driver(wacom->hdev, &usb_hid_driver)) { |
712 | + if (hid_is_usb(wacom->hdev)) { |
713 | struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent); |
714 | struct usb_device *dev = interface_to_usbdev(intf); |
715 | product_name = dev->product; |
716 | @@ -2448,6 +2448,9 @@ static void wacom_wireless_work(struct work_struct *work) |
717 | |
718 | wacom_destroy_battery(wacom); |
719 | |
720 | + if (!usbdev) |
721 | + return; |
722 | + |
723 | /* Stylus interface */ |
724 | hdev1 = usb_get_intfdata(usbdev->config->interface[1]); |
725 | wacom1 = hid_get_drvdata(hdev1); |
726 | @@ -2727,8 +2730,6 @@ static void wacom_mode_change_work(struct work_struct *work) |
727 | static int wacom_probe(struct hid_device *hdev, |
728 | const struct hid_device_id *id) |
729 | { |
730 | - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); |
731 | - struct usb_device *dev = interface_to_usbdev(intf); |
732 | struct wacom *wacom; |
733 | struct wacom_wac *wacom_wac; |
734 | struct wacom_features *features; |
735 | @@ -2763,8 +2764,14 @@ static int wacom_probe(struct hid_device *hdev, |
736 | wacom_wac->hid_data.inputmode = -1; |
737 | wacom_wac->mode_report = -1; |
738 | |
739 | - wacom->usbdev = dev; |
740 | - wacom->intf = intf; |
741 | + if (hid_is_usb(hdev)) { |
742 | + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); |
743 | + struct usb_device *dev = interface_to_usbdev(intf); |
744 | + |
745 | + wacom->usbdev = dev; |
746 | + wacom->intf = intf; |
747 | + } |
748 | + |
749 | mutex_init(&wacom->lock); |
750 | INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work); |
751 | INIT_WORK(&wacom->wireless_work, wacom_wireless_work); |
752 | diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c |
753 | index 57db60bf2d4ca..6ff6f625bbf69 100644 |
754 | --- a/drivers/iio/accel/kxcjk-1013.c |
755 | +++ b/drivers/iio/accel/kxcjk-1013.c |
756 | @@ -1415,8 +1415,7 @@ static int kxcjk1013_probe(struct i2c_client *client, |
757 | return 0; |
758 | |
759 | err_buffer_cleanup: |
760 | - if (data->dready_trig) |
761 | - iio_triggered_buffer_cleanup(indio_dev); |
762 | + iio_triggered_buffer_cleanup(indio_dev); |
763 | err_trigger_unregister: |
764 | if (data->dready_trig) |
765 | iio_trigger_unregister(data->dready_trig); |
766 | @@ -1439,8 +1438,8 @@ static int kxcjk1013_remove(struct i2c_client *client) |
767 | pm_runtime_set_suspended(&client->dev); |
768 | pm_runtime_put_noidle(&client->dev); |
769 | |
770 | + iio_triggered_buffer_cleanup(indio_dev); |
771 | if (data->dready_trig) { |
772 | - iio_triggered_buffer_cleanup(indio_dev); |
773 | iio_trigger_unregister(data->dready_trig); |
774 | iio_trigger_unregister(data->motion_trig); |
775 | } |
776 | diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c |
777 | index 76429e2a6fb8f..bc18f33c53e14 100644 |
778 | --- a/drivers/iio/accel/kxsd9.c |
779 | +++ b/drivers/iio/accel/kxsd9.c |
780 | @@ -224,14 +224,14 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p) |
781 | hw_values.chan, |
782 | sizeof(hw_values.chan)); |
783 | if (ret) { |
784 | - dev_err(st->dev, |
785 | - "error reading data\n"); |
786 | - return ret; |
787 | + dev_err(st->dev, "error reading data: %d\n", ret); |
788 | + goto out; |
789 | } |
790 | |
791 | iio_push_to_buffers_with_timestamp(indio_dev, |
792 | &hw_values, |
793 | iio_get_time_ns(indio_dev)); |
794 | +out: |
795 | iio_trigger_notify_done(indio_dev->trig); |
796 | |
797 | return IRQ_HANDLED; |
798 | diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c |
799 | index 85d453b3f5ec1..eaaee6f2a978b 100644 |
800 | --- a/drivers/iio/accel/mma8452.c |
801 | +++ b/drivers/iio/accel/mma8452.c |
802 | @@ -1473,7 +1473,7 @@ static int mma8452_trigger_setup(struct iio_dev *indio_dev) |
803 | if (ret) |
804 | return ret; |
805 | |
806 | - indio_dev->trig = trig; |
807 | + indio_dev->trig = iio_trigger_get(trig); |
808 | |
809 | return 0; |
810 | } |
811 | diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c |
812 | index 0f6c1be1cda2c..9e26517870595 100644 |
813 | --- a/drivers/iio/adc/ad7768-1.c |
814 | +++ b/drivers/iio/adc/ad7768-1.c |
815 | @@ -470,8 +470,8 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p) |
816 | iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan, |
817 | iio_get_time_ns(indio_dev)); |
818 | |
819 | - iio_trigger_notify_done(indio_dev->trig); |
820 | err_unlock: |
821 | + iio_trigger_notify_done(indio_dev->trig); |
822 | mutex_unlock(&st->lock); |
823 | |
824 | return IRQ_HANDLED; |
825 | diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c |
826 | index def4abeb47cae..8854da453669c 100644 |
827 | --- a/drivers/iio/adc/at91-sama5d2_adc.c |
828 | +++ b/drivers/iio/adc/at91-sama5d2_adc.c |
829 | @@ -1369,7 +1369,8 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev, |
830 | *val = st->conversion_value; |
831 | ret = at91_adc_adjust_val_osr(st, val); |
832 | if (chan->scan_type.sign == 's') |
833 | - *val = sign_extend32(*val, 11); |
834 | + *val = sign_extend32(*val, |
835 | + chan->scan_type.realbits - 1); |
836 | st->conversion_done = false; |
837 | } |
838 | |
839 | diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c |
840 | index 88059480da17c..e526c7d6e044e 100644 |
841 | --- a/drivers/iio/adc/axp20x_adc.c |
842 | +++ b/drivers/iio/adc/axp20x_adc.c |
843 | @@ -251,19 +251,8 @@ static int axp22x_adc_raw(struct iio_dev *indio_dev, |
844 | struct iio_chan_spec const *chan, int *val) |
845 | { |
846 | struct axp20x_adc_iio *info = iio_priv(indio_dev); |
847 | - int size; |
848 | |
849 | - /* |
850 | - * N.B.: Unlike the Chinese datasheets tell, the charging current is |
851 | - * stored on 12 bits, not 13 bits. Only discharging current is on 13 |
852 | - * bits. |
853 | - */ |
854 | - if (chan->type == IIO_CURRENT && chan->channel == AXP22X_BATT_DISCHRG_I) |
855 | - size = 13; |
856 | - else |
857 | - size = 12; |
858 | - |
859 | - *val = axp20x_read_variable_width(info->regmap, chan->address, size); |
860 | + *val = axp20x_read_variable_width(info->regmap, chan->address, 12); |
861 | if (*val < 0) |
862 | return *val; |
863 | |
864 | @@ -386,9 +375,8 @@ static int axp22x_adc_scale(struct iio_chan_spec const *chan, int *val, |
865 | return IIO_VAL_INT_PLUS_MICRO; |
866 | |
867 | case IIO_CURRENT: |
868 | - *val = 0; |
869 | - *val2 = 500000; |
870 | - return IIO_VAL_INT_PLUS_MICRO; |
871 | + *val = 1; |
872 | + return IIO_VAL_INT; |
873 | |
874 | case IIO_TEMP: |
875 | *val = 100; |
876 | diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c |
877 | index 65c7c9329b1c3..2e37834633ff8 100644 |
878 | --- a/drivers/iio/adc/dln2-adc.c |
879 | +++ b/drivers/iio/adc/dln2-adc.c |
880 | @@ -248,7 +248,6 @@ static int dln2_adc_set_chan_period(struct dln2_adc *dln2, |
881 | static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel) |
882 | { |
883 | int ret, i; |
884 | - struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev); |
885 | u16 conflict; |
886 | __le16 value; |
887 | int olen = sizeof(value); |
888 | @@ -257,13 +256,9 @@ static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel) |
889 | .chan = channel, |
890 | }; |
891 | |
892 | - ret = iio_device_claim_direct_mode(indio_dev); |
893 | - if (ret < 0) |
894 | - return ret; |
895 | - |
896 | ret = dln2_adc_set_chan_enabled(dln2, channel, true); |
897 | if (ret < 0) |
898 | - goto release_direct; |
899 | + return ret; |
900 | |
901 | ret = dln2_adc_set_port_enabled(dln2, true, &conflict); |
902 | if (ret < 0) { |
903 | @@ -300,8 +295,6 @@ disable_port: |
904 | dln2_adc_set_port_enabled(dln2, false, NULL); |
905 | disable_chan: |
906 | dln2_adc_set_chan_enabled(dln2, channel, false); |
907 | -release_direct: |
908 | - iio_device_release_direct_mode(indio_dev); |
909 | |
910 | return ret; |
911 | } |
912 | @@ -337,10 +330,16 @@ static int dln2_adc_read_raw(struct iio_dev *indio_dev, |
913 | |
914 | switch (mask) { |
915 | case IIO_CHAN_INFO_RAW: |
916 | + ret = iio_device_claim_direct_mode(indio_dev); |
917 | + if (ret < 0) |
918 | + return ret; |
919 | + |
920 | mutex_lock(&dln2->mutex); |
921 | ret = dln2_adc_read(dln2, chan->channel); |
922 | mutex_unlock(&dln2->mutex); |
923 | |
924 | + iio_device_release_direct_mode(indio_dev); |
925 | + |
926 | if (ret < 0) |
927 | return ret; |
928 | |
929 | @@ -666,7 +665,11 @@ static int dln2_adc_probe(struct platform_device *pdev) |
930 | return -ENOMEM; |
931 | } |
932 | iio_trigger_set_drvdata(dln2->trig, dln2); |
933 | - devm_iio_trigger_register(dev, dln2->trig); |
934 | + ret = devm_iio_trigger_register(dev, dln2->trig); |
935 | + if (ret) { |
936 | + dev_err(dev, "failed to register trigger: %d\n", ret); |
937 | + return ret; |
938 | + } |
939 | iio_trigger_set_immutable(indio_dev, dln2->trig); |
940 | |
941 | ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, |
942 | diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c |
943 | index 1c3c1bd53374a..98b3f021f0bec 100644 |
944 | --- a/drivers/iio/gyro/itg3200_buffer.c |
945 | +++ b/drivers/iio/gyro/itg3200_buffer.c |
946 | @@ -61,9 +61,9 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p) |
947 | |
948 | iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp); |
949 | |
950 | +error_ret: |
951 | iio_trigger_notify_done(indio_dev->trig); |
952 | |
953 | -error_ret: |
954 | return IRQ_HANDLED; |
955 | } |
956 | |
957 | diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c |
958 | index 3908a9a900354..65de21d9dda15 100644 |
959 | --- a/drivers/iio/industrialio-trigger.c |
960 | +++ b/drivers/iio/industrialio-trigger.c |
961 | @@ -549,7 +549,6 @@ static struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs) |
962 | irq_modify_status(trig->subirq_base + i, |
963 | IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE); |
964 | } |
965 | - get_device(&trig->dev); |
966 | |
967 | return trig; |
968 | |
969 | diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c |
970 | index 8bc01e8a424b0..d84d6895ad30d 100644 |
971 | --- a/drivers/iio/light/ltr501.c |
972 | +++ b/drivers/iio/light/ltr501.c |
973 | @@ -1272,7 +1272,7 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) |
974 | ret = regmap_bulk_read(data->regmap, LTR501_ALS_DATA1, |
975 | (u8 *)als_buf, sizeof(als_buf)); |
976 | if (ret < 0) |
977 | - return ret; |
978 | + goto done; |
979 | if (test_bit(0, indio_dev->active_scan_mask)) |
980 | scan.channels[j++] = le16_to_cpu(als_buf[1]); |
981 | if (test_bit(1, indio_dev->active_scan_mask)) |
982 | diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c |
983 | index 185c24a75ae6f..d9d32ada635bb 100644 |
984 | --- a/drivers/iio/light/stk3310.c |
985 | +++ b/drivers/iio/light/stk3310.c |
986 | @@ -544,9 +544,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private) |
987 | mutex_lock(&data->lock); |
988 | ret = regmap_field_read(data->reg_flag_nf, &dir); |
989 | if (ret < 0) { |
990 | - dev_err(&data->client->dev, "register read failed\n"); |
991 | - mutex_unlock(&data->lock); |
992 | - return ret; |
993 | + dev_err(&data->client->dev, "register read failed: %d\n", ret); |
994 | + goto out; |
995 | } |
996 | event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1, |
997 | IIO_EV_TYPE_THRESH, |
998 | @@ -558,6 +557,7 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private) |
999 | ret = regmap_field_write(data->reg_flag_psint, 0); |
1000 | if (ret < 0) |
1001 | dev_err(&data->client->dev, "failed to reset interrupts\n"); |
1002 | +out: |
1003 | mutex_unlock(&data->lock); |
1004 | |
1005 | return IRQ_HANDLED; |
1006 | diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c |
1007 | index f98510c714b57..4ca9daadb3f16 100644 |
1008 | --- a/drivers/iio/trigger/stm32-timer-trigger.c |
1009 | +++ b/drivers/iio/trigger/stm32-timer-trigger.c |
1010 | @@ -800,6 +800,6 @@ static struct platform_driver stm32_timer_trigger_driver = { |
1011 | }; |
1012 | module_platform_driver(stm32_timer_trigger_driver); |
1013 | |
1014 | -MODULE_ALIAS("platform: stm32-timer-trigger"); |
1015 | +MODULE_ALIAS("platform:stm32-timer-trigger"); |
1016 | MODULE_DESCRIPTION("STMicroelectronics STM32 Timer Trigger driver"); |
1017 | MODULE_LICENSE("GPL v2"); |
1018 | diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c |
1019 | index 1256dbd5b2ef0..85583f51124e2 100644 |
1020 | --- a/drivers/infiniband/hw/hfi1/init.c |
1021 | +++ b/drivers/infiniband/hw/hfi1/init.c |
1022 | @@ -1175,7 +1175,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) |
1023 | rcd->egrbufs.rcvtids = NULL; |
1024 | |
1025 | for (e = 0; e < rcd->egrbufs.alloced; e++) { |
1026 | - if (rcd->egrbufs.buffers[e].dma) |
1027 | + if (rcd->egrbufs.buffers[e].addr) |
1028 | dma_free_coherent(&dd->pcidev->dev, |
1029 | rcd->egrbufs.buffers[e].len, |
1030 | rcd->egrbufs.buffers[e].addr, |
1031 | diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c |
1032 | index c9bdc5221b82f..5849ac5a2ad3b 100644 |
1033 | --- a/drivers/irqchip/irq-armada-370-xp.c |
1034 | +++ b/drivers/irqchip/irq-armada-370-xp.c |
1035 | @@ -232,16 +232,12 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq, |
1036 | int hwirq, i; |
1037 | |
1038 | mutex_lock(&msi_used_lock); |
1039 | + hwirq = bitmap_find_free_region(msi_used, PCI_MSI_DOORBELL_NR, |
1040 | + order_base_2(nr_irqs)); |
1041 | + mutex_unlock(&msi_used_lock); |
1042 | |
1043 | - hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR, |
1044 | - 0, nr_irqs, 0); |
1045 | - if (hwirq >= PCI_MSI_DOORBELL_NR) { |
1046 | - mutex_unlock(&msi_used_lock); |
1047 | + if (hwirq < 0) |
1048 | return -ENOSPC; |
1049 | - } |
1050 | - |
1051 | - bitmap_set(msi_used, hwirq, nr_irqs); |
1052 | - mutex_unlock(&msi_used_lock); |
1053 | |
1054 | for (i = 0; i < nr_irqs; i++) { |
1055 | irq_domain_set_info(domain, virq + i, hwirq + i, |
1056 | @@ -250,7 +246,7 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq, |
1057 | NULL, NULL); |
1058 | } |
1059 | |
1060 | - return hwirq; |
1061 | + return 0; |
1062 | } |
1063 | |
1064 | static void armada_370_xp_msi_free(struct irq_domain *domain, |
1065 | @@ -259,7 +255,7 @@ static void armada_370_xp_msi_free(struct irq_domain *domain, |
1066 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); |
1067 | |
1068 | mutex_lock(&msi_used_lock); |
1069 | - bitmap_clear(msi_used, d->hwirq, nr_irqs); |
1070 | + bitmap_release_region(msi_used, d->hwirq, order_base_2(nr_irqs)); |
1071 | mutex_unlock(&msi_used_lock); |
1072 | } |
1073 | |
1074 | diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c |
1075 | index 398c54387988a..fadd48882ef4c 100644 |
1076 | --- a/drivers/irqchip/irq-gic-v3-its.c |
1077 | +++ b/drivers/irqchip/irq-gic-v3-its.c |
1078 | @@ -574,7 +574,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its, |
1079 | |
1080 | its_fixup_cmd(cmd); |
1081 | |
1082 | - return NULL; |
1083 | + return desc->its_invall_cmd.col; |
1084 | } |
1085 | |
1086 | static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, |
1087 | diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c |
1088 | index a166d30deea26..160b39b5cd108 100644 |
1089 | --- a/drivers/irqchip/irq-nvic.c |
1090 | +++ b/drivers/irqchip/irq-nvic.c |
1091 | @@ -26,7 +26,7 @@ |
1092 | |
1093 | #define NVIC_ISER 0x000 |
1094 | #define NVIC_ICER 0x080 |
1095 | -#define NVIC_IPR 0x300 |
1096 | +#define NVIC_IPR 0x400 |
1097 | |
1098 | #define NVIC_MAX_BANKS 16 |
1099 | /* |
1100 | diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c |
1101 | index beaf15807f789..f76c64084f4b2 100644 |
1102 | --- a/drivers/misc/fastrpc.c |
1103 | +++ b/drivers/misc/fastrpc.c |
1104 | @@ -693,16 +693,18 @@ static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx) |
1105 | static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen) |
1106 | { |
1107 | u64 size = 0; |
1108 | - int i; |
1109 | + int oix; |
1110 | |
1111 | size = ALIGN(metalen, FASTRPC_ALIGN); |
1112 | - for (i = 0; i < ctx->nscalars; i++) { |
1113 | + for (oix = 0; oix < ctx->nbufs; oix++) { |
1114 | + int i = ctx->olaps[oix].raix; |
1115 | + |
1116 | if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) { |
1117 | |
1118 | - if (ctx->olaps[i].offset == 0) |
1119 | + if (ctx->olaps[oix].offset == 0) |
1120 | size = ALIGN(size, FASTRPC_ALIGN); |
1121 | |
1122 | - size += (ctx->olaps[i].mend - ctx->olaps[i].mstart); |
1123 | + size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart); |
1124 | } |
1125 | } |
1126 | |
1127 | diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c |
1128 | index 81e4b0f466623..cc8369a595de3 100644 |
1129 | --- a/drivers/mtd/nand/raw/fsmc_nand.c |
1130 | +++ b/drivers/mtd/nand/raw/fsmc_nand.c |
1131 | @@ -15,6 +15,7 @@ |
1132 | |
1133 | #include <linux/clk.h> |
1134 | #include <linux/completion.h> |
1135 | +#include <linux/delay.h> |
1136 | #include <linux/dmaengine.h> |
1137 | #include <linux/dma-direction.h> |
1138 | #include <linux/dma-mapping.h> |
1139 | @@ -93,6 +94,14 @@ |
1140 | |
1141 | #define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ) |
1142 | |
1143 | +/* |
1144 | + * According to SPEAr300 Reference Manual (RM0082) |
1145 | + * TOUDEL = 7ns (Output delay from the flip-flops to the board) |
1146 | + * TINDEL = 5ns (Input delay from the board to the flipflop) |
1147 | + */ |
1148 | +#define TOUTDEL 7000 |
1149 | +#define TINDEL 5000 |
1150 | + |
1151 | struct fsmc_nand_timings { |
1152 | u8 tclr; |
1153 | u8 tar; |
1154 | @@ -277,7 +286,7 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host, |
1155 | { |
1156 | unsigned long hclk = clk_get_rate(host->clk); |
1157 | unsigned long hclkn = NSEC_PER_SEC / hclk; |
1158 | - u32 thiz, thold, twait, tset; |
1159 | + u32 thiz, thold, twait, tset, twait_min; |
1160 | |
1161 | if (sdrt->tRC_min < 30000) |
1162 | return -EOPNOTSUPP; |
1163 | @@ -309,13 +318,6 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host, |
1164 | else if (tims->thold > FSMC_THOLD_MASK) |
1165 | tims->thold = FSMC_THOLD_MASK; |
1166 | |
1167 | - twait = max(sdrt->tRP_min, sdrt->tWP_min); |
1168 | - tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1; |
1169 | - if (tims->twait == 0) |
1170 | - tims->twait = 1; |
1171 | - else if (tims->twait > FSMC_TWAIT_MASK) |
1172 | - tims->twait = FSMC_TWAIT_MASK; |
1173 | - |
1174 | tset = max(sdrt->tCS_min - sdrt->tWP_min, |
1175 | sdrt->tCEA_max - sdrt->tREA_max); |
1176 | tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1; |
1177 | @@ -324,6 +326,21 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host, |
1178 | else if (tims->tset > FSMC_TSET_MASK) |
1179 | tims->tset = FSMC_TSET_MASK; |
1180 | |
1181 | + /* |
1182 | + * According to SPEAr300 Reference Manual (RM0082) which gives more |
1183 | + * information related to FSMSC timings than the SPEAr600 one (RM0305), |
1184 | + * twait >= tCEA - (tset * TCLK) + TOUTDEL + TINDEL |
1185 | + */ |
1186 | + twait_min = sdrt->tCEA_max - ((tims->tset + 1) * hclkn * 1000) |
1187 | + + TOUTDEL + TINDEL; |
1188 | + twait = max3(sdrt->tRP_min, sdrt->tWP_min, twait_min); |
1189 | + |
1190 | + tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1; |
1191 | + if (tims->twait == 0) |
1192 | + tims->twait = 1; |
1193 | + else if (tims->twait > FSMC_TWAIT_MASK) |
1194 | + tims->twait = FSMC_TWAIT_MASK; |
1195 | + |
1196 | return 0; |
1197 | } |
1198 | |
1199 | @@ -650,6 +667,9 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op, |
1200 | instr->ctx.waitrdy.timeout_ms); |
1201 | break; |
1202 | } |
1203 | + |
1204 | + if (instr->delay_ns) |
1205 | + ndelay(instr->delay_ns); |
1206 | } |
1207 | |
1208 | return ret; |
1209 | diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c |
1210 | index c81698550e5a7..a4af185fa466d 100644 |
1211 | --- a/drivers/net/bonding/bond_alb.c |
1212 | +++ b/drivers/net/bonding/bond_alb.c |
1213 | @@ -1514,14 +1514,14 @@ void bond_alb_monitor(struct work_struct *work) |
1214 | struct slave *slave; |
1215 | |
1216 | if (!bond_has_slaves(bond)) { |
1217 | - bond_info->tx_rebalance_counter = 0; |
1218 | + atomic_set(&bond_info->tx_rebalance_counter, 0); |
1219 | bond_info->lp_counter = 0; |
1220 | goto re_arm; |
1221 | } |
1222 | |
1223 | rcu_read_lock(); |
1224 | |
1225 | - bond_info->tx_rebalance_counter++; |
1226 | + atomic_inc(&bond_info->tx_rebalance_counter); |
1227 | bond_info->lp_counter++; |
1228 | |
1229 | /* send learning packets */ |
1230 | @@ -1543,7 +1543,7 @@ void bond_alb_monitor(struct work_struct *work) |
1231 | } |
1232 | |
1233 | /* rebalance tx traffic */ |
1234 | - if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) { |
1235 | + if (atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) { |
1236 | bond_for_each_slave_rcu(bond, slave, iter) { |
1237 | tlb_clear_slave(bond, slave, 1); |
1238 | if (slave == rcu_access_pointer(bond->curr_active_slave)) { |
1239 | @@ -1553,7 +1553,7 @@ void bond_alb_monitor(struct work_struct *work) |
1240 | bond_info->unbalanced_load = 0; |
1241 | } |
1242 | } |
1243 | - bond_info->tx_rebalance_counter = 0; |
1244 | + atomic_set(&bond_info->tx_rebalance_counter, 0); |
1245 | } |
1246 | |
1247 | if (bond_info->rlb_enabled) { |
1248 | @@ -1623,7 +1623,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave) |
1249 | tlb_init_slave(slave); |
1250 | |
1251 | /* order a rebalance ASAP */ |
1252 | - bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS; |
1253 | + atomic_set(&bond->alb_info.tx_rebalance_counter, |
1254 | + BOND_TLB_REBALANCE_TICKS); |
1255 | |
1256 | if (bond->alb_info.rlb_enabled) |
1257 | bond->alb_info.rlb_rebalance = 1; |
1258 | @@ -1660,7 +1661,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char |
1259 | rlb_clear_slave(bond, slave); |
1260 | } else if (link == BOND_LINK_UP) { |
1261 | /* order a rebalance ASAP */ |
1262 | - bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS; |
1263 | + atomic_set(&bond_info->tx_rebalance_counter, |
1264 | + BOND_TLB_REBALANCE_TICKS); |
1265 | if (bond->alb_info.rlb_enabled) { |
1266 | bond->alb_info.rlb_rebalance = 1; |
1267 | /* If the updelay module parameter is smaller than the |
1268 | diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c |
1269 | index e7a26ec9bdc11..faa78d38d752b 100644 |
1270 | --- a/drivers/net/can/kvaser_pciefd.c |
1271 | +++ b/drivers/net/can/kvaser_pciefd.c |
1272 | @@ -248,6 +248,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); |
1273 | #define KVASER_PCIEFD_SPACK_EWLR BIT(23) |
1274 | #define KVASER_PCIEFD_SPACK_EPLR BIT(24) |
1275 | |
1276 | +/* Kvaser KCAN_EPACK second word */ |
1277 | +#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0) |
1278 | + |
1279 | struct kvaser_pciefd; |
1280 | |
1281 | struct kvaser_pciefd_can { |
1282 | @@ -1283,7 +1286,10 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can, |
1283 | |
1284 | can->err_rep_cnt++; |
1285 | can->can.can_stats.bus_error++; |
1286 | - stats->rx_errors++; |
1287 | + if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX) |
1288 | + stats->tx_errors++; |
1289 | + else |
1290 | + stats->rx_errors++; |
1291 | |
1292 | can->bec.txerr = bec.txerr; |
1293 | can->bec.rxerr = bec.rxerr; |
1294 | diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c |
1295 | index de275ccb4fd0b..9ae3efce0f66b 100644 |
1296 | --- a/drivers/net/can/m_can/m_can.c |
1297 | +++ b/drivers/net/can/m_can/m_can.c |
1298 | @@ -206,15 +206,15 @@ enum m_can_reg { |
1299 | |
1300 | /* Interrupts for version 3.0.x */ |
1301 | #define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE) |
1302 | -#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \ |
1303 | - IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \ |
1304 | - IR_RF1L | IR_RF0L) |
1305 | +#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \ |
1306 | + IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \ |
1307 | + IR_RF0L) |
1308 | #define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X) |
1309 | /* Interrupts for version >= 3.1.x */ |
1310 | #define IR_ERR_LEC_31X (IR_PED | IR_PEA) |
1311 | -#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \ |
1312 | - IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \ |
1313 | - IR_RF1L | IR_RF0L) |
1314 | +#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \ |
1315 | + IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \ |
1316 | + IR_RF0L) |
1317 | #define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X) |
1318 | |
1319 | /* Interrupt Line Select (ILS) */ |
1320 | @@ -751,8 +751,6 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus) |
1321 | { |
1322 | if (irqstatus & IR_WDI) |
1323 | netdev_err(dev, "Message RAM Watchdog event due to missing READY\n"); |
1324 | - if (irqstatus & IR_ELO) |
1325 | - netdev_err(dev, "Error Logging Overflow\n"); |
1326 | if (irqstatus & IR_BEU) |
1327 | netdev_err(dev, "Bit Error Uncorrected\n"); |
1328 | if (irqstatus & IR_BEC) |
1329 | diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c |
1330 | index db41dddd57716..e90651f7b2eaf 100644 |
1331 | --- a/drivers/net/can/pch_can.c |
1332 | +++ b/drivers/net/can/pch_can.c |
1333 | @@ -692,11 +692,11 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota) |
1334 | cf->data[i + 1] = data_reg >> 8; |
1335 | } |
1336 | |
1337 | - netif_receive_skb(skb); |
1338 | rcv_pkts++; |
1339 | stats->rx_packets++; |
1340 | quota--; |
1341 | stats->rx_bytes += cf->can_dlc; |
1342 | + netif_receive_skb(skb); |
1343 | |
1344 | pch_fifo_thresh(priv, obj_num); |
1345 | obj_num++; |
1346 | diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c |
1347 | index 770304eaef950..80b30768d9c6c 100644 |
1348 | --- a/drivers/net/can/sja1000/ems_pcmcia.c |
1349 | +++ b/drivers/net/can/sja1000/ems_pcmcia.c |
1350 | @@ -235,7 +235,12 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base) |
1351 | free_sja1000dev(dev); |
1352 | } |
1353 | |
1354 | - err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED, |
1355 | + if (!card->channels) { |
1356 | + err = -ENODEV; |
1357 | + goto failure_cleanup; |
1358 | + } |
1359 | + |
1360 | + err = request_irq(pdev->irq, &ems_pcmcia_interrupt, IRQF_SHARED, |
1361 | DRV_NAME, card); |
1362 | if (!err) |
1363 | return 0; |
1364 | diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c |
1365 | index 1b9957f12459a..8b5d1add899a6 100644 |
1366 | --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c |
1367 | +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c |
1368 | @@ -28,10 +28,6 @@ |
1369 | |
1370 | #include "kvaser_usb.h" |
1371 | |
1372 | -/* Forward declaration */ |
1373 | -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg; |
1374 | - |
1375 | -#define CAN_USB_CLOCK 8000000 |
1376 | #define MAX_USBCAN_NET_DEVICES 2 |
1377 | |
1378 | /* Command header size */ |
1379 | @@ -80,6 +76,12 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg; |
1380 | |
1381 | #define CMD_LEAF_LOG_MESSAGE 106 |
1382 | |
1383 | +/* Leaf frequency options */ |
1384 | +#define KVASER_USB_LEAF_SWOPTION_FREQ_MASK 0x60 |
1385 | +#define KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK 0 |
1386 | +#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5) |
1387 | +#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6) |
1388 | + |
1389 | /* error factors */ |
1390 | #define M16C_EF_ACKE BIT(0) |
1391 | #define M16C_EF_CRCE BIT(1) |
1392 | @@ -340,6 +342,50 @@ struct kvaser_usb_err_summary { |
1393 | }; |
1394 | }; |
1395 | |
1396 | +static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = { |
1397 | + .name = "kvaser_usb", |
1398 | + .tseg1_min = KVASER_USB_TSEG1_MIN, |
1399 | + .tseg1_max = KVASER_USB_TSEG1_MAX, |
1400 | + .tseg2_min = KVASER_USB_TSEG2_MIN, |
1401 | + .tseg2_max = KVASER_USB_TSEG2_MAX, |
1402 | + .sjw_max = KVASER_USB_SJW_MAX, |
1403 | + .brp_min = KVASER_USB_BRP_MIN, |
1404 | + .brp_max = KVASER_USB_BRP_MAX, |
1405 | + .brp_inc = KVASER_USB_BRP_INC, |
1406 | +}; |
1407 | + |
1408 | +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = { |
1409 | + .clock = { |
1410 | + .freq = 8000000, |
1411 | + }, |
1412 | + .timestamp_freq = 1, |
1413 | + .bittiming_const = &kvaser_usb_leaf_bittiming_const, |
1414 | +}; |
1415 | + |
1416 | +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = { |
1417 | + .clock = { |
1418 | + .freq = 16000000, |
1419 | + }, |
1420 | + .timestamp_freq = 1, |
1421 | + .bittiming_const = &kvaser_usb_leaf_bittiming_const, |
1422 | +}; |
1423 | + |
1424 | +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = { |
1425 | + .clock = { |
1426 | + .freq = 24000000, |
1427 | + }, |
1428 | + .timestamp_freq = 1, |
1429 | + .bittiming_const = &kvaser_usb_leaf_bittiming_const, |
1430 | +}; |
1431 | + |
1432 | +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = { |
1433 | + .clock = { |
1434 | + .freq = 32000000, |
1435 | + }, |
1436 | + .timestamp_freq = 1, |
1437 | + .bittiming_const = &kvaser_usb_leaf_bittiming_const, |
1438 | +}; |
1439 | + |
1440 | static void * |
1441 | kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv, |
1442 | const struct sk_buff *skb, int *frame_len, |
1443 | @@ -471,6 +517,27 @@ static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev, |
1444 | return rc; |
1445 | } |
1446 | |
1447 | +static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev, |
1448 | + const struct leaf_cmd_softinfo *softinfo) |
1449 | +{ |
1450 | + u32 sw_options = le32_to_cpu(softinfo->sw_options); |
1451 | + |
1452 | + dev->fw_version = le32_to_cpu(softinfo->fw_version); |
1453 | + dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx); |
1454 | + |
1455 | + switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) { |
1456 | + case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK: |
1457 | + dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz; |
1458 | + break; |
1459 | + case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK: |
1460 | + dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz; |
1461 | + break; |
1462 | + case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK: |
1463 | + dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz; |
1464 | + break; |
1465 | + } |
1466 | +} |
1467 | + |
1468 | static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev) |
1469 | { |
1470 | struct kvaser_cmd cmd; |
1471 | @@ -486,14 +553,13 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev) |
1472 | |
1473 | switch (dev->card_data.leaf.family) { |
1474 | case KVASER_LEAF: |
1475 | - dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version); |
1476 | - dev->max_tx_urbs = |
1477 | - le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx); |
1478 | + kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo); |
1479 | break; |
1480 | case KVASER_USBCAN: |
1481 | dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version); |
1482 | dev->max_tx_urbs = |
1483 | le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx); |
1484 | + dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz; |
1485 | break; |
1486 | } |
1487 | |
1488 | @@ -1225,24 +1291,11 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev) |
1489 | { |
1490 | struct kvaser_usb_dev_card_data *card_data = &dev->card_data; |
1491 | |
1492 | - dev->cfg = &kvaser_usb_leaf_dev_cfg; |
1493 | card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; |
1494 | |
1495 | return 0; |
1496 | } |
1497 | |
1498 | -static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = { |
1499 | - .name = "kvaser_usb", |
1500 | - .tseg1_min = KVASER_USB_TSEG1_MIN, |
1501 | - .tseg1_max = KVASER_USB_TSEG1_MAX, |
1502 | - .tseg2_min = KVASER_USB_TSEG2_MIN, |
1503 | - .tseg2_max = KVASER_USB_TSEG2_MAX, |
1504 | - .sjw_max = KVASER_USB_SJW_MAX, |
1505 | - .brp_min = KVASER_USB_BRP_MIN, |
1506 | - .brp_max = KVASER_USB_BRP_MAX, |
1507 | - .brp_inc = KVASER_USB_BRP_INC, |
1508 | -}; |
1509 | - |
1510 | static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev) |
1511 | { |
1512 | struct kvaser_usb_net_priv *priv = netdev_priv(netdev); |
1513 | @@ -1348,11 +1401,3 @@ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = { |
1514 | .dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback, |
1515 | .dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd, |
1516 | }; |
1517 | - |
1518 | -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = { |
1519 | - .clock = { |
1520 | - .freq = CAN_USB_CLOCK, |
1521 | - }, |
1522 | - .timestamp_freq = 1, |
1523 | - .bittiming_const = &kvaser_usb_leaf_bittiming_const, |
1524 | -}; |
1525 | diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c |
1526 | index bb032be7fe31e..1f8c3b669dc14 100644 |
1527 | --- a/drivers/net/ethernet/altera/altera_tse_main.c |
1528 | +++ b/drivers/net/ethernet/altera/altera_tse_main.c |
1529 | @@ -1431,16 +1431,19 @@ static int altera_tse_probe(struct platform_device *pdev) |
1530 | priv->rxdescmem_busaddr = dma_res->start; |
1531 | |
1532 | } else { |
1533 | + ret = -ENODEV; |
1534 | goto err_free_netdev; |
1535 | } |
1536 | |
1537 | - if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) |
1538 | + if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) { |
1539 | dma_set_coherent_mask(priv->device, |
1540 | DMA_BIT_MASK(priv->dmaops->dmamask)); |
1541 | - else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) |
1542 | + } else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) { |
1543 | dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32)); |
1544 | - else |
1545 | + } else { |
1546 | + ret = -EIO; |
1547 | goto err_free_netdev; |
1548 | + } |
1549 | |
1550 | /* MAC address space */ |
1551 | ret = request_and_map(pdev, "control_port", &control_port, |
1552 | diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h |
1553 | index d89568f810bc4..5163e06a7dd7d 100644 |
1554 | --- a/drivers/net/ethernet/freescale/fec.h |
1555 | +++ b/drivers/net/ethernet/freescale/fec.h |
1556 | @@ -373,6 +373,9 @@ struct bufdesc_ex { |
1557 | #define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */ |
1558 | #define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2) |
1559 | #define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2) |
1560 | +#define FEC_ENET_RXF_GET(X) (((X) == 0) ? FEC_ENET_RXF_0 : \ |
1561 | + (((X) == 1) ? FEC_ENET_RXF_1 : \ |
1562 | + FEC_ENET_RXF_2)) |
1563 | #define FEC_ENET_TS_AVAIL ((uint)0x00010000) |
1564 | #define FEC_ENET_TS_TIMER ((uint)0x00008000) |
1565 | |
1566 | diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c |
1567 | index a53c2d637a971..a31f891d51fbc 100644 |
1568 | --- a/drivers/net/ethernet/freescale/fec_main.c |
1569 | +++ b/drivers/net/ethernet/freescale/fec_main.c |
1570 | @@ -1444,7 +1444,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) |
1571 | break; |
1572 | pkt_received++; |
1573 | |
1574 | - writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); |
1575 | + writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT); |
1576 | |
1577 | /* Check for errors. */ |
1578 | status ^= BD_ENET_RX_LAST; |
1579 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c |
1580 | index 6e61aea42a0dd..cd6f5bd982559 100644 |
1581 | --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c |
1582 | +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c |
1583 | @@ -1804,6 +1804,32 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, |
1584 | return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0); |
1585 | } |
1586 | |
1587 | +/** |
1588 | + * i40e_sync_vf_state |
1589 | + * @vf: pointer to the VF info |
1590 | + * @state: VF state |
1591 | + * |
1592 | + * Called from a VF message to synchronize the service with a potential |
1593 | + * VF reset state |
1594 | + **/ |
1595 | +static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state) |
1596 | +{ |
1597 | + int i; |
1598 | + |
1599 | + /* When handling some messages, it needs VF state to be set. |
1600 | + * It is possible that this flag is cleared during VF reset, |
1601 | + * so there is a need to wait until the end of the reset to |
1602 | + * handle the request message correctly. |
1603 | + */ |
1604 | + for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) { |
1605 | + if (test_bit(state, &vf->vf_states)) |
1606 | + return true; |
1607 | + usleep_range(10000, 20000); |
1608 | + } |
1609 | + |
1610 | + return test_bit(state, &vf->vf_states); |
1611 | +} |
1612 | + |
1613 | /** |
1614 | * i40e_vc_get_version_msg |
1615 | * @vf: pointer to the VF info |
1616 | @@ -1864,7 +1890,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) |
1617 | size_t len = 0; |
1618 | int ret; |
1619 | |
1620 | - if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { |
1621 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) { |
1622 | aq_ret = I40E_ERR_PARAM; |
1623 | goto err; |
1624 | } |
1625 | @@ -2019,7 +2045,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) |
1626 | bool allmulti = false; |
1627 | bool alluni = false; |
1628 | |
1629 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { |
1630 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { |
1631 | aq_ret = I40E_ERR_PARAM; |
1632 | goto err_out; |
1633 | } |
1634 | @@ -2107,7 +2133,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) |
1635 | struct i40e_vsi *vsi; |
1636 | u16 num_qps_all = 0; |
1637 | |
1638 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { |
1639 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { |
1640 | aq_ret = I40E_ERR_PARAM; |
1641 | goto error_param; |
1642 | } |
1643 | @@ -2255,7 +2281,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) |
1644 | i40e_status aq_ret = 0; |
1645 | int i; |
1646 | |
1647 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { |
1648 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { |
1649 | aq_ret = I40E_ERR_PARAM; |
1650 | goto error_param; |
1651 | } |
1652 | @@ -2427,7 +2453,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) |
1653 | struct i40e_pf *pf = vf->pf; |
1654 | i40e_status aq_ret = 0; |
1655 | |
1656 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { |
1657 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { |
1658 | aq_ret = I40E_ERR_PARAM; |
1659 | goto error_param; |
1660 | } |
1661 | @@ -2477,7 +2503,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) |
1662 | u8 cur_pairs = vf->num_queue_pairs; |
1663 | struct i40e_pf *pf = vf->pf; |
1664 | |
1665 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) |
1666 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) |
1667 | return -EINVAL; |
1668 | |
1669 | if (req_pairs > I40E_MAX_VF_QUEUES) { |
1670 | @@ -2523,7 +2549,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) |
1671 | |
1672 | memset(&stats, 0, sizeof(struct i40e_eth_stats)); |
1673 | |
1674 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { |
1675 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { |
1676 | aq_ret = I40E_ERR_PARAM; |
1677 | goto error_param; |
1678 | } |
1679 | @@ -2632,7 +2658,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) |
1680 | i40e_status ret = 0; |
1681 | int i; |
1682 | |
1683 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || |
1684 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || |
1685 | !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { |
1686 | ret = I40E_ERR_PARAM; |
1687 | goto error_param; |
1688 | @@ -2701,7 +2727,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) |
1689 | i40e_status ret = 0; |
1690 | int i; |
1691 | |
1692 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || |
1693 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || |
1694 | !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { |
1695 | ret = I40E_ERR_PARAM; |
1696 | goto error_param; |
1697 | @@ -2840,7 +2866,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) |
1698 | i40e_status aq_ret = 0; |
1699 | int i; |
1700 | |
1701 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || |
1702 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || |
1703 | !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { |
1704 | aq_ret = I40E_ERR_PARAM; |
1705 | goto error_param; |
1706 | @@ -2960,9 +2986,9 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) |
1707 | struct i40e_vsi *vsi = NULL; |
1708 | i40e_status aq_ret = 0; |
1709 | |
1710 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || |
1711 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || |
1712 | !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) || |
1713 | - (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { |
1714 | + vrk->key_len != I40E_HKEY_ARRAY_SIZE) { |
1715 | aq_ret = I40E_ERR_PARAM; |
1716 | goto err; |
1717 | } |
1718 | @@ -2991,9 +3017,9 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) |
1719 | i40e_status aq_ret = 0; |
1720 | u16 i; |
1721 | |
1722 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || |
1723 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || |
1724 | !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) || |
1725 | - (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { |
1726 | + vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) { |
1727 | aq_ret = I40E_ERR_PARAM; |
1728 | goto err; |
1729 | } |
1730 | @@ -3026,7 +3052,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) |
1731 | i40e_status aq_ret = 0; |
1732 | int len = 0; |
1733 | |
1734 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { |
1735 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { |
1736 | aq_ret = I40E_ERR_PARAM; |
1737 | goto err; |
1738 | } |
1739 | @@ -3062,7 +3088,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg) |
1740 | struct i40e_hw *hw = &pf->hw; |
1741 | i40e_status aq_ret = 0; |
1742 | |
1743 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { |
1744 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { |
1745 | aq_ret = I40E_ERR_PARAM; |
1746 | goto err; |
1747 | } |
1748 | @@ -3087,7 +3113,7 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) |
1749 | i40e_status aq_ret = 0; |
1750 | struct i40e_vsi *vsi; |
1751 | |
1752 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { |
1753 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { |
1754 | aq_ret = I40E_ERR_PARAM; |
1755 | goto err; |
1756 | } |
1757 | @@ -3113,7 +3139,7 @@ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) |
1758 | i40e_status aq_ret = 0; |
1759 | struct i40e_vsi *vsi; |
1760 | |
1761 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { |
1762 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { |
1763 | aq_ret = I40E_ERR_PARAM; |
1764 | goto err; |
1765 | } |
1766 | @@ -3340,7 +3366,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) |
1767 | i40e_status aq_ret = 0; |
1768 | int i, ret; |
1769 | |
1770 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { |
1771 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { |
1772 | aq_ret = I40E_ERR_PARAM; |
1773 | goto err; |
1774 | } |
1775 | @@ -3471,7 +3497,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) |
1776 | i40e_status aq_ret = 0; |
1777 | int i, ret; |
1778 | |
1779 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { |
1780 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { |
1781 | aq_ret = I40E_ERR_PARAM; |
1782 | goto err_out; |
1783 | } |
1784 | @@ -3580,7 +3606,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) |
1785 | i40e_status aq_ret = 0; |
1786 | u64 speed = 0; |
1787 | |
1788 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { |
1789 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { |
1790 | aq_ret = I40E_ERR_PARAM; |
1791 | goto err; |
1792 | } |
1793 | @@ -3687,11 +3713,6 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) |
1794 | |
1795 | /* set this flag only after making sure all inputs are sane */ |
1796 | vf->adq_enabled = true; |
1797 | - /* num_req_queues is set when user changes number of queues via ethtool |
1798 | - * and this causes issue for default VSI(which depends on this variable) |
1799 | - * when ADq is enabled, hence reset it. |
1800 | - */ |
1801 | - vf->num_req_queues = 0; |
1802 | |
1803 | /* reset the VF in order to allocate resources */ |
1804 | i40e_vc_notify_vf_reset(vf); |
1805 | @@ -3715,7 +3736,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) |
1806 | struct i40e_pf *pf = vf->pf; |
1807 | i40e_status aq_ret = 0; |
1808 | |
1809 | - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { |
1810 | + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { |
1811 | aq_ret = I40E_ERR_PARAM; |
1812 | goto err; |
1813 | } |
1814 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h |
1815 | index f65cc0c165502..7df3e5833c5d2 100644 |
1816 | --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h |
1817 | +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h |
1818 | @@ -19,6 +19,8 @@ |
1819 | |
1820 | #define I40E_MAX_VF_PROMISC_FLAGS 3 |
1821 | |
1822 | +#define I40E_VF_STATE_WAIT_COUNT 20 |
1823 | + |
1824 | /* Various queue ctrls */ |
1825 | enum i40e_queue_ctrl { |
1826 | I40E_QUEUE_CTRL_UNKNOWN = 0, |
1827 | diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c |
1828 | index 4e0e1b02d615e..ed45f3c8338e8 100644 |
1829 | --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c |
1830 | +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c |
1831 | @@ -612,23 +612,44 @@ static int iavf_set_ringparam(struct net_device *netdev, |
1832 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) |
1833 | return -EINVAL; |
1834 | |
1835 | - new_tx_count = clamp_t(u32, ring->tx_pending, |
1836 | - IAVF_MIN_TXD, |
1837 | - IAVF_MAX_TXD); |
1838 | - new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE); |
1839 | + if (ring->tx_pending > IAVF_MAX_TXD || |
1840 | + ring->tx_pending < IAVF_MIN_TXD || |
1841 | + ring->rx_pending > IAVF_MAX_RXD || |
1842 | + ring->rx_pending < IAVF_MIN_RXD) { |
1843 | + netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n", |
1844 | + ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD, |
1845 | + IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE); |
1846 | + return -EINVAL; |
1847 | + } |
1848 | |
1849 | - new_rx_count = clamp_t(u32, ring->rx_pending, |
1850 | - IAVF_MIN_RXD, |
1851 | - IAVF_MAX_RXD); |
1852 | - new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE); |
1853 | + new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE); |
1854 | + if (new_tx_count != ring->tx_pending) |
1855 | + netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n", |
1856 | + new_tx_count); |
1857 | + |
1858 | + new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE); |
1859 | + if (new_rx_count != ring->rx_pending) |
1860 | + netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n", |
1861 | + new_rx_count); |
1862 | |
1863 | /* if nothing to do return success */ |
1864 | if ((new_tx_count == adapter->tx_desc_count) && |
1865 | - (new_rx_count == adapter->rx_desc_count)) |
1866 | + (new_rx_count == adapter->rx_desc_count)) { |
1867 | + netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n"); |
1868 | return 0; |
1869 | + } |
1870 | |
1871 | - adapter->tx_desc_count = new_tx_count; |
1872 | - adapter->rx_desc_count = new_rx_count; |
1873 | + if (new_tx_count != adapter->tx_desc_count) { |
1874 | + netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n", |
1875 | + adapter->tx_desc_count, new_tx_count); |
1876 | + adapter->tx_desc_count = new_tx_count; |
1877 | + } |
1878 | + |
1879 | + if (new_rx_count != adapter->rx_desc_count) { |
1880 | + netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n", |
1881 | + adapter->rx_desc_count, new_rx_count); |
1882 | + adapter->rx_desc_count = new_rx_count; |
1883 | + } |
1884 | |
1885 | if (netif_running(netdev)) { |
1886 | adapter->flags |= IAVF_FLAG_RESET_NEEDED; |
1887 | diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c |
1888 | index 484c2a6f1625d..449eb06e2c7da 100644 |
1889 | --- a/drivers/net/ethernet/intel/iavf/iavf_main.c |
1890 | +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c |
1891 | @@ -2151,6 +2151,7 @@ static void iavf_reset_task(struct work_struct *work) |
1892 | } |
1893 | |
1894 | pci_set_master(adapter->pdev); |
1895 | + pci_restore_msi_state(adapter->pdev); |
1896 | |
1897 | if (i == IAVF_RESET_WAIT_COUNT) { |
1898 | dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", |
1899 | diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c |
1900 | index 6370e96ebfacc..3eea68f3a5262 100644 |
1901 | --- a/drivers/net/ethernet/intel/ice/ice_main.c |
1902 | +++ b/drivers/net/ethernet/intel/ice/ice_main.c |
1903 | @@ -3561,6 +3561,9 @@ static int ice_up_complete(struct ice_vsi *vsi) |
1904 | netif_carrier_on(vsi->netdev); |
1905 | } |
1906 | |
1907 | + /* clear this now, and the first stats read will be used as baseline */ |
1908 | + vsi->stat_offsets_loaded = false; |
1909 | + |
1910 | ice_service_task_schedule(pf); |
1911 | |
1912 | return 0; |
1913 | diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c |
1914 | index 94994a939277b..6ef48eb3a77d4 100644 |
1915 | --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c |
1916 | +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c |
1917 | @@ -803,8 +803,10 @@ int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size) |
1918 | return -ENOMEM; |
1919 | |
1920 | cache = kzalloc(sizeof(*cache), GFP_KERNEL); |
1921 | - if (!cache) |
1922 | + if (!cache) { |
1923 | + nfp_cpp_area_free(area); |
1924 | return -ENOMEM; |
1925 | + } |
1926 | |
1927 | cache->id = 0; |
1928 | cache->addr = 0; |
1929 | diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c |
1930 | index f310a94e04898..b81579afa361d 100644 |
1931 | --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c |
1932 | +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c |
1933 | @@ -1597,6 +1597,13 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) |
1934 | data_split = true; |
1935 | } |
1936 | } else { |
1937 | + if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) { |
1938 | + DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len); |
1939 | + qede_free_failed_tx_pkt(txq, first_bd, 0, false); |
1940 | + qede_update_tx_producer(txq); |
1941 | + return NETDEV_TX_OK; |
1942 | + } |
1943 | + |
1944 | val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << |
1945 | ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT); |
1946 | } |
1947 | diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c |
1948 | index 6ca2216e40585..da2862d596813 100644 |
1949 | --- a/drivers/net/ethernet/qlogic/qla3xxx.c |
1950 | +++ b/drivers/net/ethernet/qlogic/qla3xxx.c |
1951 | @@ -3495,20 +3495,19 @@ static int ql_adapter_up(struct ql3_adapter *qdev) |
1952 | |
1953 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); |
1954 | |
1955 | - err = ql_wait_for_drvr_lock(qdev); |
1956 | - if (err) { |
1957 | - err = ql_adapter_initialize(qdev); |
1958 | - if (err) { |
1959 | - netdev_err(ndev, "Unable to initialize adapter\n"); |
1960 | - goto err_init; |
1961 | - } |
1962 | - netdev_err(ndev, "Releasing driver lock\n"); |
1963 | - ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); |
1964 | - } else { |
1965 | + if (!ql_wait_for_drvr_lock(qdev)) { |
1966 | netdev_err(ndev, "Could not acquire driver lock\n"); |
1967 | + err = -ENODEV; |
1968 | goto err_lock; |
1969 | } |
1970 | |
1971 | + err = ql_adapter_initialize(qdev); |
1972 | + if (err) { |
1973 | + netdev_err(ndev, "Unable to initialize adapter\n"); |
1974 | + goto err_init; |
1975 | + } |
1976 | + ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); |
1977 | + |
1978 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); |
1979 | |
1980 | set_bit(QL_ADAPTER_UP, &qdev->flags); |
1981 | diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c |
1982 | index 4cff0a6b1098a..1c9a1b94f6e28 100644 |
1983 | --- a/drivers/net/usb/cdc_ncm.c |
1984 | +++ b/drivers/net/usb/cdc_ncm.c |
1985 | @@ -177,6 +177,8 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx) |
1986 | /* clamp new_tx to sane values */ |
1987 | min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16); |
1988 | max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)); |
1989 | + if (max == 0) |
1990 | + max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */ |
1991 | |
1992 | /* some devices set dwNtbOutMaxSize too low for the above default */ |
1993 | min = min(min, max); |
1994 | diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c |
1995 | index be5bd2f637d80..b51a2367dbaa0 100644 |
1996 | --- a/drivers/net/vrf.c |
1997 | +++ b/drivers/net/vrf.c |
1998 | @@ -495,8 +495,6 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev, |
1999 | |
2000 | skb->dev = vrf_dev; |
2001 | |
2002 | - vrf_nf_set_untracked(skb); |
2003 | - |
2004 | err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, |
2005 | skb, NULL, vrf_dev, vrf_ip6_out_direct_finish); |
2006 | |
2007 | @@ -517,6 +515,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, |
2008 | if (rt6_need_strict(&ipv6_hdr(skb)->daddr)) |
2009 | return skb; |
2010 | |
2011 | + vrf_nf_set_untracked(skb); |
2012 | + |
2013 | if (qdisc_tx_is_default(vrf_dev) || |
2014 | IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) |
2015 | return vrf_ip6_out_direct(vrf_dev, sk, skb); |
2016 | @@ -732,8 +732,6 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev, |
2017 | |
2018 | skb->dev = vrf_dev; |
2019 | |
2020 | - vrf_nf_set_untracked(skb); |
2021 | - |
2022 | err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, |
2023 | skb, NULL, vrf_dev, vrf_ip_out_direct_finish); |
2024 | |
2025 | @@ -755,6 +753,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, |
2026 | ipv4_is_lbcast(ip_hdr(skb)->daddr)) |
2027 | return skb; |
2028 | |
2029 | + vrf_nf_set_untracked(skb); |
2030 | + |
2031 | if (qdisc_tx_is_default(vrf_dev) || |
2032 | IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) |
2033 | return vrf_ip_out_direct(vrf_dev, sk, skb); |
2034 | diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c |
2035 | index 64f18bf1e694a..74c21152367ae 100644 |
2036 | --- a/drivers/tty/serial/serial-tegra.c |
2037 | +++ b/drivers/tty/serial/serial-tegra.c |
2038 | @@ -1494,7 +1494,7 @@ static struct tegra_uart_chip_data tegra20_uart_chip_data = { |
2039 | .fifo_mode_enable_status = false, |
2040 | .uart_max_port = 5, |
2041 | .max_dma_burst_bytes = 4, |
2042 | - .error_tolerance_low_range = 0, |
2043 | + .error_tolerance_low_range = -4, |
2044 | .error_tolerance_high_range = 4, |
2045 | }; |
2046 | |
2047 | @@ -1505,7 +1505,7 @@ static struct tegra_uart_chip_data tegra30_uart_chip_data = { |
2048 | .fifo_mode_enable_status = false, |
2049 | .uart_max_port = 5, |
2050 | .max_dma_burst_bytes = 4, |
2051 | - .error_tolerance_low_range = 0, |
2052 | + .error_tolerance_low_range = -4, |
2053 | .error_tolerance_high_range = 4, |
2054 | }; |
2055 | |
2056 | diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c |
2057 | index c68217b7dace4..a8d97773d3d98 100644 |
2058 | --- a/drivers/usb/core/config.c |
2059 | +++ b/drivers/usb/core/config.c |
2060 | @@ -409,7 +409,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, |
2061 | * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0 |
2062 | * (see the end of section 5.6.3), so don't warn about them. |
2063 | */ |
2064 | - maxp = usb_endpoint_maxp(&endpoint->desc); |
2065 | + maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize); |
2066 | if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) { |
2067 | dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n", |
2068 | cfgno, inum, asnum, d->bEndpointAddress); |
2069 | @@ -425,9 +425,9 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, |
2070 | maxpacket_maxes = full_speed_maxpacket_maxes; |
2071 | break; |
2072 | case USB_SPEED_HIGH: |
2073 | - /* Bits 12..11 are allowed only for HS periodic endpoints */ |
2074 | + /* Multiple-transactions bits are allowed only for HS periodic endpoints */ |
2075 | if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) { |
2076 | - i = maxp & (BIT(12) | BIT(11)); |
2077 | + i = maxp & USB_EP_MAXP_MULT_MASK; |
2078 | maxp &= ~i; |
2079 | } |
2080 | /* fallthrough */ |
2081 | diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c |
2082 | index 6bd3fdb925cd9..d2980e30f3417 100644 |
2083 | --- a/drivers/usb/gadget/composite.c |
2084 | +++ b/drivers/usb/gadget/composite.c |
2085 | @@ -1648,6 +1648,18 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) |
2086 | struct usb_function *f = NULL; |
2087 | u8 endp; |
2088 | |
2089 | + if (w_length > USB_COMP_EP0_BUFSIZ) { |
2090 | + if (ctrl->bRequestType == USB_DIR_OUT) { |
2091 | + goto done; |
2092 | + } else { |
2093 | + /* Cast away the const, we are going to overwrite on purpose. */ |
2094 | + __le16 *temp = (__le16 *)&ctrl->wLength; |
2095 | + |
2096 | + *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ); |
2097 | + w_length = USB_COMP_EP0_BUFSIZ; |
2098 | + } |
2099 | + } |
2100 | + |
2101 | /* partial re-init of the response message; the function or the |
2102 | * gadget might need to intercept e.g. a control-OUT completion |
2103 | * when we delegate to it. |
2104 | @@ -2161,7 +2173,7 @@ int composite_dev_prepare(struct usb_composite_driver *composite, |
2105 | if (!cdev->req) |
2106 | return -ENOMEM; |
2107 | |
2108 | - cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL); |
2109 | + cdev->req->buf = kzalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL); |
2110 | if (!cdev->req->buf) |
2111 | goto fail; |
2112 | |
2113 | diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c |
2114 | index e1d566c9918ae..355bc7dab9d5f 100644 |
2115 | --- a/drivers/usb/gadget/legacy/dbgp.c |
2116 | +++ b/drivers/usb/gadget/legacy/dbgp.c |
2117 | @@ -137,7 +137,7 @@ static int dbgp_enable_ep_req(struct usb_ep *ep) |
2118 | goto fail_1; |
2119 | } |
2120 | |
2121 | - req->buf = kmalloc(DBGP_REQ_LEN, GFP_KERNEL); |
2122 | + req->buf = kzalloc(DBGP_REQ_LEN, GFP_KERNEL); |
2123 | if (!req->buf) { |
2124 | err = -ENOMEM; |
2125 | stp = 2; |
2126 | @@ -345,6 +345,19 @@ static int dbgp_setup(struct usb_gadget *gadget, |
2127 | void *data = NULL; |
2128 | u16 len = 0; |
2129 | |
2130 | + if (length > DBGP_REQ_LEN) { |
2131 | + if (ctrl->bRequestType == USB_DIR_OUT) { |
2132 | + return err; |
2133 | + } else { |
2134 | + /* Cast away the const, we are going to overwrite on purpose. */ |
2135 | + __le16 *temp = (__le16 *)&ctrl->wLength; |
2136 | + |
2137 | + *temp = cpu_to_le16(DBGP_REQ_LEN); |
2138 | + length = DBGP_REQ_LEN; |
2139 | + } |
2140 | + } |
2141 | + |
2142 | + |
2143 | if (request == USB_REQ_GET_DESCRIPTOR) { |
2144 | switch (value>>8) { |
2145 | case USB_DT_DEVICE: |
2146 | diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c |
2147 | index cabcbb47f0ac1..f0aff79f544c3 100644 |
2148 | --- a/drivers/usb/gadget/legacy/inode.c |
2149 | +++ b/drivers/usb/gadget/legacy/inode.c |
2150 | @@ -110,6 +110,8 @@ enum ep0_state { |
2151 | /* enough for the whole queue: most events invalidate others */ |
2152 | #define N_EVENT 5 |
2153 | |
2154 | +#define RBUF_SIZE 256 |
2155 | + |
2156 | struct dev_data { |
2157 | spinlock_t lock; |
2158 | refcount_t count; |
2159 | @@ -144,7 +146,7 @@ struct dev_data { |
2160 | struct dentry *dentry; |
2161 | |
2162 | /* except this scratch i/o buffer for ep0 */ |
2163 | - u8 rbuf [256]; |
2164 | + u8 rbuf[RBUF_SIZE]; |
2165 | }; |
2166 | |
2167 | static inline void get_dev (struct dev_data *data) |
2168 | @@ -1333,6 +1335,18 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) |
2169 | u16 w_value = le16_to_cpu(ctrl->wValue); |
2170 | u16 w_length = le16_to_cpu(ctrl->wLength); |
2171 | |
2172 | + if (w_length > RBUF_SIZE) { |
2173 | + if (ctrl->bRequestType == USB_DIR_OUT) { |
2174 | + return value; |
2175 | + } else { |
2176 | + /* Cast away the const, we are going to overwrite on purpose. */ |
2177 | + __le16 *temp = (__le16 *)&ctrl->wLength; |
2178 | + |
2179 | + *temp = cpu_to_le16(RBUF_SIZE); |
2180 | + w_length = RBUF_SIZE; |
2181 | + } |
2182 | + } |
2183 | + |
2184 | spin_lock (&dev->lock); |
2185 | dev->setup_abort = 0; |
2186 | if (dev->state == STATE_DEV_UNCONNECTED) { |
2187 | diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c |
2188 | index 9a4260927ce31..48832f7f2fc3c 100644 |
2189 | --- a/drivers/usb/host/xhci-hub.c |
2190 | +++ b/drivers/usb/host/xhci-hub.c |
2191 | @@ -629,6 +629,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci, |
2192 | continue; |
2193 | |
2194 | retval = xhci_disable_slot(xhci, i); |
2195 | + xhci_free_virt_device(xhci, i); |
2196 | if (retval) |
2197 | xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n", |
2198 | i, retval); |
2199 | diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c |
2200 | index 5ec31d2d32e05..7fa27b4037560 100644 |
2201 | --- a/drivers/usb/host/xhci-ring.c |
2202 | +++ b/drivers/usb/host/xhci-ring.c |
2203 | @@ -1265,7 +1265,6 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) |
2204 | if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) |
2205 | /* Delete default control endpoint resources */ |
2206 | xhci_free_device_endpoint_resources(xhci, virt_dev, true); |
2207 | - xhci_free_virt_device(xhci, slot_id); |
2208 | } |
2209 | |
2210 | static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, |
2211 | diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c |
2212 | index 4bb850370bb6b..1c8070023161f 100644 |
2213 | --- a/drivers/usb/host/xhci.c |
2214 | +++ b/drivers/usb/host/xhci.c |
2215 | @@ -3889,7 +3889,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) |
2216 | struct xhci_slot_ctx *slot_ctx; |
2217 | int i, ret; |
2218 | |
2219 | -#ifndef CONFIG_USB_DEFAULT_PERSIST |
2220 | /* |
2221 | * We called pm_runtime_get_noresume when the device was attached. |
2222 | * Decrement the counter here to allow controller to runtime suspend |
2223 | @@ -3897,7 +3896,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) |
2224 | */ |
2225 | if (xhci->quirks & XHCI_RESET_ON_RESUME) |
2226 | pm_runtime_put_noidle(hcd->self.controller); |
2227 | -#endif |
2228 | |
2229 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
2230 | /* If the host is halted due to driver unload, we still need to free the |
2231 | @@ -3916,9 +3914,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) |
2232 | del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); |
2233 | } |
2234 | virt_dev->udev = NULL; |
2235 | - ret = xhci_disable_slot(xhci, udev->slot_id); |
2236 | - if (ret) |
2237 | - xhci_free_virt_device(xhci, udev->slot_id); |
2238 | + xhci_disable_slot(xhci, udev->slot_id); |
2239 | + xhci_free_virt_device(xhci, udev->slot_id); |
2240 | } |
2241 | |
2242 | int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) |
2243 | @@ -3928,7 +3925,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) |
2244 | u32 state; |
2245 | int ret = 0; |
2246 | |
2247 | - command = xhci_alloc_command(xhci, false, GFP_KERNEL); |
2248 | + command = xhci_alloc_command(xhci, true, GFP_KERNEL); |
2249 | if (!command) |
2250 | return -ENOMEM; |
2251 | |
2252 | @@ -3953,6 +3950,15 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) |
2253 | } |
2254 | xhci_ring_cmd_db(xhci); |
2255 | spin_unlock_irqrestore(&xhci->lock, flags); |
2256 | + |
2257 | + wait_for_completion(command->completion); |
2258 | + |
2259 | + if (command->status != COMP_SUCCESS) |
2260 | + xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n", |
2261 | + slot_id, command->status); |
2262 | + |
2263 | + xhci_free_command(xhci, command); |
2264 | + |
2265 | return ret; |
2266 | } |
2267 | |
2268 | @@ -4049,23 +4055,20 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) |
2269 | |
2270 | xhci_debugfs_create_slot(xhci, slot_id); |
2271 | |
2272 | -#ifndef CONFIG_USB_DEFAULT_PERSIST |
2273 | /* |
2274 | * If resetting upon resume, we can't put the controller into runtime |
2275 | * suspend if there is a device attached. |
2276 | */ |
2277 | if (xhci->quirks & XHCI_RESET_ON_RESUME) |
2278 | pm_runtime_get_noresume(hcd->self.controller); |
2279 | -#endif |
2280 | |
2281 | /* Is this a LS or FS device under a HS hub? */ |
2282 | /* Hub or peripherial? */ |
2283 | return 1; |
2284 | |
2285 | disable_slot: |
2286 | - ret = xhci_disable_slot(xhci, udev->slot_id); |
2287 | - if (ret) |
2288 | - xhci_free_virt_device(xhci, udev->slot_id); |
2289 | + xhci_disable_slot(xhci, udev->slot_id); |
2290 | + xhci_free_virt_device(xhci, udev->slot_id); |
2291 | |
2292 | return 0; |
2293 | } |
2294 | @@ -4195,6 +4198,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, |
2295 | |
2296 | mutex_unlock(&xhci->mutex); |
2297 | ret = xhci_disable_slot(xhci, udev->slot_id); |
2298 | + xhci_free_virt_device(xhci, udev->slot_id); |
2299 | if (!ret) |
2300 | xhci_alloc_dev(hcd, udev); |
2301 | kfree(command->completion); |
2302 | diff --git a/fs/aio.c b/fs/aio.c |
2303 | index 47bb7b5685ba1..fb92c32a6f1e9 100644 |
2304 | --- a/fs/aio.c |
2305 | +++ b/fs/aio.c |
2306 | @@ -183,8 +183,9 @@ struct poll_iocb { |
2307 | struct file *file; |
2308 | struct wait_queue_head *head; |
2309 | __poll_t events; |
2310 | - bool done; |
2311 | bool cancelled; |
2312 | + bool work_scheduled; |
2313 | + bool work_need_resched; |
2314 | struct wait_queue_entry wait; |
2315 | struct work_struct work; |
2316 | }; |
2317 | @@ -1626,6 +1627,51 @@ static void aio_poll_put_work(struct work_struct *work) |
2318 | iocb_put(iocb); |
2319 | } |
2320 | |
2321 | +/* |
2322 | + * Safely lock the waitqueue which the request is on, synchronizing with the |
2323 | + * case where the ->poll() provider decides to free its waitqueue early. |
2324 | + * |
2325 | + * Returns true on success, meaning that req->head->lock was locked, req->wait |
2326 | + * is on req->head, and an RCU read lock was taken. Returns false if the |
2327 | + * request was already removed from its waitqueue (which might no longer exist). |
2328 | + */ |
2329 | +static bool poll_iocb_lock_wq(struct poll_iocb *req) |
2330 | +{ |
2331 | + wait_queue_head_t *head; |
2332 | + |
2333 | + /* |
2334 | + * While we hold the waitqueue lock and the waitqueue is nonempty, |
2335 | + * wake_up_pollfree() will wait for us. However, taking the waitqueue |
2336 | + * lock in the first place can race with the waitqueue being freed. |
2337 | + * |
2338 | + * We solve this as eventpoll does: by taking advantage of the fact that |
2339 | + * all users of wake_up_pollfree() will RCU-delay the actual free. If |
2340 | + * we enter rcu_read_lock() and see that the pointer to the queue is |
2341 | + * non-NULL, we can then lock it without the memory being freed out from |
2342 | + * under us, then check whether the request is still on the queue. |
2343 | + * |
2344 | + * Keep holding rcu_read_lock() as long as we hold the queue lock, in |
2345 | + * case the caller deletes the entry from the queue, leaving it empty. |
2346 | + * In that case, only RCU prevents the queue memory from being freed. |
2347 | + */ |
2348 | + rcu_read_lock(); |
2349 | + head = smp_load_acquire(&req->head); |
2350 | + if (head) { |
2351 | + spin_lock(&head->lock); |
2352 | + if (!list_empty(&req->wait.entry)) |
2353 | + return true; |
2354 | + spin_unlock(&head->lock); |
2355 | + } |
2356 | + rcu_read_unlock(); |
2357 | + return false; |
2358 | +} |
2359 | + |
2360 | +static void poll_iocb_unlock_wq(struct poll_iocb *req) |
2361 | +{ |
2362 | + spin_unlock(&req->head->lock); |
2363 | + rcu_read_unlock(); |
2364 | +} |
2365 | + |
2366 | static void aio_poll_complete_work(struct work_struct *work) |
2367 | { |
2368 | struct poll_iocb *req = container_of(work, struct poll_iocb, work); |
2369 | @@ -1645,14 +1691,27 @@ static void aio_poll_complete_work(struct work_struct *work) |
2370 | * avoid further branches in the fast path. |
2371 | */ |
2372 | spin_lock_irq(&ctx->ctx_lock); |
2373 | - if (!mask && !READ_ONCE(req->cancelled)) { |
2374 | - add_wait_queue(req->head, &req->wait); |
2375 | - spin_unlock_irq(&ctx->ctx_lock); |
2376 | - return; |
2377 | - } |
2378 | + if (poll_iocb_lock_wq(req)) { |
2379 | + if (!mask && !READ_ONCE(req->cancelled)) { |
2380 | + /* |
2381 | + * The request isn't actually ready to be completed yet. |
2382 | + * Reschedule completion if another wakeup came in. |
2383 | + */ |
2384 | + if (req->work_need_resched) { |
2385 | + schedule_work(&req->work); |
2386 | + req->work_need_resched = false; |
2387 | + } else { |
2388 | + req->work_scheduled = false; |
2389 | + } |
2390 | + poll_iocb_unlock_wq(req); |
2391 | + spin_unlock_irq(&ctx->ctx_lock); |
2392 | + return; |
2393 | + } |
2394 | + list_del_init(&req->wait.entry); |
2395 | + poll_iocb_unlock_wq(req); |
2396 | + } /* else, POLLFREE has freed the waitqueue, so we must complete */ |
2397 | list_del_init(&iocb->ki_list); |
2398 | iocb->ki_res.res = mangle_poll(mask); |
2399 | - req->done = true; |
2400 | spin_unlock_irq(&ctx->ctx_lock); |
2401 | |
2402 | iocb_put(iocb); |
2403 | @@ -1664,13 +1723,14 @@ static int aio_poll_cancel(struct kiocb *iocb) |
2404 | struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw); |
2405 | struct poll_iocb *req = &aiocb->poll; |
2406 | |
2407 | - spin_lock(&req->head->lock); |
2408 | - WRITE_ONCE(req->cancelled, true); |
2409 | - if (!list_empty(&req->wait.entry)) { |
2410 | - list_del_init(&req->wait.entry); |
2411 | - schedule_work(&aiocb->poll.work); |
2412 | - } |
2413 | - spin_unlock(&req->head->lock); |
2414 | + if (poll_iocb_lock_wq(req)) { |
2415 | + WRITE_ONCE(req->cancelled, true); |
2416 | + if (!req->work_scheduled) { |
2417 | + schedule_work(&aiocb->poll.work); |
2418 | + req->work_scheduled = true; |
2419 | + } |
2420 | + poll_iocb_unlock_wq(req); |
2421 | + } /* else, the request was force-cancelled by POLLFREE already */ |
2422 | |
2423 | return 0; |
2424 | } |
2425 | @@ -1687,20 +1747,26 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, |
2426 | if (mask && !(mask & req->events)) |
2427 | return 0; |
2428 | |
2429 | - list_del_init(&req->wait.entry); |
2430 | - |
2431 | - if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { |
2432 | + /* |
2433 | + * Complete the request inline if possible. This requires that three |
2434 | + * conditions be met: |
2435 | + * 1. An event mask must have been passed. If a plain wakeup was done |
2436 | + * instead, then mask == 0 and we have to call vfs_poll() to get |
2437 | + * the events, so inline completion isn't possible. |
2438 | + * 2. The completion work must not have already been scheduled. |
2439 | + * 3. ctx_lock must not be busy. We have to use trylock because we |
2440 | + * already hold the waitqueue lock, so this inverts the normal |
2441 | + * locking order. Use irqsave/irqrestore because not all |
2442 | + * filesystems (e.g. fuse) call this function with IRQs disabled, |
2443 | + * yet IRQs have to be disabled before ctx_lock is obtained. |
2444 | + */ |
2445 | + if (mask && !req->work_scheduled && |
2446 | + spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { |
2447 | struct kioctx *ctx = iocb->ki_ctx; |
2448 | |
2449 | - /* |
2450 | - * Try to complete the iocb inline if we can. Use |
2451 | - * irqsave/irqrestore because not all filesystems (e.g. fuse) |
2452 | - * call this function with IRQs disabled and because IRQs |
2453 | - * have to be disabled before ctx_lock is obtained. |
2454 | - */ |
2455 | + list_del_init(&req->wait.entry); |
2456 | list_del(&iocb->ki_list); |
2457 | iocb->ki_res.res = mangle_poll(mask); |
2458 | - req->done = true; |
2459 | if (iocb->ki_eventfd && eventfd_signal_count()) { |
2460 | iocb = NULL; |
2461 | INIT_WORK(&req->work, aio_poll_put_work); |
2462 | @@ -1710,7 +1776,43 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, |
2463 | if (iocb) |
2464 | iocb_put(iocb); |
2465 | } else { |
2466 | - schedule_work(&req->work); |
2467 | + /* |
2468 | + * Schedule the completion work if needed. If it was already |
2469 | + * scheduled, record that another wakeup came in. |
2470 | + * |
2471 | + * Don't remove the request from the waitqueue here, as it might |
2472 | + * not actually be complete yet (we won't know until vfs_poll() |
2473 | + * is called), and we must not miss any wakeups. POLLFREE is an |
2474 | + * exception to this; see below. |
2475 | + */ |
2476 | + if (req->work_scheduled) { |
2477 | + req->work_need_resched = true; |
2478 | + } else { |
2479 | + schedule_work(&req->work); |
2480 | + req->work_scheduled = true; |
2481 | + } |
2482 | + |
2483 | + /* |
2484 | + * If the waitqueue is being freed early but we can't complete |
2485 | + * the request inline, we have to tear down the request as best |
2486 | + * we can. That means immediately removing the request from its |
2487 | + * waitqueue and preventing all further accesses to the |
2488 | + * waitqueue via the request. We also need to schedule the |
2489 | + * completion work (done above). Also mark the request as |
2490 | + * cancelled, to potentially skip an unneeded call to ->poll(). |
2491 | + */ |
2492 | + if (mask & POLLFREE) { |
2493 | + WRITE_ONCE(req->cancelled, true); |
2494 | + list_del_init(&req->wait.entry); |
2495 | + |
2496 | + /* |
2497 | + * Careful: this *must* be the last step, since as soon |
2498 | + * as req->head is NULL'ed out, the request can be |
2499 | + * completed and freed, since aio_poll_complete_work() |
2500 | + * will no longer need to take the waitqueue lock. |
2501 | + */ |
2502 | + smp_store_release(&req->head, NULL); |
2503 | + } |
2504 | } |
2505 | return 1; |
2506 | } |
2507 | @@ -1718,6 +1820,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, |
2508 | struct aio_poll_table { |
2509 | struct poll_table_struct pt; |
2510 | struct aio_kiocb *iocb; |
2511 | + bool queued; |
2512 | int error; |
2513 | }; |
2514 | |
2515 | @@ -1728,11 +1831,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head, |
2516 | struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt); |
2517 | |
2518 | /* multiple wait queues per file are not supported */ |
2519 | - if (unlikely(pt->iocb->poll.head)) { |
2520 | + if (unlikely(pt->queued)) { |
2521 | pt->error = -EINVAL; |
2522 | return; |
2523 | } |
2524 | |
2525 | + pt->queued = true; |
2526 | pt->error = 0; |
2527 | pt->iocb->poll.head = head; |
2528 | add_wait_queue(head, &pt->iocb->poll.wait); |
2529 | @@ -1757,12 +1861,14 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) |
2530 | req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; |
2531 | |
2532 | req->head = NULL; |
2533 | - req->done = false; |
2534 | req->cancelled = false; |
2535 | + req->work_scheduled = false; |
2536 | + req->work_need_resched = false; |
2537 | |
2538 | apt.pt._qproc = aio_poll_queue_proc; |
2539 | apt.pt._key = req->events; |
2540 | apt.iocb = aiocb; |
2541 | + apt.queued = false; |
2542 | apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */ |
2543 | |
2544 | /* initialized the list so that we can do list_empty checks */ |
2545 | @@ -1771,23 +1877,35 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) |
2546 | |
2547 | mask = vfs_poll(req->file, &apt.pt) & req->events; |
2548 | spin_lock_irq(&ctx->ctx_lock); |
2549 | - if (likely(req->head)) { |
2550 | - spin_lock(&req->head->lock); |
2551 | - if (unlikely(list_empty(&req->wait.entry))) { |
2552 | - if (apt.error) |
2553 | + if (likely(apt.queued)) { |
2554 | + bool on_queue = poll_iocb_lock_wq(req); |
2555 | + |
2556 | + if (!on_queue || req->work_scheduled) { |
2557 | + /* |
2558 | + * aio_poll_wake() already either scheduled the async |
2559 | + * completion work, or completed the request inline. |
2560 | + */ |
2561 | + if (apt.error) /* unsupported case: multiple queues */ |
2562 | cancel = true; |
2563 | apt.error = 0; |
2564 | mask = 0; |
2565 | } |
2566 | if (mask || apt.error) { |
2567 | + /* Steal to complete synchronously. */ |
2568 | list_del_init(&req->wait.entry); |
2569 | } else if (cancel) { |
2570 | + /* Cancel if possible (may be too late though). */ |
2571 | WRITE_ONCE(req->cancelled, true); |
2572 | - } else if (!req->done) { /* actually waiting for an event */ |
2573 | + } else if (on_queue) { |
2574 | + /* |
2575 | + * Actually waiting for an event, so add the request to |
2576 | + * active_reqs so that it can be cancelled if needed. |
2577 | + */ |
2578 | list_add_tail(&aiocb->ki_list, &ctx->active_reqs); |
2579 | aiocb->ki_cancel = aio_poll_cancel; |
2580 | } |
2581 | - spin_unlock(&req->head->lock); |
2582 | + if (on_queue) |
2583 | + poll_iocb_unlock_wq(req); |
2584 | } |
2585 | if (mask) { /* no async, we'd stolen it */ |
2586 | aiocb->ki_res.res = mangle_poll(mask); |
2587 | diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c |
2588 | index 9108a73423f70..95ddeb4777970 100644 |
2589 | --- a/fs/btrfs/extent_io.c |
2590 | +++ b/fs/btrfs/extent_io.c |
2591 | @@ -3754,6 +3754,12 @@ static void set_btree_ioerr(struct page *page) |
2592 | if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) |
2593 | return; |
2594 | |
2595 | + /* |
2596 | + * A read may stumble upon this buffer later, make sure that it gets an |
2597 | + * error and knows there was an error. |
2598 | + */ |
2599 | + clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); |
2600 | + |
2601 | /* |
2602 | * If we error out, we should add back the dirty_metadata_bytes |
2603 | * to make it consistent. |
2604 | diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c |
2605 | index 612411c74550f..0d07ebe511e7f 100644 |
2606 | --- a/fs/btrfs/root-tree.c |
2607 | +++ b/fs/btrfs/root-tree.c |
2608 | @@ -371,7 +371,8 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id, |
2609 | key.offset = ref_id; |
2610 | again: |
2611 | ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); |
2612 | - BUG_ON(ret < 0); |
2613 | + if (ret < 0) |
2614 | + goto out; |
2615 | if (ret == 0) { |
2616 | leaf = path->nodes[0]; |
2617 | ref = btrfs_item_ptr(leaf, path->slots[0], |
2618 | diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c |
2619 | index c35c0ebaf722c..7d408957ed623 100644 |
2620 | --- a/fs/nfsd/nfs4recover.c |
2621 | +++ b/fs/nfsd/nfs4recover.c |
2622 | @@ -2177,6 +2177,7 @@ static struct notifier_block nfsd4_cld_block = { |
2623 | int |
2624 | register_cld_notifier(void) |
2625 | { |
2626 | + WARN_ON(!nfsd_net_id); |
2627 | return rpc_pipefs_notifier_register(&nfsd4_cld_block); |
2628 | } |
2629 | |
2630 | diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c |
2631 | index 730386c130e0f..f6328ae9b2da4 100644 |
2632 | --- a/fs/nfsd/nfsctl.c |
2633 | +++ b/fs/nfsd/nfsctl.c |
2634 | @@ -1526,12 +1526,9 @@ static int __init init_nfsd(void) |
2635 | int retval; |
2636 | printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n"); |
2637 | |
2638 | - retval = register_cld_notifier(); |
2639 | - if (retval) |
2640 | - return retval; |
2641 | retval = nfsd4_init_slabs(); |
2642 | if (retval) |
2643 | - goto out_unregister_notifier; |
2644 | + return retval; |
2645 | retval = nfsd4_init_pnfs(); |
2646 | if (retval) |
2647 | goto out_free_slabs; |
2648 | @@ -1549,9 +1546,14 @@ static int __init init_nfsd(void) |
2649 | goto out_free_exports; |
2650 | retval = register_pernet_subsys(&nfsd_net_ops); |
2651 | if (retval < 0) |
2652 | + goto out_free_filesystem; |
2653 | + retval = register_cld_notifier(); |
2654 | + if (retval) |
2655 | goto out_free_all; |
2656 | return 0; |
2657 | out_free_all: |
2658 | + unregister_pernet_subsys(&nfsd_net_ops); |
2659 | +out_free_filesystem: |
2660 | unregister_filesystem(&nfsd_fs_type); |
2661 | out_free_exports: |
2662 | remove_proc_entry("fs/nfs/exports", NULL); |
2663 | @@ -1565,13 +1567,12 @@ out_free_stat: |
2664 | nfsd4_exit_pnfs(); |
2665 | out_free_slabs: |
2666 | nfsd4_free_slabs(); |
2667 | -out_unregister_notifier: |
2668 | - unregister_cld_notifier(); |
2669 | return retval; |
2670 | } |
2671 | |
2672 | static void __exit exit_nfsd(void) |
2673 | { |
2674 | + unregister_cld_notifier(); |
2675 | unregister_pernet_subsys(&nfsd_net_ops); |
2676 | nfsd_drc_slab_free(); |
2677 | remove_proc_entry("fs/nfs/exports", NULL); |
2678 | @@ -1582,7 +1583,6 @@ static void __exit exit_nfsd(void) |
2679 | nfsd4_exit_pnfs(); |
2680 | nfsd_fault_inject_cleanup(); |
2681 | unregister_filesystem(&nfsd_fs_type); |
2682 | - unregister_cld_notifier(); |
2683 | } |
2684 | |
2685 | MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>"); |
2686 | diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c |
2687 | index 3c4811469ae86..e278bfc5ee7ff 100644 |
2688 | --- a/fs/ntfs/dir.c |
2689 | +++ b/fs/ntfs/dir.c |
2690 | @@ -1503,7 +1503,7 @@ static int ntfs_dir_fsync(struct file *filp, loff_t start, loff_t end, |
2691 | na.type = AT_BITMAP; |
2692 | na.name = I30; |
2693 | na.name_len = 4; |
2694 | - bmp_vi = ilookup5(vi->i_sb, vi->i_ino, (test_t)ntfs_test_inode, &na); |
2695 | + bmp_vi = ilookup5(vi->i_sb, vi->i_ino, ntfs_test_inode, &na); |
2696 | if (bmp_vi) { |
2697 | write_inode_now(bmp_vi, !datasync); |
2698 | iput(bmp_vi); |
2699 | diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c |
2700 | index 46dc16e01fe20..ea18e4a2a691d 100644 |
2701 | --- a/fs/ntfs/inode.c |
2702 | +++ b/fs/ntfs/inode.c |
2703 | @@ -30,10 +30,10 @@ |
2704 | /** |
2705 | * ntfs_test_inode - compare two (possibly fake) inodes for equality |
2706 | * @vi: vfs inode which to test |
2707 | - * @na: ntfs attribute which is being tested with |
2708 | + * @data: data which is being tested with |
2709 | * |
2710 | * Compare the ntfs attribute embedded in the ntfs specific part of the vfs |
2711 | - * inode @vi for equality with the ntfs attribute @na. |
2712 | + * inode @vi for equality with the ntfs attribute @data. |
2713 | * |
2714 | * If searching for the normal file/directory inode, set @na->type to AT_UNUSED. |
2715 | * @na->name and @na->name_len are then ignored. |
2716 | @@ -43,8 +43,9 @@ |
2717 | * NOTE: This function runs with the inode_hash_lock spin lock held so it is not |
2718 | * allowed to sleep. |
2719 | */ |
2720 | -int ntfs_test_inode(struct inode *vi, ntfs_attr *na) |
2721 | +int ntfs_test_inode(struct inode *vi, void *data) |
2722 | { |
2723 | + ntfs_attr *na = (ntfs_attr *)data; |
2724 | ntfs_inode *ni; |
2725 | |
2726 | if (vi->i_ino != na->mft_no) |
2727 | @@ -72,9 +73,9 @@ int ntfs_test_inode(struct inode *vi, ntfs_attr *na) |
2728 | /** |
2729 | * ntfs_init_locked_inode - initialize an inode |
2730 | * @vi: vfs inode to initialize |
2731 | - * @na: ntfs attribute which to initialize @vi to |
2732 | + * @data: data which to initialize @vi to |
2733 | * |
2734 | - * Initialize the vfs inode @vi with the values from the ntfs attribute @na in |
2735 | + * Initialize the vfs inode @vi with the values from the ntfs attribute @data in |
2736 | * order to enable ntfs_test_inode() to do its work. |
2737 | * |
2738 | * If initializing the normal file/directory inode, set @na->type to AT_UNUSED. |
2739 | @@ -87,8 +88,9 @@ int ntfs_test_inode(struct inode *vi, ntfs_attr *na) |
2740 | * NOTE: This function runs with the inode->i_lock spin lock held so it is not |
2741 | * allowed to sleep. (Hence the GFP_ATOMIC allocation.) |
2742 | */ |
2743 | -static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na) |
2744 | +static int ntfs_init_locked_inode(struct inode *vi, void *data) |
2745 | { |
2746 | + ntfs_attr *na = (ntfs_attr *)data; |
2747 | ntfs_inode *ni = NTFS_I(vi); |
2748 | |
2749 | vi->i_ino = na->mft_no; |
2750 | @@ -131,7 +133,6 @@ static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na) |
2751 | return 0; |
2752 | } |
2753 | |
2754 | -typedef int (*set_t)(struct inode *, void *); |
2755 | static int ntfs_read_locked_inode(struct inode *vi); |
2756 | static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi); |
2757 | static int ntfs_read_locked_index_inode(struct inode *base_vi, |
2758 | @@ -164,8 +165,8 @@ struct inode *ntfs_iget(struct super_block *sb, unsigned long mft_no) |
2759 | na.name = NULL; |
2760 | na.name_len = 0; |
2761 | |
2762 | - vi = iget5_locked(sb, mft_no, (test_t)ntfs_test_inode, |
2763 | - (set_t)ntfs_init_locked_inode, &na); |
2764 | + vi = iget5_locked(sb, mft_no, ntfs_test_inode, |
2765 | + ntfs_init_locked_inode, &na); |
2766 | if (unlikely(!vi)) |
2767 | return ERR_PTR(-ENOMEM); |
2768 | |
2769 | @@ -225,8 +226,8 @@ struct inode *ntfs_attr_iget(struct inode *base_vi, ATTR_TYPE type, |
2770 | na.name = name; |
2771 | na.name_len = name_len; |
2772 | |
2773 | - vi = iget5_locked(base_vi->i_sb, na.mft_no, (test_t)ntfs_test_inode, |
2774 | - (set_t)ntfs_init_locked_inode, &na); |
2775 | + vi = iget5_locked(base_vi->i_sb, na.mft_no, ntfs_test_inode, |
2776 | + ntfs_init_locked_inode, &na); |
2777 | if (unlikely(!vi)) |
2778 | return ERR_PTR(-ENOMEM); |
2779 | |
2780 | @@ -280,8 +281,8 @@ struct inode *ntfs_index_iget(struct inode *base_vi, ntfschar *name, |
2781 | na.name = name; |
2782 | na.name_len = name_len; |
2783 | |
2784 | - vi = iget5_locked(base_vi->i_sb, na.mft_no, (test_t)ntfs_test_inode, |
2785 | - (set_t)ntfs_init_locked_inode, &na); |
2786 | + vi = iget5_locked(base_vi->i_sb, na.mft_no, ntfs_test_inode, |
2787 | + ntfs_init_locked_inode, &na); |
2788 | if (unlikely(!vi)) |
2789 | return ERR_PTR(-ENOMEM); |
2790 | |
2791 | diff --git a/fs/ntfs/inode.h b/fs/ntfs/inode.h |
2792 | index 98e670fbdd31d..363e4e8206738 100644 |
2793 | --- a/fs/ntfs/inode.h |
2794 | +++ b/fs/ntfs/inode.h |
2795 | @@ -253,9 +253,7 @@ typedef struct { |
2796 | ATTR_TYPE type; |
2797 | } ntfs_attr; |
2798 | |
2799 | -typedef int (*test_t)(struct inode *, void *); |
2800 | - |
2801 | -extern int ntfs_test_inode(struct inode *vi, ntfs_attr *na); |
2802 | +extern int ntfs_test_inode(struct inode *vi, void *data); |
2803 | |
2804 | extern struct inode *ntfs_iget(struct super_block *sb, unsigned long mft_no); |
2805 | extern struct inode *ntfs_attr_iget(struct inode *base_vi, ATTR_TYPE type, |
2806 | diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c |
2807 | index 3aac5c917afe7..58234b42d68f1 100644 |
2808 | --- a/fs/ntfs/mft.c |
2809 | +++ b/fs/ntfs/mft.c |
2810 | @@ -958,7 +958,7 @@ bool ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no, |
2811 | * dirty code path of the inode dirty code path when writing |
2812 | * $MFT occurs. |
2813 | */ |
2814 | - vi = ilookup5_nowait(sb, mft_no, (test_t)ntfs_test_inode, &na); |
2815 | + vi = ilookup5_nowait(sb, mft_no, ntfs_test_inode, &na); |
2816 | } |
2817 | if (vi) { |
2818 | ntfs_debug("Base inode 0x%lx is in icache.", mft_no); |
2819 | @@ -1019,7 +1019,7 @@ bool ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no, |
2820 | vi = igrab(mft_vi); |
2821 | BUG_ON(vi != mft_vi); |
2822 | } else |
2823 | - vi = ilookup5_nowait(sb, na.mft_no, (test_t)ntfs_test_inode, |
2824 | + vi = ilookup5_nowait(sb, na.mft_no, ntfs_test_inode, |
2825 | &na); |
2826 | if (!vi) { |
2827 | /* |
2828 | diff --git a/fs/signalfd.c b/fs/signalfd.c |
2829 | index 5b78719be4455..3e94d181930fd 100644 |
2830 | --- a/fs/signalfd.c |
2831 | +++ b/fs/signalfd.c |
2832 | @@ -35,17 +35,7 @@ |
2833 | |
2834 | void signalfd_cleanup(struct sighand_struct *sighand) |
2835 | { |
2836 | - wait_queue_head_t *wqh = &sighand->signalfd_wqh; |
2837 | - /* |
2838 | - * The lockless check can race with remove_wait_queue() in progress, |
2839 | - * but in this case its caller should run under rcu_read_lock() and |
2840 | - * sighand_cachep is SLAB_TYPESAFE_BY_RCU, we can safely return. |
2841 | - */ |
2842 | - if (likely(!waitqueue_active(wqh))) |
2843 | - return; |
2844 | - |
2845 | - /* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */ |
2846 | - wake_up_poll(wqh, EPOLLHUP | POLLFREE); |
2847 | + wake_up_pollfree(&sighand->signalfd_wqh); |
2848 | } |
2849 | |
2850 | struct signalfd_ctx { |
2851 | diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c |
2852 | index efe078fe5d4a9..3fdbbc7a9848e 100644 |
2853 | --- a/fs/tracefs/inode.c |
2854 | +++ b/fs/tracefs/inode.c |
2855 | @@ -159,6 +159,77 @@ struct tracefs_fs_info { |
2856 | struct tracefs_mount_opts mount_opts; |
2857 | }; |
2858 | |
2859 | +static void change_gid(struct dentry *dentry, kgid_t gid) |
2860 | +{ |
2861 | + if (!dentry->d_inode) |
2862 | + return; |
2863 | + dentry->d_inode->i_gid = gid; |
2864 | +} |
2865 | + |
2866 | +/* |
2867 | + * Taken from d_walk, but without he need for handling renames. |
2868 | + * Nothing can be renamed while walking the list, as tracefs |
2869 | + * does not support renames. This is only called when mounting |
2870 | + * or remounting the file system, to set all the files to |
2871 | + * the given gid. |
2872 | + */ |
2873 | +static void set_gid(struct dentry *parent, kgid_t gid) |
2874 | +{ |
2875 | + struct dentry *this_parent; |
2876 | + struct list_head *next; |
2877 | + |
2878 | + this_parent = parent; |
2879 | + spin_lock(&this_parent->d_lock); |
2880 | + |
2881 | + change_gid(this_parent, gid); |
2882 | +repeat: |
2883 | + next = this_parent->d_subdirs.next; |
2884 | +resume: |
2885 | + while (next != &this_parent->d_subdirs) { |
2886 | + struct list_head *tmp = next; |
2887 | + struct dentry *dentry = list_entry(tmp, struct dentry, d_child); |
2888 | + next = tmp->next; |
2889 | + |
2890 | + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); |
2891 | + |
2892 | + change_gid(dentry, gid); |
2893 | + |
2894 | + if (!list_empty(&dentry->d_subdirs)) { |
2895 | + spin_unlock(&this_parent->d_lock); |
2896 | + spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); |
2897 | + this_parent = dentry; |
2898 | + spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); |
2899 | + goto repeat; |
2900 | + } |
2901 | + spin_unlock(&dentry->d_lock); |
2902 | + } |
2903 | + /* |
2904 | + * All done at this level ... ascend and resume the search. |
2905 | + */ |
2906 | + rcu_read_lock(); |
2907 | +ascend: |
2908 | + if (this_parent != parent) { |
2909 | + struct dentry *child = this_parent; |
2910 | + this_parent = child->d_parent; |
2911 | + |
2912 | + spin_unlock(&child->d_lock); |
2913 | + spin_lock(&this_parent->d_lock); |
2914 | + |
2915 | + /* go into the first sibling still alive */ |
2916 | + do { |
2917 | + next = child->d_child.next; |
2918 | + if (next == &this_parent->d_subdirs) |
2919 | + goto ascend; |
2920 | + child = list_entry(next, struct dentry, d_child); |
2921 | + } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)); |
2922 | + rcu_read_unlock(); |
2923 | + goto resume; |
2924 | + } |
2925 | + rcu_read_unlock(); |
2926 | + spin_unlock(&this_parent->d_lock); |
2927 | + return; |
2928 | +} |
2929 | + |
2930 | static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts) |
2931 | { |
2932 | substring_t args[MAX_OPT_ARGS]; |
2933 | @@ -191,6 +262,7 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts) |
2934 | if (!gid_valid(gid)) |
2935 | return -EINVAL; |
2936 | opts->gid = gid; |
2937 | + set_gid(tracefs_mount->mnt_root, gid); |
2938 | break; |
2939 | case Opt_mode: |
2940 | if (match_octal(&args[0], &option)) |
2941 | @@ -409,6 +481,8 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode, |
2942 | inode->i_mode = mode; |
2943 | inode->i_fop = fops ? fops : &tracefs_file_operations; |
2944 | inode->i_private = data; |
2945 | + inode->i_uid = d_inode(dentry->d_parent)->i_uid; |
2946 | + inode->i_gid = d_inode(dentry->d_parent)->i_gid; |
2947 | d_instantiate(dentry, inode); |
2948 | fsnotify_create(dentry->d_parent->d_inode, dentry); |
2949 | return end_creating(dentry); |
2950 | @@ -431,6 +505,8 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent, |
2951 | inode->i_mode = S_IFDIR | S_IRWXU | S_IRUSR| S_IRGRP | S_IXUSR | S_IXGRP; |
2952 | inode->i_op = ops; |
2953 | inode->i_fop = &simple_dir_operations; |
2954 | + inode->i_uid = d_inode(dentry->d_parent)->i_uid; |
2955 | + inode->i_gid = d_inode(dentry->d_parent)->i_gid; |
2956 | |
2957 | /* directory inodes start off with i_nlink == 2 (for "." entry) */ |
2958 | inc_nlink(inode); |
2959 | diff --git a/include/linux/hid.h b/include/linux/hid.h |
2960 | index 85bedeb9ca9f2..ad46ed41e8836 100644 |
2961 | --- a/include/linux/hid.h |
2962 | +++ b/include/linux/hid.h |
2963 | @@ -831,6 +831,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev, |
2964 | return hdev->ll_driver == driver; |
2965 | } |
2966 | |
2967 | +static inline bool hid_is_usb(struct hid_device *hdev) |
2968 | +{ |
2969 | + return hid_is_using_ll_driver(hdev, &usb_hid_driver); |
2970 | +} |
2971 | + |
2972 | #define PM_HINT_FULLON 1<<5 |
2973 | #define PM_HINT_NORMAL 1<<1 |
2974 | |
2975 | diff --git a/include/linux/wait.h b/include/linux/wait.h |
2976 | index 032ae61c22a2b..5903b1d17c924 100644 |
2977 | --- a/include/linux/wait.h |
2978 | +++ b/include/linux/wait.h |
2979 | @@ -204,6 +204,7 @@ void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head, |
2980 | void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key); |
2981 | void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr); |
2982 | void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr); |
2983 | +void __wake_up_pollfree(struct wait_queue_head *wq_head); |
2984 | |
2985 | #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL) |
2986 | #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL) |
2987 | @@ -230,6 +231,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr); |
2988 | #define wake_up_interruptible_sync_poll(x, m) \ |
2989 | __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m)) |
2990 | |
2991 | +/** |
2992 | + * wake_up_pollfree - signal that a polled waitqueue is going away |
2993 | + * @wq_head: the wait queue head |
2994 | + * |
2995 | + * In the very rare cases where a ->poll() implementation uses a waitqueue whose |
2996 | + * lifetime is tied to a task rather than to the 'struct file' being polled, |
2997 | + * this function must be called before the waitqueue is freed so that |
2998 | + * non-blocking polls (e.g. epoll) are notified that the queue is going away. |
2999 | + * |
3000 | + * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via |
3001 | + * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU. |
3002 | + */ |
3003 | +static inline void wake_up_pollfree(struct wait_queue_head *wq_head) |
3004 | +{ |
3005 | + /* |
3006 | + * For performance reasons, we don't always take the queue lock here. |
3007 | + * Therefore, we might race with someone removing the last entry from |
3008 | + * the queue, and proceed while they still hold the queue lock. |
3009 | + * However, rcu_read_lock() is required to be held in such cases, so we |
3010 | + * can safely proceed with an RCU-delayed free. |
3011 | + */ |
3012 | + if (waitqueue_active(wq_head)) |
3013 | + __wake_up_pollfree(wq_head); |
3014 | +} |
3015 | + |
3016 | #define ___wait_cond_timeout(condition) \ |
3017 | ({ \ |
3018 | bool __cond = (condition); \ |
3019 | diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h |
3020 | index b3504fcd773dc..2d3c482818863 100644 |
3021 | --- a/include/net/bond_alb.h |
3022 | +++ b/include/net/bond_alb.h |
3023 | @@ -126,7 +126,7 @@ struct tlb_slave_info { |
3024 | struct alb_bond_info { |
3025 | struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */ |
3026 | u32 unbalanced_load; |
3027 | - int tx_rebalance_counter; |
3028 | + atomic_t tx_rebalance_counter; |
3029 | int lp_counter; |
3030 | /* -------- rlb parameters -------- */ |
3031 | int rlb_enabled; |
3032 | diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h |
3033 | index 41b509f410bf9..f9c520ce4bf4e 100644 |
3034 | --- a/include/uapi/asm-generic/poll.h |
3035 | +++ b/include/uapi/asm-generic/poll.h |
3036 | @@ -29,7 +29,7 @@ |
3037 | #define POLLRDHUP 0x2000 |
3038 | #endif |
3039 | |
3040 | -#define POLLFREE (__force __poll_t)0x4000 /* currently only for epoll */ |
3041 | +#define POLLFREE (__force __poll_t)0x4000 |
3042 | |
3043 | #define POLL_BUSY_LOOP (__force __poll_t)0x8000 |
3044 | |
3045 | diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c |
3046 | index 9c5fa5c529031..34262d83dce11 100644 |
3047 | --- a/kernel/bpf/verifier.c |
3048 | +++ b/kernel/bpf/verifier.c |
3049 | @@ -5372,7 +5372,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, |
3050 | |
3051 | new_range = dst_reg->off; |
3052 | if (range_right_open) |
3053 | - new_range--; |
3054 | + new_range++; |
3055 | |
3056 | /* Examples for register markings: |
3057 | * |
3058 | diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c |
3059 | index 84bd05117dc22..7d668b31dbc6d 100644 |
3060 | --- a/kernel/sched/wait.c |
3061 | +++ b/kernel/sched/wait.c |
3062 | @@ -206,6 +206,13 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_e |
3063 | } |
3064 | EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ |
3065 | |
3066 | +void __wake_up_pollfree(struct wait_queue_head *wq_head) |
3067 | +{ |
3068 | + __wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE)); |
3069 | + /* POLLFREE must have cleared the queue. */ |
3070 | + WARN_ON_ONCE(waitqueue_active(wq_head)); |
3071 | +} |
3072 | + |
3073 | /* |
3074 | * Note: we use "set_current_state()" _after_ the wait-queue add, |
3075 | * because we need a memory barrier there on SMP, so that any |
3076 | diff --git a/mm/backing-dev.c b/mm/backing-dev.c |
3077 | index 3f2480e4c5af3..3954ef0cc7705 100644 |
3078 | --- a/mm/backing-dev.c |
3079 | +++ b/mm/backing-dev.c |
3080 | @@ -1013,6 +1013,13 @@ void bdi_unregister(struct backing_dev_info *bdi) |
3081 | wb_shutdown(&bdi->wb); |
3082 | cgwb_bdi_unregister(bdi); |
3083 | |
3084 | + /* |
3085 | + * If this BDI's min ratio has been set, use bdi_set_min_ratio() to |
3086 | + * update the global bdi_min_ratio. |
3087 | + */ |
3088 | + if (bdi->min_ratio) |
3089 | + bdi_set_min_ratio(bdi, 0); |
3090 | + |
3091 | if (bdi->dev) { |
3092 | bdi_debug_unregister(bdi); |
3093 | device_unregister(bdi->dev); |
3094 | diff --git a/net/core/neighbour.c b/net/core/neighbour.c |
3095 | index 02e55041a8813..8b6140e67e7f8 100644 |
3096 | --- a/net/core/neighbour.c |
3097 | +++ b/net/core/neighbour.c |
3098 | @@ -734,11 +734,10 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, |
3099 | |
3100 | ASSERT_RTNL(); |
3101 | |
3102 | - n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL); |
3103 | + n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL); |
3104 | if (!n) |
3105 | goto out; |
3106 | |
3107 | - n->protocol = 0; |
3108 | write_pnet(&n->net, net); |
3109 | memcpy(n->key, pkey, key_len); |
3110 | n->dev = dev; |
3111 | diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c |
3112 | index fdbd56ee1300c..a0016f05c4f86 100644 |
3113 | --- a/net/ipv4/udp.c |
3114 | +++ b/net/ipv4/udp.c |
3115 | @@ -845,7 +845,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4, |
3116 | kfree_skb(skb); |
3117 | return -EINVAL; |
3118 | } |
3119 | - if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { |
3120 | + if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) { |
3121 | kfree_skb(skb); |
3122 | return -EINVAL; |
3123 | } |
3124 | diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c |
3125 | index ab7f124ff5d7e..6954db1fd26e7 100644 |
3126 | --- a/net/ipv6/seg6_iptunnel.c |
3127 | +++ b/net/ipv6/seg6_iptunnel.c |
3128 | @@ -143,6 +143,14 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) |
3129 | hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb)); |
3130 | |
3131 | memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); |
3132 | + |
3133 | + /* the control block has been erased, so we have to set the |
3134 | + * iif once again. |
3135 | + * We read the receiving interface index directly from the |
3136 | + * skb->skb_iif as it is done in the IPv4 receiving path (i.e.: |
3137 | + * ip_rcv_core(...)). |
3138 | + */ |
3139 | + IP6CB(skb)->iif = skb->skb_iif; |
3140 | } |
3141 | |
3142 | hdr->nexthdr = NEXTHDR_ROUTING; |
3143 | diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c |
3144 | index 99b06a16b8086..0f61dad7256b8 100644 |
3145 | --- a/net/nfc/netlink.c |
3146 | +++ b/net/nfc/netlink.c |
3147 | @@ -1400,8 +1400,10 @@ static int nfc_genl_dump_ses_done(struct netlink_callback *cb) |
3148 | { |
3149 | struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; |
3150 | |
3151 | - nfc_device_iter_exit(iter); |
3152 | - kfree(iter); |
3153 | + if (iter) { |
3154 | + nfc_device_iter_exit(iter); |
3155 | + kfree(iter); |
3156 | + } |
3157 | |
3158 | return 0; |
3159 | } |
3160 | diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c |
3161 | index d55be1db1a8a5..cca3ed9b06294 100644 |
3162 | --- a/sound/core/control_compat.c |
3163 | +++ b/sound/core/control_compat.c |
3164 | @@ -266,6 +266,7 @@ static int copy_ctl_value_to_user(void __user *userdata, |
3165 | struct snd_ctl_elem_value *data, |
3166 | int type, int count) |
3167 | { |
3168 | + struct snd_ctl_elem_value32 __user *data32 = userdata; |
3169 | int i, size; |
3170 | |
3171 | if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || |
3172 | @@ -282,6 +283,8 @@ static int copy_ctl_value_to_user(void __user *userdata, |
3173 | if (copy_to_user(valuep, data->value.bytes.data, size)) |
3174 | return -EFAULT; |
3175 | } |
3176 | + if (copy_to_user(&data32->id, &data->id, sizeof(data32->id))) |
3177 | + return -EFAULT; |
3178 | return 0; |
3179 | } |
3180 | |
3181 | diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c |
3182 | index 0b03777d01116..9e31f4bd43826 100644 |
3183 | --- a/sound/core/oss/pcm_oss.c |
3184 | +++ b/sound/core/oss/pcm_oss.c |
3185 | @@ -147,7 +147,7 @@ snd_pcm_hw_param_value_min(const struct snd_pcm_hw_params *params, |
3186 | * |
3187 | * Return the maximum value for field PAR. |
3188 | */ |
3189 | -static unsigned int |
3190 | +static int |
3191 | snd_pcm_hw_param_value_max(const struct snd_pcm_hw_params *params, |
3192 | snd_pcm_hw_param_t var, int *dir) |
3193 | { |
3194 | @@ -682,18 +682,24 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, |
3195 | struct snd_pcm_hw_params *oss_params, |
3196 | struct snd_pcm_hw_params *slave_params) |
3197 | { |
3198 | - size_t s; |
3199 | - size_t oss_buffer_size, oss_period_size, oss_periods; |
3200 | - size_t min_period_size, max_period_size; |
3201 | + ssize_t s; |
3202 | + ssize_t oss_buffer_size; |
3203 | + ssize_t oss_period_size, oss_periods; |
3204 | + ssize_t min_period_size, max_period_size; |
3205 | struct snd_pcm_runtime *runtime = substream->runtime; |
3206 | size_t oss_frame_size; |
3207 | |
3208 | oss_frame_size = snd_pcm_format_physical_width(params_format(oss_params)) * |
3209 | params_channels(oss_params) / 8; |
3210 | |
3211 | + oss_buffer_size = snd_pcm_hw_param_value_max(slave_params, |
3212 | + SNDRV_PCM_HW_PARAM_BUFFER_SIZE, |
3213 | + NULL); |
3214 | + if (oss_buffer_size <= 0) |
3215 | + return -EINVAL; |
3216 | oss_buffer_size = snd_pcm_plug_client_size(substream, |
3217 | - snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size; |
3218 | - if (!oss_buffer_size) |
3219 | + oss_buffer_size * oss_frame_size); |
3220 | + if (oss_buffer_size <= 0) |
3221 | return -EINVAL; |
3222 | oss_buffer_size = rounddown_pow_of_two(oss_buffer_size); |
3223 | if (atomic_read(&substream->mmap_count)) { |
3224 | @@ -730,7 +736,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, |
3225 | |
3226 | min_period_size = snd_pcm_plug_client_size(substream, |
3227 | snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL)); |
3228 | - if (min_period_size) { |
3229 | + if (min_period_size > 0) { |
3230 | min_period_size *= oss_frame_size; |
3231 | min_period_size = roundup_pow_of_two(min_period_size); |
3232 | if (oss_period_size < min_period_size) |
3233 | @@ -739,7 +745,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, |
3234 | |
3235 | max_period_size = snd_pcm_plug_client_size(substream, |
3236 | snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL)); |
3237 | - if (max_period_size) { |
3238 | + if (max_period_size > 0) { |
3239 | max_period_size *= oss_frame_size; |
3240 | max_period_size = rounddown_pow_of_two(max_period_size); |
3241 | if (oss_period_size > max_period_size) |
3242 | @@ -752,7 +758,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, |
3243 | oss_periods = substream->oss.setup.periods; |
3244 | |
3245 | s = snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIODS, NULL); |
3246 | - if (runtime->oss.maxfrags && s > runtime->oss.maxfrags) |
3247 | + if (s > 0 && runtime->oss.maxfrags && s > runtime->oss.maxfrags) |
3248 | s = runtime->oss.maxfrags; |
3249 | if (oss_periods > s) |
3250 | oss_periods = s; |
3251 | @@ -878,8 +884,15 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) |
3252 | err = -EINVAL; |
3253 | goto failure; |
3254 | } |
3255 | - choose_rate(substream, sparams, runtime->oss.rate); |
3256 | - snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_CHANNELS, runtime->oss.channels, NULL); |
3257 | + |
3258 | + err = choose_rate(substream, sparams, runtime->oss.rate); |
3259 | + if (err < 0) |
3260 | + goto failure; |
3261 | + err = snd_pcm_hw_param_near(substream, sparams, |
3262 | + SNDRV_PCM_HW_PARAM_CHANNELS, |
3263 | + runtime->oss.channels, NULL); |
3264 | + if (err < 0) |
3265 | + goto failure; |
3266 | |
3267 | format = snd_pcm_oss_format_from(runtime->oss.format); |
3268 | |
3269 | @@ -1946,7 +1959,7 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign |
3270 | if (runtime->oss.subdivision || runtime->oss.fragshift) |
3271 | return -EINVAL; |
3272 | fragshift = val & 0xffff; |
3273 | - if (fragshift >= 31) |
3274 | + if (fragshift >= 25) /* should be large enough */ |
3275 | return -EINVAL; |
3276 | runtime->oss.fragshift = fragshift; |
3277 | runtime->oss.maxfrags = (val >> 16) & 0xffff; |
3278 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
3279 | index 94fc17b28e9c7..9514db7c2a86d 100644 |
3280 | --- a/sound/pci/hda/patch_realtek.c |
3281 | +++ b/sound/pci/hda/patch_realtek.c |
3282 | @@ -9620,6 +9620,27 @@ static void alc671_fixup_hp_headset_mic2(struct hda_codec *codec, |
3283 | } |
3284 | } |
3285 | |
3286 | +static void alc897_hp_automute_hook(struct hda_codec *codec, |
3287 | + struct hda_jack_callback *jack) |
3288 | +{ |
3289 | + struct alc_spec *spec = codec->spec; |
3290 | + int vref; |
3291 | + |
3292 | + snd_hda_gen_hp_automute(codec, jack); |
3293 | + vref = spec->gen.hp_jack_present ? (PIN_HP | AC_PINCTL_VREF_100) : PIN_HP; |
3294 | + snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, |
3295 | + vref); |
3296 | +} |
3297 | + |
3298 | +static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec, |
3299 | + const struct hda_fixup *fix, int action) |
3300 | +{ |
3301 | + struct alc_spec *spec = codec->spec; |
3302 | + if (action == HDA_FIXUP_ACT_PRE_PROBE) { |
3303 | + spec->gen.hp_automute_hook = alc897_hp_automute_hook; |
3304 | + } |
3305 | +} |
3306 | + |
3307 | static const struct coef_fw alc668_coefs[] = { |
3308 | WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03, 0x0), |
3309 | WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06, 0x0), WRITE_COEF(0x07, 0x0f80), |
3310 | @@ -9700,6 +9721,8 @@ enum { |
3311 | ALC668_FIXUP_ASUS_NO_HEADSET_MIC, |
3312 | ALC668_FIXUP_HEADSET_MIC, |
3313 | ALC668_FIXUP_MIC_DET_COEF, |
3314 | + ALC897_FIXUP_LENOVO_HEADSET_MIC, |
3315 | + ALC897_FIXUP_HEADSET_MIC_PIN, |
3316 | }; |
3317 | |
3318 | static const struct hda_fixup alc662_fixups[] = { |
3319 | @@ -10106,6 +10129,19 @@ static const struct hda_fixup alc662_fixups[] = { |
3320 | {} |
3321 | }, |
3322 | }, |
3323 | + [ALC897_FIXUP_LENOVO_HEADSET_MIC] = { |
3324 | + .type = HDA_FIXUP_FUNC, |
3325 | + .v.func = alc897_fixup_lenovo_headset_mic, |
3326 | + }, |
3327 | + [ALC897_FIXUP_HEADSET_MIC_PIN] = { |
3328 | + .type = HDA_FIXUP_PINS, |
3329 | + .v.pins = (const struct hda_pintbl[]) { |
3330 | + { 0x1a, 0x03a11050 }, |
3331 | + { } |
3332 | + }, |
3333 | + .chained = true, |
3334 | + .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MIC |
3335 | + }, |
3336 | }; |
3337 | |
3338 | static const struct snd_pci_quirk alc662_fixup_tbl[] = { |
3339 | @@ -10150,6 +10186,10 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { |
3340 | SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), |
3341 | SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE), |
3342 | SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS), |
3343 | + SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN), |
3344 | + SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN), |
3345 | + SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN), |
3346 | + SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN), |
3347 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), |
3348 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), |
3349 | SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO), |
3350 | diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c |
3351 | index 16f26dd2d59ed..bc65009be875c 100644 |
3352 | --- a/sound/soc/qcom/qdsp6/q6routing.c |
3353 | +++ b/sound/soc/qcom/qdsp6/q6routing.c |
3354 | @@ -440,14 +440,16 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol, |
3355 | struct session_data *session = &data->sessions[session_id]; |
3356 | |
3357 | if (ucontrol->value.integer.value[0]) { |
3358 | + if (session->port_id == be_id) |
3359 | + return 0; |
3360 | + |
3361 | session->port_id = be_id; |
3362 | snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update); |
3363 | } else { |
3364 | - if (session->port_id == be_id) { |
3365 | - session->port_id = -1; |
3366 | + if (session->port_id == -1 || session->port_id != be_id) |
3367 | return 0; |
3368 | - } |
3369 | |
3370 | + session->port_id = -1; |
3371 | snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update); |
3372 | } |
3373 | |
3374 | diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature |
3375 | index 8c6e1ea67f213..1ea26bb8c5791 100644 |
3376 | --- a/tools/build/Makefile.feature |
3377 | +++ b/tools/build/Makefile.feature |
3378 | @@ -52,7 +52,6 @@ FEATURE_TESTS_BASIC := \ |
3379 | numa_num_possible_cpus \ |
3380 | libperl \ |
3381 | libpython \ |
3382 | - libpython-version \ |
3383 | libslang \ |
3384 | libslang-include-subdir \ |
3385 | libcrypto \ |
3386 | diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile |
3387 | index 054e09ab4a9e4..2a261b909dd8d 100644 |
3388 | --- a/tools/build/feature/Makefile |
3389 | +++ b/tools/build/feature/Makefile |
3390 | @@ -30,7 +30,6 @@ FILES= \ |
3391 | test-numa_num_possible_cpus.bin \ |
3392 | test-libperl.bin \ |
3393 | test-libpython.bin \ |
3394 | - test-libpython-version.bin \ |
3395 | test-libslang.bin \ |
3396 | test-libslang-include-subdir.bin \ |
3397 | test-libcrypto.bin \ |
3398 | @@ -214,9 +213,6 @@ $(OUTPUT)test-libperl.bin: |
3399 | $(OUTPUT)test-libpython.bin: |
3400 | $(BUILD) $(FLAGS_PYTHON_EMBED) |
3401 | |
3402 | -$(OUTPUT)test-libpython-version.bin: |
3403 | - $(BUILD) |
3404 | - |
3405 | $(OUTPUT)test-libbfd.bin: |
3406 | $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl |
3407 | |
3408 | diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c |
3409 | index 88145e8cde1a6..6eaeaf2da36ea 100644 |
3410 | --- a/tools/build/feature/test-all.c |
3411 | +++ b/tools/build/feature/test-all.c |
3412 | @@ -14,10 +14,6 @@ |
3413 | # include "test-libpython.c" |
3414 | #undef main |
3415 | |
3416 | -#define main main_test_libpython_version |
3417 | -# include "test-libpython-version.c" |
3418 | -#undef main |
3419 | - |
3420 | #define main main_test_libperl |
3421 | # include "test-libperl.c" |
3422 | #undef main |
3423 | @@ -193,7 +189,6 @@ |
3424 | int main(int argc, char *argv[]) |
3425 | { |
3426 | main_test_libpython(); |
3427 | - main_test_libpython_version(); |
3428 | main_test_libperl(); |
3429 | main_test_hello(); |
3430 | main_test_libelf(); |
3431 | diff --git a/tools/build/feature/test-libpython-version.c b/tools/build/feature/test-libpython-version.c |
3432 | deleted file mode 100644 |
3433 | index 47714b942d4d3..0000000000000 |
3434 | --- a/tools/build/feature/test-libpython-version.c |
3435 | +++ /dev/null |
3436 | @@ -1,11 +0,0 @@ |
3437 | -// SPDX-License-Identifier: GPL-2.0 |
3438 | -#include <Python.h> |
3439 | - |
3440 | -#if PY_VERSION_HEX >= 0x03000000 |
3441 | - #error |
3442 | -#endif |
3443 | - |
3444 | -int main(void) |
3445 | -{ |
3446 | - return 0; |
3447 | -} |
3448 | diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config |
3449 | index c75c9b03d6e77..7578af2504549 100644 |
3450 | --- a/tools/perf/Makefile.config |
3451 | +++ b/tools/perf/Makefile.config |
3452 | @@ -247,8 +247,6 @@ endif |
3453 | |
3454 | FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS) |
3455 | FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS) |
3456 | -FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS) |
3457 | -FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS) |
3458 | |
3459 | FEATURE_CHECK_LDFLAGS-libaio = -lrt |
3460 | |
3461 | diff --git a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c |
3462 | index bfb97383e6b5a..b4ec228eb95d0 100644 |
3463 | --- a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c |
3464 | +++ b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c |
3465 | @@ -35,7 +35,7 @@ |
3466 | .prog_type = BPF_PROG_TYPE_XDP, |
3467 | }, |
3468 | { |
3469 | - "XDP pkt read, pkt_data' > pkt_end, good access", |
3470 | + "XDP pkt read, pkt_data' > pkt_end, corner case, good access", |
3471 | .insns = { |
3472 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3473 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3474 | @@ -87,6 +87,41 @@ |
3475 | .prog_type = BPF_PROG_TYPE_XDP, |
3476 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3477 | }, |
3478 | +{ |
3479 | + "XDP pkt read, pkt_data' > pkt_end, corner case +1, good access", |
3480 | + .insns = { |
3481 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3482 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3483 | + offsetof(struct xdp_md, data_end)), |
3484 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3485 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), |
3486 | + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), |
3487 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), |
3488 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3489 | + BPF_EXIT_INSN(), |
3490 | + }, |
3491 | + .result = ACCEPT, |
3492 | + .prog_type = BPF_PROG_TYPE_XDP, |
3493 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3494 | +}, |
3495 | +{ |
3496 | + "XDP pkt read, pkt_data' > pkt_end, corner case -1, bad access", |
3497 | + .insns = { |
3498 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3499 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3500 | + offsetof(struct xdp_md, data_end)), |
3501 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3502 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), |
3503 | + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), |
3504 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), |
3505 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3506 | + BPF_EXIT_INSN(), |
3507 | + }, |
3508 | + .errstr = "R1 offset is outside of the packet", |
3509 | + .result = REJECT, |
3510 | + .prog_type = BPF_PROG_TYPE_XDP, |
3511 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3512 | +}, |
3513 | { |
3514 | "XDP pkt read, pkt_end > pkt_data', good access", |
3515 | .insns = { |
3516 | @@ -106,16 +141,16 @@ |
3517 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3518 | }, |
3519 | { |
3520 | - "XDP pkt read, pkt_end > pkt_data', bad access 1", |
3521 | + "XDP pkt read, pkt_end > pkt_data', corner case -1, bad access", |
3522 | .insns = { |
3523 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3524 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3525 | offsetof(struct xdp_md, data_end)), |
3526 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3527 | - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), |
3528 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), |
3529 | BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), |
3530 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
3531 | - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), |
3532 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), |
3533 | BPF_MOV64_IMM(BPF_REG_0, 0), |
3534 | BPF_EXIT_INSN(), |
3535 | }, |
3536 | @@ -142,6 +177,42 @@ |
3537 | .prog_type = BPF_PROG_TYPE_XDP, |
3538 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3539 | }, |
3540 | +{ |
3541 | + "XDP pkt read, pkt_end > pkt_data', corner case, good access", |
3542 | + .insns = { |
3543 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3544 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3545 | + offsetof(struct xdp_md, data_end)), |
3546 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3547 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), |
3548 | + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), |
3549 | + BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
3550 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), |
3551 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3552 | + BPF_EXIT_INSN(), |
3553 | + }, |
3554 | + .result = ACCEPT, |
3555 | + .prog_type = BPF_PROG_TYPE_XDP, |
3556 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3557 | +}, |
3558 | +{ |
3559 | + "XDP pkt read, pkt_end > pkt_data', corner case +1, good access", |
3560 | + .insns = { |
3561 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3562 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3563 | + offsetof(struct xdp_md, data_end)), |
3564 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3565 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), |
3566 | + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), |
3567 | + BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
3568 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), |
3569 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3570 | + BPF_EXIT_INSN(), |
3571 | + }, |
3572 | + .result = ACCEPT, |
3573 | + .prog_type = BPF_PROG_TYPE_XDP, |
3574 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3575 | +}, |
3576 | { |
3577 | "XDP pkt read, pkt_data' < pkt_end, good access", |
3578 | .insns = { |
3579 | @@ -161,16 +232,16 @@ |
3580 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3581 | }, |
3582 | { |
3583 | - "XDP pkt read, pkt_data' < pkt_end, bad access 1", |
3584 | + "XDP pkt read, pkt_data' < pkt_end, corner case -1, bad access", |
3585 | .insns = { |
3586 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3587 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3588 | offsetof(struct xdp_md, data_end)), |
3589 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3590 | - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), |
3591 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), |
3592 | BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), |
3593 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
3594 | - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), |
3595 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), |
3596 | BPF_MOV64_IMM(BPF_REG_0, 0), |
3597 | BPF_EXIT_INSN(), |
3598 | }, |
3599 | @@ -198,7 +269,43 @@ |
3600 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3601 | }, |
3602 | { |
3603 | - "XDP pkt read, pkt_end < pkt_data', good access", |
3604 | + "XDP pkt read, pkt_data' < pkt_end, corner case, good access", |
3605 | + .insns = { |
3606 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3607 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3608 | + offsetof(struct xdp_md, data_end)), |
3609 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3610 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), |
3611 | + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), |
3612 | + BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
3613 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), |
3614 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3615 | + BPF_EXIT_INSN(), |
3616 | + }, |
3617 | + .result = ACCEPT, |
3618 | + .prog_type = BPF_PROG_TYPE_XDP, |
3619 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3620 | +}, |
3621 | +{ |
3622 | + "XDP pkt read, pkt_data' < pkt_end, corner case +1, good access", |
3623 | + .insns = { |
3624 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3625 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3626 | + offsetof(struct xdp_md, data_end)), |
3627 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3628 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), |
3629 | + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), |
3630 | + BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
3631 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), |
3632 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3633 | + BPF_EXIT_INSN(), |
3634 | + }, |
3635 | + .result = ACCEPT, |
3636 | + .prog_type = BPF_PROG_TYPE_XDP, |
3637 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3638 | +}, |
3639 | +{ |
3640 | + "XDP pkt read, pkt_end < pkt_data', corner case, good access", |
3641 | .insns = { |
3642 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3643 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3644 | @@ -250,6 +357,41 @@ |
3645 | .prog_type = BPF_PROG_TYPE_XDP, |
3646 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3647 | }, |
3648 | +{ |
3649 | + "XDP pkt read, pkt_end < pkt_data', corner case +1, good access", |
3650 | + .insns = { |
3651 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3652 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3653 | + offsetof(struct xdp_md, data_end)), |
3654 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3655 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), |
3656 | + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), |
3657 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), |
3658 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3659 | + BPF_EXIT_INSN(), |
3660 | + }, |
3661 | + .result = ACCEPT, |
3662 | + .prog_type = BPF_PROG_TYPE_XDP, |
3663 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3664 | +}, |
3665 | +{ |
3666 | + "XDP pkt read, pkt_end < pkt_data', corner case -1, bad access", |
3667 | + .insns = { |
3668 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3669 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3670 | + offsetof(struct xdp_md, data_end)), |
3671 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3672 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), |
3673 | + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), |
3674 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), |
3675 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3676 | + BPF_EXIT_INSN(), |
3677 | + }, |
3678 | + .errstr = "R1 offset is outside of the packet", |
3679 | + .result = REJECT, |
3680 | + .prog_type = BPF_PROG_TYPE_XDP, |
3681 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3682 | +}, |
3683 | { |
3684 | "XDP pkt read, pkt_data' >= pkt_end, good access", |
3685 | .insns = { |
3686 | @@ -268,15 +410,15 @@ |
3687 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3688 | }, |
3689 | { |
3690 | - "XDP pkt read, pkt_data' >= pkt_end, bad access 1", |
3691 | + "XDP pkt read, pkt_data' >= pkt_end, corner case -1, bad access", |
3692 | .insns = { |
3693 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3694 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3695 | offsetof(struct xdp_md, data_end)), |
3696 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3697 | - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), |
3698 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), |
3699 | BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), |
3700 | - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), |
3701 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), |
3702 | BPF_MOV64_IMM(BPF_REG_0, 0), |
3703 | BPF_EXIT_INSN(), |
3704 | }, |
3705 | @@ -304,7 +446,41 @@ |
3706 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3707 | }, |
3708 | { |
3709 | - "XDP pkt read, pkt_end >= pkt_data', good access", |
3710 | + "XDP pkt read, pkt_data' >= pkt_end, corner case, good access", |
3711 | + .insns = { |
3712 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3713 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3714 | + offsetof(struct xdp_md, data_end)), |
3715 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3716 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), |
3717 | + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), |
3718 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), |
3719 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3720 | + BPF_EXIT_INSN(), |
3721 | + }, |
3722 | + .result = ACCEPT, |
3723 | + .prog_type = BPF_PROG_TYPE_XDP, |
3724 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3725 | +}, |
3726 | +{ |
3727 | + "XDP pkt read, pkt_data' >= pkt_end, corner case +1, good access", |
3728 | + .insns = { |
3729 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3730 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3731 | + offsetof(struct xdp_md, data_end)), |
3732 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3733 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), |
3734 | + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), |
3735 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), |
3736 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3737 | + BPF_EXIT_INSN(), |
3738 | + }, |
3739 | + .result = ACCEPT, |
3740 | + .prog_type = BPF_PROG_TYPE_XDP, |
3741 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3742 | +}, |
3743 | +{ |
3744 | + "XDP pkt read, pkt_end >= pkt_data', corner case, good access", |
3745 | .insns = { |
3746 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3747 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3748 | @@ -359,7 +535,44 @@ |
3749 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3750 | }, |
3751 | { |
3752 | - "XDP pkt read, pkt_data' <= pkt_end, good access", |
3753 | + "XDP pkt read, pkt_end >= pkt_data', corner case +1, good access", |
3754 | + .insns = { |
3755 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3756 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3757 | + offsetof(struct xdp_md, data_end)), |
3758 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3759 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), |
3760 | + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), |
3761 | + BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
3762 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), |
3763 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3764 | + BPF_EXIT_INSN(), |
3765 | + }, |
3766 | + .result = ACCEPT, |
3767 | + .prog_type = BPF_PROG_TYPE_XDP, |
3768 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3769 | +}, |
3770 | +{ |
3771 | + "XDP pkt read, pkt_end >= pkt_data', corner case -1, bad access", |
3772 | + .insns = { |
3773 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3774 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3775 | + offsetof(struct xdp_md, data_end)), |
3776 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3777 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), |
3778 | + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), |
3779 | + BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
3780 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), |
3781 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3782 | + BPF_EXIT_INSN(), |
3783 | + }, |
3784 | + .errstr = "R1 offset is outside of the packet", |
3785 | + .result = REJECT, |
3786 | + .prog_type = BPF_PROG_TYPE_XDP, |
3787 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3788 | +}, |
3789 | +{ |
3790 | + "XDP pkt read, pkt_data' <= pkt_end, corner case, good access", |
3791 | .insns = { |
3792 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3793 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3794 | @@ -413,6 +626,43 @@ |
3795 | .prog_type = BPF_PROG_TYPE_XDP, |
3796 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3797 | }, |
3798 | +{ |
3799 | + "XDP pkt read, pkt_data' <= pkt_end, corner case +1, good access", |
3800 | + .insns = { |
3801 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3802 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3803 | + offsetof(struct xdp_md, data_end)), |
3804 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3805 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), |
3806 | + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), |
3807 | + BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
3808 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), |
3809 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3810 | + BPF_EXIT_INSN(), |
3811 | + }, |
3812 | + .result = ACCEPT, |
3813 | + .prog_type = BPF_PROG_TYPE_XDP, |
3814 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3815 | +}, |
3816 | +{ |
3817 | + "XDP pkt read, pkt_data' <= pkt_end, corner case -1, bad access", |
3818 | + .insns = { |
3819 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3820 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3821 | + offsetof(struct xdp_md, data_end)), |
3822 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3823 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), |
3824 | + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), |
3825 | + BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
3826 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), |
3827 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3828 | + BPF_EXIT_INSN(), |
3829 | + }, |
3830 | + .errstr = "R1 offset is outside of the packet", |
3831 | + .result = REJECT, |
3832 | + .prog_type = BPF_PROG_TYPE_XDP, |
3833 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3834 | +}, |
3835 | { |
3836 | "XDP pkt read, pkt_end <= pkt_data', good access", |
3837 | .insns = { |
3838 | @@ -431,15 +681,15 @@ |
3839 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3840 | }, |
3841 | { |
3842 | - "XDP pkt read, pkt_end <= pkt_data', bad access 1", |
3843 | + "XDP pkt read, pkt_end <= pkt_data', corner case -1, bad access", |
3844 | .insns = { |
3845 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3846 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3847 | offsetof(struct xdp_md, data_end)), |
3848 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3849 | - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), |
3850 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), |
3851 | BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), |
3852 | - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), |
3853 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), |
3854 | BPF_MOV64_IMM(BPF_REG_0, 0), |
3855 | BPF_EXIT_INSN(), |
3856 | }, |
3857 | @@ -467,7 +717,41 @@ |
3858 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3859 | }, |
3860 | { |
3861 | - "XDP pkt read, pkt_meta' > pkt_data, good access", |
3862 | + "XDP pkt read, pkt_end <= pkt_data', corner case, good access", |
3863 | + .insns = { |
3864 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3865 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3866 | + offsetof(struct xdp_md, data_end)), |
3867 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3868 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), |
3869 | + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), |
3870 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), |
3871 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3872 | + BPF_EXIT_INSN(), |
3873 | + }, |
3874 | + .result = ACCEPT, |
3875 | + .prog_type = BPF_PROG_TYPE_XDP, |
3876 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3877 | +}, |
3878 | +{ |
3879 | + "XDP pkt read, pkt_end <= pkt_data', corner case +1, good access", |
3880 | + .insns = { |
3881 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), |
3882 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, |
3883 | + offsetof(struct xdp_md, data_end)), |
3884 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3885 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), |
3886 | + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), |
3887 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), |
3888 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3889 | + BPF_EXIT_INSN(), |
3890 | + }, |
3891 | + .result = ACCEPT, |
3892 | + .prog_type = BPF_PROG_TYPE_XDP, |
3893 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3894 | +}, |
3895 | +{ |
3896 | + "XDP pkt read, pkt_meta' > pkt_data, corner case, good access", |
3897 | .insns = { |
3898 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
3899 | offsetof(struct xdp_md, data_meta)), |
3900 | @@ -519,6 +803,41 @@ |
3901 | .prog_type = BPF_PROG_TYPE_XDP, |
3902 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3903 | }, |
3904 | +{ |
3905 | + "XDP pkt read, pkt_meta' > pkt_data, corner case +1, good access", |
3906 | + .insns = { |
3907 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
3908 | + offsetof(struct xdp_md, data_meta)), |
3909 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
3910 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3911 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), |
3912 | + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), |
3913 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), |
3914 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3915 | + BPF_EXIT_INSN(), |
3916 | + }, |
3917 | + .result = ACCEPT, |
3918 | + .prog_type = BPF_PROG_TYPE_XDP, |
3919 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3920 | +}, |
3921 | +{ |
3922 | + "XDP pkt read, pkt_meta' > pkt_data, corner case -1, bad access", |
3923 | + .insns = { |
3924 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
3925 | + offsetof(struct xdp_md, data_meta)), |
3926 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
3927 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3928 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), |
3929 | + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), |
3930 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), |
3931 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3932 | + BPF_EXIT_INSN(), |
3933 | + }, |
3934 | + .errstr = "R1 offset is outside of the packet", |
3935 | + .result = REJECT, |
3936 | + .prog_type = BPF_PROG_TYPE_XDP, |
3937 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3938 | +}, |
3939 | { |
3940 | "XDP pkt read, pkt_data > pkt_meta', good access", |
3941 | .insns = { |
3942 | @@ -538,16 +857,16 @@ |
3943 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3944 | }, |
3945 | { |
3946 | - "XDP pkt read, pkt_data > pkt_meta', bad access 1", |
3947 | + "XDP pkt read, pkt_data > pkt_meta', corner case -1, bad access", |
3948 | .insns = { |
3949 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
3950 | offsetof(struct xdp_md, data_meta)), |
3951 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
3952 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3953 | - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), |
3954 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), |
3955 | BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), |
3956 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
3957 | - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), |
3958 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), |
3959 | BPF_MOV64_IMM(BPF_REG_0, 0), |
3960 | BPF_EXIT_INSN(), |
3961 | }, |
3962 | @@ -574,6 +893,42 @@ |
3963 | .prog_type = BPF_PROG_TYPE_XDP, |
3964 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3965 | }, |
3966 | +{ |
3967 | + "XDP pkt read, pkt_data > pkt_meta', corner case, good access", |
3968 | + .insns = { |
3969 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
3970 | + offsetof(struct xdp_md, data_meta)), |
3971 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
3972 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3973 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), |
3974 | + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), |
3975 | + BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
3976 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), |
3977 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3978 | + BPF_EXIT_INSN(), |
3979 | + }, |
3980 | + .result = ACCEPT, |
3981 | + .prog_type = BPF_PROG_TYPE_XDP, |
3982 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3983 | +}, |
3984 | +{ |
3985 | + "XDP pkt read, pkt_data > pkt_meta', corner case +1, good access", |
3986 | + .insns = { |
3987 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
3988 | + offsetof(struct xdp_md, data_meta)), |
3989 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
3990 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
3991 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), |
3992 | + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), |
3993 | + BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
3994 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), |
3995 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3996 | + BPF_EXIT_INSN(), |
3997 | + }, |
3998 | + .result = ACCEPT, |
3999 | + .prog_type = BPF_PROG_TYPE_XDP, |
4000 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4001 | +}, |
4002 | { |
4003 | "XDP pkt read, pkt_meta' < pkt_data, good access", |
4004 | .insns = { |
4005 | @@ -593,16 +948,16 @@ |
4006 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4007 | }, |
4008 | { |
4009 | - "XDP pkt read, pkt_meta' < pkt_data, bad access 1", |
4010 | + "XDP pkt read, pkt_meta' < pkt_data, corner case -1, bad access", |
4011 | .insns = { |
4012 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
4013 | offsetof(struct xdp_md, data_meta)), |
4014 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
4015 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
4016 | - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), |
4017 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), |
4018 | BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), |
4019 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
4020 | - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), |
4021 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), |
4022 | BPF_MOV64_IMM(BPF_REG_0, 0), |
4023 | BPF_EXIT_INSN(), |
4024 | }, |
4025 | @@ -630,7 +985,43 @@ |
4026 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4027 | }, |
4028 | { |
4029 | - "XDP pkt read, pkt_data < pkt_meta', good access", |
4030 | + "XDP pkt read, pkt_meta' < pkt_data, corner case, good access", |
4031 | + .insns = { |
4032 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
4033 | + offsetof(struct xdp_md, data_meta)), |
4034 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
4035 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
4036 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), |
4037 | + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), |
4038 | + BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
4039 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), |
4040 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
4041 | + BPF_EXIT_INSN(), |
4042 | + }, |
4043 | + .result = ACCEPT, |
4044 | + .prog_type = BPF_PROG_TYPE_XDP, |
4045 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4046 | +}, |
4047 | +{ |
4048 | + "XDP pkt read, pkt_meta' < pkt_data, corner case +1, good access", |
4049 | + .insns = { |
4050 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
4051 | + offsetof(struct xdp_md, data_meta)), |
4052 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
4053 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
4054 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), |
4055 | + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), |
4056 | + BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
4057 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), |
4058 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
4059 | + BPF_EXIT_INSN(), |
4060 | + }, |
4061 | + .result = ACCEPT, |
4062 | + .prog_type = BPF_PROG_TYPE_XDP, |
4063 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4064 | +}, |
4065 | +{ |
4066 | + "XDP pkt read, pkt_data < pkt_meta', corner case, good access", |
4067 | .insns = { |
4068 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
4069 | offsetof(struct xdp_md, data_meta)), |
4070 | @@ -682,6 +1073,41 @@ |
4071 | .prog_type = BPF_PROG_TYPE_XDP, |
4072 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4073 | }, |
4074 | +{ |
4075 | + "XDP pkt read, pkt_data < pkt_meta', corner case +1, good access", |
4076 | + .insns = { |
4077 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
4078 | + offsetof(struct xdp_md, data_meta)), |
4079 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
4080 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
4081 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), |
4082 | + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), |
4083 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), |
4084 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
4085 | + BPF_EXIT_INSN(), |
4086 | + }, |
4087 | + .result = ACCEPT, |
4088 | + .prog_type = BPF_PROG_TYPE_XDP, |
4089 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4090 | +}, |
4091 | +{ |
4092 | + "XDP pkt read, pkt_data < pkt_meta', corner case -1, bad access", |
4093 | + .insns = { |
4094 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
4095 | + offsetof(struct xdp_md, data_meta)), |
4096 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
4097 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
4098 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), |
4099 | + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), |
4100 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), |
4101 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
4102 | + BPF_EXIT_INSN(), |
4103 | + }, |
4104 | + .errstr = "R1 offset is outside of the packet", |
4105 | + .result = REJECT, |
4106 | + .prog_type = BPF_PROG_TYPE_XDP, |
4107 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4108 | +}, |
4109 | { |
4110 | "XDP pkt read, pkt_meta' >= pkt_data, good access", |
4111 | .insns = { |
4112 | @@ -700,15 +1126,15 @@ |
4113 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4114 | }, |
4115 | { |
4116 | - "XDP pkt read, pkt_meta' >= pkt_data, bad access 1", |
4117 | + "XDP pkt read, pkt_meta' >= pkt_data, corner case -1, bad access", |
4118 | .insns = { |
4119 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
4120 | offsetof(struct xdp_md, data_meta)), |
4121 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
4122 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
4123 | - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), |
4124 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), |
4125 | BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), |
4126 | - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), |
4127 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), |
4128 | BPF_MOV64_IMM(BPF_REG_0, 0), |
4129 | BPF_EXIT_INSN(), |
4130 | }, |
4131 | @@ -736,7 +1162,41 @@ |
4132 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4133 | }, |
4134 | { |
4135 | - "XDP pkt read, pkt_data >= pkt_meta', good access", |
4136 | + "XDP pkt read, pkt_meta' >= pkt_data, corner case, good access", |
4137 | + .insns = { |
4138 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
4139 | + offsetof(struct xdp_md, data_meta)), |
4140 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
4141 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
4142 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), |
4143 | + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), |
4144 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), |
4145 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
4146 | + BPF_EXIT_INSN(), |
4147 | + }, |
4148 | + .result = ACCEPT, |
4149 | + .prog_type = BPF_PROG_TYPE_XDP, |
4150 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4151 | +}, |
4152 | +{ |
4153 | + "XDP pkt read, pkt_meta' >= pkt_data, corner case +1, good access", |
4154 | + .insns = { |
4155 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
4156 | + offsetof(struct xdp_md, data_meta)), |
4157 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
4158 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
4159 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), |
4160 | + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), |
4161 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), |
4162 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
4163 | + BPF_EXIT_INSN(), |
4164 | + }, |
4165 | + .result = ACCEPT, |
4166 | + .prog_type = BPF_PROG_TYPE_XDP, |
4167 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4168 | +}, |
4169 | +{ |
4170 | + "XDP pkt read, pkt_data >= pkt_meta', corner case, good access", |
4171 | .insns = { |
4172 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
4173 | offsetof(struct xdp_md, data_meta)), |
4174 | @@ -791,7 +1251,44 @@ |
4175 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4176 | }, |
4177 | { |
4178 | - "XDP pkt read, pkt_meta' <= pkt_data, good access", |
4179 | + "XDP pkt read, pkt_data >= pkt_meta', corner case +1, good access", |
4180 | + .insns = { |
4181 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
4182 | + offsetof(struct xdp_md, data_meta)), |
4183 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
4184 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
4185 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), |
4186 | + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), |
4187 | + BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
4188 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), |
4189 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
4190 | + BPF_EXIT_INSN(), |
4191 | + }, |
4192 | + .result = ACCEPT, |
4193 | + .prog_type = BPF_PROG_TYPE_XDP, |
4194 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4195 | +}, |
4196 | +{ |
4197 | + "XDP pkt read, pkt_data >= pkt_meta', corner case -1, bad access", |
4198 | + .insns = { |
4199 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
4200 | + offsetof(struct xdp_md, data_meta)), |
4201 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
4202 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
4203 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), |
4204 | + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), |
4205 | + BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
4206 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), |
4207 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
4208 | + BPF_EXIT_INSN(), |
4209 | + }, |
4210 | + .errstr = "R1 offset is outside of the packet", |
4211 | + .result = REJECT, |
4212 | + .prog_type = BPF_PROG_TYPE_XDP, |
4213 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4214 | +}, |
4215 | +{ |
4216 | + "XDP pkt read, pkt_meta' <= pkt_data, corner case, good access", |
4217 | .insns = { |
4218 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
4219 | offsetof(struct xdp_md, data_meta)), |
4220 | @@ -845,6 +1342,43 @@ |
4221 | .prog_type = BPF_PROG_TYPE_XDP, |
4222 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4223 | }, |
4224 | +{ |
4225 | + "XDP pkt read, pkt_meta' <= pkt_data, corner case +1, good access", |
4226 | + .insns = { |
4227 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
4228 | + offsetof(struct xdp_md, data_meta)), |
4229 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
4230 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
4231 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), |
4232 | + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), |
4233 | + BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
4234 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), |
4235 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
4236 | + BPF_EXIT_INSN(), |
4237 | + }, |
4238 | + .result = ACCEPT, |
4239 | + .prog_type = BPF_PROG_TYPE_XDP, |
4240 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4241 | +}, |
4242 | +{ |
4243 | + "XDP pkt read, pkt_meta' <= pkt_data, corner case -1, bad access", |
4244 | + .insns = { |
4245 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
4246 | + offsetof(struct xdp_md, data_meta)), |
4247 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
4248 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
4249 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), |
4250 | + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), |
4251 | + BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
4252 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), |
4253 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
4254 | + BPF_EXIT_INSN(), |
4255 | + }, |
4256 | + .errstr = "R1 offset is outside of the packet", |
4257 | + .result = REJECT, |
4258 | + .prog_type = BPF_PROG_TYPE_XDP, |
4259 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4260 | +}, |
4261 | { |
4262 | "XDP pkt read, pkt_data <= pkt_meta', good access", |
4263 | .insns = { |
4264 | @@ -863,15 +1397,15 @@ |
4265 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4266 | }, |
4267 | { |
4268 | - "XDP pkt read, pkt_data <= pkt_meta', bad access 1", |
4269 | + "XDP pkt read, pkt_data <= pkt_meta', corner case -1, bad access", |
4270 | .insns = { |
4271 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
4272 | offsetof(struct xdp_md, data_meta)), |
4273 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
4274 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
4275 | - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), |
4276 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), |
4277 | BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), |
4278 | - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), |
4279 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), |
4280 | BPF_MOV64_IMM(BPF_REG_0, 0), |
4281 | BPF_EXIT_INSN(), |
4282 | }, |
4283 | @@ -898,3 +1432,37 @@ |
4284 | .prog_type = BPF_PROG_TYPE_XDP, |
4285 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4286 | }, |
4287 | +{ |
4288 | + "XDP pkt read, pkt_data <= pkt_meta', corner case, good access", |
4289 | + .insns = { |
4290 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
4291 | + offsetof(struct xdp_md, data_meta)), |
4292 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
4293 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
4294 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), |
4295 | + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), |
4296 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), |
4297 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
4298 | + BPF_EXIT_INSN(), |
4299 | + }, |
4300 | + .result = ACCEPT, |
4301 | + .prog_type = BPF_PROG_TYPE_XDP, |
4302 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4303 | +}, |
4304 | +{ |
4305 | + "XDP pkt read, pkt_data <= pkt_meta', corner case +1, good access", |
4306 | + .insns = { |
4307 | + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, |
4308 | + offsetof(struct xdp_md, data_meta)), |
4309 | + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), |
4310 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), |
4311 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), |
4312 | + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), |
4313 | + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), |
4314 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
4315 | + BPF_EXIT_INSN(), |
4316 | + }, |
4317 | + .result = ACCEPT, |
4318 | + .prog_type = BPF_PROG_TYPE_XDP, |
4319 | + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
4320 | +}, |
4321 | diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh |
4322 | index 4e19a1c00ddd8..6986086035d6c 100755 |
4323 | --- a/tools/testing/selftests/net/fib_tests.sh |
4324 | +++ b/tools/testing/selftests/net/fib_tests.sh |
4325 | @@ -444,24 +444,63 @@ fib_rp_filter_test() |
4326 | setup |
4327 | |
4328 | set -e |
4329 | + ip netns add ns2 |
4330 | + ip netns set ns2 auto |
4331 | + |
4332 | + ip -netns ns2 link set dev lo up |
4333 | + |
4334 | + $IP link add name veth1 type veth peer name veth2 |
4335 | + $IP link set dev veth2 netns ns2 |
4336 | + $IP address add 192.0.2.1/24 dev veth1 |
4337 | + ip -netns ns2 address add 192.0.2.1/24 dev veth2 |
4338 | + $IP link set dev veth1 up |
4339 | + ip -netns ns2 link set dev veth2 up |
4340 | + |
4341 | $IP link set dev lo address 52:54:00:6a:c7:5e |
4342 | - $IP link set dummy0 address 52:54:00:6a:c7:5e |
4343 | - $IP link add dummy1 type dummy |
4344 | - $IP link set dummy1 address 52:54:00:6a:c7:5e |
4345 | - $IP link set dev dummy1 up |
4346 | + $IP link set dev veth1 address 52:54:00:6a:c7:5e |
4347 | + ip -netns ns2 link set dev lo address 52:54:00:6a:c7:5e |
4348 | + ip -netns ns2 link set dev veth2 address 52:54:00:6a:c7:5e |
4349 | + |
4350 | + # 1. (ns2) redirect lo's egress to veth2's egress |
4351 | + ip netns exec ns2 tc qdisc add dev lo parent root handle 1: fq_codel |
4352 | + ip netns exec ns2 tc filter add dev lo parent 1: protocol arp basic \ |
4353 | + action mirred egress redirect dev veth2 |
4354 | + ip netns exec ns2 tc filter add dev lo parent 1: protocol ip basic \ |
4355 | + action mirred egress redirect dev veth2 |
4356 | + |
4357 | + # 2. (ns1) redirect veth1's ingress to lo's ingress |
4358 | + $NS_EXEC tc qdisc add dev veth1 ingress |
4359 | + $NS_EXEC tc filter add dev veth1 ingress protocol arp basic \ |
4360 | + action mirred ingress redirect dev lo |
4361 | + $NS_EXEC tc filter add dev veth1 ingress protocol ip basic \ |
4362 | + action mirred ingress redirect dev lo |
4363 | + |
4364 | + # 3. (ns1) redirect lo's egress to veth1's egress |
4365 | + $NS_EXEC tc qdisc add dev lo parent root handle 1: fq_codel |
4366 | + $NS_EXEC tc filter add dev lo parent 1: protocol arp basic \ |
4367 | + action mirred egress redirect dev veth1 |
4368 | + $NS_EXEC tc filter add dev lo parent 1: protocol ip basic \ |
4369 | + action mirred egress redirect dev veth1 |
4370 | + |
4371 | + # 4. (ns2) redirect veth2's ingress to lo's ingress |
4372 | + ip netns exec ns2 tc qdisc add dev veth2 ingress |
4373 | + ip netns exec ns2 tc filter add dev veth2 ingress protocol arp basic \ |
4374 | + action mirred ingress redirect dev lo |
4375 | + ip netns exec ns2 tc filter add dev veth2 ingress protocol ip basic \ |
4376 | + action mirred ingress redirect dev lo |
4377 | + |
4378 | $NS_EXEC sysctl -qw net.ipv4.conf.all.rp_filter=1 |
4379 | $NS_EXEC sysctl -qw net.ipv4.conf.all.accept_local=1 |
4380 | $NS_EXEC sysctl -qw net.ipv4.conf.all.route_localnet=1 |
4381 | - |
4382 | - $NS_EXEC tc qd add dev dummy1 parent root handle 1: fq_codel |
4383 | - $NS_EXEC tc filter add dev dummy1 parent 1: protocol arp basic action mirred egress redirect dev lo |
4384 | - $NS_EXEC tc filter add dev dummy1 parent 1: protocol ip basic action mirred egress redirect dev lo |
4385 | + ip netns exec ns2 sysctl -qw net.ipv4.conf.all.rp_filter=1 |
4386 | + ip netns exec ns2 sysctl -qw net.ipv4.conf.all.accept_local=1 |
4387 | + ip netns exec ns2 sysctl -qw net.ipv4.conf.all.route_localnet=1 |
4388 | set +e |
4389 | |
4390 | - run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 198.51.100.1" |
4391 | + run_cmd "ip netns exec ns2 ping -w1 -c1 192.0.2.1" |
4392 | log_test $? 0 "rp_filter passes local packets" |
4393 | |
4394 | - run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 127.0.0.1" |
4395 | + run_cmd "ip netns exec ns2 ping -w1 -c1 127.0.0.1" |
4396 | log_test $? 0 "rp_filter passes loopback packets" |
4397 | |
4398 | cleanup |
4399 | diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile |
4400 | index 4144984ebee56..d0c56d5a5c528 100644 |
4401 | --- a/tools/testing/selftests/netfilter/Makefile |
4402 | +++ b/tools/testing/selftests/netfilter/Makefile |
4403 | @@ -2,6 +2,7 @@ |
4404 | # Makefile for netfilter selftests |
4405 | |
4406 | TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \ |
4407 | - conntrack_icmp_related.sh nft_flowtable.sh |
4408 | + conntrack_icmp_related.sh nft_flowtable.sh \ |
4409 | + conntrack_vrf.sh |
4410 | |
4411 | include ../lib.mk |
4412 | diff --git a/tools/testing/selftests/netfilter/conntrack_vrf.sh b/tools/testing/selftests/netfilter/conntrack_vrf.sh |
4413 | new file mode 100644 |
4414 | index 0000000000000..8b5ea92345882 |
4415 | --- /dev/null |
4416 | +++ b/tools/testing/selftests/netfilter/conntrack_vrf.sh |
4417 | @@ -0,0 +1,241 @@ |
4418 | +#!/bin/sh |
4419 | + |
4420 | +# This script demonstrates interaction of conntrack and vrf. |
4421 | +# The vrf driver calls the netfilter hooks again, with oif/iif |
4422 | +# pointing at the VRF device. |
4423 | +# |
4424 | +# For ingress, this means first iteration has iifname of lower/real |
4425 | +# device. In this script, thats veth0. |
4426 | +# Second iteration is iifname set to vrf device, tvrf in this script. |
4427 | +# |
4428 | +# For egress, this is reversed: first iteration has the vrf device, |
4429 | +# second iteration is done with the lower/real/veth0 device. |
4430 | +# |
4431 | +# test_ct_zone_in demonstrates unexpected change of nftables |
4432 | +# behavior caused by commit 09e856d54bda5f28 "vrf: Reset skb conntrack |
4433 | +# connection on VRF rcv" |
4434 | +# |
4435 | +# It was possible to assign conntrack zone to a packet (or mark it for |
4436 | +# `notracking`) in the prerouting chain before conntrack, based on real iif. |
4437 | +# |
4438 | +# After the change, the zone assignment is lost and the zone is assigned based |
4439 | +# on the VRF master interface (in case such a rule exists). |
4440 | +# The original per-interface assignment is discarded; instead, only an `iif` |
4441 | +# match on the VRF device applies. Thus it is impossible to distinguish |
4442 | +# packets based on the original interface. |
4443 | +# |
4444 | +# test_masquerade_vrf and test_masquerade_veth0 demonstrate the problem |
4445 | +# that was supposed to be fixed by the commit mentioned above to make sure |
4446 | +# that any fix to test case 1 won't break masquerade again. |
4447 | + |
4448 | +ksft_skip=4 |
4449 | + |
4450 | +IP0=172.30.30.1 |
4451 | +IP1=172.30.30.2 |
4452 | +PFXL=30 |
4453 | +ret=0 |
4454 | + |
4455 | +sfx=$(mktemp -u "XXXXXXXX") |
4456 | +ns0="ns0-$sfx" |
4457 | +ns1="ns1-$sfx" |
4458 | + |
4459 | +cleanup() |
4460 | +{ |
4461 | + ip netns pids $ns0 | xargs kill 2>/dev/null |
4462 | + ip netns pids $ns1 | xargs kill 2>/dev/null |
4463 | + |
4464 | + ip netns del $ns0 $ns1 |
4465 | +} |
4466 | + |
4467 | +nft --version > /dev/null 2>&1 |
4468 | +if [ $? -ne 0 ];then |
4469 | + echo "SKIP: Could not run test without nft tool" |
4470 | + exit $ksft_skip |
4471 | +fi |
4472 | + |
4473 | +ip -Version > /dev/null 2>&1 |
4474 | +if [ $? -ne 0 ];then |
4475 | + echo "SKIP: Could not run test without ip tool" |
4476 | + exit $ksft_skip |
4477 | +fi |
4478 | + |
4479 | +ip netns add "$ns0" |
4480 | +if [ $? -ne 0 ];then |
4481 | + echo "SKIP: Could not create net namespace $ns0" |
4482 | + exit $ksft_skip |
4483 | +fi |
4484 | +ip netns add "$ns1" |
4485 | + |
4486 | +trap cleanup EXIT |
4487 | + |
4488 | +ip netns exec $ns0 sysctl -q -w net.ipv4.conf.default.rp_filter=0 |
4489 | +ip netns exec $ns0 sysctl -q -w net.ipv4.conf.all.rp_filter=0 |
4490 | +ip netns exec $ns0 sysctl -q -w net.ipv4.conf.all.rp_filter=0 |
4491 | + |
4492 | +ip link add veth0 netns "$ns0" type veth peer name veth0 netns "$ns1" > /dev/null 2>&1 |
4493 | +if [ $? -ne 0 ];then |
4494 | + echo "SKIP: Could not add veth device" |
4495 | + exit $ksft_skip |
4496 | +fi |
4497 | + |
4498 | +ip -net $ns0 li add tvrf type vrf table 9876 |
4499 | +if [ $? -ne 0 ];then |
4500 | + echo "SKIP: Could not add vrf device" |
4501 | + exit $ksft_skip |
4502 | +fi |
4503 | + |
4504 | +ip -net $ns0 li set lo up |
4505 | + |
4506 | +ip -net $ns0 li set veth0 master tvrf |
4507 | +ip -net $ns0 li set tvrf up |
4508 | +ip -net $ns0 li set veth0 up |
4509 | +ip -net $ns1 li set veth0 up |
4510 | + |
4511 | +ip -net $ns0 addr add $IP0/$PFXL dev veth0 |
4512 | +ip -net $ns1 addr add $IP1/$PFXL dev veth0 |
4513 | + |
4514 | +ip netns exec $ns1 iperf3 -s > /dev/null 2>&1& |
4515 | +if [ $? -ne 0 ];then |
4516 | + echo "SKIP: Could not start iperf3" |
4517 | + exit $ksft_skip |
4518 | +fi |
4519 | + |
4520 | +# test vrf ingress handling. |
4521 | +# The incoming connection should be placed in conntrack zone 1, |
4522 | +# as decided by the first iteration of the ruleset. |
4523 | +test_ct_zone_in() |
4524 | +{ |
4525 | +ip netns exec $ns0 nft -f - <<EOF |
4526 | +table testct { |
4527 | + chain rawpre { |
4528 | + type filter hook prerouting priority raw; |
4529 | + |
4530 | + iif { veth0, tvrf } counter meta nftrace set 1 |
4531 | + iif veth0 counter ct zone set 1 counter return |
4532 | + iif tvrf counter ct zone set 2 counter return |
4533 | + ip protocol icmp counter |
4534 | + notrack counter |
4535 | + } |
4536 | + |
4537 | + chain rawout { |
4538 | + type filter hook output priority raw; |
4539 | + |
4540 | + oif veth0 counter ct zone set 1 counter return |
4541 | + oif tvrf counter ct zone set 2 counter return |
4542 | + notrack counter |
4543 | + } |
4544 | +} |
4545 | +EOF |
4546 | + ip netns exec $ns1 ping -W 1 -c 1 -I veth0 $IP0 > /dev/null |
4547 | + |
4548 | + # should be in zone 1, not zone 2 |
4549 | + count=$(ip netns exec $ns0 conntrack -L -s $IP1 -d $IP0 -p icmp --zone 1 2>/dev/null | wc -l) |
4550 | + if [ $count -eq 1 ]; then |
4551 | + echo "PASS: entry found in conntrack zone 1" |
4552 | + else |
4553 | + echo "FAIL: entry not found in conntrack zone 1" |
4554 | + count=$(ip netns exec $ns0 conntrack -L -s $IP1 -d $IP0 -p icmp --zone 2 2> /dev/null | wc -l) |
4555 | + if [ $count -eq 1 ]; then |
4556 | + echo "FAIL: entry found in zone 2 instead" |
4557 | + else |
4558 | + echo "FAIL: entry not in zone 1 or 2, dumping table" |
4559 | + ip netns exec $ns0 conntrack -L |
4560 | + ip netns exec $ns0 nft list ruleset |
4561 | + fi |
4562 | + fi |
4563 | +} |
4564 | + |
4565 | +# add masq rule that gets evaluated w. outif set to vrf device. |
4566 | +# This tests the first iteration of the packet through conntrack, |
4567 | +# oifname is the vrf device. |
4568 | +test_masquerade_vrf() |
4569 | +{ |
4570 | + local qdisc=$1 |
4571 | + |
4572 | + if [ "$qdisc" != "default" ]; then |
4573 | + tc -net $ns0 qdisc add dev tvrf root $qdisc |
4574 | + fi |
4575 | + |
4576 | + ip netns exec $ns0 conntrack -F 2>/dev/null |
4577 | + |
4578 | +ip netns exec $ns0 nft -f - <<EOF |
4579 | +flush ruleset |
4580 | +table ip nat { |
4581 | + chain rawout { |
4582 | + type filter hook output priority raw; |
4583 | + |
4584 | + oif tvrf ct state untracked counter |
4585 | + } |
4586 | + chain postrouting2 { |
4587 | + type filter hook postrouting priority mangle; |
4588 | + |
4589 | + oif tvrf ct state untracked counter |
4590 | + } |
4591 | + chain postrouting { |
4592 | + type nat hook postrouting priority 0; |
4593 | + # NB: masquerade should always be combined with 'oif(name) bla', |
4594 | + # lack of this is intentional here, we want to exercise double-snat. |
4595 | + ip saddr 172.30.30.0/30 counter masquerade random |
4596 | + } |
4597 | +} |
4598 | +EOF |
4599 | + ip netns exec $ns0 ip vrf exec tvrf iperf3 -t 1 -c $IP1 >/dev/null |
4600 | + if [ $? -ne 0 ]; then |
4601 | + echo "FAIL: iperf3 connect failure with masquerade + sport rewrite on vrf device" |
4602 | + ret=1 |
4603 | + return |
4604 | + fi |
4605 | + |
4606 | + # must also check that nat table was evaluated on second (lower device) iteration. |
4607 | + ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2' && |
4608 | + ip netns exec $ns0 nft list table ip nat |grep -q 'untracked counter packets [1-9]' |
4609 | + if [ $? -eq 0 ]; then |
4610 | + echo "PASS: iperf3 connect with masquerade + sport rewrite on vrf device ($qdisc qdisc)" |
4611 | + else |
4612 | + echo "FAIL: vrf rules have unexpected counter value" |
4613 | + ret=1 |
4614 | + fi |
4615 | + |
4616 | + if [ "$qdisc" != "default" ]; then |
4617 | + tc -net $ns0 qdisc del dev tvrf root |
4618 | + fi |
4619 | +} |
4620 | + |
4621 | +# add masq rule that gets evaluated w. outif set to veth device. |
4622 | +# This tests the 2nd iteration of the packet through conntrack, |
4623 | +# oifname is the lower device (veth0 in this case). |
4624 | +test_masquerade_veth() |
4625 | +{ |
4626 | + ip netns exec $ns0 conntrack -F 2>/dev/null |
4627 | +ip netns exec $ns0 nft -f - <<EOF |
4628 | +flush ruleset |
4629 | +table ip nat { |
4630 | + chain postrouting { |
4631 | + type nat hook postrouting priority 0; |
4632 | + meta oif veth0 ip saddr 172.30.30.0/30 counter masquerade random |
4633 | + } |
4634 | +} |
4635 | +EOF |
4636 | + ip netns exec $ns0 ip vrf exec tvrf iperf3 -t 1 -c $IP1 > /dev/null |
4637 | + if [ $? -ne 0 ]; then |
4638 | + echo "FAIL: iperf3 connect failure with masquerade + sport rewrite on veth device" |
4639 | + ret=1 |
4640 | + return |
4641 | + fi |
4642 | + |
4643 | + # must also check that nat table was evaluated on second (lower device) iteration. |
4644 | + ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2' |
4645 | + if [ $? -eq 0 ]; then |
4646 | + echo "PASS: iperf3 connect with masquerade + sport rewrite on veth device" |
4647 | + else |
4648 | + echo "FAIL: vrf masq rule has unexpected counter value" |
4649 | + ret=1 |
4650 | + fi |
4651 | +} |
4652 | + |
4653 | +test_ct_zone_in |
4654 | +test_masquerade_vrf "default" |
4655 | +test_masquerade_vrf "pfifo" |
4656 | +test_masquerade_veth |
4657 | + |
4658 | +exit $ret |