Contents of /trunk/kernel-alx/patches-5.4/0199-5.4.100-all-fixes.patch
Parent Directory | Revision Log
Revision 3637 -
(show annotations)
(download)
Mon Oct 24 12:40:44 2022 UTC (19 months ago) by niro
File size: 12973 byte(s)
-add missing
1 | diff --git a/Makefile b/Makefile |
2 | index a0491ba1d7593..d0d4beb4f8373 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 5 |
8 | PATCHLEVEL = 4 |
9 | -SUBLEVEL = 99 |
10 | +SUBLEVEL = 100 |
11 | EXTRAVERSION = |
12 | NAME = Kleptomaniac Octopus |
13 | |
14 | diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c |
15 | index e52950a43f2ed..fd6e3aafe2724 100644 |
16 | --- a/arch/arm/xen/p2m.c |
17 | +++ b/arch/arm/xen/p2m.c |
18 | @@ -95,8 +95,10 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, |
19 | for (i = 0; i < count; i++) { |
20 | if (map_ops[i].status) |
21 | continue; |
22 | - set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT, |
23 | - map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT); |
24 | + if (unlikely(!set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT, |
25 | + map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT))) { |
26 | + return -ENOMEM; |
27 | + } |
28 | } |
29 | |
30 | return 0; |
31 | diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
32 | index 296b0d7570d06..1da558f28aa57 100644 |
33 | --- a/arch/x86/kvm/svm.c |
34 | +++ b/arch/x86/kvm/svm.c |
35 | @@ -7104,7 +7104,6 @@ static int svm_register_enc_region(struct kvm *kvm, |
36 | region->uaddr = range->addr; |
37 | region->size = range->size; |
38 | |
39 | - mutex_lock(&kvm->lock); |
40 | list_add_tail(®ion->list, &sev->regions_list); |
41 | mutex_unlock(&kvm->lock); |
42 | |
43 | diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c |
44 | index 0acba2c712ab8..7fe5be8e6d3db 100644 |
45 | --- a/arch/x86/xen/p2m.c |
46 | +++ b/arch/x86/xen/p2m.c |
47 | @@ -716,7 +716,8 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, |
48 | unsigned long mfn, pfn; |
49 | |
50 | /* Do not add to override if the map failed. */ |
51 | - if (map_ops[i].status) |
52 | + if (map_ops[i].status != GNTST_okay || |
53 | + (kmap_ops && kmap_ops[i].status != GNTST_okay)) |
54 | continue; |
55 | |
56 | if (map_ops[i].flags & GNTMAP_contains_pte) { |
57 | @@ -754,17 +755,15 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, |
58 | unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); |
59 | unsigned long pfn = page_to_pfn(pages[i]); |
60 | |
61 | - if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) { |
62 | + if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT)) |
63 | + set_phys_to_machine(pfn, INVALID_P2M_ENTRY); |
64 | + else |
65 | ret = -EINVAL; |
66 | - goto out; |
67 | - } |
68 | - |
69 | - set_phys_to_machine(pfn, INVALID_P2M_ENTRY); |
70 | } |
71 | if (kunmap_ops) |
72 | ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, |
73 | - kunmap_ops, count); |
74 | -out: |
75 | + kunmap_ops, count) ?: ret; |
76 | + |
77 | return ret; |
78 | } |
79 | EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping); |
80 | diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c |
81 | index b18f0162cb9c4..208f3eea3641f 100644 |
82 | --- a/drivers/block/xen-blkback/blkback.c |
83 | +++ b/drivers/block/xen-blkback/blkback.c |
84 | @@ -850,8 +850,11 @@ again: |
85 | pages[i]->page = persistent_gnt->page; |
86 | pages[i]->persistent_gnt = persistent_gnt; |
87 | } else { |
88 | - if (get_free_page(ring, &pages[i]->page)) |
89 | - goto out_of_memory; |
90 | + if (get_free_page(ring, &pages[i]->page)) { |
91 | + put_free_pages(ring, pages_to_gnt, segs_to_map); |
92 | + ret = -ENOMEM; |
93 | + goto out; |
94 | + } |
95 | addr = vaddr(pages[i]->page); |
96 | pages_to_gnt[segs_to_map] = pages[i]->page; |
97 | pages[i]->persistent_gnt = NULL; |
98 | @@ -867,10 +870,8 @@ again: |
99 | break; |
100 | } |
101 | |
102 | - if (segs_to_map) { |
103 | + if (segs_to_map) |
104 | ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map); |
105 | - BUG_ON(ret); |
106 | - } |
107 | |
108 | /* |
109 | * Now swizzle the MFN in our domain with the MFN from the other domain |
110 | @@ -885,7 +886,7 @@ again: |
111 | pr_debug("invalid buffer -- could not remap it\n"); |
112 | put_free_pages(ring, &pages[seg_idx]->page, 1); |
113 | pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE; |
114 | - ret |= 1; |
115 | + ret |= !ret; |
116 | goto next; |
117 | } |
118 | pages[seg_idx]->handle = map[new_map_idx].handle; |
119 | @@ -937,17 +938,18 @@ next: |
120 | } |
121 | segs_to_map = 0; |
122 | last_map = map_until; |
123 | - if (map_until != num) |
124 | + if (!ret && map_until != num) |
125 | goto again; |
126 | |
127 | - return ret; |
128 | - |
129 | -out_of_memory: |
130 | - pr_alert("%s: out of memory\n", __func__); |
131 | - put_free_pages(ring, pages_to_gnt, segs_to_map); |
132 | - for (i = last_map; i < num; i++) |
133 | +out: |
134 | + for (i = last_map; i < num; i++) { |
135 | + /* Don't zap current batch's valid persistent grants. */ |
136 | + if(i >= last_map + segs_to_map) |
137 | + pages[i]->persistent_gnt = NULL; |
138 | pages[i]->handle = BLKBACK_INVALID_HANDLE; |
139 | - return -ENOMEM; |
140 | + } |
141 | + |
142 | + return ret; |
143 | } |
144 | |
145 | static int xen_blkbk_map_seg(struct pending_req *pending_req) |
146 | diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c |
147 | index 9b76cf133d524..3df7ca2357da8 100644 |
148 | --- a/drivers/media/usb/pwc/pwc-if.c |
149 | +++ b/drivers/media/usb/pwc/pwc-if.c |
150 | @@ -147,16 +147,17 @@ static const struct video_device pwc_template = { |
151 | /***************************************************************************/ |
152 | /* Private functions */ |
153 | |
154 | -static void *pwc_alloc_urb_buffer(struct device *dev, |
155 | +static void *pwc_alloc_urb_buffer(struct usb_device *dev, |
156 | size_t size, dma_addr_t *dma_handle) |
157 | { |
158 | + struct device *dmadev = dev->bus->sysdev; |
159 | void *buffer = kmalloc(size, GFP_KERNEL); |
160 | |
161 | if (!buffer) |
162 | return NULL; |
163 | |
164 | - *dma_handle = dma_map_single(dev, buffer, size, DMA_FROM_DEVICE); |
165 | - if (dma_mapping_error(dev, *dma_handle)) { |
166 | + *dma_handle = dma_map_single(dmadev, buffer, size, DMA_FROM_DEVICE); |
167 | + if (dma_mapping_error(dmadev, *dma_handle)) { |
168 | kfree(buffer); |
169 | return NULL; |
170 | } |
171 | @@ -164,12 +165,14 @@ static void *pwc_alloc_urb_buffer(struct device *dev, |
172 | return buffer; |
173 | } |
174 | |
175 | -static void pwc_free_urb_buffer(struct device *dev, |
176 | +static void pwc_free_urb_buffer(struct usb_device *dev, |
177 | size_t size, |
178 | void *buffer, |
179 | dma_addr_t dma_handle) |
180 | { |
181 | - dma_unmap_single(dev, dma_handle, size, DMA_FROM_DEVICE); |
182 | + struct device *dmadev = dev->bus->sysdev; |
183 | + |
184 | + dma_unmap_single(dmadev, dma_handle, size, DMA_FROM_DEVICE); |
185 | kfree(buffer); |
186 | } |
187 | |
188 | @@ -274,6 +277,7 @@ static void pwc_frame_complete(struct pwc_device *pdev) |
189 | static void pwc_isoc_handler(struct urb *urb) |
190 | { |
191 | struct pwc_device *pdev = (struct pwc_device *)urb->context; |
192 | + struct device *dmadev = urb->dev->bus->sysdev; |
193 | int i, fst, flen; |
194 | unsigned char *iso_buf = NULL; |
195 | |
196 | @@ -320,7 +324,7 @@ static void pwc_isoc_handler(struct urb *urb) |
197 | /* Reset ISOC error counter. We did get here, after all. */ |
198 | pdev->visoc_errors = 0; |
199 | |
200 | - dma_sync_single_for_cpu(&urb->dev->dev, |
201 | + dma_sync_single_for_cpu(dmadev, |
202 | urb->transfer_dma, |
203 | urb->transfer_buffer_length, |
204 | DMA_FROM_DEVICE); |
205 | @@ -371,7 +375,7 @@ static void pwc_isoc_handler(struct urb *urb) |
206 | pdev->vlast_packet_size = flen; |
207 | } |
208 | |
209 | - dma_sync_single_for_device(&urb->dev->dev, |
210 | + dma_sync_single_for_device(dmadev, |
211 | urb->transfer_dma, |
212 | urb->transfer_buffer_length, |
213 | DMA_FROM_DEVICE); |
214 | @@ -453,7 +457,7 @@ retry: |
215 | urb->pipe = usb_rcvisocpipe(udev, pdev->vendpoint); |
216 | urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; |
217 | urb->transfer_buffer_length = ISO_BUFFER_SIZE; |
218 | - urb->transfer_buffer = pwc_alloc_urb_buffer(&udev->dev, |
219 | + urb->transfer_buffer = pwc_alloc_urb_buffer(udev, |
220 | urb->transfer_buffer_length, |
221 | &urb->transfer_dma); |
222 | if (urb->transfer_buffer == NULL) { |
223 | @@ -516,7 +520,7 @@ static void pwc_iso_free(struct pwc_device *pdev) |
224 | if (urb) { |
225 | PWC_DEBUG_MEMORY("Freeing URB\n"); |
226 | if (urb->transfer_buffer) |
227 | - pwc_free_urb_buffer(&urb->dev->dev, |
228 | + pwc_free_urb_buffer(urb->dev, |
229 | urb->transfer_buffer_length, |
230 | urb->transfer_buffer, |
231 | urb->transfer_dma); |
232 | diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c |
233 | index fa1ac0abc924b..4bfafcd6317f4 100644 |
234 | --- a/drivers/net/xen-netback/netback.c |
235 | +++ b/drivers/net/xen-netback/netback.c |
236 | @@ -1335,13 +1335,11 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget) |
237 | return 0; |
238 | |
239 | gnttab_batch_copy(queue->tx_copy_ops, nr_cops); |
240 | - if (nr_mops != 0) { |
241 | + if (nr_mops != 0) |
242 | ret = gnttab_map_refs(queue->tx_map_ops, |
243 | NULL, |
244 | queue->pages_to_map, |
245 | nr_mops); |
246 | - BUG_ON(ret); |
247 | - } |
248 | |
249 | work_done = xenvif_tx_submit(queue); |
250 | |
251 | diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c |
252 | index 246864bbb3e4e..e953ea34b3e43 100644 |
253 | --- a/drivers/xen/gntdev.c |
254 | +++ b/drivers/xen/gntdev.c |
255 | @@ -319,44 +319,47 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map) |
256 | * to the kernel linear addresses of the struct pages. |
257 | * These ptes are completely different from the user ptes dealt |
258 | * with find_grant_ptes. |
259 | + * Note that GNTMAP_device_map isn't needed here: The |
260 | + * dev_bus_addr output field gets consumed only from ->map_ops, |
261 | + * and by not requesting it when mapping we also avoid needing |
262 | + * to mirror dev_bus_addr into ->unmap_ops (and holding an extra |
263 | + * reference to the page in the hypervisor). |
264 | */ |
265 | + unsigned int flags = (map->flags & ~GNTMAP_device_map) | |
266 | + GNTMAP_host_map; |
267 | + |
268 | for (i = 0; i < map->count; i++) { |
269 | unsigned long address = (unsigned long) |
270 | pfn_to_kaddr(page_to_pfn(map->pages[i])); |
271 | BUG_ON(PageHighMem(map->pages[i])); |
272 | |
273 | - gnttab_set_map_op(&map->kmap_ops[i], address, |
274 | - map->flags | GNTMAP_host_map, |
275 | + gnttab_set_map_op(&map->kmap_ops[i], address, flags, |
276 | map->grants[i].ref, |
277 | map->grants[i].domid); |
278 | gnttab_set_unmap_op(&map->kunmap_ops[i], address, |
279 | - map->flags | GNTMAP_host_map, -1); |
280 | + flags, -1); |
281 | } |
282 | } |
283 | |
284 | pr_debug("map %d+%d\n", map->index, map->count); |
285 | err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL, |
286 | map->pages, map->count); |
287 | - if (err) |
288 | - return err; |
289 | |
290 | for (i = 0; i < map->count; i++) { |
291 | - if (map->map_ops[i].status) { |
292 | + if (map->map_ops[i].status == GNTST_okay) |
293 | + map->unmap_ops[i].handle = map->map_ops[i].handle; |
294 | + else if (!err) |
295 | err = -EINVAL; |
296 | - continue; |
297 | - } |
298 | |
299 | - map->unmap_ops[i].handle = map->map_ops[i].handle; |
300 | - if (use_ptemod) |
301 | - map->kunmap_ops[i].handle = map->kmap_ops[i].handle; |
302 | -#ifdef CONFIG_XEN_GRANT_DMA_ALLOC |
303 | - else if (map->dma_vaddr) { |
304 | - unsigned long bfn; |
305 | + if (map->flags & GNTMAP_device_map) |
306 | + map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr; |
307 | |
308 | - bfn = pfn_to_bfn(page_to_pfn(map->pages[i])); |
309 | - map->unmap_ops[i].dev_bus_addr = __pfn_to_phys(bfn); |
310 | + if (use_ptemod) { |
311 | + if (map->kmap_ops[i].status == GNTST_okay) |
312 | + map->kunmap_ops[i].handle = map->kmap_ops[i].handle; |
313 | + else if (!err) |
314 | + err = -EINVAL; |
315 | } |
316 | -#endif |
317 | } |
318 | return err; |
319 | } |
320 | diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c |
321 | index 33d6499d84724..32aba2e8c075f 100644 |
322 | --- a/drivers/xen/xen-scsiback.c |
323 | +++ b/drivers/xen/xen-scsiback.c |
324 | @@ -422,12 +422,12 @@ static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map, |
325 | return 0; |
326 | |
327 | err = gnttab_map_refs(map, NULL, pg, cnt); |
328 | - BUG_ON(err); |
329 | for (i = 0; i < cnt; i++) { |
330 | if (unlikely(map[i].status != GNTST_okay)) { |
331 | pr_err("invalid buffer -- could not remap it\n"); |
332 | map[i].handle = SCSIBACK_INVALID_HANDLE; |
333 | - err = -ENOMEM; |
334 | + if (!err) |
335 | + err = -ENOMEM; |
336 | } else { |
337 | get_page(pg[i]); |
338 | } |
339 | diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h |
340 | index cda5534d3d0e3..7960359dbc700 100644 |
341 | --- a/fs/btrfs/ctree.h |
342 | +++ b/fs/btrfs/ctree.h |
343 | @@ -136,9 +136,6 @@ enum { |
344 | BTRFS_FS_STATE_DEV_REPLACING, |
345 | /* The btrfs_fs_info created for self-tests */ |
346 | BTRFS_FS_STATE_DUMMY_FS_INFO, |
347 | - |
348 | - /* Indicate that we can't trust the free space tree for caching yet */ |
349 | - BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, |
350 | }; |
351 | |
352 | #define BTRFS_BACKREF_REV_MAX 256 |
353 | @@ -527,6 +524,9 @@ enum { |
354 | * so we don't need to offload checksums to workqueues. |
355 | */ |
356 | BTRFS_FS_CSUM_IMPL_FAST, |
357 | + |
358 | + /* Indicate that we can't trust the free space tree for caching yet */ |
359 | + BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, |
360 | }; |
361 | |
362 | struct btrfs_fs_info { |
363 | diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h |
364 | index 9bc5bc07d4d3f..a9978350b45b0 100644 |
365 | --- a/include/xen/grant_table.h |
366 | +++ b/include/xen/grant_table.h |
367 | @@ -157,6 +157,7 @@ gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr, |
368 | map->flags = flags; |
369 | map->ref = ref; |
370 | map->dom = domid; |
371 | + map->status = 1; /* arbitrary positive value */ |
372 | } |
373 | |
374 | static inline void |
375 | diff --git a/net/bridge/br.c b/net/bridge/br.c |
376 | index 8a8f9e5f264f2..cccbb9bf3ca4e 100644 |
377 | --- a/net/bridge/br.c |
378 | +++ b/net/bridge/br.c |
379 | @@ -43,7 +43,10 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v |
380 | |
381 | if (event == NETDEV_REGISTER) { |
382 | /* register of bridge completed, add sysfs entries */ |
383 | - br_sysfs_addbr(dev); |
384 | + err = br_sysfs_addbr(dev); |
385 | + if (err) |
386 | + return notifier_from_errno(err); |
387 | + |
388 | return NOTIFY_DONE; |
389 | } |
390 | } |
391 | diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c |
392 | index d6d2736ec9273..ef602976bb2c8 100644 |
393 | --- a/net/qrtr/qrtr.c |
394 | +++ b/net/qrtr/qrtr.c |
395 | @@ -187,7 +187,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, |
396 | hdr->src_port_id = cpu_to_le32(from->sq_port); |
397 | if (to->sq_port == QRTR_PORT_CTRL) { |
398 | hdr->dst_node_id = cpu_to_le32(node->nid); |
399 | - hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST); |
400 | + hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL); |
401 | } else { |
402 | hdr->dst_node_id = cpu_to_le32(to->sq_node); |
403 | hdr->dst_port_id = cpu_to_le32(to->sq_port); |