Contents of /trunk/kernel-alx-legacy/patches-4.9/0374-4.9.275-all-fixes.patch
Revision 3676
Mon Oct 24 14:07:53 2022 UTC by niro
File size: 14441 byte(s)
-linux-4.9.275
diff --git a/Makefile b/Makefile
index 3002dfee32314..dfd253648758c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 274
+SUBLEVEL = 275
 EXTRAVERSION =
 NAME = Roaring Lionus

diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index a2e6a81669e78..94b7798bdea4e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -447,7 +447,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
 	int i;

-	if (!ttm_dma)
+	if (!ttm_dma || !ttm_dma->dma_address)
 		return;

 	/* Don't waste time looping if the object is coherent */
@@ -467,7 +467,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
 	int i;

-	if (!ttm_dma)
+	if (!ttm_dma || !ttm_dma->dma_address)
 		return;

 	/* Don't waste time looping if the object is coherent */
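
Note on the nouveau hunks above: nouveau_bo_sync_for_device() and nouveau_bo_sync_for_cpu() iterate over ttm_dma->dma_address[], which is only allocated for DMA-backed TTM objects, so the added check avoids a NULL dereference. Below is a minimal userspace sketch of the same guard pattern; the struct and function names are illustrative stand-ins, not the kernel's API.

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel structure (assumption, not kernel API). */
struct dma_tt {
	size_t num_pages;
	unsigned long long *dma_address;	/* may be NULL if never allocated */
};

static void sync_for_device(struct dma_tt *ttm_dma)
{
	/* Mirror of the patch: bail out when the DMA address array is
	 * missing, instead of dereferencing NULL in the loop below. */
	if (!ttm_dma || !ttm_dma->dma_address)
		return;

	for (size_t i = 0; i < ttm_dma->num_pages; i++)
		printf("sync page %zu at %#llx\n", i, ttm_dma->dma_address[i]);
}

int main(void)
{
	struct dma_tt no_dma = { .num_pages = 4, .dma_address = NULL };
	unsigned long long addrs[2] = { 0x1000, 0x2000 };
	struct dma_tt with_dma = { .num_pages = 2, .dma_address = addrs };

	sync_for_device(&no_dma);	/* previously a NULL dereference; now a no-op */
	sync_for_device(&with_dma);
	return 0;
}
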
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 67a73ea0a615e..5e51a39a0c27e 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -216,6 +216,8 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
 		return DISK_EVENT_EJECT_REQUEST;
 	else if (med->media_event_code == 2)
 		return DISK_EVENT_MEDIA_CHANGE;
+	else if (med->media_event_code == 3)
+		return DISK_EVENT_EJECT_REQUEST;
 	return 0;
 }

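The sr.c hunk extends sr_get_events() so that GET EVENT STATUS NOTIFICATION media-event code 3 (media removal) is reported as an eject request, alongside codes 1 and 2. A small self-contained sketch of the resulting mapping; the DISK_EVENT_* names follow the kernel, everything else here is illustrative.

#include <stdio.h>

/* Illustrative copies of the disk-event flags (values not significant here). */
enum disk_event {
	DISK_EVENT_NONE = 0,
	DISK_EVENT_MEDIA_CHANGE = 1,
	DISK_EVENT_EJECT_REQUEST = 2,
};

/* The mapping after the patch: codes 1 and 3 request an eject, code 2 is a
 * media change, everything else is ignored. */
static enum disk_event map_media_event(int media_event_code)
{
	switch (media_event_code) {
	case 1:			/* eject requested by the user */
	case 3:			/* media removal: treat like an eject request */
		return DISK_EVENT_EJECT_REQUEST;
	case 2:			/* new media arrived */
		return DISK_EVENT_MEDIA_CHANGE;
	default:
		return DISK_EVENT_NONE;
	}
}

int main(void)
{
	for (int code = 0; code <= 4; code++)
		printf("media_event_code %d -> %d\n", code, map_media_event(code));
	return 0;
}
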
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index ea307f40cab19..c6e6b7470cbf6 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -533,6 +533,9 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
 	}

 	info->eoi_time = 0;
+
+	/* is_active hasn't been reset yet, do it now. */
+	smp_store_release(&info->is_active, 0);
 	do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
 }

@@ -1778,10 +1781,22 @@ static void lateeoi_ack_dynirq(struct irq_data *data)
 	struct irq_info *info = info_for_irq(data->irq);
 	evtchn_port_t evtchn = info ? info->evtchn : 0;

-	if (VALID_EVTCHN(evtchn)) {
-		do_mask(info, EVT_MASK_REASON_EOI_PENDING);
-		ack_dynirq(data);
-	}
+	if (!VALID_EVTCHN(evtchn))
+		return;
+
+	do_mask(info, EVT_MASK_REASON_EOI_PENDING);
+
+	if (unlikely(irqd_is_setaffinity_pending(data)) &&
+	    likely(!irqd_irq_disabled(data))) {
+		do_mask(info, EVT_MASK_REASON_TEMPORARY);
+
+		clear_evtchn(evtchn);
+
+		irq_move_masked_irq(data);
+
+		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+	} else
+		clear_evtchn(evtchn);
 }

 static void lateeoi_mask_ack_dynirq(struct irq_data *data)
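
In the Xen hunks, smp_store_release() publishes the cleared is_active flag before the event channel is unmasked, so another CPU that observes the unmask also observes is_active == 0. Below is a minimal C11 model of that release/acquire pairing, assuming illustrative flag names (build with -pthread); it is a sketch of the ordering, not of the Xen code itself.

#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

static atomic_int is_active = 1;	/* stands in for info->is_active */
static atomic_int unmasked;		/* stands in for the unmask in do_unmask() */

static void *eoi_path(void *arg)
{
	(void)arg;
	/* Like smp_store_release(&info->is_active, 0): everything before this
	 * store is visible to anyone who acquire-loads the flags afterwards. */
	atomic_store_explicit(&is_active, 0, memory_order_release);
	atomic_store_explicit(&unmasked, 1, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, eoi_path, NULL);

	/* Handler side: once the unmask is observed, the acquire load below
	 * is guaranteed to see is_active already cleared. */
	while (!atomic_load_explicit(&unmasked, memory_order_acquire))
		;
	printf("is_active after unmask: %d\n",
	       atomic_load_explicit(&is_active, memory_order_acquire));
	pthread_join(t, NULL);
	return 0;
}
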
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 8dd365c654780..6417bc845db56 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -451,17 +451,6 @@ static inline int hstate_index(struct hstate *h)
 	return h - hstates;
 }

-pgoff_t __basepage_index(struct page *page);
-
-/* Return page->index in PAGE_SIZE units */
-static inline pgoff_t basepage_index(struct page *page)
-{
-	if (!PageCompound(page))
-		return page->index;
-
-	return __basepage_index(page);
-}
-
 extern int dissolve_free_huge_pages(unsigned long start_pfn,
 				unsigned long end_pfn);
 static inline bool hugepage_migration_supported(struct hstate *h)
@@ -529,10 +518,6 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
 #define hstate_index_to_shift(index) 0
 #define hstate_index(h) 0

-static inline pgoff_t basepage_index(struct page *page)
-{
-	return page->index;
-}
 #define dissolve_free_huge_pages(s, e) 0
 #define hugepage_migration_supported(h) false

diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 451a811f48f26..d1fb3bbff37ad 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -36,10 +36,22 @@ void dump_mm(const struct mm_struct *mm);
 			BUG();						\
 		}							\
 	} while (0)
-#define VM_WARN_ON(cond) WARN_ON(cond)
-#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond)
-#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
-#define VM_WARN(cond, format...) WARN(cond, format)
+#define VM_WARN_ON_ONCE_PAGE(cond, page)	({			\
+	static bool __section(".data.once") __warned;			\
+	int __ret_warn_once = !!(cond);					\
+									\
+	if (unlikely(__ret_warn_once && !__warned)) {			\
+		dump_page(page, "VM_WARN_ON_ONCE_PAGE(" __stringify(cond)")");\
+		__warned = true;					\
+		WARN_ON(1);						\
+	}								\
+	unlikely(__ret_warn_once);					\
+})
+
+#define VM_WARN_ON(cond) (void)WARN_ON(cond)
+#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
+#define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format)
+#define VM_WARN(cond, format...) (void)WARN(cond, format)
 #else
 #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
 #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
@@ -47,6 +59,7 @@ void dump_mm(const struct mm_struct *mm);
 #define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
 #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
 #endif
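
The new VM_WARN_ON_ONCE_PAGE() pairs a one-shot guard in the .data.once section with dump_page(), warning once and letting the kernel continue where the older code paths would BUG(). A userspace sketch of the underlying warn-once idiom; it uses a GNU C statement expression, as the kernel macro itself does, and the macro name here is illustrative.

#include <stdbool.h>
#include <stdio.h>

/* Warn-once idiom: a block-scope static guard makes the report fire a single
 * time, while the macro still evaluates to the condition for the caller. */
#define WARN_ON_ONCE_MSG(cond, msg) ({					\
	static bool __warned;						\
	bool __ret = !!(cond);						\
	if (__ret && !__warned) {					\
		__warned = true;					\
		fprintf(stderr, "warning (once): %s: %s\n", msg, #cond);\
	}								\
	__ret;								\
})

int main(void)
{
	for (int i = 0; i < 5; i++) {
		/* The warning prints only at i == 0; the condition still
		 * reports true at i == 2 and i == 4. */
		if (WARN_ON_ONCE_MSG(i % 2 == 0, "even iteration"))
			printf("condition true at i=%d\n", i);
	}
	return 0;
}
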
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 35f4c4d9c4054..8672291633ddf 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -374,7 +374,7 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
 }

 /*
- * Get index of the page with in radix-tree
+ * Get index of the page within radix-tree (but not for hugetlb pages).
  * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
  */
 static inline pgoff_t page_to_index(struct page *page)
@@ -393,15 +393,16 @@ static inline pgoff_t page_to_index(struct page *page)
 	return pgoff;
 }

+extern pgoff_t hugetlb_basepage_index(struct page *page);
+
 /*
- * Get the offset in PAGE_SIZE.
- * (TODO: hugepage should have ->index in PAGE_SIZE)
+ * Get the offset in PAGE_SIZE (even for hugetlb pages).
+ * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
  */
 static inline pgoff_t page_to_pgoff(struct page *page)
 {
-	if (unlikely(PageHeadHuge(page)))
-		return page->index << compound_order(page);
-
+	if (unlikely(PageHuge(page)))
+		return hugetlb_basepage_index(page);
 	return page_to_index(page);
 }

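hugetlb_basepage_index(), declared above and defined in mm/hugetlb.c at the end of this patch, reduces a page inside a huge page to an offset in PAGE_SIZE units: the head page's index shifted left by the compound order, plus the page's position within the compound page. A plain-C model of that arithmetic; the order, indices, and function name here are illustrative.

#include <stdio.h>

/* Model: a huge page of order 9 (512 base pages, i.e. 2 MiB with 4 KiB pages).
 * head_index is the head page's index in huge-page-sized units. */
static unsigned long basepage_index(unsigned long head_index,
				    unsigned int compound_order,
				    unsigned long offset_in_compound)
{
	/* Same shape as the kernel helper:
	 * (index << compound_order(page_head)) + compound_idx */
	return (head_index << compound_order) + offset_in_compound;
}

int main(void)
{
	/* Third base page of the second 2 MiB huge page in a mapping. */
	unsigned long pgoff = basepage_index(1, 9, 2);
	printf("pgoff = %lu\n", pgoff);	/* 512 + 2 = 514 */
	return 0;
}

This base-page-granular pgoff is exactly what the one-line futex.c change below relies on, so that a futex inside a huge page gets a key distinct from its neighbours.
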
diff --git a/kernel/futex.c b/kernel/futex.c
index 324fb85c89049..b3823736af6f9 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -717,7 +717,7 @@ again:

 	key->both.offset |= FUT_OFF_INODE; /* inode-based key */
 	key->shared.i_seq = get_inode_sequence_number(inode);
-	key->shared.pgoff = basepage_index(tail);
+	key->shared.pgoff = page_to_pgoff(tail);
 	rcu_read_unlock();
 }

diff --git a/kernel/kthread.c b/kernel/kthread.c
index 60f54c5a07a46..52b89c582189b 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -952,8 +952,38 @@ void kthread_flush_work(struct kthread_work *work)
 EXPORT_SYMBOL_GPL(kthread_flush_work);

 /*
- * This function removes the work from the worker queue. Also it makes sure
- * that it won't get queued later via the delayed work's timer.
+ * Make sure that the timer is neither set nor running and could
+ * not manipulate the work list_head any longer.
+ *
+ * The function is called under worker->lock. The lock is temporary
+ * released but the timer can't be set again in the meantime.
+ */
+static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
+					      unsigned long *flags)
+{
+	struct kthread_delayed_work *dwork =
+		container_of(work, struct kthread_delayed_work, work);
+	struct kthread_worker *worker = work->worker;
+
+	/*
+	 * del_timer_sync() must be called to make sure that the timer
+	 * callback is not running. The lock must be temporary released
+	 * to avoid a deadlock with the callback. In the meantime,
+	 * any queuing is blocked by setting the canceling counter.
+	 */
+	work->canceling++;
+	spin_unlock_irqrestore(&worker->lock, *flags);
+	del_timer_sync(&dwork->timer);
+	spin_lock_irqsave(&worker->lock, *flags);
+	work->canceling--;
+}
+
+/*
+ * This function removes the work from the worker queue.
+ *
+ * It is called under worker->lock. The caller must make sure that
+ * the timer used by delayed work is not running, e.g. by calling
+ * kthread_cancel_delayed_work_timer().
  *
  * The work might still be in use when this function finishes. See the
  * current_work proceed by the worker.
@@ -961,28 +991,8 @@ EXPORT_SYMBOL_GPL(kthread_flush_work);
  * Return: %true if @work was pending and successfully canceled,
  *	%false if @work was not pending
  */
-static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
-				  unsigned long *flags)
+static bool __kthread_cancel_work(struct kthread_work *work)
 {
-	/* Try to cancel the timer if exists. */
-	if (is_dwork) {
-		struct kthread_delayed_work *dwork =
-			container_of(work, struct kthread_delayed_work, work);
-		struct kthread_worker *worker = work->worker;
-
-		/*
-		 * del_timer_sync() must be called to make sure that the timer
-		 * callback is not running. The lock must be temporary released
-		 * to avoid a deadlock with the callback. In the meantime,
-		 * any queuing is blocked by setting the canceling counter.
-		 */
-		work->canceling++;
-		spin_unlock_irqrestore(&worker->lock, *flags);
-		del_timer_sync(&dwork->timer);
-		spin_lock_irqsave(&worker->lock, *flags);
-		work->canceling--;
-	}
-
 	/*
 	 * Try to remove the work from a worker list. It might either
 	 * be from worker->work_list or from worker->delayed_work_list.
@@ -1035,11 +1045,23 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 	/* Work must not be used with >1 worker, see kthread_queue_work() */
 	WARN_ON_ONCE(work->worker != worker);

-	/* Do not fight with another command that is canceling this work. */
+	/*
+	 * Temporary cancel the work but do not fight with another command
+	 * that is canceling the work as well.
+	 *
+	 * It is a bit tricky because of possible races with another
+	 * mod_delayed_work() and cancel_delayed_work() callers.
+	 *
+	 * The timer must be canceled first because worker->lock is released
+	 * when doing so. But the work can be removed from the queue (list)
+	 * only when it can be queued again so that the return value can
+	 * be used for reference counting.
+	 */
+	kthread_cancel_delayed_work_timer(work, &flags);
 	if (work->canceling)
 		goto out;
+	ret = __kthread_cancel_work(work);

-	ret = __kthread_cancel_work(work, true, &flags);
 fast_queue:
 	__kthread_queue_delayed_work(worker, dwork, delay);
 out:
@@ -1061,7 +1083,10 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);

-	ret = __kthread_cancel_work(work, is_dwork, &flags);
+	if (is_dwork)
+		kthread_cancel_delayed_work_timer(work, &flags);
+
+	ret = __kthread_cancel_work(work);

 	if (worker->current_work != work)
 		goto out_fast;
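
The kthread hunks split timer cancellation out of __kthread_cancel_work() because del_timer_sync() has to run with worker->lock dropped, while the canceling counter keeps the work from being re-queued in that window. Below is a heavily simplified pthread model of that bump-counter, drop-lock, cancel, retake-lock shape; all names are illustrative, and the real synchronization against a running timer callback is elided (build with -pthread).

#include <pthread.h>
#include <stdio.h>

struct work {
	pthread_mutex_t lock;	/* stands in for worker->lock */
	int canceling;		/* blocks re-queueing while nonzero */
	int queued;
};

/* Would-be timer path: refuses to queue while a cancel is in flight. */
static void queue_from_timer(struct work *w)
{
	pthread_mutex_lock(&w->lock);
	if (!w->canceling)
		w->queued = 1;
	pthread_mutex_unlock(&w->lock);
}

/* Cancel path, mirroring the shape of kthread_cancel_delayed_work_timer():
 * the lock is dropped around the blocking timer cancellation, and the
 * canceling counter keeps queue_from_timer() from re-arming meanwhile. */
static void cancel_work(struct work *w)
{
	pthread_mutex_lock(&w->lock);
	w->canceling++;
	pthread_mutex_unlock(&w->lock);

	/* ... the del_timer_sync() equivalent would run here, lock not held ... */

	pthread_mutex_lock(&w->lock);
	w->canceling--;
	w->queued = 0;		/* the __kthread_cancel_work() part: dequeue */
	pthread_mutex_unlock(&w->lock);
}

int main(void)
{
	struct work w = { PTHREAD_MUTEX_INITIALIZER, 0, 1 };

	cancel_work(&w);
	queue_from_timer(&w);	/* allowed again once canceling drops to 0 */
	printf("queued=%d canceling=%d\n", w.queued, w.canceling);
	return 0;
}
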
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 14cd0ef33b628..177ca028b9868 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1891,7 +1891,7 @@ static void unmap_page(struct page *page)
 {
 	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
 		TTU_RMAP_LOCKED;
-	int i, ret;
+	int i;

 	VM_BUG_ON_PAGE(!PageHead(page), page);

@@ -1899,15 +1899,16 @@ static void unmap_page(struct page *page)
 		ttu_flags |= TTU_MIGRATION;

 	/* We only need TTU_SPLIT_HUGE_PMD once */
-	ret = try_to_unmap(page, ttu_flags | TTU_SPLIT_HUGE_PMD);
-	for (i = 1; !ret && i < HPAGE_PMD_NR; i++) {
+	try_to_unmap(page, ttu_flags | TTU_SPLIT_HUGE_PMD);
+	for (i = 1; i < HPAGE_PMD_NR; i++) {
 		/* Cut short if the page is unmapped */
 		if (page_count(page) == 1)
 			return;

-		ret = try_to_unmap(page + i, ttu_flags);
+		try_to_unmap(page + i, ttu_flags);
 	}
-	VM_BUG_ON_PAGE(ret, page + i - 1);
+
+	VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
 }

 static void remap_page(struct page *page)
@@ -2137,7 +2138,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
 	struct anon_vma *anon_vma = NULL;
 	struct address_space *mapping = NULL;
-	int count, mapcount, extra_pins, ret;
+	int extra_pins, ret;
 	bool mlocked;
 	unsigned long flags;
 	pgoff_t end;
@@ -2200,7 +2201,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)

 	mlocked = PageMlocked(page);
 	unmap_page(head);
-	VM_BUG_ON_PAGE(compound_mapcount(head), head);

 	/* Make sure the page is not on per-CPU pagevec as it takes pin */
 	if (mlocked)
@@ -2226,9 +2226,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)

 	/* Prevent deferred_split_scan() touching ->_refcount */
 	spin_lock(&pgdata->split_queue_lock);
-	count = page_count(head);
-	mapcount = total_mapcount(head);
-	if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
+	if (page_ref_freeze(head, 1 + extra_pins)) {
 		if (!list_empty(page_deferred_list(head))) {
 			pgdata->split_queue_len--;
 			list_del(page_deferred_list(head));
@@ -2239,16 +2237,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		__split_huge_page(page, list, end, flags);
 		ret = 0;
 	} else {
-		if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
-			pr_alert("total_mapcount: %u, page_count(): %u\n",
-					mapcount, count);
-			if (PageTail(page))
-				dump_page(head, NULL);
-			dump_page(page, "total_mapcount(head) > 0");
-			BUG();
-		}
 		spin_unlock(&pgdata->split_queue_lock);
-fail:		if (mapping)
+fail:
+		if (mapping)
 			spin_unlock(&mapping->tree_lock);
 		spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
 		remap_page(head);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b7215b0807ca6..de89e9295f6c5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1380,15 +1380,12 @@ int PageHeadHuge(struct page *page_head)
 	return get_compound_page_dtor(page_head) == free_huge_page;
 }

-pgoff_t __basepage_index(struct page *page)
+pgoff_t hugetlb_basepage_index(struct page *page)
 {
 	struct page *page_head = compound_head(page);
 	pgoff_t index = page_index(page_head);
 	unsigned long compound_idx;

-	if (!PageHuge(page_head))
-		return page_index(page);
-
 	if (compound_order(page_head) >= MAX_ORDER)
 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
 	else