Contents of /trunk/kernel-magellan/patches-4.20/0101-4.20.2-all-fixes.patch
Revision 3278
Mon Mar 4 10:35:50 2019 UTC (5 years, 6 months ago) by niro
File size: 101256 byte(s)
linux-4.20.2
1 | diff --git a/Makefile b/Makefile |
2 | index 84d2f8deea30..4ba3dd0bf35d 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 4 |
8 | PATCHLEVEL = 20 |
9 | -SUBLEVEL = 1 |
10 | +SUBLEVEL = 2 |
11 | EXTRAVERSION = |
12 | NAME = Shy Crocodile |
13 | |
14 | diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c |
15 | index 6327fd79b0fb..fd59fef9931b 100644 |
16 | --- a/arch/powerpc/kernel/signal_32.c |
17 | +++ b/arch/powerpc/kernel/signal_32.c |
18 | @@ -848,7 +848,23 @@ static long restore_tm_user_regs(struct pt_regs *regs, |
19 | /* If TM bits are set to the reserved value, it's an invalid context */ |
20 | if (MSR_TM_RESV(msr_hi)) |
21 | return 1; |
22 | - /* Pull in the MSR TM bits from the user context */ |
23 | + |
24 | + /* |
25 | + * Disabling preemption, since it is unsafe to be preempted |
26 | + * with MSR[TS] set without recheckpointing. |
27 | + */ |
28 | + preempt_disable(); |
29 | + |
30 | + /* |
31 | + * CAUTION: |
32 | + * After regs->MSR[TS] being updated, make sure that get_user(), |
33 | + * put_user() or similar functions are *not* called. These |
34 | + * functions can generate page faults which will cause the process |
35 | + * to be de-scheduled with MSR[TS] set but without calling |
36 | + * tm_recheckpoint(). This can cause a bug. |
37 | + * |
38 | + * Pull in the MSR TM bits from the user context |
39 | + */ |
40 | regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK); |
41 | /* Now, recheckpoint. This loads up all of the checkpointed (older) |
42 | * registers, including FP and V[S]Rs. After recheckpointing, the |
43 | @@ -873,6 +889,8 @@ static long restore_tm_user_regs(struct pt_regs *regs, |
44 | } |
45 | #endif |
46 | |
47 | + preempt_enable(); |
48 | + |
49 | return 0; |
50 | } |
51 | #endif |
52 | @@ -1140,11 +1158,11 @@ SYSCALL_DEFINE0(rt_sigreturn) |
53 | { |
54 | struct rt_sigframe __user *rt_sf; |
55 | struct pt_regs *regs = current_pt_regs(); |
56 | - int tm_restore = 0; |
57 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
58 | struct ucontext __user *uc_transact; |
59 | unsigned long msr_hi; |
60 | unsigned long tmp; |
61 | + int tm_restore = 0; |
62 | #endif |
63 | /* Always make any pending restarted system calls return -EINTR */ |
64 | current->restart_block.fn = do_no_restart_syscall; |
65 | @@ -1192,19 +1210,11 @@ SYSCALL_DEFINE0(rt_sigreturn) |
66 | goto bad; |
67 | } |
68 | } |
69 | - if (!tm_restore) { |
70 | - /* |
71 | - * Unset regs->msr because ucontext MSR TS is not |
72 | - * set, and recheckpoint was not called. This avoid |
73 | - * hitting a TM Bad thing at RFID |
74 | - */ |
75 | - regs->msr &= ~MSR_TS_MASK; |
76 | - } |
77 | - /* Fall through, for non-TM restore */ |
78 | -#endif |
79 | if (!tm_restore) |
80 | - if (do_setcontext(&rt_sf->uc, regs, 1)) |
81 | - goto bad; |
82 | + /* Fall through, for non-TM restore */ |
83 | +#endif |
84 | + if (do_setcontext(&rt_sf->uc, regs, 1)) |
85 | + goto bad; |
86 | |
87 | /* |
88 | * It's not clear whether or why it is desirable to save the |
89 | diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c |
90 | index daa28cb72272..bbd1c73243d7 100644 |
91 | --- a/arch/powerpc/kernel/signal_64.c |
92 | +++ b/arch/powerpc/kernel/signal_64.c |
93 | @@ -467,20 +467,6 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, |
94 | if (MSR_TM_RESV(msr)) |
95 | return -EINVAL; |
96 | |
97 | - /* pull in MSR TS bits from user context */ |
98 | - regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); |
99 | - |
100 | - /* |
101 | - * Ensure that TM is enabled in regs->msr before we leave the signal |
102 | - * handler. It could be the case that (a) user disabled the TM bit |
103 | - * through the manipulation of the MSR bits in uc_mcontext or (b) the |
104 | - * TM bit was disabled because a sufficient number of context switches |
105 | - * happened whilst in the signal handler and load_tm overflowed, |
106 | - * disabling the TM bit. In either case we can end up with an illegal |
107 | - * TM state leading to a TM Bad Thing when we return to userspace. |
108 | - */ |
109 | - regs->msr |= MSR_TM; |
110 | - |
111 | /* pull in MSR LE from user context */ |
112 | regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); |
113 | |
114 | @@ -572,6 +558,34 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, |
115 | tm_enable(); |
116 | /* Make sure the transaction is marked as failed */ |
117 | tsk->thread.tm_texasr |= TEXASR_FS; |
118 | + |
119 | + /* |
120 | + * Disabling preemption, since it is unsafe to be preempted |
121 | + * with MSR[TS] set without recheckpointing. |
122 | + */ |
123 | + preempt_disable(); |
124 | + |
125 | + /* pull in MSR TS bits from user context */ |
126 | + regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); |
127 | + |
128 | + /* |
129 | + * Ensure that TM is enabled in regs->msr before we leave the signal |
130 | + * handler. It could be the case that (a) user disabled the TM bit |
131 | + * through the manipulation of the MSR bits in uc_mcontext or (b) the |
132 | + * TM bit was disabled because a sufficient number of context switches |
133 | + * happened whilst in the signal handler and load_tm overflowed, |
134 | + * disabling the TM bit. In either case we can end up with an illegal |
135 | + * TM state leading to a TM Bad Thing when we return to userspace. |
136 | + * |
137 | + * CAUTION: |
138 | + * After regs->MSR[TS] being updated, make sure that get_user(), |
139 | + * put_user() or similar functions are *not* called. These |
140 | + * functions can generate page faults which will cause the process |
141 | + * to be de-scheduled with MSR[TS] set but without calling |
142 | + * tm_recheckpoint(). This can cause a bug. |
143 | + */ |
144 | + regs->msr |= MSR_TM; |
145 | + |
146 | /* This loads the checkpointed FP/VEC state, if used */ |
147 | tm_recheckpoint(&tsk->thread); |
148 | |
149 | @@ -585,6 +599,8 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, |
150 | regs->msr |= MSR_VEC; |
151 | } |
152 | |
153 | + preempt_enable(); |
154 | + |
155 | return err; |
156 | } |
157 | #endif |
158 | @@ -740,23 +756,11 @@ SYSCALL_DEFINE0(rt_sigreturn) |
159 | &uc_transact->uc_mcontext)) |
160 | goto badframe; |
161 | } |
162 | -#endif |
163 | + else |
164 | /* Fall through, for non-TM restore */ |
165 | - if (!MSR_TM_ACTIVE(msr)) { |
166 | - /* |
167 | - * Unset MSR[TS] on the thread regs since MSR from user |
168 | - * context does not have MSR active, and recheckpoint was |
169 | - * not called since restore_tm_sigcontexts() was not called |
170 | - * also. |
171 | - * |
172 | - * If not unsetting it, the code can RFID to userspace with |
173 | - * MSR[TS] set, but without CPU in the proper state, |
174 | - * causing a TM bad thing. |
175 | - */ |
176 | - current->thread.regs->msr &= ~MSR_TS_MASK; |
177 | - if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext)) |
178 | - goto badframe; |
179 | - } |
180 | +#endif |
181 | + if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext)) |
182 | + goto badframe; |
183 | |
184 | if (restore_altstack(&uc->uc_stack)) |
185 | goto badframe; |
186 | diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c |
187 | index f5bbd4563342..3632de52db0a 100644 |
188 | --- a/arch/powerpc/platforms/4xx/ocm.c |
189 | +++ b/arch/powerpc/platforms/4xx/ocm.c |
190 | @@ -179,7 +179,7 @@ static void __init ocm_init_node(int count, struct device_node *node) |
191 | /* ioremap the non-cached region */ |
192 | if (ocm->nc.memtotal) { |
193 | ocm->nc.virt = __ioremap(ocm->nc.phys, ocm->nc.memtotal, |
194 | - _PAGE_EXEC | PAGE_KERNEL_NCG); |
195 | + _PAGE_EXEC | pgprot_val(PAGE_KERNEL_NCG)); |
196 | |
197 | if (!ocm->nc.virt) { |
198 | printk(KERN_ERR |
199 | @@ -194,7 +194,7 @@ static void __init ocm_init_node(int count, struct device_node *node) |
200 | |
201 | if (ocm->c.memtotal) { |
202 | ocm->c.virt = __ioremap(ocm->c.phys, ocm->c.memtotal, |
203 | - _PAGE_EXEC | PAGE_KERNEL); |
204 | + _PAGE_EXEC | pgprot_val(PAGE_KERNEL)); |
205 | |
206 | if (!ocm->c.virt) { |
207 | printk(KERN_ERR |
208 | diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c |
209 | index 29bfe8017a2d..da1de190a3b1 100644 |
210 | --- a/block/blk-mq-sched.c |
211 | +++ b/block/blk-mq-sched.c |
212 | @@ -54,13 +54,14 @@ void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio) |
213 | * Mark a hardware queue as needing a restart. For shared queues, maintain |
214 | * a count of how many hardware queues are marked for restart. |
215 | */ |
216 | -static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) |
217 | +void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) |
218 | { |
219 | if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) |
220 | return; |
221 | |
222 | set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); |
223 | } |
224 | +EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx); |
225 | |
226 | void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) |
227 | { |
228 | diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h |
229 | index 8a9544203173..38e06e23821f 100644 |
230 | --- a/block/blk-mq-sched.h |
231 | +++ b/block/blk-mq-sched.h |
232 | @@ -15,6 +15,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, |
233 | struct request **merged_request); |
234 | bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio); |
235 | bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq); |
236 | +void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx); |
237 | void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx); |
238 | |
239 | void blk_mq_sched_insert_request(struct request *rq, bool at_head, |
240 | diff --git a/block/blk-stat.h b/block/blk-stat.h |
241 | index f4a1568e81a4..17b47a86eefb 100644 |
242 | --- a/block/blk-stat.h |
243 | +++ b/block/blk-stat.h |
244 | @@ -145,6 +145,11 @@ static inline void blk_stat_activate_nsecs(struct blk_stat_callback *cb, |
245 | mod_timer(&cb->timer, jiffies + nsecs_to_jiffies(nsecs)); |
246 | } |
247 | |
248 | +static inline void blk_stat_deactivate(struct blk_stat_callback *cb) |
249 | +{ |
250 | + del_timer_sync(&cb->timer); |
251 | +} |
252 | + |
253 | /** |
254 | * blk_stat_activate_msecs() - Gather block statistics during a time window in |
255 | * milliseconds. |
256 | diff --git a/block/blk-wbt.c b/block/blk-wbt.c |
257 | index 8ac93fcbaa2e..0c62bf4eca75 100644 |
258 | --- a/block/blk-wbt.c |
259 | +++ b/block/blk-wbt.c |
260 | @@ -760,8 +760,10 @@ void wbt_disable_default(struct request_queue *q) |
261 | if (!rqos) |
262 | return; |
263 | rwb = RQWB(rqos); |
264 | - if (rwb->enable_state == WBT_STATE_ON_DEFAULT) |
265 | + if (rwb->enable_state == WBT_STATE_ON_DEFAULT) { |
266 | + blk_stat_deactivate(rwb->cb); |
267 | rwb->wb_normal = 0; |
268 | + } |
269 | } |
270 | EXPORT_SYMBOL_GPL(wbt_disable_default); |
271 | |
272 | diff --git a/block/mq-deadline.c b/block/mq-deadline.c |
273 | index 099a9e05854c..d5e21ce44d2c 100644 |
274 | --- a/block/mq-deadline.c |
275 | +++ b/block/mq-deadline.c |
276 | @@ -373,9 +373,16 @@ done: |
277 | |
278 | /* |
279 | * One confusing aspect here is that we get called for a specific |
280 | - * hardware queue, but we return a request that may not be for a |
281 | + * hardware queue, but we may return a request that is for a |
282 | * different hardware queue. This is because mq-deadline has shared |
283 | * state for all hardware queues, in terms of sorting, FIFOs, etc. |
284 | + * |
285 | + * For a zoned block device, __dd_dispatch_request() may return NULL |
286 | + * if all the queued write requests are directed at zones that are already |
287 | + * locked due to on-going write requests. In this case, make sure to mark |
288 | + * the queue as needing a restart to ensure that the queue is run again |
289 | + * and the pending writes dispatched once the target zones for the ongoing |
290 | + * write requests are unlocked in dd_finish_request(). |
291 | */ |
292 | static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) |
293 | { |
294 | @@ -384,6 +391,9 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) |
295 | |
296 | spin_lock(&dd->lock); |
297 | rq = __dd_dispatch_request(dd); |
298 | + if (!rq && blk_queue_is_zoned(hctx->queue) && |
299 | + !list_empty(&dd->fifo_list[WRITE])) |
300 | + blk_mq_sched_mark_restart_hctx(hctx); |
301 | spin_unlock(&dd->lock); |
302 | |
303 | return rq; |
304 | diff --git a/drivers/base/dd.c b/drivers/base/dd.c |
305 | index 169412ee4ae8..dbba123e058d 100644 |
306 | --- a/drivers/base/dd.c |
307 | +++ b/drivers/base/dd.c |
308 | @@ -933,11 +933,11 @@ static void __device_release_driver(struct device *dev, struct device *parent) |
309 | |
310 | while (device_links_busy(dev)) { |
311 | device_unlock(dev); |
312 | - if (parent) |
313 | + if (parent && dev->bus->need_parent_lock) |
314 | device_unlock(parent); |
315 | |
316 | device_links_unbind_consumers(dev); |
317 | - if (parent) |
318 | + if (parent && dev->bus->need_parent_lock) |
319 | device_lock(parent); |
320 | |
321 | device_lock(dev); |
322 | diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c |
323 | index 4879595200e1..8e6a0db6555f 100644 |
324 | --- a/drivers/block/zram/zram_drv.c |
325 | +++ b/drivers/block/zram/zram_drv.c |
326 | @@ -382,8 +382,10 @@ static ssize_t backing_dev_store(struct device *dev, |
327 | |
328 | bdev = bdgrab(I_BDEV(inode)); |
329 | err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram); |
330 | - if (err < 0) |
331 | + if (err < 0) { |
332 | + bdev = NULL; |
333 | goto out; |
334 | + } |
335 | |
336 | nr_pages = i_size_read(inode) >> PAGE_SHIFT; |
337 | bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long); |
338 | diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c |
339 | index 99e2aace8078..2c1f459c0c63 100644 |
340 | --- a/drivers/dax/pmem.c |
341 | +++ b/drivers/dax/pmem.c |
342 | @@ -48,9 +48,8 @@ static void dax_pmem_percpu_exit(void *data) |
343 | percpu_ref_exit(ref); |
344 | } |
345 | |
346 | -static void dax_pmem_percpu_kill(void *data) |
347 | +static void dax_pmem_percpu_kill(struct percpu_ref *ref) |
348 | { |
349 | - struct percpu_ref *ref = data; |
350 | struct dax_pmem *dax_pmem = to_dax_pmem(ref); |
351 | |
352 | dev_dbg(dax_pmem->dev, "trace\n"); |
353 | @@ -112,17 +111,10 @@ static int dax_pmem_probe(struct device *dev) |
354 | } |
355 | |
356 | dax_pmem->pgmap.ref = &dax_pmem->ref; |
357 | + dax_pmem->pgmap.kill = dax_pmem_percpu_kill; |
358 | addr = devm_memremap_pages(dev, &dax_pmem->pgmap); |
359 | - if (IS_ERR(addr)) { |
360 | - devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref); |
361 | - percpu_ref_exit(&dax_pmem->ref); |
362 | + if (IS_ERR(addr)) |
363 | return PTR_ERR(addr); |
364 | - } |
365 | - |
366 | - rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill, |
367 | - &dax_pmem->ref); |
368 | - if (rc) |
369 | - return rc; |
370 | |
371 | /* adjust the dax_region resource to the start of data */ |
372 | memcpy(&res, &dax_pmem->pgmap.res, sizeof(res)); |
373 | diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c |
374 | index db1bf7f88c1f..e0e6d66de745 100644 |
375 | --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c |
376 | +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c |
377 | @@ -1262,8 +1262,16 @@ nv50_mstm_fini(struct nv50_mstm *mstm) |
378 | static void |
379 | nv50_mstm_init(struct nv50_mstm *mstm) |
380 | { |
381 | - if (mstm && mstm->mgr.mst_state) |
382 | - drm_dp_mst_topology_mgr_resume(&mstm->mgr); |
383 | + int ret; |
384 | + |
385 | + if (!mstm || !mstm->mgr.mst_state) |
386 | + return; |
387 | + |
388 | + ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr); |
389 | + if (ret == -1) { |
390 | + drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false); |
391 | + drm_kms_helper_hotplug_event(mstm->mgr.dev); |
392 | + } |
393 | } |
394 | |
395 | static void |
396 | diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c |
397 | index 79d00d861a31..01ff3c858875 100644 |
398 | --- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c |
399 | +++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c |
400 | @@ -189,12 +189,14 @@ EXPORT_SYMBOL(rockchip_drm_psr_flush_all); |
401 | int rockchip_drm_psr_register(struct drm_encoder *encoder, |
402 | int (*psr_set)(struct drm_encoder *, bool enable)) |
403 | { |
404 | - struct rockchip_drm_private *drm_drv = encoder->dev->dev_private; |
405 | + struct rockchip_drm_private *drm_drv; |
406 | struct psr_drv *psr; |
407 | |
408 | if (!encoder || !psr_set) |
409 | return -EINVAL; |
410 | |
411 | + drm_drv = encoder->dev->dev_private; |
412 | + |
413 | psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL); |
414 | if (!psr) |
415 | return -ENOMEM; |
416 | diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c |
417 | index c6635f23918a..ae6254b0b1ae 100644 |
418 | --- a/drivers/gpu/drm/vc4/vc4_plane.c |
419 | +++ b/drivers/gpu/drm/vc4/vc4_plane.c |
420 | @@ -321,6 +321,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) |
421 | if (vc4_state->is_unity) |
422 | vc4_state->x_scaling[0] = VC4_SCALING_PPF; |
423 | } else { |
424 | + vc4_state->is_yuv = false; |
425 | vc4_state->x_scaling[1] = VC4_SCALING_NONE; |
426 | vc4_state->y_scaling[1] = VC4_SCALING_NONE; |
427 | } |
428 | diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c |
429 | index d293e55553bd..ba7aaf421f36 100644 |
430 | --- a/drivers/hwtracing/intel_th/msu.c |
431 | +++ b/drivers/hwtracing/intel_th/msu.c |
432 | @@ -1423,7 +1423,8 @@ nr_pages_store(struct device *dev, struct device_attribute *attr, |
433 | if (!end) |
434 | break; |
435 | |
436 | - len -= end - p; |
437 | + /* consume the number and the following comma, hence +1 */ |
438 | + len -= end - p + 1; |
439 | p = end + 1; |
440 | } while (len); |
441 | |
442 | diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c |
443 | index 0910ec807187..4b9e44b227d8 100644 |
444 | --- a/drivers/hwtracing/stm/policy.c |
445 | +++ b/drivers/hwtracing/stm/policy.c |
446 | @@ -440,10 +440,8 @@ stp_policy_make(struct config_group *group, const char *name) |
447 | |
448 | stm->policy = kzalloc(sizeof(*stm->policy), GFP_KERNEL); |
449 | if (!stm->policy) { |
450 | - mutex_unlock(&stm->policy_mutex); |
451 | - stm_put_protocol(pdrv); |
452 | - stm_put_device(stm); |
453 | - return ERR_PTR(-ENOMEM); |
454 | + ret = ERR_PTR(-ENOMEM); |
455 | + goto unlock_policy; |
456 | } |
457 | |
458 | config_group_init_type_name(&stm->policy->group, name, |
459 | @@ -458,7 +456,11 @@ unlock_policy: |
460 | mutex_unlock(&stm->policy_mutex); |
461 | |
462 | if (IS_ERR(ret)) { |
463 | - stm_put_protocol(stm->pdrv); |
464 | + /* |
465 | + * pdrv and stm->pdrv at this point can be quite different, |
466 | + * and only one of them needs to be 'put' |
467 | + */ |
468 | + stm_put_protocol(pdrv); |
469 | stm_put_device(stm); |
470 | } |
471 | |
472 | diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c |
473 | index f9af6b082916..6a866cc187f7 100644 |
474 | --- a/drivers/iio/adc/qcom-spmi-adc5.c |
475 | +++ b/drivers/iio/adc/qcom-spmi-adc5.c |
476 | @@ -423,6 +423,7 @@ struct adc5_channels { |
477 | enum vadc_scale_fn_type scale_fn_type; |
478 | }; |
479 | |
480 | +/* In these definitions, _pre refers to an index into adc5_prescale_ratios. */ |
481 | #define ADC5_CHAN(_dname, _type, _mask, _pre, _scale) \ |
482 | { \ |
483 | .datasheet_name = _dname, \ |
484 | @@ -443,63 +444,63 @@ struct adc5_channels { |
485 | _pre, _scale) \ |
486 | |
487 | static const struct adc5_channels adc5_chans_pmic[ADC5_MAX_CHANNEL] = { |
488 | - [ADC5_REF_GND] = ADC5_CHAN_VOLT("ref_gnd", 1, |
489 | + [ADC5_REF_GND] = ADC5_CHAN_VOLT("ref_gnd", 0, |
490 | SCALE_HW_CALIB_DEFAULT) |
491 | - [ADC5_1P25VREF] = ADC5_CHAN_VOLT("vref_1p25", 1, |
492 | + [ADC5_1P25VREF] = ADC5_CHAN_VOLT("vref_1p25", 0, |
493 | SCALE_HW_CALIB_DEFAULT) |
494 | - [ADC5_VPH_PWR] = ADC5_CHAN_VOLT("vph_pwr", 3, |
495 | + [ADC5_VPH_PWR] = ADC5_CHAN_VOLT("vph_pwr", 1, |
496 | SCALE_HW_CALIB_DEFAULT) |
497 | - [ADC5_VBAT_SNS] = ADC5_CHAN_VOLT("vbat_sns", 3, |
498 | + [ADC5_VBAT_SNS] = ADC5_CHAN_VOLT("vbat_sns", 1, |
499 | SCALE_HW_CALIB_DEFAULT) |
500 | - [ADC5_DIE_TEMP] = ADC5_CHAN_TEMP("die_temp", 1, |
501 | + [ADC5_DIE_TEMP] = ADC5_CHAN_TEMP("die_temp", 0, |
502 | SCALE_HW_CALIB_PMIC_THERM) |
503 | - [ADC5_USB_IN_I] = ADC5_CHAN_VOLT("usb_in_i_uv", 1, |
504 | + [ADC5_USB_IN_I] = ADC5_CHAN_VOLT("usb_in_i_uv", 0, |
505 | SCALE_HW_CALIB_DEFAULT) |
506 | - [ADC5_USB_IN_V_16] = ADC5_CHAN_VOLT("usb_in_v_div_16", 16, |
507 | + [ADC5_USB_IN_V_16] = ADC5_CHAN_VOLT("usb_in_v_div_16", 8, |
508 | SCALE_HW_CALIB_DEFAULT) |
509 | - [ADC5_CHG_TEMP] = ADC5_CHAN_TEMP("chg_temp", 1, |
510 | + [ADC5_CHG_TEMP] = ADC5_CHAN_TEMP("chg_temp", 0, |
511 | SCALE_HW_CALIB_PM5_CHG_TEMP) |
512 | /* Charger prescales SBUx and MID_CHG to fit within 1.8V upper unit */ |
513 | - [ADC5_SBUx] = ADC5_CHAN_VOLT("chg_sbux", 3, |
514 | + [ADC5_SBUx] = ADC5_CHAN_VOLT("chg_sbux", 1, |
515 | SCALE_HW_CALIB_DEFAULT) |
516 | - [ADC5_MID_CHG_DIV6] = ADC5_CHAN_VOLT("chg_mid_chg", 6, |
517 | + [ADC5_MID_CHG_DIV6] = ADC5_CHAN_VOLT("chg_mid_chg", 3, |
518 | SCALE_HW_CALIB_DEFAULT) |
519 | - [ADC5_XO_THERM_100K_PU] = ADC5_CHAN_TEMP("xo_therm", 1, |
520 | + [ADC5_XO_THERM_100K_PU] = ADC5_CHAN_TEMP("xo_therm", 0, |
521 | SCALE_HW_CALIB_XOTHERM) |
522 | - [ADC5_AMUX_THM1_100K_PU] = ADC5_CHAN_TEMP("amux_thm1_100k_pu", 1, |
523 | + [ADC5_AMUX_THM1_100K_PU] = ADC5_CHAN_TEMP("amux_thm1_100k_pu", 0, |
524 | SCALE_HW_CALIB_THERM_100K_PULLUP) |
525 | - [ADC5_AMUX_THM2_100K_PU] = ADC5_CHAN_TEMP("amux_thm2_100k_pu", 1, |
526 | + [ADC5_AMUX_THM2_100K_PU] = ADC5_CHAN_TEMP("amux_thm2_100k_pu", 0, |
527 | SCALE_HW_CALIB_THERM_100K_PULLUP) |
528 | - [ADC5_AMUX_THM3_100K_PU] = ADC5_CHAN_TEMP("amux_thm3_100k_pu", 1, |
529 | + [ADC5_AMUX_THM3_100K_PU] = ADC5_CHAN_TEMP("amux_thm3_100k_pu", 0, |
530 | SCALE_HW_CALIB_THERM_100K_PULLUP) |
531 | - [ADC5_AMUX_THM2] = ADC5_CHAN_TEMP("amux_thm2", 1, |
532 | + [ADC5_AMUX_THM2] = ADC5_CHAN_TEMP("amux_thm2", 0, |
533 | SCALE_HW_CALIB_PM5_SMB_TEMP) |
534 | }; |
535 | |
536 | static const struct adc5_channels adc5_chans_rev2[ADC5_MAX_CHANNEL] = { |
537 | - [ADC5_REF_GND] = ADC5_CHAN_VOLT("ref_gnd", 1, |
538 | + [ADC5_REF_GND] = ADC5_CHAN_VOLT("ref_gnd", 0, |
539 | SCALE_HW_CALIB_DEFAULT) |
540 | - [ADC5_1P25VREF] = ADC5_CHAN_VOLT("vref_1p25", 1, |
541 | + [ADC5_1P25VREF] = ADC5_CHAN_VOLT("vref_1p25", 0, |
542 | SCALE_HW_CALIB_DEFAULT) |
543 | - [ADC5_VPH_PWR] = ADC5_CHAN_VOLT("vph_pwr", 3, |
544 | + [ADC5_VPH_PWR] = ADC5_CHAN_VOLT("vph_pwr", 1, |
545 | SCALE_HW_CALIB_DEFAULT) |
546 | - [ADC5_VBAT_SNS] = ADC5_CHAN_VOLT("vbat_sns", 3, |
547 | + [ADC5_VBAT_SNS] = ADC5_CHAN_VOLT("vbat_sns", 1, |
548 | SCALE_HW_CALIB_DEFAULT) |
549 | - [ADC5_VCOIN] = ADC5_CHAN_VOLT("vcoin", 3, |
550 | + [ADC5_VCOIN] = ADC5_CHAN_VOLT("vcoin", 1, |
551 | SCALE_HW_CALIB_DEFAULT) |
552 | - [ADC5_DIE_TEMP] = ADC5_CHAN_TEMP("die_temp", 1, |
553 | + [ADC5_DIE_TEMP] = ADC5_CHAN_TEMP("die_temp", 0, |
554 | SCALE_HW_CALIB_PMIC_THERM) |
555 | - [ADC5_AMUX_THM1_100K_PU] = ADC5_CHAN_TEMP("amux_thm1_100k_pu", 1, |
556 | + [ADC5_AMUX_THM1_100K_PU] = ADC5_CHAN_TEMP("amux_thm1_100k_pu", 0, |
557 | SCALE_HW_CALIB_THERM_100K_PULLUP) |
558 | - [ADC5_AMUX_THM2_100K_PU] = ADC5_CHAN_TEMP("amux_thm2_100k_pu", 1, |
559 | + [ADC5_AMUX_THM2_100K_PU] = ADC5_CHAN_TEMP("amux_thm2_100k_pu", 0, |
560 | SCALE_HW_CALIB_THERM_100K_PULLUP) |
561 | - [ADC5_AMUX_THM3_100K_PU] = ADC5_CHAN_TEMP("amux_thm3_100k_pu", 1, |
562 | + [ADC5_AMUX_THM3_100K_PU] = ADC5_CHAN_TEMP("amux_thm3_100k_pu", 0, |
563 | SCALE_HW_CALIB_THERM_100K_PULLUP) |
564 | - [ADC5_AMUX_THM4_100K_PU] = ADC5_CHAN_TEMP("amux_thm4_100k_pu", 1, |
565 | + [ADC5_AMUX_THM4_100K_PU] = ADC5_CHAN_TEMP("amux_thm4_100k_pu", 0, |
566 | SCALE_HW_CALIB_THERM_100K_PULLUP) |
567 | - [ADC5_AMUX_THM5_100K_PU] = ADC5_CHAN_TEMP("amux_thm5_100k_pu", 1, |
568 | + [ADC5_AMUX_THM5_100K_PU] = ADC5_CHAN_TEMP("amux_thm5_100k_pu", 0, |
569 | SCALE_HW_CALIB_THERM_100K_PULLUP) |
570 | - [ADC5_XO_THERM_100K_PU] = ADC5_CHAN_TEMP("xo_therm_100k_pu", 1, |
571 | + [ADC5_XO_THERM_100K_PU] = ADC5_CHAN_TEMP("xo_therm_100k_pu", 0, |
572 | SCALE_HW_CALIB_THERM_100K_PULLUP) |
573 | }; |
574 | |
575 | @@ -558,6 +559,9 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc, |
576 | return ret; |
577 | } |
578 | prop->prescale = ret; |
579 | + } else { |
580 | + prop->prescale = |
581 | + adc->data->adc_chans[prop->channel].prescale_index; |
582 | } |
583 | |
584 | ret = of_property_read_u32(node, "qcom,hw-settle-time", &value); |
585 | diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c |
586 | index 0e134b13967a..eae740fceed9 100644 |
587 | --- a/drivers/iio/dac/ad5686.c |
588 | +++ b/drivers/iio/dac/ad5686.c |
589 | @@ -124,7 +124,8 @@ static int ad5686_read_raw(struct iio_dev *indio_dev, |
590 | mutex_unlock(&indio_dev->mlock); |
591 | if (ret < 0) |
592 | return ret; |
593 | - *val = ret; |
594 | + *val = (ret >> chan->scan_type.shift) & |
595 | + GENMASK(chan->scan_type.realbits - 1, 0); |
596 | return IIO_VAL_INT; |
597 | case IIO_CHAN_INFO_SCALE: |
598 | *val = st->vref_mv; |
599 | diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c |
600 | index ba668d49c751..476abc74178e 100644 |
601 | --- a/drivers/infiniband/core/iwcm.c |
602 | +++ b/drivers/infiniband/core/iwcm.c |
603 | @@ -502,17 +502,21 @@ static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr, |
604 | */ |
605 | static int iw_cm_map(struct iw_cm_id *cm_id, bool active) |
606 | { |
607 | + const char *devname = dev_name(&cm_id->device->dev); |
608 | + const char *ifname = cm_id->device->iwcm->ifname; |
609 | struct iwpm_dev_data pm_reg_msg; |
610 | struct iwpm_sa_data pm_msg; |
611 | int status; |
612 | |
613 | + if (strlen(devname) >= sizeof(pm_reg_msg.dev_name) || |
614 | + strlen(ifname) >= sizeof(pm_reg_msg.if_name)) |
615 | + return -EINVAL; |
616 | + |
617 | cm_id->m_local_addr = cm_id->local_addr; |
618 | cm_id->m_remote_addr = cm_id->remote_addr; |
619 | |
620 | - memcpy(pm_reg_msg.dev_name, dev_name(&cm_id->device->dev), |
621 | - sizeof(pm_reg_msg.dev_name)); |
622 | - memcpy(pm_reg_msg.if_name, cm_id->device->iwcm->ifname, |
623 | - sizeof(pm_reg_msg.if_name)); |
624 | + strncpy(pm_reg_msg.dev_name, devname, sizeof(pm_reg_msg.dev_name)); |
625 | + strncpy(pm_reg_msg.if_name, ifname, sizeof(pm_reg_msg.if_name)); |
626 | |
627 | if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) || |
628 | !iwpm_valid_pid()) |
629 | diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c |
630 | index c962160292f4..f0438bc6df88 100644 |
631 | --- a/drivers/infiniband/sw/rxe/rxe_resp.c |
632 | +++ b/drivers/infiniband/sw/rxe/rxe_resp.c |
633 | @@ -844,11 +844,16 @@ static enum resp_states do_complete(struct rxe_qp *qp, |
634 | |
635 | memset(&cqe, 0, sizeof(cqe)); |
636 | |
637 | - wc->wr_id = wqe->wr_id; |
638 | - wc->status = qp->resp.status; |
639 | - wc->qp = &qp->ibqp; |
640 | + if (qp->rcq->is_user) { |
641 | + uwc->status = qp->resp.status; |
642 | + uwc->qp_num = qp->ibqp.qp_num; |
643 | + uwc->wr_id = wqe->wr_id; |
644 | + } else { |
645 | + wc->status = qp->resp.status; |
646 | + wc->qp = &qp->ibqp; |
647 | + wc->wr_id = wqe->wr_id; |
648 | + } |
649 | |
650 | - /* fields after status are not required for errors */ |
651 | if (wc->status == IB_WC_SUCCESS) { |
652 | wc->opcode = (pkt->mask & RXE_IMMDT_MASK && |
653 | pkt->mask & RXE_WRITE_MASK) ? |
654 | diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c |
655 | index 2357aa727dcf..96c767324575 100644 |
656 | --- a/drivers/infiniband/ulp/srpt/ib_srpt.c |
657 | +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c |
658 | @@ -2010,6 +2010,14 @@ static void srpt_free_ch(struct kref *kref) |
659 | kfree_rcu(ch, rcu); |
660 | } |
661 | |
662 | +/* |
663 | + * Shut down the SCSI target session, tell the connection manager to |
664 | + * disconnect the associated RDMA channel, transition the QP to the error |
665 | + * state and remove the channel from the channel list. This function is |
666 | + * typically called from inside srpt_zerolength_write_done(). Concurrent |
667 | + * srpt_zerolength_write() calls from inside srpt_close_ch() are possible |
668 | + * as long as the channel is on sport->nexus_list. |
669 | + */ |
670 | static void srpt_release_channel_work(struct work_struct *w) |
671 | { |
672 | struct srpt_rdma_ch *ch; |
673 | @@ -2037,6 +2045,11 @@ static void srpt_release_channel_work(struct work_struct *w) |
674 | else |
675 | ib_destroy_cm_id(ch->ib_cm.cm_id); |
676 | |
677 | + sport = ch->sport; |
678 | + mutex_lock(&sport->mutex); |
679 | + list_del_rcu(&ch->list); |
680 | + mutex_unlock(&sport->mutex); |
681 | + |
682 | srpt_destroy_ch_ib(ch); |
683 | |
684 | srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, |
685 | @@ -2047,11 +2060,6 @@ static void srpt_release_channel_work(struct work_struct *w) |
686 | sdev, ch->rq_size, |
687 | srp_max_req_size, DMA_FROM_DEVICE); |
688 | |
689 | - sport = ch->sport; |
690 | - mutex_lock(&sport->mutex); |
691 | - list_del_rcu(&ch->list); |
692 | - mutex_unlock(&sport->mutex); |
693 | - |
694 | wake_up(&sport->ch_releaseQ); |
695 | |
696 | kref_put(&ch->kref, srpt_free_ch); |
697 | diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c |
698 | index 41a4b8808802..f3afab82f3ee 100644 |
699 | --- a/drivers/iommu/intel-iommu.c |
700 | +++ b/drivers/iommu/intel-iommu.c |
701 | @@ -2044,7 +2044,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, |
702 | * than default. Unnecessary for PT mode. |
703 | */ |
704 | if (translation != CONTEXT_TT_PASS_THROUGH) { |
705 | - for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { |
706 | + for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { |
707 | ret = -ENOMEM; |
708 | pgd = phys_to_virt(dma_pte_addr(pgd)); |
709 | if (!dma_pte_present(pgd)) |
710 | @@ -2058,7 +2058,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, |
711 | translation = CONTEXT_TT_MULTI_LEVEL; |
712 | |
713 | context_set_address_root(context, virt_to_phys(pgd)); |
714 | - context_set_address_width(context, iommu->agaw); |
715 | + context_set_address_width(context, agaw); |
716 | } else { |
717 | /* |
718 | * In pass through mode, AW must be programmed to |
719 | diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c |
720 | index 9038c302d5c2..44f180e47622 100644 |
721 | --- a/drivers/md/dm-table.c |
722 | +++ b/drivers/md/dm-table.c |
723 | @@ -1927,6 +1927,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, |
724 | */ |
725 | if (blk_queue_is_zoned(q)) |
726 | blk_revalidate_disk_zones(t->md->disk); |
727 | + |
728 | + /* Allow reads to exceed readahead limits */ |
729 | + q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9); |
730 | } |
731 | |
732 | unsigned int dm_table_get_num_targets(struct dm_table *t) |
733 | diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c |
734 | index 39804d830305..fd5c52b21436 100644 |
735 | --- a/drivers/media/pci/cx23885/cx23885-core.c |
736 | +++ b/drivers/media/pci/cx23885/cx23885-core.c |
737 | @@ -23,6 +23,7 @@ |
738 | #include <linux/moduleparam.h> |
739 | #include <linux/kmod.h> |
740 | #include <linux/kernel.h> |
741 | +#include <linux/pci.h> |
742 | #include <linux/slab.h> |
743 | #include <linux/interrupt.h> |
744 | #include <linux/delay.h> |
745 | @@ -41,6 +42,18 @@ MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>"); |
746 | MODULE_LICENSE("GPL"); |
747 | MODULE_VERSION(CX23885_VERSION); |
748 | |
749 | +/* |
750 | + * Some platforms have been found to require periodic resetting of the DMA |
751 | + * engine. Ryzen and XEON platforms are known to be affected. The symptom |
752 | + * encountered is "mpeg risc op code error". Only Ryzen platforms employ |
753 | + * this workaround if the option equals 1. The workaround can be explicitly |
754 | + * disabled for all platforms by setting to 0, the workaround can be forced |
755 | + * on for any platform by setting to 2. |
756 | + */ |
757 | +static unsigned int dma_reset_workaround = 1; |
758 | +module_param(dma_reset_workaround, int, 0644); |
759 | +MODULE_PARM_DESC(dma_reset_workaround, "periodic RiSC dma engine reset; 0-force disable, 1-driver detect (default), 2-force enable"); |
760 | + |
761 | static unsigned int debug; |
762 | module_param(debug, int, 0644); |
763 | MODULE_PARM_DESC(debug, "enable debug messages"); |
764 | @@ -603,8 +616,13 @@ static void cx23885_risc_disasm(struct cx23885_tsport *port, |
765 | |
766 | static void cx23885_clear_bridge_error(struct cx23885_dev *dev) |
767 | { |
768 | - uint32_t reg1_val = cx_read(TC_REQ); /* read-only */ |
769 | - uint32_t reg2_val = cx_read(TC_REQ_SET); |
770 | + uint32_t reg1_val, reg2_val; |
771 | + |
772 | + if (!dev->need_dma_reset) |
773 | + return; |
774 | + |
775 | + reg1_val = cx_read(TC_REQ); /* read-only */ |
776 | + reg2_val = cx_read(TC_REQ_SET); |
777 | |
778 | if (reg1_val && reg2_val) { |
779 | cx_write(TC_REQ, reg1_val); |
780 | @@ -2058,6 +2076,37 @@ void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput) |
781 | /* TODO: 23-19 */ |
782 | } |
783 | |
784 | +static struct { |
785 | + int vendor, dev; |
786 | +} const broken_dev_id[] = { |
787 | + /* According with |
788 | + * https://openbenchmarking.org/system/1703021-RI-AMDZEN08075/Ryzen%207%201800X/lspci, |
789 | + * 0x1451 is PCI ID for the IOMMU found on Ryzen |
790 | + */ |
791 | + { PCI_VENDOR_ID_AMD, 0x1451 }, |
792 | +}; |
793 | + |
794 | +static bool cx23885_does_need_dma_reset(void) |
795 | +{ |
796 | + int i; |
797 | + struct pci_dev *pdev = NULL; |
798 | + |
799 | + if (dma_reset_workaround == 0) |
800 | + return false; |
801 | + else if (dma_reset_workaround == 2) |
802 | + return true; |
803 | + |
804 | + for (i = 0; i < ARRAY_SIZE(broken_dev_id); i++) { |
805 | + pdev = pci_get_device(broken_dev_id[i].vendor, |
806 | + broken_dev_id[i].dev, NULL); |
807 | + if (pdev) { |
808 | + pci_dev_put(pdev); |
809 | + return true; |
810 | + } |
811 | + } |
812 | + return false; |
813 | +} |
814 | + |
815 | static int cx23885_initdev(struct pci_dev *pci_dev, |
816 | const struct pci_device_id *pci_id) |
817 | { |
818 | @@ -2069,6 +2118,8 @@ static int cx23885_initdev(struct pci_dev *pci_dev, |
819 | if (NULL == dev) |
820 | return -ENOMEM; |
821 | |
822 | + dev->need_dma_reset = cx23885_does_need_dma_reset(); |
823 | + |
824 | err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev); |
825 | if (err < 0) |
826 | goto fail_free; |
827 | diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h |
828 | index d54c7ee1ab21..cf965efabe66 100644 |
829 | --- a/drivers/media/pci/cx23885/cx23885.h |
830 | +++ b/drivers/media/pci/cx23885/cx23885.h |
831 | @@ -451,6 +451,8 @@ struct cx23885_dev { |
832 | /* Analog raw audio */ |
833 | struct cx23885_audio_dev *audio_dev; |
834 | |
835 | + /* Does the system require periodic DMA resets? */ |
836 | + unsigned int need_dma_reset:1; |
837 | }; |
838 | |
839 | static inline struct cx23885_dev *to_cx23885(struct v4l2_device *v4l2_dev) |
840 | diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c |
841 | index 3fcb9a2fe1c9..efe2fb72d54b 100644 |
842 | --- a/drivers/misc/genwqe/card_utils.c |
843 | +++ b/drivers/misc/genwqe/card_utils.c |
844 | @@ -215,7 +215,7 @@ u32 genwqe_crc32(u8 *buff, size_t len, u32 init) |
845 | void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size, |
846 | dma_addr_t *dma_handle) |
847 | { |
848 | - if (get_order(size) > MAX_ORDER) |
849 | + if (get_order(size) >= MAX_ORDER) |
850 | return NULL; |
851 | |
852 | return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle, |
853 | diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c |
854 | index 85f2ca989565..ef3ffa5ad466 100644 |
855 | --- a/drivers/net/wireless/broadcom/b43/phy_common.c |
856 | +++ b/drivers/net/wireless/broadcom/b43/phy_common.c |
857 | @@ -616,7 +616,7 @@ struct b43_c32 b43_cordic(int theta) |
858 | u8 i; |
859 | s32 tmp; |
860 | s8 signx = 1; |
861 | - u32 angle = 0; |
862 | + s32 angle = 0; |
863 | struct b43_c32 ret = { .i = 39797, .q = 0, }; |
864 | |
865 | while (theta > (180 << 16)) |
866 | diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c |
867 | index 0e39e3d1846f..d28418b05a04 100644 |
868 | --- a/drivers/nvdimm/pmem.c |
869 | +++ b/drivers/nvdimm/pmem.c |
870 | @@ -309,8 +309,11 @@ static void pmem_release_queue(void *q) |
871 | blk_cleanup_queue(q); |
872 | } |
873 | |
874 | -static void pmem_freeze_queue(void *q) |
875 | +static void pmem_freeze_queue(struct percpu_ref *ref) |
876 | { |
877 | + struct request_queue *q; |
878 | + |
879 | + q = container_of(ref, typeof(*q), q_usage_counter); |
880 | blk_freeze_queue_start(q); |
881 | } |
882 | |
883 | @@ -402,6 +405,7 @@ static int pmem_attach_disk(struct device *dev, |
884 | |
885 | pmem->pfn_flags = PFN_DEV; |
886 | pmem->pgmap.ref = &q->q_usage_counter; |
887 | + pmem->pgmap.kill = pmem_freeze_queue; |
888 | if (is_nd_pfn(dev)) { |
889 | if (setup_pagemap_fsdax(dev, &pmem->pgmap)) |
890 | return -ENOMEM; |
891 | @@ -427,13 +431,6 @@ static int pmem_attach_disk(struct device *dev, |
892 | memcpy(&bb_res, &nsio->res, sizeof(bb_res)); |
893 | } |
894 | |
895 | - /* |
896 | - * At release time the queue must be frozen before |
897 | - * devm_memremap_pages is unwound |
898 | - */ |
899 | - if (devm_add_action_or_reset(dev, pmem_freeze_queue, q)) |
900 | - return -ENOMEM; |
901 | - |
902 | if (IS_ERR(addr)) |
903 | return PTR_ERR(addr); |
904 | pmem->virt_addr = addr; |
905 | diff --git a/drivers/of/base.c b/drivers/of/base.c |
906 | index 09692c9b32a7..6d20b6dcf034 100644 |
907 | --- a/drivers/of/base.c |
908 | +++ b/drivers/of/base.c |
909 | @@ -116,9 +116,6 @@ int __weak of_node_to_nid(struct device_node *np) |
910 | } |
911 | #endif |
912 | |
913 | -static struct device_node **phandle_cache; |
914 | -static u32 phandle_cache_mask; |
915 | - |
916 | /* |
917 | * Assumptions behind phandle_cache implementation: |
918 | * - phandle property values are in a contiguous range of 1..n |
919 | @@ -127,6 +124,66 @@ static u32 phandle_cache_mask; |
920 | * - the phandle lookup overhead reduction provided by the cache |
921 | * will likely be less |
922 | */ |
923 | + |
924 | +static struct device_node **phandle_cache; |
925 | +static u32 phandle_cache_mask; |
926 | + |
927 | +/* |
928 | + * Caller must hold devtree_lock. |
929 | + */ |
930 | +static void __of_free_phandle_cache(void) |
931 | +{ |
932 | + u32 cache_entries = phandle_cache_mask + 1; |
933 | + u32 k; |
934 | + |
935 | + if (!phandle_cache) |
936 | + return; |
937 | + |
938 | + for (k = 0; k < cache_entries; k++) |
939 | + of_node_put(phandle_cache[k]); |
940 | + |
941 | + kfree(phandle_cache); |
942 | + phandle_cache = NULL; |
943 | +} |
944 | + |
945 | +int of_free_phandle_cache(void) |
946 | +{ |
947 | + unsigned long flags; |
948 | + |
949 | + raw_spin_lock_irqsave(&devtree_lock, flags); |
950 | + |
951 | + __of_free_phandle_cache(); |
952 | + |
953 | + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
954 | + |
955 | + return 0; |
956 | +} |
957 | +#if !defined(CONFIG_MODULES) |
958 | +late_initcall_sync(of_free_phandle_cache); |
959 | +#endif |
960 | + |
961 | +/* |
962 | + * Caller must hold devtree_lock. |
963 | + */ |
964 | +void __of_free_phandle_cache_entry(phandle handle) |
965 | +{ |
966 | + phandle masked_handle; |
967 | + struct device_node *np; |
968 | + |
969 | + if (!handle) |
970 | + return; |
971 | + |
972 | + masked_handle = handle & phandle_cache_mask; |
973 | + |
974 | + if (phandle_cache) { |
975 | + np = phandle_cache[masked_handle]; |
976 | + if (np && handle == np->phandle) { |
977 | + of_node_put(np); |
978 | + phandle_cache[masked_handle] = NULL; |
979 | + } |
980 | + } |
981 | +} |
982 | + |
983 | void of_populate_phandle_cache(void) |
984 | { |
985 | unsigned long flags; |
986 | @@ -136,8 +193,7 @@ void of_populate_phandle_cache(void) |
987 | |
988 | raw_spin_lock_irqsave(&devtree_lock, flags); |
989 | |
990 | - kfree(phandle_cache); |
991 | - phandle_cache = NULL; |
992 | + __of_free_phandle_cache(); |
993 | |
994 | for_each_of_allnodes(np) |
995 | if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) |
996 | @@ -155,30 +211,15 @@ void of_populate_phandle_cache(void) |
997 | goto out; |
998 | |
999 | for_each_of_allnodes(np) |
1000 | - if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) |
1001 | + if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) { |
1002 | + of_node_get(np); |
1003 | phandle_cache[np->phandle & phandle_cache_mask] = np; |
1004 | + } |
1005 | |
1006 | out: |
1007 | raw_spin_unlock_irqrestore(&devtree_lock, flags); |
1008 | } |
1009 | |
1010 | -int of_free_phandle_cache(void) |
1011 | -{ |
1012 | - unsigned long flags; |
1013 | - |
1014 | - raw_spin_lock_irqsave(&devtree_lock, flags); |
1015 | - |
1016 | - kfree(phandle_cache); |
1017 | - phandle_cache = NULL; |
1018 | - |
1019 | - raw_spin_unlock_irqrestore(&devtree_lock, flags); |
1020 | - |
1021 | - return 0; |
1022 | -} |
1023 | -#if !defined(CONFIG_MODULES) |
1024 | -late_initcall_sync(of_free_phandle_cache); |
1025 | -#endif |
1026 | - |
1027 | void __init of_core_init(void) |
1028 | { |
1029 | struct device_node *np; |
1030 | @@ -1190,13 +1231,23 @@ struct device_node *of_find_node_by_phandle(phandle handle) |
1031 | if (phandle_cache[masked_handle] && |
1032 | handle == phandle_cache[masked_handle]->phandle) |
1033 | np = phandle_cache[masked_handle]; |
1034 | + if (np && of_node_check_flag(np, OF_DETACHED)) { |
1035 | + WARN_ON(1); /* did not uncache np on node removal */ |
1036 | + of_node_put(np); |
1037 | + phandle_cache[masked_handle] = NULL; |
1038 | + np = NULL; |
1039 | + } |
1040 | } |
1041 | |
1042 | if (!np) { |
1043 | for_each_of_allnodes(np) |
1044 | - if (np->phandle == handle) { |
1045 | - if (phandle_cache) |
1046 | + if (np->phandle == handle && |
1047 | + !of_node_check_flag(np, OF_DETACHED)) { |
1048 | + if (phandle_cache) { |
1049 | + /* will put when removed from cache */ |
1050 | + of_node_get(np); |
1051 | phandle_cache[masked_handle] = np; |
1052 | + } |
1053 | break; |
1054 | } |
1055 | } |
1056 | diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c |
1057 | index f4f8ed9b5454..ecea92f68c87 100644 |
1058 | --- a/drivers/of/dynamic.c |
1059 | +++ b/drivers/of/dynamic.c |
1060 | @@ -268,6 +268,9 @@ void __of_detach_node(struct device_node *np) |
1061 | } |
1062 | |
1063 | of_node_set_flag(np, OF_DETACHED); |
1064 | + |
1065 | + /* race with of_find_node_by_phandle() prevented by devtree_lock */ |
1066 | + __of_free_phandle_cache_entry(np->phandle); |
1067 | } |
1068 | |
1069 | /** |
1070 | diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h |
1071 | index 5d1567025358..24786818e32e 100644 |
1072 | --- a/drivers/of/of_private.h |
1073 | +++ b/drivers/of/of_private.h |
1074 | @@ -84,6 +84,10 @@ static inline void __of_detach_node_sysfs(struct device_node *np) {} |
1075 | int of_resolve_phandles(struct device_node *tree); |
1076 | #endif |
1077 | |
1078 | +#if defined(CONFIG_OF_DYNAMIC) |
1079 | +void __of_free_phandle_cache_entry(phandle handle); |
1080 | +#endif |
1081 | + |
1082 | #if defined(CONFIG_OF_OVERLAY) |
1083 | void of_overlay_mutex_lock(void); |
1084 | void of_overlay_mutex_unlock(void); |
1085 | diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c |
1086 | index ae3c5b25dcc7..a2eb25271c96 100644 |
1087 | --- a/drivers/pci/p2pdma.c |
1088 | +++ b/drivers/pci/p2pdma.c |
1089 | @@ -82,10 +82,8 @@ static void pci_p2pdma_percpu_release(struct percpu_ref *ref) |
1090 | complete_all(&p2p->devmap_ref_done); |
1091 | } |
1092 | |
1093 | -static void pci_p2pdma_percpu_kill(void *data) |
1094 | +static void pci_p2pdma_percpu_kill(struct percpu_ref *ref) |
1095 | { |
1096 | - struct percpu_ref *ref = data; |
1097 | - |
1098 | /* |
1099 | * pci_p2pdma_add_resource() may be called multiple times |
1100 | * by a driver and may register the percpu_kill devm action multiple |
1101 | @@ -198,6 +196,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, |
1102 | pgmap->type = MEMORY_DEVICE_PCI_P2PDMA; |
1103 | pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) - |
1104 | pci_resource_start(pdev, bar); |
1105 | + pgmap->kill = pci_p2pdma_percpu_kill; |
1106 | |
1107 | addr = devm_memremap_pages(&pdev->dev, pgmap); |
1108 | if (IS_ERR(addr)) { |
1109 | @@ -211,11 +210,6 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, |
1110 | if (error) |
1111 | goto pgmap_free; |
1112 | |
1113 | - error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_percpu_kill, |
1114 | - &pdev->p2pdma->devmap_ref); |
1115 | - if (error) |
1116 | - goto pgmap_free; |
1117 | - |
1118 | pci_info(pdev, "added peer-to-peer DMA memory %pR\n", |
1119 | &pgmap->res); |
1120 | |
1121 | diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c |
1122 | index bef17c3fca67..33f3f475e5c6 100644 |
1123 | --- a/drivers/pci/pci-driver.c |
1124 | +++ b/drivers/pci/pci-driver.c |
1125 | @@ -1251,30 +1251,29 @@ static int pci_pm_runtime_suspend(struct device *dev) |
1126 | return 0; |
1127 | } |
1128 | |
1129 | - if (!pm || !pm->runtime_suspend) |
1130 | - return -ENOSYS; |
1131 | - |
1132 | pci_dev->state_saved = false; |
1133 | - error = pm->runtime_suspend(dev); |
1134 | - if (error) { |
1135 | + if (pm && pm->runtime_suspend) { |
1136 | + error = pm->runtime_suspend(dev); |
1137 | /* |
1138 | * -EBUSY and -EAGAIN is used to request the runtime PM core |
1139 | * to schedule a new suspend, so log the event only with debug |
1140 | * log level. |
1141 | */ |
1142 | - if (error == -EBUSY || error == -EAGAIN) |
1143 | + if (error == -EBUSY || error == -EAGAIN) { |
1144 | dev_dbg(dev, "can't suspend now (%pf returned %d)\n", |
1145 | pm->runtime_suspend, error); |
1146 | - else |
1147 | + return error; |
1148 | + } else if (error) { |
1149 | dev_err(dev, "can't suspend (%pf returned %d)\n", |
1150 | pm->runtime_suspend, error); |
1151 | - |
1152 | - return error; |
1153 | + return error; |
1154 | + } |
1155 | } |
1156 | |
1157 | pci_fixup_device(pci_fixup_suspend, pci_dev); |
1158 | |
1159 | - if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 |
1160 | + if (pm && pm->runtime_suspend |
1161 | + && !pci_dev->state_saved && pci_dev->current_state != PCI_D0 |
1162 | && pci_dev->current_state != PCI_UNKNOWN) { |
1163 | WARN_ONCE(pci_dev->current_state != prev, |
1164 | "PCI PM: State of device not saved by %pF\n", |
1165 | @@ -1292,7 +1291,7 @@ static int pci_pm_runtime_suspend(struct device *dev) |
1166 | |
1167 | static int pci_pm_runtime_resume(struct device *dev) |
1168 | { |
1169 | - int rc; |
1170 | + int rc = 0; |
1171 | struct pci_dev *pci_dev = to_pci_dev(dev); |
1172 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
1173 | |
1174 | @@ -1306,14 +1305,12 @@ static int pci_pm_runtime_resume(struct device *dev) |
1175 | if (!pci_dev->driver) |
1176 | return 0; |
1177 | |
1178 | - if (!pm || !pm->runtime_resume) |
1179 | - return -ENOSYS; |
1180 | - |
1181 | pci_fixup_device(pci_fixup_resume_early, pci_dev); |
1182 | pci_enable_wake(pci_dev, PCI_D0, false); |
1183 | pci_fixup_device(pci_fixup_resume, pci_dev); |
1184 | |
1185 | - rc = pm->runtime_resume(dev); |
1186 | + if (pm && pm->runtime_resume) |
1187 | + rc = pm->runtime_resume(dev); |
1188 | |
1189 | pci_dev->runtime_d3cold = false; |
1190 | |
1191 | diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c |
1192 | index 1b10ea05a914..69372e2bc93c 100644 |
1193 | --- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c |
1194 | +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c |
1195 | @@ -30,8 +30,8 @@ |
1196 | #define DDRC_FLUX_RCMD 0x38c |
1197 | #define DDRC_PRE_CMD 0x3c0 |
1198 | #define DDRC_ACT_CMD 0x3c4 |
1199 | -#define DDRC_BNK_CHG 0x3c8 |
1200 | #define DDRC_RNK_CHG 0x3cc |
1201 | +#define DDRC_RW_CHG 0x3d0 |
1202 | #define DDRC_EVENT_CTRL 0x6C0 |
1203 | #define DDRC_INT_MASK 0x6c8 |
1204 | #define DDRC_INT_STATUS 0x6cc |
1205 | @@ -51,7 +51,7 @@ |
1206 | |
1207 | static const u32 ddrc_reg_off[] = { |
1208 | DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD, |
1209 | - DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_BNK_CHG, DDRC_RNK_CHG |
1210 | + DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG |
1211 | }; |
1212 | |
1213 | /* |
1214 | diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c |
1215 | index 6da79ae14860..5a97e42a3547 100644 |
1216 | --- a/drivers/power/supply/olpc_battery.c |
1217 | +++ b/drivers/power/supply/olpc_battery.c |
1218 | @@ -428,14 +428,14 @@ static int olpc_bat_get_property(struct power_supply *psy, |
1219 | if (ret) |
1220 | return ret; |
1221 | |
1222 | - val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256; |
1223 | + val->intval = (s16)be16_to_cpu(ec_word) * 10 / 256; |
1224 | break; |
1225 | case POWER_SUPPLY_PROP_TEMP_AMBIENT: |
1226 | ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2); |
1227 | if (ret) |
1228 | return ret; |
1229 | |
1230 | - val->intval = (int)be16_to_cpu(ec_word) * 100 / 256; |
1231 | + val->intval = (int)be16_to_cpu(ec_word) * 10 / 256; |
1232 | break; |
1233 | case POWER_SUPPLY_PROP_CHARGE_COUNTER: |
1234 | ret = olpc_ec_cmd(EC_BAT_ACR, NULL, 0, (void *)&ec_word, 2); |
1235 | diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c |
1236 | index 94f4d8fe85e0..d1b531fe9ada 100644 |
1237 | --- a/drivers/s390/scsi/zfcp_aux.c |
1238 | +++ b/drivers/s390/scsi/zfcp_aux.c |
1239 | @@ -275,16 +275,16 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) |
1240 | */ |
1241 | int zfcp_status_read_refill(struct zfcp_adapter *adapter) |
1242 | { |
1243 | - while (atomic_read(&adapter->stat_miss) > 0) |
1244 | + while (atomic_add_unless(&adapter->stat_miss, -1, 0)) |
1245 | if (zfcp_fsf_status_read(adapter->qdio)) { |
1246 | + atomic_inc(&adapter->stat_miss); /* undo add -1 */ |
1247 | if (atomic_read(&adapter->stat_miss) >= |
1248 | adapter->stat_read_buf_num) { |
1249 | zfcp_erp_adapter_reopen(adapter, 0, "axsref1"); |
1250 | return 1; |
1251 | } |
1252 | break; |
1253 | - } else |
1254 | - atomic_dec(&adapter->stat_miss); |
1255 | + } |
1256 | return 0; |
1257 | } |
1258 | |
1259 | diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c |
1260 | index b9e5cd79931a..462ed4ad21d2 100644 |
1261 | --- a/drivers/scsi/lpfc/lpfc_sli.c |
1262 | +++ b/drivers/scsi/lpfc/lpfc_sli.c |
1263 | @@ -14501,7 +14501,8 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, |
1264 | hw_page_size))/hw_page_size; |
1265 | |
1266 | /* If needed, Adjust page count to match the max the adapter supports */ |
1267 | - if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt) |
1268 | + if (phba->sli4_hba.pc_sli4_params.wqpcnt && |
1269 | + (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)) |
1270 | queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt; |
1271 | |
1272 | INIT_LIST_HEAD(&queue->list); |
1273 | diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c |
1274 | index bbed039617a4..d59c8a59f582 100644 |
1275 | --- a/drivers/video/fbdev/pxafb.c |
1276 | +++ b/drivers/video/fbdev/pxafb.c |
1277 | @@ -2234,10 +2234,8 @@ static struct pxafb_mach_info *of_pxafb_of_mach_info(struct device *dev) |
1278 | if (!info) |
1279 | return ERR_PTR(-ENOMEM); |
1280 | ret = of_get_pxafb_mode_info(dev, info); |
1281 | - if (ret) { |
1282 | - kfree(info->modes); |
1283 | + if (ret) |
1284 | return ERR_PTR(ret); |
1285 | - } |
1286 | |
1287 | /* |
1288 | * On purpose, neither lccrX registers nor video memory size can be |
1289 | diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c |
1290 | index f3496db4bb3e..a58666a3f8dd 100644 |
1291 | --- a/fs/ceph/caps.c |
1292 | +++ b/fs/ceph/caps.c |
1293 | @@ -3569,7 +3569,6 @@ retry: |
1294 | tcap->cap_id = t_cap_id; |
1295 | tcap->seq = t_seq - 1; |
1296 | tcap->issue_seq = t_seq - 1; |
1297 | - tcap->mseq = t_mseq; |
1298 | tcap->issued |= issued; |
1299 | tcap->implemented |= issued; |
1300 | if (cap == ci->i_auth_cap) |
1301 | diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c |
1302 | index cc91963683de..a928ba008d7d 100644 |
1303 | --- a/fs/dlm/lock.c |
1304 | +++ b/fs/dlm/lock.c |
1305 | @@ -1209,6 +1209,7 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret) |
1306 | |
1307 | if (rv < 0) { |
1308 | log_error(ls, "create_lkb idr error %d", rv); |
1309 | + dlm_free_lkb(lkb); |
1310 | return rv; |
1311 | } |
1312 | |
1313 | @@ -4179,6 +4180,7 @@ static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms) |
1314 | (unsigned long long)lkb->lkb_recover_seq, |
1315 | ms->m_header.h_nodeid, ms->m_lkid); |
1316 | error = -ENOENT; |
1317 | + dlm_put_lkb(lkb); |
1318 | goto fail; |
1319 | } |
1320 | |
1321 | @@ -4232,6 +4234,7 @@ static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms) |
1322 | lkb->lkb_id, lkb->lkb_remid, |
1323 | ms->m_header.h_nodeid, ms->m_lkid); |
1324 | error = -ENOENT; |
1325 | + dlm_put_lkb(lkb); |
1326 | goto fail; |
1327 | } |
1328 | |
1329 | @@ -5792,20 +5795,20 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, |
1330 | goto out; |
1331 | } |
1332 | } |
1333 | - |
1334 | - /* After ua is attached to lkb it will be freed by dlm_free_lkb(). |
1335 | - When DLM_IFL_USER is set, the dlm knows that this is a userspace |
1336 | - lock and that lkb_astparam is the dlm_user_args structure. */ |
1337 | - |
1338 | error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs, |
1339 | fake_astfn, ua, fake_bastfn, &args); |
1340 | - lkb->lkb_flags |= DLM_IFL_USER; |
1341 | - |
1342 | if (error) { |
1343 | + kfree(ua->lksb.sb_lvbptr); |
1344 | + ua->lksb.sb_lvbptr = NULL; |
1345 | + kfree(ua); |
1346 | __put_lkb(ls, lkb); |
1347 | goto out; |
1348 | } |
1349 | |
1350 | + /* After ua is attached to lkb it will be freed by dlm_free_lkb(). |
1351 | + When DLM_IFL_USER is set, the dlm knows that this is a userspace |
1352 | + lock and that lkb_astparam is the dlm_user_args structure. */ |
1353 | + lkb->lkb_flags |= DLM_IFL_USER; |
1354 | error = request_lock(ls, lkb, name, namelen, &args); |
1355 | |
1356 | switch (error) { |
1357 | diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c |
1358 | index 5ba94be006ee..6a1529e478f3 100644 |
1359 | --- a/fs/dlm/lockspace.c |
1360 | +++ b/fs/dlm/lockspace.c |
1361 | @@ -680,11 +680,11 @@ static int new_lockspace(const char *name, const char *cluster, |
1362 | kfree(ls->ls_recover_buf); |
1363 | out_lkbidr: |
1364 | idr_destroy(&ls->ls_lkbidr); |
1365 | + out_rsbtbl: |
1366 | for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) { |
1367 | if (ls->ls_remove_names[i]) |
1368 | kfree(ls->ls_remove_names[i]); |
1369 | } |
1370 | - out_rsbtbl: |
1371 | vfree(ls->ls_rsbtbl); |
1372 | out_lsfree: |
1373 | if (do_unreg) |
1374 | diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c |
1375 | index 648f0ca1ad57..998051c4aea7 100644 |
1376 | --- a/fs/gfs2/inode.c |
1377 | +++ b/fs/gfs2/inode.c |
1378 | @@ -744,17 +744,19 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, |
1379 | the gfs2 structures. */ |
1380 | if (default_acl) { |
1381 | error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); |
1382 | + if (error) |
1383 | + goto fail_gunlock3; |
1384 | posix_acl_release(default_acl); |
1385 | + default_acl = NULL; |
1386 | } |
1387 | if (acl) { |
1388 | - if (!error) |
1389 | - error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS); |
1390 | + error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS); |
1391 | + if (error) |
1392 | + goto fail_gunlock3; |
1393 | posix_acl_release(acl); |
1394 | + acl = NULL; |
1395 | } |
1396 | |
1397 | - if (error) |
1398 | - goto fail_gunlock3; |
1399 | - |
1400 | error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name, |
1401 | &gfs2_initxattrs, NULL); |
1402 | if (error) |
1403 | @@ -789,10 +791,8 @@ fail_free_inode: |
1404 | } |
1405 | gfs2_rsqa_delete(ip, NULL); |
1406 | fail_free_acls: |
1407 | - if (default_acl) |
1408 | - posix_acl_release(default_acl); |
1409 | - if (acl) |
1410 | - posix_acl_release(acl); |
1411 | + posix_acl_release(default_acl); |
1412 | + posix_acl_release(acl); |
1413 | fail_gunlock: |
1414 | gfs2_dir_no_add(&da); |
1415 | gfs2_glock_dq_uninit(ghs); |
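Editorial sketch for the gfs2_create_inode() hunk above: each ACL is released and the pointer cleared as soon as it has been consumed, so the shared fail_free_acls path can call posix_acl_release() unconditionally (it tolerates NULL). A small userspace analogue of that release-and-clear idiom follows; the object and helper names are invented for illustration only.

#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; };

static struct obj *obj_get(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	if (o)
		o->refs = 1;
	return o;
}

static void obj_put(struct obj *o)
{
	if (!o)			/* tolerate NULL, like posix_acl_release() */
		return;
	if (--o->refs == 0)
		free(o);
}

static int apply(struct obj *o, int fail) { (void)o; return fail ? -1 : 0; }

static int create_demo(int fail_second)
{
	struct obj *def = obj_get(), *acc = obj_get();
	int err;

	if (!def || !acc) { err = -1; goto fail; }

	err = apply(def, 0);
	if (err)
		goto fail;
	obj_put(def);		/* consumed: release and clear ... */
	def = NULL;		/* ... so the common error path stays safe */

	err = apply(acc, fail_second);
	if (err)
		goto fail;
	obj_put(acc);
	acc = NULL;

	return 0;
fail:
	obj_put(def);		/* either pointer may already be NULL here */
	obj_put(acc);
	return err;
}

int main(void)
{
	printf("%d %d\n", create_demo(0), create_demo(1));
	return 0;
}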
1416 | diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c |
1417 | index b08a530433ad..8d7916570362 100644 |
1418 | --- a/fs/gfs2/rgrp.c |
1419 | +++ b/fs/gfs2/rgrp.c |
1420 | @@ -1780,9 +1780,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext, |
1421 | goto next_iter; |
1422 | } |
1423 | if (ret == -E2BIG) { |
1424 | + n += rbm->bii - initial_bii; |
1425 | rbm->bii = 0; |
1426 | rbm->offset = 0; |
1427 | - n += (rbm->bii - initial_bii); |
1428 | goto res_covered_end_of_rgrp; |
1429 | } |
1430 | return ret; |
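Editorial sketch for the gfs2_rbm_find() hunk above: the accumulation has to run before rbm->bii is reset, otherwise the delta is computed from the already-zeroed index. A tiny standalone illustration of that ordering, with plain integers standing in for the rbm fields:

#include <stdio.h>

int main(void)
{
	int initial = 3, walked_to = 7, n = 0;
	int idx = walked_to;

	/* fixed ordering: consume the value, then reset it */
	n += idx - initial;	/* += 4, the iterations actually spent */
	idx = 0;		/* now safe to rewind for the next pass */

	printf("n=%d idx=%d\n", n, idx);
	return 0;
}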
1431 | diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c |
1432 | index d20b92f271c2..0a67dd4250e9 100644 |
1433 | --- a/fs/lockd/clntproc.c |
1434 | +++ b/fs/lockd/clntproc.c |
1435 | @@ -442,7 +442,7 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl) |
1436 | fl->fl_start = req->a_res.lock.fl.fl_start; |
1437 | fl->fl_end = req->a_res.lock.fl.fl_end; |
1438 | fl->fl_type = req->a_res.lock.fl.fl_type; |
1439 | - fl->fl_pid = 0; |
1440 | + fl->fl_pid = -req->a_res.lock.fl.fl_pid; |
1441 | break; |
1442 | default: |
1443 | status = nlm_stat_to_errno(req->a_res.status); |
1444 | diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c |
1445 | index 7147e4aebecc..9846f7e95282 100644 |
1446 | --- a/fs/lockd/xdr.c |
1447 | +++ b/fs/lockd/xdr.c |
1448 | @@ -127,7 +127,7 @@ nlm_decode_lock(__be32 *p, struct nlm_lock *lock) |
1449 | |
1450 | locks_init_lock(fl); |
1451 | fl->fl_owner = current->files; |
1452 | - fl->fl_pid = (pid_t)lock->svid; |
1453 | + fl->fl_pid = current->tgid; |
1454 | fl->fl_flags = FL_POSIX; |
1455 | fl->fl_type = F_RDLCK; /* as good as anything else */ |
1456 | start = ntohl(*p++); |
1457 | @@ -269,7 +269,7 @@ nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p) |
1458 | memset(lock, 0, sizeof(*lock)); |
1459 | locks_init_lock(&lock->fl); |
1460 | lock->svid = ~(u32) 0; |
1461 | - lock->fl.fl_pid = (pid_t)lock->svid; |
1462 | + lock->fl.fl_pid = current->tgid; |
1463 | |
1464 | if (!(p = nlm_decode_cookie(p, &argp->cookie)) |
1465 | || !(p = xdr_decode_string_inplace(p, &lock->caller, |
1466 | diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c |
1467 | index 7ed9edf9aed4..70154f376695 100644 |
1468 | --- a/fs/lockd/xdr4.c |
1469 | +++ b/fs/lockd/xdr4.c |
1470 | @@ -119,7 +119,7 @@ nlm4_decode_lock(__be32 *p, struct nlm_lock *lock) |
1471 | |
1472 | locks_init_lock(fl); |
1473 | fl->fl_owner = current->files; |
1474 | - fl->fl_pid = (pid_t)lock->svid; |
1475 | + fl->fl_pid = current->tgid; |
1476 | fl->fl_flags = FL_POSIX; |
1477 | fl->fl_type = F_RDLCK; /* as good as anything else */ |
1478 | p = xdr_decode_hyper(p, &start); |
1479 | @@ -266,7 +266,7 @@ nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p) |
1480 | memset(lock, 0, sizeof(*lock)); |
1481 | locks_init_lock(&lock->fl); |
1482 | lock->svid = ~(u32) 0; |
1483 | - lock->fl.fl_pid = (pid_t)lock->svid; |
1484 | + lock->fl.fl_pid = current->tgid; |
1485 | |
1486 | if (!(p = nlm4_decode_cookie(p, &argp->cookie)) |
1487 | || !(p = xdr_decode_string_inplace(p, &lock->caller, |
1488 | diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c |
1489 | index d505990dac7c..c364acbb6aba 100644 |
1490 | --- a/fs/nfsd/nfs4proc.c |
1491 | +++ b/fs/nfsd/nfs4proc.c |
1492 | @@ -1016,8 +1016,6 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, |
1493 | |
1494 | nvecs = svc_fill_write_vector(rqstp, write->wr_pagelist, |
1495 | &write->wr_head, write->wr_buflen); |
1496 | - if (!nvecs) |
1497 | - return nfserr_io; |
1498 | WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec)); |
1499 | |
1500 | status = nfsd_vfs_write(rqstp, &cstate->current_fh, filp, |
1501 | diff --git a/include/linux/hmm.h b/include/linux/hmm.h |
1502 | index c6fb869a81c0..ed89fbc525d2 100644 |
1503 | --- a/include/linux/hmm.h |
1504 | +++ b/include/linux/hmm.h |
1505 | @@ -512,8 +512,7 @@ struct hmm_devmem { |
1506 | * enough and allocate struct page for it. |
1507 | * |
1508 | * The device driver can wrap the hmm_devmem struct inside a private device |
1509 | - * driver struct. The device driver must call hmm_devmem_remove() before the |
1510 | - * device goes away and before freeing the hmm_devmem struct memory. |
1511 | + * driver struct. |
1512 | */ |
1513 | struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, |
1514 | struct device *device, |
1515 | @@ -521,7 +520,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, |
1516 | struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, |
1517 | struct device *device, |
1518 | struct resource *res); |
1519 | -void hmm_devmem_remove(struct hmm_devmem *devmem); |
1520 | |
1521 | /* |
1522 | * hmm_devmem_page_set_drvdata - set per-page driver data field |
1523 | diff --git a/include/linux/memremap.h b/include/linux/memremap.h |
1524 | index 0ac69ddf5fc4..55db66b3716f 100644 |
1525 | --- a/include/linux/memremap.h |
1526 | +++ b/include/linux/memremap.h |
1527 | @@ -111,6 +111,7 @@ typedef void (*dev_page_free_t)(struct page *page, void *data); |
1528 | * @altmap: pre-allocated/reserved memory for vmemmap allocations |
1529 | * @res: physical address range covered by @ref |
1530 | * @ref: reference count that pins the devm_memremap_pages() mapping |
1531 | + * @kill: callback to transition @ref to the dead state |
1532 | * @dev: host device of the mapping for debug |
1533 | * @data: private data pointer for page_free() |
1534 | * @type: memory type: see MEMORY_* in memory_hotplug.h |
1535 | @@ -122,6 +123,7 @@ struct dev_pagemap { |
1536 | bool altmap_valid; |
1537 | struct resource res; |
1538 | struct percpu_ref *ref; |
1539 | + void (*kill)(struct percpu_ref *ref); |
1540 | struct device *dev; |
1541 | void *data; |
1542 | enum memory_type type; |
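Editorial sketch for the @kill member added to struct dev_pagemap above: per the kernel/memremap.c hunks further down, the caller now supplies a live reference plus a kill callback, and the core invokes that callback both at release time and when setup fails. The userspace analogue below only mirrors the shape of that contract; it is not kernel code and all names are hypothetical.

#include <stdio.h>

struct ref { int live; };

struct pagemap_demo {
	struct ref *ref;
	void (*kill)(struct ref *ref);
};

static void my_kill(struct ref *ref)
{
	ref->live = 0;				/* "transition to the dead state" */
}

static int map_demo(struct pagemap_demo *pgmap, int fail_setup)
{
	if (!pgmap->ref || !pgmap->kill)	/* mirrors the new -EINVAL check */
		return -1;
	if (fail_setup) {
		pgmap->kill(pgmap->ref);	/* killed on the error path too */
		return -1;
	}
	return 0;
}

static void unmap_demo(struct pagemap_demo *pgmap)
{
	pgmap->kill(pgmap->ref);		/* killed first on release */
}

int main(void)
{
	struct ref r = { .live = 1 };
	struct pagemap_demo pgmap = { .ref = &r, .kill = my_kill };

	if (map_demo(&pgmap, 0) == 0)
		unmap_demo(&pgmap);
	printf("ref live after teardown: %d\n", r.live);
	return 0;
}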
1543 | diff --git a/kernel/fork.c b/kernel/fork.c |
1544 | index e2a5156bc9c3..3c16bc490583 100644 |
1545 | --- a/kernel/fork.c |
1546 | +++ b/kernel/fork.c |
1547 | @@ -1837,8 +1837,6 @@ static __latent_entropy struct task_struct *copy_process( |
1548 | |
1549 | posix_cpu_timers_init(p); |
1550 | |
1551 | - p->start_time = ktime_get_ns(); |
1552 | - p->real_start_time = ktime_get_boot_ns(); |
1553 | p->io_context = NULL; |
1554 | audit_set_context(p, NULL); |
1555 | cgroup_fork(p); |
1556 | @@ -2004,6 +2002,17 @@ static __latent_entropy struct task_struct *copy_process( |
1557 | if (retval) |
1558 | goto bad_fork_free_pid; |
1559 | |
1560 | + /* |
1561 | + * From this point on we must avoid any synchronous user-space |
1562 | + * communication until we take the tasklist-lock. In particular, we do |
1563 | + * not want user-space to be able to predict the process start-time by |
1564 | + * stalling fork(2) after we recorded the start_time but before it is |
1565 | + * visible to the system. |
1566 | + */ |
1567 | + |
1568 | + p->start_time = ktime_get_ns(); |
1569 | + p->real_start_time = ktime_get_boot_ns(); |
1570 | + |
1571 | /* |
1572 | * Make it visible to the rest of the system, but dont wake it up yet. |
1573 | * Need tasklist lock for parent etc handling! |
1574 | diff --git a/kernel/memremap.c b/kernel/memremap.c |
1575 | index 9eced2cc9f94..3eef989ef035 100644 |
1576 | --- a/kernel/memremap.c |
1577 | +++ b/kernel/memremap.c |
1578 | @@ -88,23 +88,25 @@ static void devm_memremap_pages_release(void *data) |
1579 | resource_size_t align_start, align_size; |
1580 | unsigned long pfn; |
1581 | |
1582 | + pgmap->kill(pgmap->ref); |
1583 | for_each_device_pfn(pfn, pgmap) |
1584 | put_page(pfn_to_page(pfn)); |
1585 | |
1586 | - if (percpu_ref_tryget_live(pgmap->ref)) { |
1587 | - dev_WARN(dev, "%s: page mapping is still live!\n", __func__); |
1588 | - percpu_ref_put(pgmap->ref); |
1589 | - } |
1590 | - |
1591 | /* pages are dead and unused, undo the arch mapping */ |
1592 | align_start = res->start & ~(SECTION_SIZE - 1); |
1593 | align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) |
1594 | - align_start; |
1595 | |
1596 | mem_hotplug_begin(); |
1597 | - arch_remove_memory(align_start, align_size, pgmap->altmap_valid ? |
1598 | - &pgmap->altmap : NULL); |
1599 | - kasan_remove_zero_shadow(__va(align_start), align_size); |
1600 | + if (pgmap->type == MEMORY_DEVICE_PRIVATE) { |
1601 | + pfn = align_start >> PAGE_SHIFT; |
1602 | + __remove_pages(page_zone(pfn_to_page(pfn)), pfn, |
1603 | + align_size >> PAGE_SHIFT, NULL); |
1604 | + } else { |
1605 | + arch_remove_memory(align_start, align_size, |
1606 | + pgmap->altmap_valid ? &pgmap->altmap : NULL); |
1607 | + kasan_remove_zero_shadow(__va(align_start), align_size); |
1608 | + } |
1609 | mem_hotplug_done(); |
1610 | |
1611 | untrack_pfn(NULL, PHYS_PFN(align_start), align_size); |
1612 | @@ -116,7 +118,7 @@ static void devm_memremap_pages_release(void *data) |
1613 | /** |
1614 | * devm_memremap_pages - remap and provide memmap backing for the given resource |
1615 | * @dev: hosting device for @res |
1616 | - * @pgmap: pointer to a struct dev_pgmap |
1617 | + * @pgmap: pointer to a struct dev_pagemap |
1618 | * |
1619 | * Notes: |
1620 | * 1/ At a minimum the res, ref and type members of @pgmap must be initialized |
1621 | @@ -125,11 +127,8 @@ static void devm_memremap_pages_release(void *data) |
1622 | * 2/ The altmap field may optionally be initialized, in which case altmap_valid |
1623 | * must be set to true |
1624 | * |
1625 | - * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages() |
1626 | - * time (or devm release event). The expected order of events is that ref has |
1627 | - * been through percpu_ref_kill() before devm_memremap_pages_release(). The |
1628 | - * wait for the completion of all references being dropped and |
1629 | - * percpu_ref_exit() must occur after devm_memremap_pages_release(). |
1630 | + * 3/ pgmap->ref must be 'live' on entry and will be killed at |
1631 | + * devm_memremap_pages_release() time, or if this routine fails. |
1632 | * |
1633 | * 4/ res is expected to be a host memory range that could feasibly be |
1634 | * treated as a "System RAM" range, i.e. not a device mmio range, but |
1635 | @@ -145,6 +144,9 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) |
1636 | pgprot_t pgprot = PAGE_KERNEL; |
1637 | int error, nid, is_ram; |
1638 | |
1639 | + if (!pgmap->ref || !pgmap->kill) |
1640 | + return ERR_PTR(-EINVAL); |
1641 | + |
1642 | align_start = res->start & ~(SECTION_SIZE - 1); |
1643 | align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) |
1644 | - align_start; |
1645 | @@ -167,18 +169,13 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) |
1646 | is_ram = region_intersects(align_start, align_size, |
1647 | IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE); |
1648 | |
1649 | - if (is_ram == REGION_MIXED) { |
1650 | - WARN_ONCE(1, "%s attempted on mixed region %pr\n", |
1651 | - __func__, res); |
1652 | - return ERR_PTR(-ENXIO); |
1653 | + if (is_ram != REGION_DISJOINT) { |
1654 | + WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__, |
1655 | + is_ram == REGION_MIXED ? "mixed" : "ram", res); |
1656 | + error = -ENXIO; |
1657 | + goto err_array; |
1658 | } |
1659 | |
1660 | - if (is_ram == REGION_INTERSECTS) |
1661 | - return __va(res->start); |
1662 | - |
1663 | - if (!pgmap->ref) |
1664 | - return ERR_PTR(-EINVAL); |
1665 | - |
1666 | pgmap->dev = dev; |
1667 | |
1668 | error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start), |
1669 | @@ -196,17 +193,40 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) |
1670 | goto err_pfn_remap; |
1671 | |
1672 | mem_hotplug_begin(); |
1673 | - error = kasan_add_zero_shadow(__va(align_start), align_size); |
1674 | - if (error) { |
1675 | - mem_hotplug_done(); |
1676 | - goto err_kasan; |
1677 | + |
1678 | + /* |
1679 | + * For device private memory we call add_pages() as we only need to |
1680 | + * allocate and initialize struct page for the device memory. More- |
1681 | + * over the device memory is un-accessible thus we do not want to |
1682 | + * create a linear mapping for the memory like arch_add_memory() |
1683 | + * would do. |
1684 | + * |
1685 | + * For all other device memory types, which are accessible by |
1686 | + * the CPU, we do want the linear mapping and thus use |
1687 | + * arch_add_memory(). |
1688 | + */ |
1689 | + if (pgmap->type == MEMORY_DEVICE_PRIVATE) { |
1690 | + error = add_pages(nid, align_start >> PAGE_SHIFT, |
1691 | + align_size >> PAGE_SHIFT, NULL, false); |
1692 | + } else { |
1693 | + error = kasan_add_zero_shadow(__va(align_start), align_size); |
1694 | + if (error) { |
1695 | + mem_hotplug_done(); |
1696 | + goto err_kasan; |
1697 | + } |
1698 | + |
1699 | + error = arch_add_memory(nid, align_start, align_size, altmap, |
1700 | + false); |
1701 | + } |
1702 | + |
1703 | + if (!error) { |
1704 | + struct zone *zone; |
1705 | + |
1706 | + zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE]; |
1707 | + move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT, |
1708 | + align_size >> PAGE_SHIFT, altmap); |
1709 | } |
1710 | |
1711 | - error = arch_add_memory(nid, align_start, align_size, altmap, false); |
1712 | - if (!error) |
1713 | - move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], |
1714 | - align_start >> PAGE_SHIFT, |
1715 | - align_size >> PAGE_SHIFT, altmap); |
1716 | mem_hotplug_done(); |
1717 | if (error) |
1718 | goto err_add_memory; |
1719 | @@ -220,7 +240,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) |
1720 | align_size >> PAGE_SHIFT, pgmap); |
1721 | percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap)); |
1722 | |
1723 | - devm_add_action(dev, devm_memremap_pages_release, pgmap); |
1724 | + error = devm_add_action_or_reset(dev, devm_memremap_pages_release, |
1725 | + pgmap); |
1726 | + if (error) |
1727 | + return ERR_PTR(error); |
1728 | |
1729 | return __va(res->start); |
1730 | |
1731 | @@ -231,9 +254,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) |
1732 | err_pfn_remap: |
1733 | pgmap_array_delete(res); |
1734 | err_array: |
1735 | + pgmap->kill(pgmap->ref); |
1736 | return ERR_PTR(error); |
1737 | } |
1738 | -EXPORT_SYMBOL(devm_memremap_pages); |
1739 | +EXPORT_SYMBOL_GPL(devm_memremap_pages); |
1740 | |
1741 | unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) |
1742 | { |
1743 | diff --git a/kernel/pid.c b/kernel/pid.c |
1744 | index b2f6c506035d..20881598bdfa 100644 |
1745 | --- a/kernel/pid.c |
1746 | +++ b/kernel/pid.c |
1747 | @@ -233,8 +233,10 @@ out_unlock: |
1748 | |
1749 | out_free: |
1750 | spin_lock_irq(&pidmap_lock); |
1751 | - while (++i <= ns->level) |
1752 | - idr_remove(&ns->idr, (pid->numbers + i)->nr); |
1753 | + while (++i <= ns->level) { |
1754 | + upid = pid->numbers + i; |
1755 | + idr_remove(&upid->ns->idr, upid->nr); |
1756 | + } |
1757 | |
1758 | /* On failure to allocate the first pid, reset the state */ |
1759 | if (ns->pid_allocated == PIDNS_ADDING) |
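Editorial sketch for the alloc_pid() cleanup hunk above: each id is returned to the idr of the namespace it was actually allocated from (upid->ns->idr), rather than to whichever namespace the error path happened to hold. The toy allocator below illustrates that "free against the pool it came from" rule; it is illustration only, with invented names.

#include <stdio.h>

struct pool { unsigned int used; };	/* bit i set => id i in use */

struct upid_demo { struct pool *pool; int id; };

static int pool_alloc(struct pool *p)
{
	for (int i = 0; i < 32; i++)
		if (!(p->used & (1u << i))) {
			p->used |= 1u << i;
			return i;
		}
	return -1;
}

static void pool_free(struct pool *p, int id)
{
	p->used &= ~(1u << id);
}

int main(void)
{
	struct pool level[3] = { {0}, {0}, {0} };
	struct upid_demo got[3];
	int n = 0;

	/* allocate one id per level; pretend the last level fails */
	for (; n < 2; n++) {
		got[n].pool = &level[n];
		got[n].id = pool_alloc(&level[n]);
	}

	/* error path: free each id against its own pool */
	while (n-- > 0)
		pool_free(got[n].pool, got[n].id);

	printf("%u %u %u\n", level[0].used, level[1].used, level[2].used);
	return 0;
}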
1760 | diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c |
1761 | index a8846ed7f352..a180abc8c925 100644 |
1762 | --- a/kernel/rcu/srcutree.c |
1763 | +++ b/kernel/rcu/srcutree.c |
1764 | @@ -451,10 +451,12 @@ static void srcu_gp_start(struct srcu_struct *sp) |
1765 | |
1766 | lockdep_assert_held(&ACCESS_PRIVATE(sp, lock)); |
1767 | WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)); |
1768 | + spin_lock_rcu_node(sdp); /* Interrupts already disabled. */ |
1769 | rcu_segcblist_advance(&sdp->srcu_cblist, |
1770 | rcu_seq_current(&sp->srcu_gp_seq)); |
1771 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, |
1772 | rcu_seq_snap(&sp->srcu_gp_seq)); |
1773 | + spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */ |
1774 | smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */ |
1775 | rcu_seq_start(&sp->srcu_gp_seq); |
1776 | state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); |
1777 | diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c |
1778 | index ac855b2f4774..e8f191ba3fe5 100644 |
1779 | --- a/kernel/sched/fair.c |
1780 | +++ b/kernel/sched/fair.c |
1781 | @@ -352,10 +352,9 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) |
1782 | } |
1783 | } |
1784 | |
1785 | -/* Iterate thr' all leaf cfs_rq's on a runqueue */ |
1786 | -#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ |
1787 | - list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \ |
1788 | - leaf_cfs_rq_list) |
1789 | +/* Iterate through all leaf cfs_rq's on a runqueue: */ |
1790 | +#define for_each_leaf_cfs_rq(rq, cfs_rq) \ |
1791 | + list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list) |
1792 | |
1793 | /* Do the two (enqueued) entities belong to the same group ? */ |
1794 | static inline struct cfs_rq * |
1795 | @@ -447,8 +446,8 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) |
1796 | { |
1797 | } |
1798 | |
1799 | -#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ |
1800 | - for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos) |
1801 | +#define for_each_leaf_cfs_rq(rq, cfs_rq) \ |
1802 | + for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL) |
1803 | |
1804 | static inline struct sched_entity *parent_entity(struct sched_entity *se) |
1805 | { |
1806 | @@ -7387,27 +7386,10 @@ static inline bool others_have_blocked(struct rq *rq) |
1807 | |
1808 | #ifdef CONFIG_FAIR_GROUP_SCHED |
1809 | |
1810 | -static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) |
1811 | -{ |
1812 | - if (cfs_rq->load.weight) |
1813 | - return false; |
1814 | - |
1815 | - if (cfs_rq->avg.load_sum) |
1816 | - return false; |
1817 | - |
1818 | - if (cfs_rq->avg.util_sum) |
1819 | - return false; |
1820 | - |
1821 | - if (cfs_rq->avg.runnable_load_sum) |
1822 | - return false; |
1823 | - |
1824 | - return true; |
1825 | -} |
1826 | - |
1827 | static void update_blocked_averages(int cpu) |
1828 | { |
1829 | struct rq *rq = cpu_rq(cpu); |
1830 | - struct cfs_rq *cfs_rq, *pos; |
1831 | + struct cfs_rq *cfs_rq; |
1832 | const struct sched_class *curr_class; |
1833 | struct rq_flags rf; |
1834 | bool done = true; |
1835 | @@ -7419,7 +7401,7 @@ static void update_blocked_averages(int cpu) |
1836 | * Iterates the task_group tree in a bottom up fashion, see |
1837 | * list_add_leaf_cfs_rq() for details. |
1838 | */ |
1839 | - for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) { |
1840 | + for_each_leaf_cfs_rq(rq, cfs_rq) { |
1841 | struct sched_entity *se; |
1842 | |
1843 | /* throttled entities do not contribute to load */ |
1844 | @@ -7434,13 +7416,6 @@ static void update_blocked_averages(int cpu) |
1845 | if (se && !skip_blocked_update(se)) |
1846 | update_load_avg(cfs_rq_of(se), se, 0); |
1847 | |
1848 | - /* |
1849 | - * There can be a lot of idle CPU cgroups. Don't let fully |
1850 | - * decayed cfs_rqs linger on the list. |
1851 | - */ |
1852 | - if (cfs_rq_is_decayed(cfs_rq)) |
1853 | - list_del_leaf_cfs_rq(cfs_rq); |
1854 | - |
1855 | /* Don't need periodic decay once load/util_avg are null */ |
1856 | if (cfs_rq_has_blocked(cfs_rq)) |
1857 | done = false; |
1858 | @@ -10289,10 +10264,10 @@ const struct sched_class fair_sched_class = { |
1859 | #ifdef CONFIG_SCHED_DEBUG |
1860 | void print_cfs_stats(struct seq_file *m, int cpu) |
1861 | { |
1862 | - struct cfs_rq *cfs_rq, *pos; |
1863 | + struct cfs_rq *cfs_rq; |
1864 | |
1865 | rcu_read_lock(); |
1866 | - for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) |
1867 | + for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq) |
1868 | print_cfs_rq(m, cpu, cfs_rq); |
1869 | rcu_read_unlock(); |
1870 | } |
1871 | diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c |
1872 | index d5a06addeb27..bf864c73e462 100644 |
1873 | --- a/lib/test_debug_virtual.c |
1874 | +++ b/lib/test_debug_virtual.c |
1875 | @@ -5,6 +5,7 @@ |
1876 | #include <linux/vmalloc.h> |
1877 | #include <linux/slab.h> |
1878 | #include <linux/sizes.h> |
1879 | +#include <linux/io.h> |
1880 | |
1881 | #include <asm/page.h> |
1882 | #ifdef CONFIG_MIPS |
1883 | diff --git a/mm/hmm.c b/mm/hmm.c |
1884 | index 90c34f3d1243..50fbaf80f95e 100644 |
1885 | --- a/mm/hmm.c |
1886 | +++ b/mm/hmm.c |
1887 | @@ -986,19 +986,16 @@ static void hmm_devmem_ref_exit(void *data) |
1888 | struct hmm_devmem *devmem; |
1889 | |
1890 | devmem = container_of(ref, struct hmm_devmem, ref); |
1891 | + wait_for_completion(&devmem->completion); |
1892 | percpu_ref_exit(ref); |
1893 | - devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data); |
1894 | } |
1895 | |
1896 | -static void hmm_devmem_ref_kill(void *data) |
1897 | +static void hmm_devmem_ref_kill(struct percpu_ref *ref) |
1898 | { |
1899 | - struct percpu_ref *ref = data; |
1900 | struct hmm_devmem *devmem; |
1901 | |
1902 | devmem = container_of(ref, struct hmm_devmem, ref); |
1903 | percpu_ref_kill(ref); |
1904 | - wait_for_completion(&devmem->completion); |
1905 | - devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data); |
1906 | } |
1907 | |
1908 | static int hmm_devmem_fault(struct vm_area_struct *vma, |
1909 | @@ -1021,172 +1018,6 @@ static void hmm_devmem_free(struct page *page, void *data) |
1910 | devmem->ops->free(devmem, page); |
1911 | } |
1912 | |
1913 | -static DEFINE_MUTEX(hmm_devmem_lock); |
1914 | -static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL); |
1915 | - |
1916 | -static void hmm_devmem_radix_release(struct resource *resource) |
1917 | -{ |
1918 | - resource_size_t key; |
1919 | - |
1920 | - mutex_lock(&hmm_devmem_lock); |
1921 | - for (key = resource->start; |
1922 | - key <= resource->end; |
1923 | - key += PA_SECTION_SIZE) |
1924 | - radix_tree_delete(&hmm_devmem_radix, key >> PA_SECTION_SHIFT); |
1925 | - mutex_unlock(&hmm_devmem_lock); |
1926 | -} |
1927 | - |
1928 | -static void hmm_devmem_release(struct device *dev, void *data) |
1929 | -{ |
1930 | - struct hmm_devmem *devmem = data; |
1931 | - struct resource *resource = devmem->resource; |
1932 | - unsigned long start_pfn, npages; |
1933 | - struct zone *zone; |
1934 | - struct page *page; |
1935 | - |
1936 | - if (percpu_ref_tryget_live(&devmem->ref)) { |
1937 | - dev_WARN(dev, "%s: page mapping is still live!\n", __func__); |
1938 | - percpu_ref_put(&devmem->ref); |
1939 | - } |
1940 | - |
1941 | - /* pages are dead and unused, undo the arch mapping */ |
1942 | - start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT; |
1943 | - npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT; |
1944 | - |
1945 | - page = pfn_to_page(start_pfn); |
1946 | - zone = page_zone(page); |
1947 | - |
1948 | - mem_hotplug_begin(); |
1949 | - if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) |
1950 | - __remove_pages(zone, start_pfn, npages, NULL); |
1951 | - else |
1952 | - arch_remove_memory(start_pfn << PAGE_SHIFT, |
1953 | - npages << PAGE_SHIFT, NULL); |
1954 | - mem_hotplug_done(); |
1955 | - |
1956 | - hmm_devmem_radix_release(resource); |
1957 | -} |
1958 | - |
1959 | -static int hmm_devmem_pages_create(struct hmm_devmem *devmem) |
1960 | -{ |
1961 | - resource_size_t key, align_start, align_size, align_end; |
1962 | - struct device *device = devmem->device; |
1963 | - int ret, nid, is_ram; |
1964 | - |
1965 | - align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1); |
1966 | - align_size = ALIGN(devmem->resource->start + |
1967 | - resource_size(devmem->resource), |
1968 | - PA_SECTION_SIZE) - align_start; |
1969 | - |
1970 | - is_ram = region_intersects(align_start, align_size, |
1971 | - IORESOURCE_SYSTEM_RAM, |
1972 | - IORES_DESC_NONE); |
1973 | - if (is_ram == REGION_MIXED) { |
1974 | - WARN_ONCE(1, "%s attempted on mixed region %pr\n", |
1975 | - __func__, devmem->resource); |
1976 | - return -ENXIO; |
1977 | - } |
1978 | - if (is_ram == REGION_INTERSECTS) |
1979 | - return -ENXIO; |
1980 | - |
1981 | - if (devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY) |
1982 | - devmem->pagemap.type = MEMORY_DEVICE_PUBLIC; |
1983 | - else |
1984 | - devmem->pagemap.type = MEMORY_DEVICE_PRIVATE; |
1985 | - |
1986 | - devmem->pagemap.res = *devmem->resource; |
1987 | - devmem->pagemap.page_fault = hmm_devmem_fault; |
1988 | - devmem->pagemap.page_free = hmm_devmem_free; |
1989 | - devmem->pagemap.dev = devmem->device; |
1990 | - devmem->pagemap.ref = &devmem->ref; |
1991 | - devmem->pagemap.data = devmem; |
1992 | - |
1993 | - mutex_lock(&hmm_devmem_lock); |
1994 | - align_end = align_start + align_size - 1; |
1995 | - for (key = align_start; key <= align_end; key += PA_SECTION_SIZE) { |
1996 | - struct hmm_devmem *dup; |
1997 | - |
1998 | - dup = radix_tree_lookup(&hmm_devmem_radix, |
1999 | - key >> PA_SECTION_SHIFT); |
2000 | - if (dup) { |
2001 | - dev_err(device, "%s: collides with mapping for %s\n", |
2002 | - __func__, dev_name(dup->device)); |
2003 | - mutex_unlock(&hmm_devmem_lock); |
2004 | - ret = -EBUSY; |
2005 | - goto error; |
2006 | - } |
2007 | - ret = radix_tree_insert(&hmm_devmem_radix, |
2008 | - key >> PA_SECTION_SHIFT, |
2009 | - devmem); |
2010 | - if (ret) { |
2011 | - dev_err(device, "%s: failed: %d\n", __func__, ret); |
2012 | - mutex_unlock(&hmm_devmem_lock); |
2013 | - goto error_radix; |
2014 | - } |
2015 | - } |
2016 | - mutex_unlock(&hmm_devmem_lock); |
2017 | - |
2018 | - nid = dev_to_node(device); |
2019 | - if (nid < 0) |
2020 | - nid = numa_mem_id(); |
2021 | - |
2022 | - mem_hotplug_begin(); |
2023 | - /* |
2024 | - * For device private memory we call add_pages() as we only need to |
2025 | - * allocate and initialize struct page for the device memory. More- |
2026 | - * over the device memory is un-accessible thus we do not want to |
2027 | - * create a linear mapping for the memory like arch_add_memory() |
2028 | - * would do. |
2029 | - * |
2030 | - * For device public memory, which is accesible by the CPU, we do |
2031 | - * want the linear mapping and thus use arch_add_memory(). |
2032 | - */ |
2033 | - if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC) |
2034 | - ret = arch_add_memory(nid, align_start, align_size, NULL, |
2035 | - false); |
2036 | - else |
2037 | - ret = add_pages(nid, align_start >> PAGE_SHIFT, |
2038 | - align_size >> PAGE_SHIFT, NULL, false); |
2039 | - if (ret) { |
2040 | - mem_hotplug_done(); |
2041 | - goto error_add_memory; |
2042 | - } |
2043 | - move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], |
2044 | - align_start >> PAGE_SHIFT, |
2045 | - align_size >> PAGE_SHIFT, NULL); |
2046 | - mem_hotplug_done(); |
2047 | - |
2048 | - /* |
2049 | - * Initialization of the pages has been deferred until now in order |
2050 | - * to allow us to do the work while not holding the hotplug lock. |
2051 | - */ |
2052 | - memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], |
2053 | - align_start >> PAGE_SHIFT, |
2054 | - align_size >> PAGE_SHIFT, &devmem->pagemap); |
2055 | - |
2056 | - return 0; |
2057 | - |
2058 | -error_add_memory: |
2059 | - untrack_pfn(NULL, PHYS_PFN(align_start), align_size); |
2060 | -error_radix: |
2061 | - hmm_devmem_radix_release(devmem->resource); |
2062 | -error: |
2063 | - return ret; |
2064 | -} |
2065 | - |
2066 | -static int hmm_devmem_match(struct device *dev, void *data, void *match_data) |
2067 | -{ |
2068 | - struct hmm_devmem *devmem = data; |
2069 | - |
2070 | - return devmem->resource == match_data; |
2071 | -} |
2072 | - |
2073 | -static void hmm_devmem_pages_remove(struct hmm_devmem *devmem) |
2074 | -{ |
2075 | - devres_release(devmem->device, &hmm_devmem_release, |
2076 | - &hmm_devmem_match, devmem->resource); |
2077 | -} |
2078 | - |
2079 | /* |
2080 | * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory |
2081 | * |
2082 | @@ -1210,12 +1041,12 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, |
2083 | { |
2084 | struct hmm_devmem *devmem; |
2085 | resource_size_t addr; |
2086 | + void *result; |
2087 | int ret; |
2088 | |
2089 | dev_pagemap_get_ops(); |
2090 | |
2091 | - devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem), |
2092 | - GFP_KERNEL, dev_to_node(device)); |
2093 | + devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL); |
2094 | if (!devmem) |
2095 | return ERR_PTR(-ENOMEM); |
2096 | |
2097 | @@ -1229,11 +1060,11 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, |
2098 | ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release, |
2099 | 0, GFP_KERNEL); |
2100 | if (ret) |
2101 | - goto error_percpu_ref; |
2102 | + return ERR_PTR(ret); |
2103 | |
2104 | - ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref); |
2105 | + ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref); |
2106 | if (ret) |
2107 | - goto error_devm_add_action; |
2108 | + return ERR_PTR(ret); |
2109 | |
2110 | size = ALIGN(size, PA_SECTION_SIZE); |
2111 | addr = min((unsigned long)iomem_resource.end, |
2112 | @@ -1253,54 +1084,40 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, |
2113 | |
2114 | devmem->resource = devm_request_mem_region(device, addr, size, |
2115 | dev_name(device)); |
2116 | - if (!devmem->resource) { |
2117 | - ret = -ENOMEM; |
2118 | - goto error_no_resource; |
2119 | - } |
2120 | + if (!devmem->resource) |
2121 | + return ERR_PTR(-ENOMEM); |
2122 | break; |
2123 | } |
2124 | - if (!devmem->resource) { |
2125 | - ret = -ERANGE; |
2126 | - goto error_no_resource; |
2127 | - } |
2128 | + if (!devmem->resource) |
2129 | + return ERR_PTR(-ERANGE); |
2130 | |
2131 | devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY; |
2132 | devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT; |
2133 | devmem->pfn_last = devmem->pfn_first + |
2134 | (resource_size(devmem->resource) >> PAGE_SHIFT); |
2135 | |
2136 | - ret = hmm_devmem_pages_create(devmem); |
2137 | - if (ret) |
2138 | - goto error_pages; |
2139 | - |
2140 | - devres_add(device, devmem); |
2141 | - |
2142 | - ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref); |
2143 | - if (ret) { |
2144 | - hmm_devmem_remove(devmem); |
2145 | - return ERR_PTR(ret); |
2146 | - } |
2147 | + devmem->pagemap.type = MEMORY_DEVICE_PRIVATE; |
2148 | + devmem->pagemap.res = *devmem->resource; |
2149 | + devmem->pagemap.page_fault = hmm_devmem_fault; |
2150 | + devmem->pagemap.page_free = hmm_devmem_free; |
2151 | + devmem->pagemap.altmap_valid = false; |
2152 | + devmem->pagemap.ref = &devmem->ref; |
2153 | + devmem->pagemap.data = devmem; |
2154 | + devmem->pagemap.kill = hmm_devmem_ref_kill; |
2155 | |
2156 | + result = devm_memremap_pages(devmem->device, &devmem->pagemap); |
2157 | + if (IS_ERR(result)) |
2158 | + return result; |
2159 | return devmem; |
2160 | - |
2161 | -error_pages: |
2162 | - devm_release_mem_region(device, devmem->resource->start, |
2163 | - resource_size(devmem->resource)); |
2164 | -error_no_resource: |
2165 | -error_devm_add_action: |
2166 | - hmm_devmem_ref_kill(&devmem->ref); |
2167 | - hmm_devmem_ref_exit(&devmem->ref); |
2168 | -error_percpu_ref: |
2169 | - devres_free(devmem); |
2170 | - return ERR_PTR(ret); |
2171 | } |
2172 | -EXPORT_SYMBOL(hmm_devmem_add); |
2173 | +EXPORT_SYMBOL_GPL(hmm_devmem_add); |
2174 | |
2175 | struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, |
2176 | struct device *device, |
2177 | struct resource *res) |
2178 | { |
2179 | struct hmm_devmem *devmem; |
2180 | + void *result; |
2181 | int ret; |
2182 | |
2183 | if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY) |
2184 | @@ -1308,8 +1125,7 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, |
2185 | |
2186 | dev_pagemap_get_ops(); |
2187 | |
2188 | - devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem), |
2189 | - GFP_KERNEL, dev_to_node(device)); |
2190 | + devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL); |
2191 | if (!devmem) |
2192 | return ERR_PTR(-ENOMEM); |
2193 | |
2194 | @@ -1323,71 +1139,32 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, |
2195 | ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release, |
2196 | 0, GFP_KERNEL); |
2197 | if (ret) |
2198 | - goto error_percpu_ref; |
2199 | + return ERR_PTR(ret); |
2200 | |
2201 | - ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref); |
2202 | + ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, |
2203 | + &devmem->ref); |
2204 | if (ret) |
2205 | - goto error_devm_add_action; |
2206 | - |
2207 | + return ERR_PTR(ret); |
2208 | |
2209 | devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT; |
2210 | devmem->pfn_last = devmem->pfn_first + |
2211 | (resource_size(devmem->resource) >> PAGE_SHIFT); |
2212 | |
2213 | - ret = hmm_devmem_pages_create(devmem); |
2214 | - if (ret) |
2215 | - goto error_devm_add_action; |
2216 | - |
2217 | - devres_add(device, devmem); |
2218 | - |
2219 | - ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref); |
2220 | - if (ret) { |
2221 | - hmm_devmem_remove(devmem); |
2222 | - return ERR_PTR(ret); |
2223 | - } |
2224 | + devmem->pagemap.type = MEMORY_DEVICE_PUBLIC; |
2225 | + devmem->pagemap.res = *devmem->resource; |
2226 | + devmem->pagemap.page_fault = hmm_devmem_fault; |
2227 | + devmem->pagemap.page_free = hmm_devmem_free; |
2228 | + devmem->pagemap.altmap_valid = false; |
2229 | + devmem->pagemap.ref = &devmem->ref; |
2230 | + devmem->pagemap.data = devmem; |
2231 | + devmem->pagemap.kill = hmm_devmem_ref_kill; |
2232 | |
2233 | + result = devm_memremap_pages(devmem->device, &devmem->pagemap); |
2234 | + if (IS_ERR(result)) |
2235 | + return result; |
2236 | return devmem; |
2237 | - |
2238 | -error_devm_add_action: |
2239 | - hmm_devmem_ref_kill(&devmem->ref); |
2240 | - hmm_devmem_ref_exit(&devmem->ref); |
2241 | -error_percpu_ref: |
2242 | - devres_free(devmem); |
2243 | - return ERR_PTR(ret); |
2244 | -} |
2245 | -EXPORT_SYMBOL(hmm_devmem_add_resource); |
2246 | - |
2247 | -/* |
2248 | - * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE) |
2249 | - * |
2250 | - * @devmem: hmm_devmem struct use to track and manage the ZONE_DEVICE memory |
2251 | - * |
2252 | - * This will hot-unplug memory that was hotplugged by hmm_devmem_add on behalf |
2253 | - * of the device driver. It will free struct page and remove the resource that |
2254 | - * reserved the physical address range for this device memory. |
2255 | - */ |
2256 | -void hmm_devmem_remove(struct hmm_devmem *devmem) |
2257 | -{ |
2258 | - resource_size_t start, size; |
2259 | - struct device *device; |
2260 | - bool cdm = false; |
2261 | - |
2262 | - if (!devmem) |
2263 | - return; |
2264 | - |
2265 | - device = devmem->device; |
2266 | - start = devmem->resource->start; |
2267 | - size = resource_size(devmem->resource); |
2268 | - |
2269 | - cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY; |
2270 | - hmm_devmem_ref_kill(&devmem->ref); |
2271 | - hmm_devmem_ref_exit(&devmem->ref); |
2272 | - hmm_devmem_pages_remove(devmem); |
2273 | - |
2274 | - if (!cdm) |
2275 | - devm_release_mem_region(device, start, size); |
2276 | } |
2277 | -EXPORT_SYMBOL(hmm_devmem_remove); |
2278 | +EXPORT_SYMBOL_GPL(hmm_devmem_add_resource); |
2279 | |
2280 | /* |
2281 | * A device driver that wants to handle multiple devices memory through a |
2282 | diff --git a/mm/memcontrol.c b/mm/memcontrol.c |
2283 | index 6e1469b80cb7..7e6bf74ddb1e 100644 |
2284 | --- a/mm/memcontrol.c |
2285 | +++ b/mm/memcontrol.c |
2286 | @@ -1666,6 +1666,9 @@ enum oom_status { |
2287 | |
2288 | static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) |
2289 | { |
2290 | + enum oom_status ret; |
2291 | + bool locked; |
2292 | + |
2293 | if (order > PAGE_ALLOC_COSTLY_ORDER) |
2294 | return OOM_SKIPPED; |
2295 | |
2296 | @@ -1700,10 +1703,23 @@ static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int |
2297 | return OOM_ASYNC; |
2298 | } |
2299 | |
2300 | + mem_cgroup_mark_under_oom(memcg); |
2301 | + |
2302 | + locked = mem_cgroup_oom_trylock(memcg); |
2303 | + |
2304 | + if (locked) |
2305 | + mem_cgroup_oom_notify(memcg); |
2306 | + |
2307 | + mem_cgroup_unmark_under_oom(memcg); |
2308 | if (mem_cgroup_out_of_memory(memcg, mask, order)) |
2309 | - return OOM_SUCCESS; |
2310 | + ret = OOM_SUCCESS; |
2311 | + else |
2312 | + ret = OOM_FAILED; |
2313 | |
2314 | - return OOM_FAILED; |
2315 | + if (locked) |
2316 | + mem_cgroup_oom_unlock(memcg); |
2317 | + |
2318 | + return ret; |
2319 | } |
2320 | |
2321 | /** |
2322 | diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c |
2323 | index 2b2b3ccbbfb5..cea0880eadfb 100644 |
2324 | --- a/mm/memory_hotplug.c |
2325 | +++ b/mm/memory_hotplug.c |
2326 | @@ -34,6 +34,7 @@ |
2327 | #include <linux/hugetlb.h> |
2328 | #include <linux/memblock.h> |
2329 | #include <linux/compaction.h> |
2330 | +#include <linux/rmap.h> |
2331 | |
2332 | #include <asm/tlbflush.h> |
2333 | |
2334 | @@ -1369,6 +1370,21 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) |
2335 | pfn = page_to_pfn(compound_head(page)) |
2336 | + hpage_nr_pages(page) - 1; |
2337 | |
2338 | + /* |
2339 | + * HWPoison pages have elevated reference counts so the migration would |
2340 | + * fail on them. It also doesn't make any sense to migrate them in the |
2341 | + * first place. Still try to unmap such a page in case it is still mapped |
2342 | + * (e.g. current hwpoison implementation doesn't unmap KSM pages but keep |
2343 | + * the unmap as the catch all safety net). |
2344 | + */ |
2345 | + if (PageHWPoison(page)) { |
2346 | + if (WARN_ON(PageLRU(page))) |
2347 | + isolate_lru_page(page); |
2348 | + if (page_mapped(page)) |
2349 | + try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS); |
2350 | + continue; |
2351 | + } |
2352 | + |
2353 | if (!get_page_unless_zero(page)) |
2354 | continue; |
2355 | /* |
2356 | diff --git a/mm/swapfile.c b/mm/swapfile.c |
2357 | index 8688ae65ef58..20d3c0f47a5f 100644 |
2358 | --- a/mm/swapfile.c |
2359 | +++ b/mm/swapfile.c |
2360 | @@ -2197,7 +2197,8 @@ int try_to_unuse(unsigned int type, bool frontswap, |
2361 | */ |
2362 | if (PageSwapCache(page) && |
2363 | likely(page_private(page) == entry.val) && |
2364 | - !page_swapped(page)) |
2365 | + (!PageTransCompound(page) || |
2366 | + !swap_page_trans_huge_swapped(si, entry))) |
2367 | delete_from_swap_cache(compound_head(page)); |
2368 | |
2369 | /* |
2370 | diff --git a/net/9p/client.c b/net/9p/client.c |
2371 | index 2c9a17b9b46b..357214a51f13 100644 |
2372 | --- a/net/9p/client.c |
2373 | +++ b/net/9p/client.c |
2374 | @@ -181,6 +181,12 @@ static int parse_opts(char *opts, struct p9_client *clnt) |
2375 | ret = r; |
2376 | continue; |
2377 | } |
2378 | + if (option < 4096) { |
2379 | + p9_debug(P9_DEBUG_ERROR, |
2380 | + "msize should be at least 4k\n"); |
2381 | + ret = -EINVAL; |
2382 | + continue; |
2383 | + } |
2384 | clnt->msize = option; |
2385 | break; |
2386 | case Opt_trans: |
2387 | @@ -983,10 +989,18 @@ static int p9_client_version(struct p9_client *c) |
2388 | else if (!strncmp(version, "9P2000", 6)) |
2389 | c->proto_version = p9_proto_legacy; |
2390 | else { |
2391 | + p9_debug(P9_DEBUG_ERROR, |
2392 | + "server returned an unknown version: %s\n", version); |
2393 | err = -EREMOTEIO; |
2394 | goto error; |
2395 | } |
2396 | |
2397 | + if (msize < 4096) { |
2398 | + p9_debug(P9_DEBUG_ERROR, |
2399 | + "server returned a msize < 4096: %d\n", msize); |
2400 | + err = -EREMOTEIO; |
2401 | + goto error; |
2402 | + } |
2403 | if (msize < c->msize) |
2404 | c->msize = msize; |
2405 | |
2406 | @@ -1043,6 +1057,13 @@ struct p9_client *p9_client_create(const char *dev_name, char *options) |
2407 | if (clnt->msize > clnt->trans_mod->maxsize) |
2408 | clnt->msize = clnt->trans_mod->maxsize; |
2409 | |
2410 | + if (clnt->msize < 4096) { |
2411 | + p9_debug(P9_DEBUG_ERROR, |
2412 | + "Please specify a msize of at least 4k\n"); |
2413 | + err = -EINVAL; |
2414 | + goto free_client; |
2415 | + } |
2416 | + |
2417 | err = p9_client_version(clnt); |
2418 | if (err) |
2419 | goto close_trans; |
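Editorial sketch for the net/9p hunks above: both the locally requested msize and the value the server answers with are now checked against a protocol floor (4096 in the patch) before the smaller of the two is used. The helper below is a hypothetical stand-in for that negotiation step, not the real client code.

#include <stdio.h>

#define MSIZE_FLOOR 4096

static int negotiate_msize(long requested, long server_reply, long *out)
{
	if (requested < MSIZE_FLOOR)		/* bad mount option */
		return -1;
	if (server_reply < MSIZE_FLOOR)		/* bogus or hostile server */
		return -1;
	*out = server_reply < requested ? server_reply : requested;
	return 0;
}

int main(void)
{
	long msize;

	if (negotiate_msize(8192, 65536, &msize) == 0)
		printf("negotiated %ld\n", msize);
	if (negotiate_msize(8192, 512, &msize) != 0)
		printf("rejected undersized server reply\n");
	return 0;
}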
2420 | diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c |
2421 | index 1ece4bc3eb8d..152790ed309c 100644 |
2422 | --- a/net/sunrpc/auth_gss/svcauth_gss.c |
2423 | +++ b/net/sunrpc/auth_gss/svcauth_gss.c |
2424 | @@ -1142,7 +1142,7 @@ static int svcauth_gss_legacy_init(struct svc_rqst *rqstp, |
2425 | struct kvec *resv = &rqstp->rq_res.head[0]; |
2426 | struct rsi *rsip, rsikey; |
2427 | int ret; |
2428 | - struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id); |
2429 | + struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id); |
2430 | |
2431 | memset(&rsikey, 0, sizeof(rsikey)); |
2432 | ret = gss_read_verf(gc, argv, authp, |
2433 | @@ -1253,7 +1253,7 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp, |
2434 | uint64_t handle; |
2435 | int status; |
2436 | int ret; |
2437 | - struct net *net = rqstp->rq_xprt->xpt_net; |
2438 | + struct net *net = SVC_NET(rqstp); |
2439 | struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); |
2440 | |
2441 | memset(&ud, 0, sizeof(ud)); |
2442 | @@ -1444,7 +1444,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp) |
2443 | __be32 *rpcstart; |
2444 | __be32 *reject_stat = resv->iov_base + resv->iov_len; |
2445 | int ret; |
2446 | - struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id); |
2447 | + struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id); |
2448 | |
2449 | dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n", |
2450 | argv->iov_len); |
2451 | @@ -1734,7 +1734,7 @@ svcauth_gss_release(struct svc_rqst *rqstp) |
2452 | struct rpc_gss_wire_cred *gc = &gsd->clcred; |
2453 | struct xdr_buf *resbuf = &rqstp->rq_res; |
2454 | int stat = -EINVAL; |
2455 | - struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id); |
2456 | + struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id); |
2457 | |
2458 | if (gc->gc_proc != RPC_GSS_PROC_DATA) |
2459 | goto out; |
2460 | diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c |
2461 | index f96345b1180e..12bb23b8e0c5 100644 |
2462 | --- a/net/sunrpc/cache.c |
2463 | +++ b/net/sunrpc/cache.c |
2464 | @@ -54,6 +54,11 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail) |
2465 | h->last_refresh = now; |
2466 | } |
2467 | |
2468 | +static void cache_fresh_locked(struct cache_head *head, time_t expiry, |
2469 | + struct cache_detail *detail); |
2470 | +static void cache_fresh_unlocked(struct cache_head *head, |
2471 | + struct cache_detail *detail); |
2472 | + |
2473 | static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail, |
2474 | struct cache_head *key, |
2475 | int hash) |
2476 | @@ -100,6 +105,7 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail, |
2477 | if (cache_is_expired(detail, tmp)) { |
2478 | hlist_del_init_rcu(&tmp->cache_list); |
2479 | detail->entries --; |
2480 | + cache_fresh_locked(tmp, 0, detail); |
2481 | freeme = tmp; |
2482 | break; |
2483 | } |
2484 | @@ -115,8 +121,10 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail, |
2485 | cache_get(new); |
2486 | spin_unlock(&detail->hash_lock); |
2487 | |
2488 | - if (freeme) |
2489 | + if (freeme) { |
2490 | + cache_fresh_unlocked(freeme, detail); |
2491 | cache_put(freeme, detail); |
2492 | + } |
2493 | return new; |
2494 | } |
2495 | |
2496 | diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c |
2497 | index fc6378cc0c1c..20ced24cc61b 100644 |
2498 | --- a/net/sunrpc/xprtrdma/frwr_ops.c |
2499 | +++ b/net/sunrpc/xprtrdma/frwr_ops.c |
2500 | @@ -117,15 +117,15 @@ static void |
2501 | frwr_mr_recycle_worker(struct work_struct *work) |
2502 | { |
2503 | struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle); |
2504 | - enum rpcrdma_frwr_state state = mr->frwr.fr_state; |
2505 | struct rpcrdma_xprt *r_xprt = mr->mr_xprt; |
2506 | |
2507 | trace_xprtrdma_mr_recycle(mr); |
2508 | |
2509 | - if (state != FRWR_FLUSHED_LI) { |
2510 | + if (mr->mr_dir != DMA_NONE) { |
2511 | trace_xprtrdma_mr_unmap(mr); |
2512 | ib_dma_unmap_sg(r_xprt->rx_ia.ri_device, |
2513 | mr->mr_sg, mr->mr_nents, mr->mr_dir); |
2514 | + mr->mr_dir = DMA_NONE; |
2515 | } |
2516 | |
2517 | spin_lock(&r_xprt->rx_buf.rb_mrlock); |
2518 | @@ -150,6 +150,8 @@ frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr) |
2519 | if (!mr->mr_sg) |
2520 | goto out_list_err; |
2521 | |
2522 | + frwr->fr_state = FRWR_IS_INVALID; |
2523 | + mr->mr_dir = DMA_NONE; |
2524 | INIT_LIST_HEAD(&mr->mr_list); |
2525 | INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker); |
2526 | sg_init_table(mr->mr_sg, depth); |
2527 | diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c |
2528 | index 3ddba94c939f..b9bc7f9f6bb9 100644 |
2529 | --- a/net/sunrpc/xprtrdma/verbs.c |
2530 | +++ b/net/sunrpc/xprtrdma/verbs.c |
2531 | @@ -1329,9 +1329,12 @@ rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr) |
2532 | { |
2533 | struct rpcrdma_xprt *r_xprt = mr->mr_xprt; |
2534 | |
2535 | - trace_xprtrdma_mr_unmap(mr); |
2536 | - ib_dma_unmap_sg(r_xprt->rx_ia.ri_device, |
2537 | - mr->mr_sg, mr->mr_nents, mr->mr_dir); |
2538 | + if (mr->mr_dir != DMA_NONE) { |
2539 | + trace_xprtrdma_mr_unmap(mr); |
2540 | + ib_dma_unmap_sg(r_xprt->rx_ia.ri_device, |
2541 | + mr->mr_sg, mr->mr_nents, mr->mr_dir); |
2542 | + mr->mr_dir = DMA_NONE; |
2543 | + } |
2544 | __rpcrdma_mr_put(&r_xprt->rx_buf, mr); |
2545 | } |
2546 | |
2547 | diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c |
2548 | index f4eadd3f7350..b63ef865ce1e 100644 |
2549 | --- a/security/selinux/ss/policydb.c |
2550 | +++ b/security/selinux/ss/policydb.c |
2551 | @@ -2108,6 +2108,7 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info, |
2552 | { |
2553 | int i, j, rc; |
2554 | u32 nel, len; |
2555 | + __be64 prefixbuf[1]; |
2556 | __le32 buf[3]; |
2557 | struct ocontext *l, *c; |
2558 | u32 nodebuf[8]; |
2559 | @@ -2217,21 +2218,30 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info, |
2560 | goto out; |
2561 | break; |
2562 | } |
2563 | - case OCON_IBPKEY: |
2564 | - rc = next_entry(nodebuf, fp, sizeof(u32) * 4); |
2565 | + case OCON_IBPKEY: { |
2566 | + u32 pkey_lo, pkey_hi; |
2567 | + |
2568 | + rc = next_entry(prefixbuf, fp, sizeof(u64)); |
2569 | + if (rc) |
2570 | + goto out; |
2571 | + |
2572 | + /* we need to have subnet_prefix in CPU order */ |
2573 | + c->u.ibpkey.subnet_prefix = be64_to_cpu(prefixbuf[0]); |
2574 | + |
2575 | + rc = next_entry(buf, fp, sizeof(u32) * 2); |
2576 | if (rc) |
2577 | goto out; |
2578 | |
2579 | - c->u.ibpkey.subnet_prefix = be64_to_cpu(*((__be64 *)nodebuf)); |
2580 | + pkey_lo = le32_to_cpu(buf[0]); |
2581 | + pkey_hi = le32_to_cpu(buf[1]); |
2582 | |
2583 | - if (nodebuf[2] > 0xffff || |
2584 | - nodebuf[3] > 0xffff) { |
2585 | + if (pkey_lo > U16_MAX || pkey_hi > U16_MAX) { |
2586 | rc = -EINVAL; |
2587 | goto out; |
2588 | } |
2589 | |
2590 | - c->u.ibpkey.low_pkey = le32_to_cpu(nodebuf[2]); |
2591 | - c->u.ibpkey.high_pkey = le32_to_cpu(nodebuf[3]); |
2592 | + c->u.ibpkey.low_pkey = pkey_lo; |
2593 | + c->u.ibpkey.high_pkey = pkey_hi; |
2594 | |
2595 | rc = context_read_and_validate(&c->context[0], |
2596 | p, |
2597 | @@ -2239,7 +2249,10 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info, |
2598 | if (rc) |
2599 | goto out; |
2600 | break; |
2601 | - case OCON_IBENDPORT: |
2602 | + } |
2603 | + case OCON_IBENDPORT: { |
2604 | + u32 port; |
2605 | + |
2606 | rc = next_entry(buf, fp, sizeof(u32) * 2); |
2607 | if (rc) |
2608 | goto out; |
2609 | @@ -2249,12 +2262,13 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info, |
2610 | if (rc) |
2611 | goto out; |
2612 | |
2613 | - if (buf[1] > 0xff || buf[1] == 0) { |
2614 | + port = le32_to_cpu(buf[1]); |
2615 | + if (port > U8_MAX || port == 0) { |
2616 | rc = -EINVAL; |
2617 | goto out; |
2618 | } |
2619 | |
2620 | - c->u.ibendport.port = le32_to_cpu(buf[1]); |
2621 | + c->u.ibendport.port = port; |
2622 | |
2623 | rc = context_read_and_validate(&c->context[0], |
2624 | p, |
2625 | @@ -2262,7 +2276,8 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info, |
2626 | if (rc) |
2627 | goto out; |
2628 | break; |
2629 | - } |
2630 | + } /* end case */ |
2631 | + } /* end switch */ |
2632 | } |
2633 | } |
2634 | rc = 0; |
2635 | @@ -3105,6 +3120,7 @@ static int ocontext_write(struct policydb *p, struct policydb_compat_info *info, |
2636 | { |
2637 | unsigned int i, j, rc; |
2638 | size_t nel, len; |
2639 | + __be64 prefixbuf[1]; |
2640 | __le32 buf[3]; |
2641 | u32 nodebuf[8]; |
2642 | struct ocontext *c; |
2643 | @@ -3192,12 +3208,17 @@ static int ocontext_write(struct policydb *p, struct policydb_compat_info *info, |
2644 | return rc; |
2645 | break; |
2646 | case OCON_IBPKEY: |
2647 | - *((__be64 *)nodebuf) = cpu_to_be64(c->u.ibpkey.subnet_prefix); |
2648 | + /* subnet_prefix is in CPU order */ |
2649 | + prefixbuf[0] = cpu_to_be64(c->u.ibpkey.subnet_prefix); |
2650 | |
2651 | - nodebuf[2] = cpu_to_le32(c->u.ibpkey.low_pkey); |
2652 | - nodebuf[3] = cpu_to_le32(c->u.ibpkey.high_pkey); |
2653 | + rc = put_entry(prefixbuf, sizeof(u64), 1, fp); |
2654 | + if (rc) |
2655 | + return rc; |
2656 | + |
2657 | + buf[0] = cpu_to_le32(c->u.ibpkey.low_pkey); |
2658 | + buf[1] = cpu_to_le32(c->u.ibpkey.high_pkey); |
2659 | |
2660 | - rc = put_entry(nodebuf, sizeof(u32), 4, fp); |
2661 | + rc = put_entry(buf, sizeof(u32), 2, fp); |
2662 | if (rc) |
2663 | return rc; |
2664 | rc = context_write(p, &c->context[0], fp); |
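Editorial sketch for the ocontext_read()/ocontext_write() hunks above: the IB pkey record is now parsed as one big-endian 64-bit subnet prefix followed by two little-endian 32-bit pkeys, each range-checked against 16 bits before being narrowed. The standalone parser below mirrors that field layout with portable byte accessors; the helper names are invented for illustration.

#include <stdint.h>
#include <stdio.h>

static uint64_t get_be64(const uint8_t *p)
{
	uint64_t v = 0;
	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static int parse_ibpkey(const uint8_t rec[16], uint64_t *prefix,
			uint16_t *lo, uint16_t *hi)
{
	uint32_t pkey_lo = get_le32(rec + 8);
	uint32_t pkey_hi = get_le32(rec + 12);

	if (pkey_lo > UINT16_MAX || pkey_hi > UINT16_MAX)
		return -1;			/* reject before narrowing */

	*prefix = get_be64(rec);		/* stored big-endian on disk */
	*lo = (uint16_t)pkey_lo;
	*hi = (uint16_t)pkey_hi;
	return 0;
}

int main(void)
{
	const uint8_t rec[16] = {
		0xfe, 0x80, 0, 0, 0, 0, 0, 0,	/* subnet prefix, be64 */
		0x01, 0x00, 0x00, 0x00,		/* low pkey  = 1, le32  */
		0xff, 0xff, 0x00, 0x00,		/* high pkey = 0xffff   */
	};
	uint64_t prefix;
	uint16_t lo, hi;

	if (parse_ibpkey(rec, &prefix, &lo, &hi) == 0)
		printf("prefix=%llx pkeys=%u..%u\n",
		       (unsigned long long)prefix, (unsigned)lo, (unsigned)hi);
	return 0;
}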
2665 | diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c |
2666 | index 598d140bb7cb..5fc497c6d738 100644 |
2667 | --- a/sound/pci/cs46xx/dsp_spos.c |
2668 | +++ b/sound/pci/cs46xx/dsp_spos.c |
2669 | @@ -903,6 +903,9 @@ int cs46xx_dsp_proc_done (struct snd_cs46xx *chip) |
2670 | struct dsp_spos_instance * ins = chip->dsp_spos_instance; |
2671 | int i; |
2672 | |
2673 | + if (!ins) |
2674 | + return 0; |
2675 | + |
2676 | snd_info_free_entry(ins->proc_sym_info_entry); |
2677 | ins->proc_sym_info_entry = NULL; |
2678 | |
2679 | diff --git a/sound/usb/card.c b/sound/usb/card.c |
2680 | index a105947eaf55..746a72e23cf9 100644 |
2681 | --- a/sound/usb/card.c |
2682 | +++ b/sound/usb/card.c |
2683 | @@ -246,7 +246,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) |
2684 | h1 = snd_usb_find_csint_desc(host_iface->extra, |
2685 | host_iface->extralen, |
2686 | NULL, UAC_HEADER); |
2687 | - if (!h1) { |
2688 | + if (!h1 || h1->bLength < sizeof(*h1)) { |
2689 | dev_err(&dev->dev, "cannot find UAC_HEADER\n"); |
2690 | return -EINVAL; |
2691 | } |
2692 | diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c |
2693 | index c63c84b54969..e7d441d0e839 100644 |
2694 | --- a/sound/usb/mixer.c |
2695 | +++ b/sound/usb/mixer.c |
2696 | @@ -753,8 +753,9 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state, |
2697 | struct uac_mixer_unit_descriptor *desc) |
2698 | { |
2699 | int mu_channels; |
2700 | + void *c; |
2701 | |
2702 | - if (desc->bLength < 11) |
2703 | + if (desc->bLength < sizeof(*desc)) |
2704 | return -EINVAL; |
2705 | if (!desc->bNrInPins) |
2706 | return -EINVAL; |
2707 | @@ -763,6 +764,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state, |
2708 | case UAC_VERSION_1: |
2709 | case UAC_VERSION_2: |
2710 | default: |
2711 | + if (desc->bLength < sizeof(*desc) + desc->bNrInPins + 1) |
2712 | + return 0; /* no bmControls -> skip */ |
2713 | mu_channels = uac_mixer_unit_bNrChannels(desc); |
2714 | break; |
2715 | case UAC_VERSION_3: |
2716 | @@ -772,7 +775,11 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state, |
2717 | } |
2718 | |
2719 | if (!mu_channels) |
2720 | - return -EINVAL; |
2721 | + return 0; |
2722 | + |
2723 | + c = uac_mixer_unit_bmControls(desc, state->mixer->protocol); |
2724 | + if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength) |
2725 | + return 0; /* no bmControls -> skip */ |
2726 | |
2727 | return mu_channels; |
2728 | } |
2729 | @@ -944,7 +951,7 @@ static int check_input_term(struct mixer_build *state, int id, |
2730 | struct uac_mixer_unit_descriptor *d = p1; |
2731 | |
2732 | err = uac_mixer_unit_get_channels(state, d); |
2733 | - if (err < 0) |
2734 | + if (err <= 0) |
2735 | return err; |
2736 | |
2737 | term->channels = err; |
2738 | @@ -2068,11 +2075,15 @@ static int parse_audio_input_terminal(struct mixer_build *state, int unitid, |
2739 | |
2740 | if (state->mixer->protocol == UAC_VERSION_2) { |
2741 | struct uac2_input_terminal_descriptor *d_v2 = raw_desc; |
2742 | + if (d_v2->bLength < sizeof(*d_v2)) |
2743 | + return -EINVAL; |
2744 | control = UAC2_TE_CONNECTOR; |
2745 | term_id = d_v2->bTerminalID; |
2746 | bmctls = le16_to_cpu(d_v2->bmControls); |
2747 | } else if (state->mixer->protocol == UAC_VERSION_3) { |
2748 | struct uac3_input_terminal_descriptor *d_v3 = raw_desc; |
2749 | + if (d_v3->bLength < sizeof(*d_v3)) |
2750 | + return -EINVAL; |
2751 | control = UAC3_TE_INSERTION; |
2752 | term_id = d_v3->bTerminalID; |
2753 | bmctls = le32_to_cpu(d_v3->bmControls); |
2754 | @@ -2118,7 +2129,7 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid, |
2755 | if (err < 0) |
2756 | continue; |
2757 | /* no bmControls field (e.g. Maya44) -> ignore */ |
2758 | - if (desc->bLength <= 10 + input_pins) |
2759 | + if (!num_outs) |
2760 | continue; |
2761 | err = check_input_term(state, desc->baSourceID[pin], &iterm); |
2762 | if (err < 0) |
2763 | @@ -2314,7 +2325,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, |
2764 | char *name) |
2765 | { |
2766 | struct uac_processing_unit_descriptor *desc = raw_desc; |
2767 | - int num_ins = desc->bNrInPins; |
2768 | + int num_ins; |
2769 | struct usb_mixer_elem_info *cval; |
2770 | struct snd_kcontrol *kctl; |
2771 | int i, err, nameid, type, len; |
2772 | @@ -2329,7 +2340,13 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, |
2773 | 0, NULL, default_value_info |
2774 | }; |
2775 | |
2776 | - if (desc->bLength < 13 || desc->bLength < 13 + num_ins || |
2777 | + if (desc->bLength < 13) { |
2778 | + usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid); |
2779 | + return -EINVAL; |
2780 | + } |
2781 | + |
2782 | + num_ins = desc->bNrInPins; |
2783 | + if (desc->bLength < 13 + num_ins || |
2784 | desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) { |
2785 | usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid); |
2786 | return -EINVAL; |
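Editorial sketch for the sound/usb mixer hunks above: the device-supplied bLength is the only trustworthy bound, so the fixed header is validated first, the pin count is read only after that, and the variable-length bmControls field is consulted only if it actually fits inside bLength. The descriptor layout below is a simplified, hypothetical stand-in used purely to show that checking order.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mixer_desc_demo {	/* fixed part, followed in the buffer by:  */
	uint8_t bLength;	/*   bNrInPins source-id bytes             */
	uint8_t bNrInPins;	/*   1 byte of bmControls                  */
};

static int unit_bmcontrols_demo(const uint8_t *raw, size_t buflen)
{
	struct mixer_desc_demo d;

	if (buflen < sizeof(d))
		return -1;
	memcpy(&d, raw, sizeof(d));

	if (d.bLength < sizeof(d) || d.bLength > buflen)
		return -1;				/* malformed header */
	if (d.bLength < sizeof(d) + d.bNrInPins + 1)
		return 0;				/* no bmControls -> skip */

	return raw[sizeof(d) + d.bNrInPins];		/* the bmControls byte */
}

int main(void)
{
	const uint8_t good[] = { 5, 2, 9, 9, 3 };	/* len 5, 2 pins, ctl 3 */
	const uint8_t short_desc[] = { 4, 2, 9, 9 };	/* claims no room for ctl */

	printf("%d %d\n",
	       unit_bmcontrols_demo(good, sizeof(good)),
	       unit_bmcontrols_demo(short_desc, sizeof(short_desc)));
	return 0;
}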
2787 | diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h |
2788 | index 37fc0447c071..b345beb447bd 100644 |
2789 | --- a/sound/usb/quirks-table.h |
2790 | +++ b/sound/usb/quirks-table.h |
2791 | @@ -3326,6 +3326,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), |
2792 | } |
2793 | } |
2794 | }, |
2795 | + { |
2796 | + .ifnum = -1 |
2797 | + }, |
2798 | } |
2799 | } |
2800 | }, |
2801 | @@ -3369,6 +3372,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), |
2802 | } |
2803 | } |
2804 | }, |
2805 | + { |
2806 | + .ifnum = -1 |
2807 | + }, |
2808 | } |
2809 | } |
2810 | }, |
2811 | diff --git a/sound/usb/stream.c b/sound/usb/stream.c |
2812 | index 67cf849aa16b..d9e3de495c16 100644 |
2813 | --- a/sound/usb/stream.c |
2814 | +++ b/sound/usb/stream.c |
2815 | @@ -596,12 +596,8 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip, |
2816 | csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT); |
2817 | |
2818 | if (!csep || csep->bLength < 7 || |
2819 | - csep->bDescriptorSubtype != UAC_EP_GENERAL) { |
2820 | - usb_audio_warn(chip, |
2821 | - "%u:%d : no or invalid class specific endpoint descriptor\n", |
2822 | - iface_no, altsd->bAlternateSetting); |
2823 | - return 0; |
2824 | - } |
2825 | + csep->bDescriptorSubtype != UAC_EP_GENERAL) |
2826 | + goto error; |
2827 | |
2828 | if (protocol == UAC_VERSION_1) { |
2829 | attributes = csep->bmAttributes; |
2830 | @@ -609,6 +605,8 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip, |
2831 | struct uac2_iso_endpoint_descriptor *csep2 = |
2832 | (struct uac2_iso_endpoint_descriptor *) csep; |
2833 | |
2834 | + if (csep2->bLength < sizeof(*csep2)) |
2835 | + goto error; |
2836 | attributes = csep->bmAttributes & UAC_EP_CS_ATTR_FILL_MAX; |
2837 | |
2838 | /* emulate the endpoint attributes of a v1 device */ |
2839 | @@ -618,12 +616,20 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip, |
2840 | struct uac3_iso_endpoint_descriptor *csep3 = |
2841 | (struct uac3_iso_endpoint_descriptor *) csep; |
2842 | |
2843 | + if (csep3->bLength < sizeof(*csep3)) |
2844 | + goto error; |
2845 | /* emulate the endpoint attributes of a v1 device */ |
2846 | if (le32_to_cpu(csep3->bmControls) & UAC2_CONTROL_PITCH) |
2847 | attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL; |
2848 | } |
2849 | |
2850 | return attributes; |
2851 | + |
2852 | + error: |
2853 | + usb_audio_warn(chip, |
2854 | + "%u:%d : no or invalid class specific endpoint descriptor\n", |
2855 | + iface_no, altsd->bAlternateSetting); |
2856 | + return 0; |
2857 | } |
2858 | |
2859 | /* find an input terminal descriptor (either UAC1 or UAC2) with the given |
2860 | @@ -631,13 +637,17 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip, |
2861 | */ |
2862 | static void * |
2863 | snd_usb_find_input_terminal_descriptor(struct usb_host_interface *ctrl_iface, |
2864 | - int terminal_id) |
2865 | + int terminal_id, bool uac23) |
2866 | { |
2867 | struct uac2_input_terminal_descriptor *term = NULL; |
2868 | + size_t minlen = uac23 ? sizeof(struct uac2_input_terminal_descriptor) : |
2869 | + sizeof(struct uac_input_terminal_descriptor); |
2870 | |
2871 | while ((term = snd_usb_find_csint_desc(ctrl_iface->extra, |
2872 | ctrl_iface->extralen, |
2873 | term, UAC_INPUT_TERMINAL))) { |
2874 | + if (term->bLength < minlen) |
2875 | + continue; |
2876 | if (term->bTerminalID == terminal_id) |
2877 | return term; |
2878 | } |
2879 | @@ -655,7 +665,8 @@ snd_usb_find_output_terminal_descriptor(struct usb_host_interface *ctrl_iface, |
2880 | while ((term = snd_usb_find_csint_desc(ctrl_iface->extra, |
2881 | ctrl_iface->extralen, |
2882 | term, UAC_OUTPUT_TERMINAL))) { |
2883 | - if (term->bTerminalID == terminal_id) |
2884 | + if (term->bLength >= sizeof(*term) && |
2885 | + term->bTerminalID == terminal_id) |
2886 | return term; |
2887 | } |
2888 | |
2889 | @@ -729,7 +740,8 @@ snd_usb_get_audioformat_uac12(struct snd_usb_audio *chip, |
2890 | format = le16_to_cpu(as->wFormatTag); /* remember the format value */ |
2891 | |
2892 | iterm = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf, |
2893 | - as->bTerminalLink); |
2894 | + as->bTerminalLink, |
2895 | + false); |
2896 | if (iterm) { |
2897 | num_channels = iterm->bNrChannels; |
2898 | chconfig = le16_to_cpu(iterm->wChannelConfig); |
2899 | @@ -764,7 +776,8 @@ snd_usb_get_audioformat_uac12(struct snd_usb_audio *chip, |
2900 | * to extract the clock |
2901 | */ |
2902 | input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf, |
2903 | - as->bTerminalLink); |
2904 | + as->bTerminalLink, |
2905 | + true); |
2906 | if (input_term) { |
2907 | clock = input_term->bCSourceID; |
2908 | if (!chconfig && (num_channels == input_term->bNrChannels)) |
2909 | @@ -998,7 +1011,8 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip, |
2910 | * to extract the clock |
2911 | */ |
2912 | input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf, |
2913 | - as->bTerminalLink); |
2914 | + as->bTerminalLink, |
2915 | + true); |
2916 | if (input_term) { |
2917 | clock = input_term->bCSourceID; |
2918 | goto found_clock; |
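The stream.c changes follow one theme: each class-specific descriptor is length-checked against the structure it is about to be cast to, and snd_usb_find_input_terminal_descriptor() now takes a flag so UAC1 lookups are validated against the smaller UAC1 layout rather than the larger UAC2 one. The fragment below sketches that "filter by minimum length while scanning" idea over a raw descriptor blob; find_desc() and the byte offsets are illustrative, not the driver's snd_usb_find_csint_desc().

    #include <stdint.h>
    #include <stddef.h>

    /*
     * Walk a concatenated descriptor blob (bLength, bDescriptorType, payload,
     * repeated) and return the first descriptor of the wanted type whose
     * bLength covers the structure the caller intends to cast it to.
     */
    static const void *find_desc(const uint8_t *blob, size_t len,
                                 uint8_t wanted_type, size_t minlen)
    {
            size_t pos = 0;

            while (pos + 2 <= len) {
                    uint8_t blen = blob[pos];

                    if (blen < 2 || pos + blen > len)
                            return NULL;          /* malformed blob: stop scanning */
                    if (blob[pos + 1] == wanted_type && blen >= minlen)
                            return blob + pos;    /* long enough for the caller's cast */
                    pos += blen;
            }
            return NULL;
    }

A caller looking for a UAC1 input terminal would pass sizeof(struct uac_input_terminal_descriptor) as minlen, while a UAC2/UAC3 caller would pass the larger sizeof(struct uac2_input_terminal_descriptor); that is the distinction the new bool uac23 parameter encodes in the hunks above.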
2919 | diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c |
2920 | index ff9d3a5825e1..c6635fee27d8 100644 |
2921 | --- a/tools/testing/nvdimm/test/iomap.c |
2922 | +++ b/tools/testing/nvdimm/test/iomap.c |
2923 | @@ -104,16 +104,29 @@ void *__wrap_devm_memremap(struct device *dev, resource_size_t offset, |
2924 | } |
2925 | EXPORT_SYMBOL(__wrap_devm_memremap); |
2926 | |
2927 | +static void nfit_test_kill(void *_pgmap) |
2928 | +{ |
2929 | + struct dev_pagemap *pgmap = _pgmap; |
2930 | + |
2931 | + pgmap->kill(pgmap->ref); |
2932 | +} |
2933 | + |
2934 | void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) |
2935 | { |
2936 | resource_size_t offset = pgmap->res.start; |
2937 | struct nfit_test_resource *nfit_res = get_nfit_res(offset); |
2938 | |
2939 | - if (nfit_res) |
2940 | + if (nfit_res) { |
2941 | + int rc; |
2942 | + |
2943 | + rc = devm_add_action_or_reset(dev, nfit_test_kill, pgmap); |
2944 | + if (rc) |
2945 | + return ERR_PTR(rc); |
2946 | return nfit_res->buf + offset - nfit_res->res.start; |
2947 | + } |
2948 | return devm_memremap_pages(dev, pgmap); |
2949 | } |
2950 | -EXPORT_SYMBOL(__wrap_devm_memremap_pages); |
2951 | +EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages); |
2952 | |
2953 | pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags) |
2954 | { |
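The nvdimm test shim change above uses devm_add_action_or_reset() so that the wrapped fast path, which returns a test buffer instead of calling the real devm_memremap_pages(), still arranges for pgmap->kill() to run when the device goes away. A kernel-style sketch of that pattern follows (it will not build outside the kernel tree, and the my_* names and release body are hypothetical); the key property is that devm_add_action_or_reset() either registers the callback or, on failure, invokes it immediately so nothing is leaked.

    #include <linux/device.h>
    #include <linux/err.h>

    struct my_mapping {
            void *buf;
    };

    static void my_release(void *data)
    {
            struct my_mapping *map = data;

            /* stand-in for whatever teardown the bypassed helper would do */
            map->buf = NULL;
    }

    static void *my_map_shortcut(struct device *dev, struct my_mapping *map)
    {
            int rc;

            /* Registers my_release(map) to run at driver detach; if the
             * registration itself fails, the action is called right away. */
            rc = devm_add_action_or_reset(dev, my_release, map);
            if (rc)
                    return ERR_PTR(rc);

            return map->buf;
    }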
2955 | diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile |
2956 | index d9a725478375..72c25a3cb658 100644 |
2957 | --- a/tools/testing/selftests/android/Makefile |
2958 | +++ b/tools/testing/selftests/android/Makefile |
2959 | @@ -6,7 +6,7 @@ TEST_PROGS := run.sh |
2960 | |
2961 | include ../lib.mk |
2962 | |
2963 | -all: khdr |
2964 | +all: |
2965 | @for DIR in $(SUBDIRS); do \ |
2966 | BUILD_TARGET=$(OUTPUT)/$$DIR; \ |
2967 | mkdir $$BUILD_TARGET -p; \ |
2968 | diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile |
2969 | index ad1eeb14fda7..30996306cabc 100644 |
2970 | --- a/tools/testing/selftests/futex/functional/Makefile |
2971 | +++ b/tools/testing/selftests/futex/functional/Makefile |
2972 | @@ -19,6 +19,7 @@ TEST_GEN_FILES := \ |
2973 | TEST_PROGS := run.sh |
2974 | |
2975 | top_srcdir = ../../../../.. |
2976 | +KSFT_KHDR_INSTALL := 1 |
2977 | include ../../lib.mk |
2978 | |
2979 | $(TEST_GEN_FILES): $(HEADERS) |
2980 | diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile |
2981 | index 46648427d537..07f572a1bd3f 100644 |
2982 | --- a/tools/testing/selftests/gpio/Makefile |
2983 | +++ b/tools/testing/selftests/gpio/Makefile |
2984 | @@ -10,8 +10,6 @@ TEST_PROGS_EXTENDED := gpio-mockup-chardev |
2985 | GPIODIR := $(realpath ../../../gpio) |
2986 | GPIOOBJ := gpio-utils.o |
2987 | |
2988 | -include ../lib.mk |
2989 | - |
2990 | all: $(TEST_PROGS_EXTENDED) |
2991 | |
2992 | override define CLEAN |
2993 | @@ -19,7 +17,9 @@ override define CLEAN |
2994 | $(MAKE) -C $(GPIODIR) OUTPUT=$(GPIODIR)/ clean |
2995 | endef |
2996 | |
2997 | -$(TEST_PROGS_EXTENDED):| khdr |
2998 | +KSFT_KHDR_INSTALL := 1 |
2999 | +include ../lib.mk |
3000 | + |
3001 | $(TEST_PROGS_EXTENDED): $(GPIODIR)/$(GPIOOBJ) |
3002 | |
3003 | $(GPIODIR)/$(GPIOOBJ): |
3004 | diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile |
3005 | index 01a219229238..52bfe5e76907 100644 |
3006 | --- a/tools/testing/selftests/kvm/Makefile |
3007 | +++ b/tools/testing/selftests/kvm/Makefile |
3008 | @@ -1,6 +1,7 @@ |
3009 | all: |
3010 | |
3011 | top_srcdir = ../../../.. |
3012 | +KSFT_KHDR_INSTALL := 1 |
3013 | UNAME_M := $(shell uname -m) |
3014 | |
3015 | LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebit.c |
3016 | @@ -44,7 +45,6 @@ $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ) |
3017 | |
3018 | all: $(STATIC_LIBS) |
3019 | $(TEST_GEN_PROGS): $(STATIC_LIBS) |
3020 | -$(STATIC_LIBS):| khdr |
3021 | |
3022 | cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib .. |
3023 | cscope: |
3024 | diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk |
3025 | index 0a8e75886224..8b0f16409ed7 100644 |
3026 | --- a/tools/testing/selftests/lib.mk |
3027 | +++ b/tools/testing/selftests/lib.mk |
3028 | @@ -16,18 +16,18 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS)) |
3029 | TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED)) |
3030 | TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES)) |
3031 | |
3032 | +ifdef KSFT_KHDR_INSTALL |
3033 | top_srcdir ?= ../../../.. |
3034 | include $(top_srcdir)/scripts/subarch.include |
3035 | ARCH ?= $(SUBARCH) |
3036 | |
3037 | -all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) |
3038 | - |
3039 | .PHONY: khdr |
3040 | khdr: |
3041 | make ARCH=$(ARCH) -C $(top_srcdir) headers_install |
3042 | |
3043 | -ifdef KSFT_KHDR_INSTALL |
3044 | -$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES):| khdr |
3045 | +all: khdr $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) |
3046 | +else |
3047 | +all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) |
3048 | endif |
3049 | |
3050 | .ONESHELL: |
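The lib.mk hunk above, together with the per-directory Makefile hunks in this patch, inverts how the kselftest headers_install step is wired up: instead of every generated test binary taking an order-only dependency on the khdr target, "all" depends on khdr only when a test Makefile sets KSFT_KHDR_INSTALL := 1 before including lib.mk. That is why the android, gpio and kvm Makefiles shown here drop their explicit khdr dependencies, while futex, gpio, kvm, networking/timestamping, tc-testing/bpf and vm gain the one-line KSFT_KHDR_INSTALL := 1 opt-in.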
3051 | diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile |
3052 | index 14cfcf006936..c46c0eefab9e 100644 |
3053 | --- a/tools/testing/selftests/networking/timestamping/Makefile |
3054 | +++ b/tools/testing/selftests/networking/timestamping/Makefile |
3055 | @@ -6,6 +6,7 @@ TEST_PROGS := hwtstamp_config rxtimestamp timestamping txtimestamp |
3056 | all: $(TEST_PROGS) |
3057 | |
3058 | top_srcdir = ../../../../.. |
3059 | +KSFT_KHDR_INSTALL := 1 |
3060 | include ../../lib.mk |
3061 | |
3062 | clean: |
3063 | diff --git a/tools/testing/selftests/tc-testing/bpf/Makefile b/tools/testing/selftests/tc-testing/bpf/Makefile |
3064 | index dc92eb271d9a..be5a5e542804 100644 |
3065 | --- a/tools/testing/selftests/tc-testing/bpf/Makefile |
3066 | +++ b/tools/testing/selftests/tc-testing/bpf/Makefile |
3067 | @@ -4,6 +4,7 @@ APIDIR := ../../../../include/uapi |
3068 | TEST_GEN_FILES = action.o |
3069 | |
3070 | top_srcdir = ../../../../.. |
3071 | +KSFT_KHDR_INSTALL := 1 |
3072 | include ../../lib.mk |
3073 | |
3074 | CLANG ?= clang |
3075 | diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile |
3076 | index 6e67e726e5a5..e13eb6cc8901 100644 |
3077 | --- a/tools/testing/selftests/vm/Makefile |
3078 | +++ b/tools/testing/selftests/vm/Makefile |
3079 | @@ -25,6 +25,7 @@ TEST_GEN_FILES += virtual_address_range |
3080 | |
3081 | TEST_PROGS := run_vmtests |
3082 | |
3083 | +KSFT_KHDR_INSTALL := 1 |
3084 | include ../lib.mk |
3085 | |
3086 | $(OUTPUT)/userfaultfd: LDLIBS += -lpthread |