Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0187-5.4.88-all-fixes.patch



Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 30223 byte(s)
-add missing
1 diff --git a/Makefile b/Makefile
2 index 71968b4bb313d..450ebe1528062 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 87
10 +SUBLEVEL = 88
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
15 index ff366c2f58c18..303bc3e601a1c 100644
16 --- a/drivers/dma/at_hdmac.c
17 +++ b/drivers/dma/at_hdmac.c
18 @@ -1673,9 +1673,11 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
19 dma_cap_zero(mask);
20 dma_cap_set(DMA_SLAVE, mask);
21
22 - atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
23 - if (!atslave)
24 + atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
25 + if (!atslave) {
26 + put_device(&dmac_pdev->dev);
27 return NULL;
28 + }
29
30 atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
31 /*
32 @@ -1704,8 +1706,11 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
33 atslave->dma_dev = &dmac_pdev->dev;
34
35 chan = dma_request_channel(mask, at_dma_filter, atslave);
36 - if (!chan)
37 + if (!chan) {
38 + put_device(&dmac_pdev->dev);
39 + kfree(atslave);
40 return NULL;
41 + }
42
43 atchan = to_at_dma_chan(chan);
44 atchan->per_if = dma_spec->args[0] & 0xff;
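The two hunks above add the missing cleanup on the error paths of at_dma_xlate(): the reference on dmac_pdev->dev taken earlier in the function is dropped with put_device(), and the freshly allocated slave config is freed, before returning NULL. Below is a minimal userspace sketch of the same release-what-you-acquired error-path pattern; struct ref, struct cfg and acquire_cfg() are hypothetical stand-ins, not driver code.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the device reference and the allocation made
 * before the failure points in at_dma_xlate(); names are illustrative only. */
struct ref { int count; };
struct cfg { int mode; };

static void ref_get(struct ref *r) { r->count++; }
static void ref_put(struct ref *r) { r->count--; }

/* Returns a configured object or NULL; every failure path releases exactly
 * what was acquired earlier, mirroring the patched driver logic. */
static struct cfg *acquire_cfg(struct ref *dev, int fail_at)
{
    struct cfg *c;

    ref_get(dev);               /* analogue of the reference on dmac_pdev->dev */

    c = malloc(sizeof(*c));     /* analogue of the atslave allocation */
    if (!c)
        goto err_put;

    if (fail_at == 2)           /* analogue of dma_request_channel() failing */
        goto err_free;

    c->mode = 1;
    return c;

err_free:
    free(c);
err_put:
    ref_put(dev);
    return NULL;
}

int main(void)
{
    struct ref dev = { .count = 0 };

    if (!acquire_cfg(&dev, 2))
        printf("failed, refcount back to %d\n", dev.count);  /* prints 0 */
    return 0;
}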
45 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
46 index 09410971615c4..d2dd387c95d86 100644
47 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
48 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
49 @@ -1434,8 +1434,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
50
51 drm_connector_update_edid_property(connector,
52 aconnector->edid);
53 - aconnector->num_modes = drm_add_edid_modes(connector, aconnector->edid);
54 - drm_connector_list_update(connector);
55 + drm_add_edid_modes(connector, aconnector->edid);
56
57 if (aconnector->dc_link->aux_mode)
58 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
59 diff --git a/drivers/iio/imu/bmi160/bmi160.h b/drivers/iio/imu/bmi160/bmi160.h
60 index 621f5309d735a..431f10c2b951d 100644
61 --- a/drivers/iio/imu/bmi160/bmi160.h
62 +++ b/drivers/iio/imu/bmi160/bmi160.h
63 @@ -7,6 +7,13 @@
64 struct bmi160_data {
65 struct regmap *regmap;
66 struct iio_trigger *trig;
67 + /*
68 + * Ensure natural alignment for timestamp if present.
69 + * Max length needed: 2 * 3 channels + 4 bytes padding + 8 byte ts.
70 + * If fewer channels are enabled, less space may be needed, as
71 + * long as the timestamp is still aligned to 8 bytes.
72 + */
73 + __le16 buf[12] __aligned(8);
74 };
75
76 extern const struct regmap_config bmi160_regmap_config;
77 diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
78 index a5994899e3965..088694c82327a 100644
79 --- a/drivers/iio/imu/bmi160/bmi160_core.c
80 +++ b/drivers/iio/imu/bmi160/bmi160_core.c
81 @@ -411,8 +411,6 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
82 struct iio_poll_func *pf = p;
83 struct iio_dev *indio_dev = pf->indio_dev;
84 struct bmi160_data *data = iio_priv(indio_dev);
85 - __le16 buf[12];
86 - /* 2 sens x 3 axis x __le16 + 2 x __le16 pad + 4 x __le16 tstamp */
87 int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L;
88 __le16 sample;
89
90 @@ -422,10 +420,10 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
91 &sample, sizeof(sample));
92 if (ret)
93 goto done;
94 - buf[j++] = sample;
95 + data->buf[j++] = sample;
96 }
97
98 - iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp);
99 + iio_push_to_buffers_with_timestamp(indio_dev, data->buf, pf->timestamp);
100 done:
101 iio_trigger_notify_done(indio_dev->trig);
102 return IRQ_HANDLED;
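The bmi160 change moves the capture buffer off the stack into struct bmi160_data and forces 8-byte alignment, because iio_push_to_buffers_with_timestamp() stores a 64-bit timestamp at the end of the scan and expects it to be naturally aligned; a plain stack array gave no such guarantee. The following standalone sketch only illustrates that layout assumption; struct sample_buf and the offsets are not the kernel structures.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative only: 2 sensors x 3 axes of 16-bit samples, with the buffer
 * aligned so an 8-byte timestamp can land on an 8-byte boundary at the end. */
struct sample_buf {
    uint16_t chan[12] __attribute__((aligned(8)));
};

int main(void)
{
    struct sample_buf b;
    /* 6 samples = 12 bytes; the timestamp goes at the next 8-byte boundary. */
    size_t ts_off = (6 * sizeof(uint16_t) + 7) & ~(size_t)7;

    printf("buffer alignment: %zu, timestamp offset: %zu\n",
           _Alignof(struct sample_buf), ts_off);
    printf("timestamp aligned: %s\n",
           (((uintptr_t)b.chan + ts_off) % 8 == 0) ? "yes" : "no");
    return 0;
}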
103 diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
104 index 671700af91804..0d21c68bfe245 100644
105 --- a/drivers/mtd/nand/spi/core.c
106 +++ b/drivers/mtd/nand/spi/core.c
107 @@ -317,10 +317,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
108 buf += ret;
109 }
110
111 - if (req->ooblen)
112 - memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
113 - req->ooblen);
114 -
115 return 0;
116 }
117
118 diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
119 index d87aeff70cefb..c2cb1e711c06e 100644
120 --- a/drivers/net/wireless/marvell/mwifiex/join.c
121 +++ b/drivers/net/wireless/marvell/mwifiex/join.c
122 @@ -877,6 +877,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
123
124 memset(adhoc_start->ssid, 0, IEEE80211_MAX_SSID_LEN);
125
126 + if (req_ssid->ssid_len > IEEE80211_MAX_SSID_LEN)
127 + req_ssid->ssid_len = IEEE80211_MAX_SSID_LEN;
128 memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len);
129
130 mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n",
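The mwifiex fix clamps the caller-supplied SSID length to IEEE80211_MAX_SSID_LEN before copying into the fixed-size adhoc_start->ssid field, closing a potential overflow. A minimal userspace sketch of the clamp-before-copy pattern; struct cmd, set_ssid() and MAX_SSID_LEN are hypothetical stand-ins.

#include <string.h>
#include <stdio.h>

#define MAX_SSID_LEN 32  /* stands in for IEEE80211_MAX_SSID_LEN */

struct cmd { char ssid[MAX_SSID_LEN]; };

/* Zero the destination, clamp the untrusted length, then copy. */
static void set_ssid(struct cmd *c, const char *ssid, size_t len)
{
    memset(c->ssid, 0, sizeof(c->ssid));
    if (len > sizeof(c->ssid))
        len = sizeof(c->ssid);
    memcpy(c->ssid, ssid, len);
}

int main(void)
{
    struct cmd c;
    char big[64];

    memset(big, 'A', sizeof(big));
    set_ssid(&c, big, sizeof(big));   /* oversized request is truncated safely */
    printf("at most %zu bytes copied\n", sizeof(c.ssid));
    return 0;
}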
131 diff --git a/fs/exec.c b/fs/exec.c
132 index 2441eb1a1e2d0..1b4d2206d53a1 100644
133 --- a/fs/exec.c
134 +++ b/fs/exec.c
135 @@ -1009,8 +1009,8 @@ EXPORT_SYMBOL(read_code);
136
137 /*
138 * Maps the mm_struct mm into the current task struct.
139 - * On success, this function returns with the mutex
140 - * exec_update_mutex locked.
141 + * On success, this function returns with exec_update_lock
142 + * held for writing.
143 */
144 static int exec_mmap(struct mm_struct *mm)
145 {
146 @@ -1023,7 +1023,7 @@ static int exec_mmap(struct mm_struct *mm)
147 old_mm = current->mm;
148 exec_mm_release(tsk, old_mm);
149
150 - ret = mutex_lock_killable(&tsk->signal->exec_update_mutex);
151 + ret = down_write_killable(&tsk->signal->exec_update_lock);
152 if (ret)
153 return ret;
154
155 @@ -1038,7 +1038,7 @@ static int exec_mmap(struct mm_struct *mm)
156 down_read(&old_mm->mmap_sem);
157 if (unlikely(old_mm->core_state)) {
158 up_read(&old_mm->mmap_sem);
159 - mutex_unlock(&tsk->signal->exec_update_mutex);
160 + up_write(&tsk->signal->exec_update_lock);
161 return -EINTR;
162 }
163 }
164 @@ -1450,7 +1450,7 @@ static void free_bprm(struct linux_binprm *bprm)
165 free_arg_pages(bprm);
166 if (bprm->cred) {
167 if (bprm->called_exec_mmap)
168 - mutex_unlock(&current->signal->exec_update_mutex);
169 + up_write(&current->signal->exec_update_lock);
170 mutex_unlock(&current->signal->cred_guard_mutex);
171 abort_creds(bprm->cred);
172 }
173 @@ -1500,7 +1500,7 @@ void install_exec_creds(struct linux_binprm *bprm)
174 * credentials; any time after this it may be unlocked.
175 */
176 security_bprm_committed_creds(bprm);
177 - mutex_unlock(&current->signal->exec_update_mutex);
178 + up_write(&current->signal->exec_update_lock);
179 mutex_unlock(&current->signal->cred_guard_mutex);
180 }
181 EXPORT_SYMBOL(install_exec_creds);
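fs/exec.c now takes exec_update_lock for writing around the window in which the task's mm and credentials are replaced, while the readers converted later in this patch (procfs, mm_access(), kcmp, perf) take it shared. The sketch below is only a rough userspace analogy of that writer/reader split using a POSIX rwlock; it does not use the kernel API, and the function names are invented.

#include <pthread.h>
#include <stdio.h>

/* Analogy of signal_struct::exec_update_lock: exec is the only writer,
 * introspection paths (procfs, kcmp, perf) are readers. */
static pthread_rwlock_t exec_update_lock = PTHREAD_RWLOCK_INITIALIZER;
static int creds;

static void exec_update(void)           /* analogue of the exec-time update */
{
    pthread_rwlock_wrlock(&exec_update_lock);
    creds++;                            /* credentials change under the write lock */
    pthread_rwlock_unlock(&exec_update_lock);
}

static int read_task_state(void)        /* analogue of lock_trace()/mm_access() */
{
    int c;

    pthread_rwlock_rdlock(&exec_update_lock);
    c = creds;                          /* readers see a consistent snapshot */
    pthread_rwlock_unlock(&exec_update_lock);
    return c;
}

int main(void)
{
    exec_update();
    printf("observed creds generation: %d\n", read_task_state());
    return 0;
}

Compile with -pthread; multiple readers may hold the lock concurrently, which is the point of switching from a mutex to a semaphore-based lock.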
182 diff --git a/fs/fuse/acl.c b/fs/fuse/acl.c
183 index 5a48cee6d7d33..f529075a2ce87 100644
184 --- a/fs/fuse/acl.c
185 +++ b/fs/fuse/acl.c
186 @@ -19,6 +19,9 @@ struct posix_acl *fuse_get_acl(struct inode *inode, int type)
187 void *value = NULL;
188 struct posix_acl *acl;
189
190 + if (fuse_is_bad(inode))
191 + return ERR_PTR(-EIO);
192 +
193 if (!fc->posix_acl || fc->no_getxattr)
194 return NULL;
195
196 @@ -53,6 +56,9 @@ int fuse_set_acl(struct inode *inode, struct posix_acl *acl, int type)
197 const char *name;
198 int ret;
199
200 + if (fuse_is_bad(inode))
201 + return -EIO;
202 +
203 if (!fc->posix_acl || fc->no_setxattr)
204 return -EOPNOTSUPP;
205
206 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
207 index ee190119f45cc..60378f3baaae1 100644
208 --- a/fs/fuse/dir.c
209 +++ b/fs/fuse/dir.c
210 @@ -201,7 +201,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
211 int ret;
212
213 inode = d_inode_rcu(entry);
214 - if (inode && is_bad_inode(inode))
215 + if (inode && fuse_is_bad(inode))
216 goto invalid;
217 else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
218 (flags & LOOKUP_REVAL)) {
219 @@ -386,6 +386,9 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
220 bool outarg_valid = true;
221 bool locked;
222
223 + if (fuse_is_bad(dir))
224 + return ERR_PTR(-EIO);
225 +
226 locked = fuse_lock_inode(dir);
227 err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
228 &outarg, &inode);
229 @@ -529,6 +532,9 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
230 struct fuse_conn *fc = get_fuse_conn(dir);
231 struct dentry *res = NULL;
232
233 + if (fuse_is_bad(dir))
234 + return -EIO;
235 +
236 if (d_in_lookup(entry)) {
237 res = fuse_lookup(dir, entry, 0);
238 if (IS_ERR(res))
239 @@ -577,6 +583,9 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
240 int err;
241 struct fuse_forget_link *forget;
242
243 + if (fuse_is_bad(dir))
244 + return -EIO;
245 +
246 forget = fuse_alloc_forget();
247 if (!forget)
248 return -ENOMEM;
249 @@ -704,6 +713,9 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
250 struct fuse_conn *fc = get_fuse_conn(dir);
251 FUSE_ARGS(args);
252
253 + if (fuse_is_bad(dir))
254 + return -EIO;
255 +
256 args.opcode = FUSE_UNLINK;
257 args.nodeid = get_node_id(dir);
258 args.in_numargs = 1;
259 @@ -740,6 +752,9 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
260 struct fuse_conn *fc = get_fuse_conn(dir);
261 FUSE_ARGS(args);
262
263 + if (fuse_is_bad(dir))
264 + return -EIO;
265 +
266 args.opcode = FUSE_RMDIR;
267 args.nodeid = get_node_id(dir);
268 args.in_numargs = 1;
269 @@ -818,6 +833,9 @@ static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
270 struct fuse_conn *fc = get_fuse_conn(olddir);
271 int err;
272
273 + if (fuse_is_bad(olddir))
274 + return -EIO;
275 +
276 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
277 return -EINVAL;
278
279 @@ -953,7 +971,7 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
280 if (!err) {
281 if (fuse_invalid_attr(&outarg.attr) ||
282 (inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
283 - make_bad_inode(inode);
284 + fuse_make_bad(inode);
285 err = -EIO;
286 } else {
287 fuse_change_attributes(inode, &outarg.attr,
288 @@ -1155,6 +1173,9 @@ static int fuse_permission(struct inode *inode, int mask)
289 bool refreshed = false;
290 int err = 0;
291
292 + if (fuse_is_bad(inode))
293 + return -EIO;
294 +
295 if (!fuse_allow_current_process(fc))
296 return -EACCES;
297
298 @@ -1250,7 +1271,7 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
299 int err;
300
301 err = -EIO;
302 - if (is_bad_inode(inode))
303 + if (fuse_is_bad(inode))
304 goto out_err;
305
306 if (fc->cache_symlinks)
307 @@ -1298,7 +1319,7 @@ static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end,
308 struct fuse_conn *fc = get_fuse_conn(inode);
309 int err;
310
311 - if (is_bad_inode(inode))
312 + if (fuse_is_bad(inode))
313 return -EIO;
314
315 if (fc->no_fsyncdir)
316 @@ -1575,7 +1596,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
317
318 if (fuse_invalid_attr(&outarg.attr) ||
319 (inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
320 - make_bad_inode(inode);
321 + fuse_make_bad(inode);
322 err = -EIO;
323 goto error;
324 }
325 @@ -1631,6 +1652,9 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
326 struct file *file = (attr->ia_valid & ATTR_FILE) ? attr->ia_file : NULL;
327 int ret;
328
329 + if (fuse_is_bad(inode))
330 + return -EIO;
331 +
332 if (!fuse_allow_current_process(get_fuse_conn(inode)))
333 return -EACCES;
334
335 @@ -1689,6 +1713,9 @@ static int fuse_getattr(const struct path *path, struct kstat *stat,
336 struct inode *inode = d_inode(path->dentry);
337 struct fuse_conn *fc = get_fuse_conn(inode);
338
339 + if (fuse_is_bad(inode))
340 + return -EIO;
341 +
342 if (!fuse_allow_current_process(fc))
343 return -EACCES;
344
345 diff --git a/fs/fuse/file.c b/fs/fuse/file.c
346 index ab4fc1255aca8..1e1aef1bc20b3 100644
347 --- a/fs/fuse/file.c
348 +++ b/fs/fuse/file.c
349 @@ -222,6 +222,9 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
350 fc->atomic_o_trunc &&
351 fc->writeback_cache;
352
353 + if (fuse_is_bad(inode))
354 + return -EIO;
355 +
356 err = generic_file_open(inode, file);
357 if (err)
358 return err;
359 @@ -443,7 +446,7 @@ static int fuse_flush(struct file *file, fl_owner_t id)
360 FUSE_ARGS(args);
361 int err;
362
363 - if (is_bad_inode(inode))
364 + if (fuse_is_bad(inode))
365 return -EIO;
366
367 if (fc->no_flush)
368 @@ -506,7 +509,7 @@ static int fuse_fsync(struct file *file, loff_t start, loff_t end,
369 struct fuse_conn *fc = get_fuse_conn(inode);
370 int err;
371
372 - if (is_bad_inode(inode))
373 + if (fuse_is_bad(inode))
374 return -EIO;
375
376 inode_lock(inode);
377 @@ -830,7 +833,7 @@ static int fuse_readpage(struct file *file, struct page *page)
378 int err;
379
380 err = -EIO;
381 - if (is_bad_inode(inode))
382 + if (fuse_is_bad(inode))
383 goto out;
384
385 err = fuse_do_readpage(file, page);
386 @@ -973,7 +976,7 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
387 int err;
388
389 err = -EIO;
390 - if (is_bad_inode(inode))
391 + if (fuse_is_bad(inode))
392 goto out;
393
394 data.file = file;
395 @@ -1569,7 +1572,7 @@ static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
396 struct file *file = iocb->ki_filp;
397 struct fuse_file *ff = file->private_data;
398
399 - if (is_bad_inode(file_inode(file)))
400 + if (fuse_is_bad(file_inode(file)))
401 return -EIO;
402
403 if (!(ff->open_flags & FOPEN_DIRECT_IO))
404 @@ -1583,7 +1586,7 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
405 struct file *file = iocb->ki_filp;
406 struct fuse_file *ff = file->private_data;
407
408 - if (is_bad_inode(file_inode(file)))
409 + if (fuse_is_bad(file_inode(file)))
410 return -EIO;
411
412 if (!(ff->open_flags & FOPEN_DIRECT_IO))
413 @@ -2133,7 +2136,7 @@ static int fuse_writepages(struct address_space *mapping,
414 int err;
415
416 err = -EIO;
417 - if (is_bad_inode(inode))
418 + if (fuse_is_bad(inode))
419 goto out;
420
421 data.inode = inode;
422 @@ -2911,7 +2914,7 @@ long fuse_ioctl_common(struct file *file, unsigned int cmd,
423 if (!fuse_allow_current_process(fc))
424 return -EACCES;
425
426 - if (is_bad_inode(inode))
427 + if (fuse_is_bad(inode))
428 return -EIO;
429
430 return fuse_do_ioctl(file, cmd, arg, flags);
431 diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
432 index d7cde216fc871..e3688312e9f1b 100644
433 --- a/fs/fuse/fuse_i.h
434 +++ b/fs/fuse/fuse_i.h
435 @@ -158,6 +158,8 @@ enum {
436 FUSE_I_INIT_RDPLUS,
437 /** An operation changing file size is in progress */
438 FUSE_I_SIZE_UNSTABLE,
439 + /* Bad inode */
440 + FUSE_I_BAD,
441 };
442
443 struct fuse_conn;
444 @@ -787,6 +789,16 @@ static inline u64 fuse_get_attr_version(struct fuse_conn *fc)
445 return atomic64_read(&fc->attr_version);
446 }
447
448 +static inline void fuse_make_bad(struct inode *inode)
449 +{
450 + set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state);
451 +}
452 +
453 +static inline bool fuse_is_bad(struct inode *inode)
454 +{
455 + return unlikely(test_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state));
456 +}
457 +
458 /** Device operations */
459 extern const struct file_operations fuse_dev_operations;
460
461 diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
462 index f58ab84b09fb3..aa1d5cf1bc3a4 100644
463 --- a/fs/fuse/inode.c
464 +++ b/fs/fuse/inode.c
465 @@ -115,7 +115,7 @@ static void fuse_evict_inode(struct inode *inode)
466 fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup);
467 fi->forget = NULL;
468 }
469 - if (S_ISREG(inode->i_mode) && !is_bad_inode(inode)) {
470 + if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
471 WARN_ON(!list_empty(&fi->write_files));
472 WARN_ON(!list_empty(&fi->queued_writes));
473 }
474 @@ -306,7 +306,7 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
475 unlock_new_inode(inode);
476 } else if ((inode->i_mode ^ attr->mode) & S_IFMT) {
477 /* Inode has changed type, any I/O on the old should fail */
478 - make_bad_inode(inode);
479 + fuse_make_bad(inode);
480 iput(inode);
481 goto retry;
482 }
483 diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
484 index 6a40f75a0d25e..70f685b61e3a5 100644
485 --- a/fs/fuse/readdir.c
486 +++ b/fs/fuse/readdir.c
487 @@ -207,7 +207,7 @@ retry:
488 dput(dentry);
489 goto retry;
490 }
491 - if (is_bad_inode(inode)) {
492 + if (fuse_is_bad(inode)) {
493 dput(dentry);
494 return -EIO;
495 }
496 @@ -568,7 +568,7 @@ int fuse_readdir(struct file *file, struct dir_context *ctx)
497 struct inode *inode = file_inode(file);
498 int err;
499
500 - if (is_bad_inode(inode))
501 + if (fuse_is_bad(inode))
502 return -EIO;
503
504 mutex_lock(&ff->readdir.lock);
505 diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c
506 index 20d052e08b3be..28fed52957707 100644
507 --- a/fs/fuse/xattr.c
508 +++ b/fs/fuse/xattr.c
509 @@ -113,6 +113,9 @@ ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
510 struct fuse_getxattr_out outarg;
511 ssize_t ret;
512
513 + if (fuse_is_bad(inode))
514 + return -EIO;
515 +
516 if (!fuse_allow_current_process(fc))
517 return -EACCES;
518
519 @@ -178,6 +181,9 @@ static int fuse_xattr_get(const struct xattr_handler *handler,
520 struct dentry *dentry, struct inode *inode,
521 const char *name, void *value, size_t size)
522 {
523 + if (fuse_is_bad(inode))
524 + return -EIO;
525 +
526 return fuse_getxattr(inode, name, value, size);
527 }
528
529 @@ -186,6 +192,9 @@ static int fuse_xattr_set(const struct xattr_handler *handler,
530 const char *name, const void *value, size_t size,
531 int flags)
532 {
533 + if (fuse_is_bad(inode))
534 + return -EIO;
535 +
536 if (!value)
537 return fuse_removexattr(inode, name);
538
539 diff --git a/fs/proc/base.c b/fs/proc/base.c
540 index b690074e65ffa..653c2d8aa1cd7 100644
541 --- a/fs/proc/base.c
542 +++ b/fs/proc/base.c
543 @@ -403,11 +403,11 @@ print0:
544
545 static int lock_trace(struct task_struct *task)
546 {
547 - int err = mutex_lock_killable(&task->signal->exec_update_mutex);
548 + int err = down_read_killable(&task->signal->exec_update_lock);
549 if (err)
550 return err;
551 if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
552 - mutex_unlock(&task->signal->exec_update_mutex);
553 + up_read(&task->signal->exec_update_lock);
554 return -EPERM;
555 }
556 return 0;
557 @@ -415,7 +415,7 @@ static int lock_trace(struct task_struct *task)
558
559 static void unlock_trace(struct task_struct *task)
560 {
561 - mutex_unlock(&task->signal->exec_update_mutex);
562 + up_read(&task->signal->exec_update_lock);
563 }
564
565 #ifdef CONFIG_STACKTRACE
566 @@ -2769,7 +2769,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
567 unsigned long flags;
568 int result;
569
570 - result = mutex_lock_killable(&task->signal->exec_update_mutex);
571 + result = down_read_killable(&task->signal->exec_update_lock);
572 if (result)
573 return result;
574
575 @@ -2805,7 +2805,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
576 result = 0;
577
578 out_unlock:
579 - mutex_unlock(&task->signal->exec_update_mutex);
580 + up_read(&task->signal->exec_update_lock);
581 return result;
582 }
583
584 diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h
585 index 85b5151911cfd..4856706fbfeb4 100644
586 --- a/include/linux/kdev_t.h
587 +++ b/include/linux/kdev_t.h
588 @@ -21,61 +21,61 @@
589 })
590
591 /* acceptable for old filesystems */
592 -static inline bool old_valid_dev(dev_t dev)
593 +static __always_inline bool old_valid_dev(dev_t dev)
594 {
595 return MAJOR(dev) < 256 && MINOR(dev) < 256;
596 }
597
598 -static inline u16 old_encode_dev(dev_t dev)
599 +static __always_inline u16 old_encode_dev(dev_t dev)
600 {
601 return (MAJOR(dev) << 8) | MINOR(dev);
602 }
603
604 -static inline dev_t old_decode_dev(u16 val)
605 +static __always_inline dev_t old_decode_dev(u16 val)
606 {
607 return MKDEV((val >> 8) & 255, val & 255);
608 }
609
610 -static inline u32 new_encode_dev(dev_t dev)
611 +static __always_inline u32 new_encode_dev(dev_t dev)
612 {
613 unsigned major = MAJOR(dev);
614 unsigned minor = MINOR(dev);
615 return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
616 }
617
618 -static inline dev_t new_decode_dev(u32 dev)
619 +static __always_inline dev_t new_decode_dev(u32 dev)
620 {
621 unsigned major = (dev & 0xfff00) >> 8;
622 unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
623 return MKDEV(major, minor);
624 }
625
626 -static inline u64 huge_encode_dev(dev_t dev)
627 +static __always_inline u64 huge_encode_dev(dev_t dev)
628 {
629 return new_encode_dev(dev);
630 }
631
632 -static inline dev_t huge_decode_dev(u64 dev)
633 +static __always_inline dev_t huge_decode_dev(u64 dev)
634 {
635 return new_decode_dev(dev);
636 }
637
638 -static inline int sysv_valid_dev(dev_t dev)
639 +static __always_inline int sysv_valid_dev(dev_t dev)
640 {
641 return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18);
642 }
643
644 -static inline u32 sysv_encode_dev(dev_t dev)
645 +static __always_inline u32 sysv_encode_dev(dev_t dev)
646 {
647 return MINOR(dev) | (MAJOR(dev) << 18);
648 }
649
650 -static inline unsigned sysv_major(u32 dev)
651 +static __always_inline unsigned sysv_major(u32 dev)
652 {
653 return (dev >> 18) & 0x3fff;
654 }
655
656 -static inline unsigned sysv_minor(u32 dev)
657 +static __always_inline unsigned sysv_minor(u32 dev)
658 {
659 return dev & 0x3ffff;
660 }
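The kdev_t.h change only upgrades the helpers from inline to __always_inline, so the compiler can never emit them out of line where constant folding is required; the encode/decode math is unchanged. The round-trip check below restates new_encode_dev()/new_decode_dev() in userspace for illustration and assumes the kernel's 12-bit major / 20-bit minor split (MINORBITS = 20), which is not shown in the hunk.

#include <stdio.h>

/* Userspace restatement of MAJOR/MINOR/MKDEV and the new_encode_dev()/
 * new_decode_dev() arithmetic shown above; 12/20 bit split assumed. */
typedef unsigned int u32;

#define MINORBITS 20
#define MINORMASK ((1u << MINORBITS) - 1)
#define MAJOR(dev)    ((unsigned)((dev) >> MINORBITS))
#define MINOR(dev)    ((unsigned)((dev) & MINORMASK))
#define MKDEV(ma, mi) (((u32)(ma) << MINORBITS) | (mi))

static u32 new_encode_dev(u32 dev)
{
    unsigned major = MAJOR(dev);
    unsigned minor = MINOR(dev);

    return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
}

static u32 new_decode_dev(u32 dev)
{
    unsigned major = (dev & 0xfff00) >> 8;
    unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);

    return MKDEV(major, minor);
}

int main(void)
{
    u32 dev = MKDEV(259, 65538);              /* arbitrary major/minor pair */
    u32 enc = new_encode_dev(dev);

    printf("encoded 0x%08x, round trip ok: %d\n",
           enc, new_decode_dev(enc) == dev);  /* prints ...ok: 1 */
    return 0;
}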
661 diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
662 index 00d6054687dd2..8a3606372abc8 100644
663 --- a/include/linux/rwsem.h
664 +++ b/include/linux/rwsem.h
665 @@ -125,6 +125,7 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
666 * lock for reading
667 */
668 extern void down_read(struct rw_semaphore *sem);
669 +extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
670 extern int __must_check down_read_killable(struct rw_semaphore *sem);
671
672 /*
673 @@ -173,6 +174,7 @@ extern void downgrade_write(struct rw_semaphore *sem);
674 * See Documentation/locking/lockdep-design.rst for more details.)
675 */
676 extern void down_read_nested(struct rw_semaphore *sem, int subclass);
677 +extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
678 extern void down_write_nested(struct rw_semaphore *sem, int subclass);
679 extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
680 extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
681 @@ -193,6 +195,7 @@ extern void down_read_non_owner(struct rw_semaphore *sem);
682 extern void up_read_non_owner(struct rw_semaphore *sem);
683 #else
684 # define down_read_nested(sem, subclass) down_read(sem)
685 +# define down_read_killable_nested(sem, subclass) down_read_killable(sem)
686 # define down_write_nest_lock(sem, nest_lock) down_write(sem)
687 # define down_write_nested(sem, subclass) down_write(sem)
688 # define down_write_killable_nested(sem, subclass) down_write_killable(sem)
689 diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
690 index a29df79540ce6..baf58f4cb0578 100644
691 --- a/include/linux/sched/signal.h
692 +++ b/include/linux/sched/signal.h
693 @@ -226,12 +226,13 @@ struct signal_struct {
694 * credential calculations
695 * (notably. ptrace)
696 * Deprecated do not use in new code.
697 - * Use exec_update_mutex instead.
698 - */
699 - struct mutex exec_update_mutex; /* Held while task_struct is being
700 - * updated during exec, and may have
701 - * inconsistent permissions.
702 + * Use exec_update_lock instead.
703 */
704 + struct rw_semaphore exec_update_lock; /* Held while task_struct is
705 + * being updated during exec,
706 + * and may have inconsistent
707 + * permissions.
708 + */
709 } __randomize_layout;
710
711 /*
712 diff --git a/init/init_task.c b/init/init_task.c
713 index bd403ed3e4184..df7041be96fca 100644
714 --- a/init/init_task.c
715 +++ b/init/init_task.c
716 @@ -26,7 +26,7 @@ static struct signal_struct init_signals = {
717 .multiprocess = HLIST_HEAD_INIT,
718 .rlim = INIT_RLIMITS,
719 .cred_guard_mutex = __MUTEX_INITIALIZER(init_signals.cred_guard_mutex),
720 - .exec_update_mutex = __MUTEX_INITIALIZER(init_signals.exec_update_mutex),
721 + .exec_update_lock = __RWSEM_INITIALIZER(init_signals.exec_update_lock),
722 #ifdef CONFIG_POSIX_TIMERS
723 .posix_timers = LIST_HEAD_INIT(init_signals.posix_timers),
724 .cputimer = {
725 diff --git a/kernel/events/core.c b/kernel/events/core.c
726 index 9f7c2da992991..2ef33e9a75910 100644
727 --- a/kernel/events/core.c
728 +++ b/kernel/events/core.c
729 @@ -1254,7 +1254,7 @@ static void put_ctx(struct perf_event_context *ctx)
730 * function.
731 *
732 * Lock order:
733 - * exec_update_mutex
734 + * exec_update_lock
735 * task_struct::perf_event_mutex
736 * perf_event_context::mutex
737 * perf_event::child_mutex;
738 @@ -11001,24 +11001,6 @@ SYSCALL_DEFINE5(perf_event_open,
739 goto err_task;
740 }
741
742 - if (task) {
743 - err = mutex_lock_interruptible(&task->signal->exec_update_mutex);
744 - if (err)
745 - goto err_task;
746 -
747 - /*
748 - * Reuse ptrace permission checks for now.
749 - *
750 - * We must hold exec_update_mutex across this and any potential
751 - * perf_install_in_context() call for this new event to
752 - * serialize against exec() altering our credentials (and the
753 - * perf_event_exit_task() that could imply).
754 - */
755 - err = -EACCES;
756 - if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
757 - goto err_cred;
758 - }
759 -
760 if (flags & PERF_FLAG_PID_CGROUP)
761 cgroup_fd = pid;
762
763 @@ -11026,7 +11008,7 @@ SYSCALL_DEFINE5(perf_event_open,
764 NULL, NULL, cgroup_fd);
765 if (IS_ERR(event)) {
766 err = PTR_ERR(event);
767 - goto err_cred;
768 + goto err_task;
769 }
770
771 if (is_sampling_event(event)) {
772 @@ -11145,6 +11127,24 @@ SYSCALL_DEFINE5(perf_event_open,
773 goto err_context;
774 }
775
776 + if (task) {
777 + err = down_read_interruptible(&task->signal->exec_update_lock);
778 + if (err)
779 + goto err_file;
780 +
781 + /*
782 + * Preserve ptrace permission check for backwards compatibility.
783 + *
784 + * We must hold exec_update_lock across this and any potential
785 + * perf_install_in_context() call for this new event to
786 + * serialize against exec() altering our credentials (and the
787 + * perf_event_exit_task() that could imply).
788 + */
789 + err = -EACCES;
790 + if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
791 + goto err_cred;
792 + }
793 +
794 if (move_group) {
795 gctx = __perf_event_ctx_lock_double(group_leader, ctx);
796
797 @@ -11298,7 +11298,7 @@ SYSCALL_DEFINE5(perf_event_open,
798 mutex_unlock(&ctx->mutex);
799
800 if (task) {
801 - mutex_unlock(&task->signal->exec_update_mutex);
802 + up_read(&task->signal->exec_update_lock);
803 put_task_struct(task);
804 }
805
806 @@ -11320,7 +11320,10 @@ err_locked:
807 if (move_group)
808 perf_event_ctx_unlock(group_leader, gctx);
809 mutex_unlock(&ctx->mutex);
810 -/* err_file: */
811 +err_cred:
812 + if (task)
813 + up_read(&task->signal->exec_update_lock);
814 +err_file:
815 fput(event_file);
816 err_context:
817 perf_unpin_context(ctx);
818 @@ -11332,9 +11335,6 @@ err_alloc:
819 */
820 if (!event_file)
821 free_event(event);
822 -err_cred:
823 - if (task)
824 - mutex_unlock(&task->signal->exec_update_mutex);
825 err_task:
826 if (task)
827 put_task_struct(task);
828 @@ -11639,7 +11639,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
829 /*
830 * When a child task exits, feed back event values to parent events.
831 *
832 - * Can be called with exec_update_mutex held when called from
833 + * Can be called with exec_update_lock held when called from
834 * install_exec_creds().
835 */
836 void perf_event_exit_task(struct task_struct *child)
837 diff --git a/kernel/fork.c b/kernel/fork.c
838 index 419fff8eb9e55..50f37d5afb32b 100644
839 --- a/kernel/fork.c
840 +++ b/kernel/fork.c
841 @@ -1221,7 +1221,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
842 struct mm_struct *mm;
843 int err;
844
845 - err = mutex_lock_killable(&task->signal->exec_update_mutex);
846 + err = down_read_killable(&task->signal->exec_update_lock);
847 if (err)
848 return ERR_PTR(err);
849
850 @@ -1231,7 +1231,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
851 mmput(mm);
852 mm = ERR_PTR(-EACCES);
853 }
854 - mutex_unlock(&task->signal->exec_update_mutex);
855 + up_read(&task->signal->exec_update_lock);
856
857 return mm;
858 }
859 @@ -1586,7 +1586,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
860 sig->oom_score_adj_min = current->signal->oom_score_adj_min;
861
862 mutex_init(&sig->cred_guard_mutex);
863 - mutex_init(&sig->exec_update_mutex);
864 + init_rwsem(&sig->exec_update_lock);
865
866 return 0;
867 }
868 diff --git a/kernel/kcmp.c b/kernel/kcmp.c
869 index b3ff9288c6cc9..c0d2ad9b4705d 100644
870 --- a/kernel/kcmp.c
871 +++ b/kernel/kcmp.c
872 @@ -75,25 +75,25 @@ get_file_raw_ptr(struct task_struct *task, unsigned int idx)
873 return file;
874 }
875
876 -static void kcmp_unlock(struct mutex *m1, struct mutex *m2)
877 +static void kcmp_unlock(struct rw_semaphore *l1, struct rw_semaphore *l2)
878 {
879 - if (likely(m2 != m1))
880 - mutex_unlock(m2);
881 - mutex_unlock(m1);
882 + if (likely(l2 != l1))
883 + up_read(l2);
884 + up_read(l1);
885 }
886
887 -static int kcmp_lock(struct mutex *m1, struct mutex *m2)
888 +static int kcmp_lock(struct rw_semaphore *l1, struct rw_semaphore *l2)
889 {
890 int err;
891
892 - if (m2 > m1)
893 - swap(m1, m2);
894 + if (l2 > l1)
895 + swap(l1, l2);
896
897 - err = mutex_lock_killable(m1);
898 - if (!err && likely(m1 != m2)) {
899 - err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING);
900 + err = down_read_killable(l1);
901 + if (!err && likely(l1 != l2)) {
902 + err = down_read_killable_nested(l2, SINGLE_DEPTH_NESTING);
903 if (err)
904 - mutex_unlock(m1);
905 + up_read(l1);
906 }
907
908 return err;
909 @@ -173,8 +173,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
910 /*
911 * One should have enough rights to inspect task details.
912 */
913 - ret = kcmp_lock(&task1->signal->exec_update_mutex,
914 - &task2->signal->exec_update_mutex);
915 + ret = kcmp_lock(&task1->signal->exec_update_lock,
916 + &task2->signal->exec_update_lock);
917 if (ret)
918 goto err;
919 if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
920 @@ -229,8 +229,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
921 }
922
923 err_unlock:
924 - kcmp_unlock(&task1->signal->exec_update_mutex,
925 - &task2->signal->exec_update_mutex);
926 + kcmp_unlock(&task1->signal->exec_update_lock,
927 + &task2->signal->exec_update_lock);
928 err:
929 put_task_struct(task1);
930 put_task_struct(task2);
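kcmp now takes both tasks' exec_update_lock for reading, and kcmp_lock() keeps the original deadlock-avoidance scheme: always acquire the lower-addressed lock first (the swap()) and annotate the second acquisition with SINGLE_DEPTH_NESTING, now via the new down_read_killable_nested(). Below is a userspace sketch of address-ordered pair locking with pthread mutexes, as an analogy only; lock_pair()/unlock_pair() are invented helpers.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Lock two mutexes in a globally consistent (address) order so that two
 * threads locking the same pair in opposite argument order cannot deadlock. */
static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
    if ((uintptr_t)b < (uintptr_t)a) {   /* mirror of swap(l1, l2) in kcmp_lock() */
        pthread_mutex_t *t = a; a = b; b = t;
    }
    pthread_mutex_lock(a);
    if (a != b)                          /* both tasks may be the same, as in kcmp */
        pthread_mutex_lock(b);
}

static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
    if ((uintptr_t)b < (uintptr_t)a) {
        pthread_mutex_t *t = a; a = b; b = t;
    }
    if (a != b)
        pthread_mutex_unlock(b);
    pthread_mutex_unlock(a);
}

int main(void)
{
    pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
    pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

    lock_pair(&m2, &m1);        /* argument order does not matter */
    printf("both locks held\n");
    unlock_pair(&m2, &m1);
    return 0;
}

Compile with -pthread. With shared (read) locks the ordering matters less, but keeping it preserves the lockdep nesting annotation the kernel code relies on.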
931 diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
932 index baafa1dd9fcc4..5d54ff3179b80 100644
933 --- a/kernel/locking/rwsem.c
934 +++ b/kernel/locking/rwsem.c
935 @@ -1348,6 +1348,18 @@ inline void __down_read(struct rw_semaphore *sem)
936 }
937 }
938
939 +static inline int __down_read_interruptible(struct rw_semaphore *sem)
940 +{
941 + if (!rwsem_read_trylock(sem)) {
942 + if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_INTERRUPTIBLE)))
943 + return -EINTR;
944 + DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
945 + } else {
946 + rwsem_set_reader_owned(sem);
947 + }
948 + return 0;
949 +}
950 +
951 static inline int __down_read_killable(struct rw_semaphore *sem)
952 {
953 if (!rwsem_read_trylock(sem)) {
954 @@ -1498,6 +1510,20 @@ void __sched down_read(struct rw_semaphore *sem)
955 }
956 EXPORT_SYMBOL(down_read);
957
958 +int __sched down_read_interruptible(struct rw_semaphore *sem)
959 +{
960 + might_sleep();
961 + rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
962 +
963 + if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
964 + rwsem_release(&sem->dep_map, 1, _RET_IP_);
965 + return -EINTR;
966 + }
967 +
968 + return 0;
969 +}
970 +EXPORT_SYMBOL(down_read_interruptible);
971 +
972 int __sched down_read_killable(struct rw_semaphore *sem)
973 {
974 might_sleep();
975 @@ -1608,6 +1634,20 @@ void down_read_nested(struct rw_semaphore *sem, int subclass)
976 }
977 EXPORT_SYMBOL(down_read_nested);
978
979 +int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
980 +{
981 + might_sleep();
982 + rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
983 +
984 + if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
985 + rwsem_release(&sem->dep_map, 1, _RET_IP_);
986 + return -EINTR;
987 + }
988 +
989 + return 0;
990 +}
991 +EXPORT_SYMBOL(down_read_killable_nested);
992 +
993 void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
994 {
995 might_sleep();
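The rwsem hunks add down_read_interruptible() (used by perf_event_open() above) and down_read_killable_nested() (used by kcmp): both try the fast path, fall back to an interruptible or killable sleep in the slow path, and report -EINTR so the caller can unwind without holding the lock. The toy sketch below shows only that caller contract; signal_pending_flag and try_lock_shared_interruptible() are invented for illustration and do not model real sleeping or wakeups.

#include <errno.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>

/* Simulated "signal pending" state; in the kernel the slow path checks this
 * while waiting and backs out of the wait queue. */
static volatile sig_atomic_t signal_pending_flag;

/* Invented helper: succeed unless a signal is pending, in which case return
 * -EINTR so the caller never touches the protected data. */
static int try_lock_shared_interruptible(bool *locked)
{
    if (signal_pending_flag)
        return -EINTR;
    *locked = true;
    return 0;
}

int main(void)
{
    bool locked = false;
    int err;

    signal_pending_flag = 1;                 /* pretend a signal arrived */
    err = try_lock_shared_interruptible(&locked);
    if (err) {
        printf("interrupted before taking the lock: %d\n", err);
        return 0;                            /* unwind without the lock */
    }

    /* ... read the protected state, then release ... */
    return 0;
}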