Contents of /trunk/kernel-alx/patches-5.4/0193-5.4.94-all-fixes.patch
Parent Directory | Revision Log
Revision 3637 -
(show annotations)
(download)
Mon Oct 24 12:40:44 2022 UTC (23 months ago) by niro
File size: 35357 byte(s)
-add missing
1 | diff --git a/Documentation/admin-guide/device-mapper/dm-integrity.rst b/Documentation/admin-guide/device-mapper/dm-integrity.rst |
2 | index a30aa91b5fbe9..3463883844c0b 100644 |
3 | --- a/Documentation/admin-guide/device-mapper/dm-integrity.rst |
4 | +++ b/Documentation/admin-guide/device-mapper/dm-integrity.rst |
5 | @@ -177,6 +177,12 @@ bitmap_flush_interval:number |
6 | The bitmap flush interval in milliseconds. The metadata buffers |
7 | are synchronized when this interval expires. |
8 | |
9 | +legacy_recalculate |
10 | + Allow recalculating of volumes with HMAC keys. This is disabled by |
11 | + default for security reasons - an attacker could modify the volume, |
12 | + set recalc_sector to zero, and the kernel would not detect the |
13 | + modification. |
14 | + |
15 | |
16 | The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can |
17 | be changed when reloading the target (load an inactive table and swap the |
18 | diff --git a/Makefile b/Makefile |
19 | index f8462f8d8a151..ad1b8dc6e462a 100644 |
20 | --- a/Makefile |
21 | +++ b/Makefile |
22 | @@ -1,7 +1,7 @@ |
23 | # SPDX-License-Identifier: GPL-2.0 |
24 | VERSION = 5 |
25 | PATCHLEVEL = 4 |
26 | -SUBLEVEL = 93 |
27 | +SUBLEVEL = 94 |
28 | EXTRAVERSION = |
29 | NAME = Kleptomaniac Octopus |
30 | |
31 | diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h |
32 | index 08df42e4db96c..51d867cf146c1 100644 |
33 | --- a/arch/arm64/include/asm/memory.h |
34 | +++ b/arch/arm64/include/asm/memory.h |
35 | @@ -178,7 +178,6 @@ extern u64 vabits_actual; |
36 | #include <linux/bitops.h> |
37 | #include <linux/mmdebug.h> |
38 | |
39 | -extern s64 physvirt_offset; |
40 | extern s64 memstart_addr; |
41 | /* PHYS_OFFSET - the physical address of the start of memory. */ |
42 | #define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; }) |
43 | @@ -254,7 +253,7 @@ static inline const void *__tag_set(const void *addr, u8 tag) |
44 | */ |
45 | #define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1))) |
46 | |
47 | -#define __lm_to_phys(addr) (((addr) + physvirt_offset)) |
48 | +#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET) |
49 | #define __kimg_to_phys(addr) ((addr) - kimage_voffset) |
50 | |
51 | #define __virt_to_phys_nodebug(x) ({ \ |
52 | @@ -272,7 +271,7 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x); |
53 | #define __phys_addr_symbol(x) __pa_symbol_nodebug(x) |
54 | #endif /* CONFIG_DEBUG_VIRTUAL */ |
55 | |
56 | -#define __phys_to_virt(x) ((unsigned long)((x) - physvirt_offset)) |
57 | +#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET) |
58 | #define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset)) |
59 | |
60 | /* |
61 | diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h |
62 | index 69dfc340e71b1..8c420f916fe2e 100644 |
63 | --- a/arch/arm64/include/asm/pgtable.h |
64 | +++ b/arch/arm64/include/asm/pgtable.h |
65 | @@ -23,6 +23,8 @@ |
66 | #define VMALLOC_START (MODULES_END) |
67 | #define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K) |
68 | |
69 | +#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)) |
70 | + |
71 | #define FIRST_USER_ADDRESS 0UL |
72 | |
73 | #ifndef __ASSEMBLY__ |
74 | @@ -33,8 +35,6 @@ |
75 | #include <linux/mm_types.h> |
76 | #include <linux/sched.h> |
77 | |
78 | -extern struct page *vmemmap; |
79 | - |
80 | extern void __pte_error(const char *file, int line, unsigned long val); |
81 | extern void __pmd_error(const char *file, int line, unsigned long val); |
82 | extern void __pud_error(const char *file, int line, unsigned long val); |
83 | diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c |
84 | index 45c00a54909c9..602bd19630ff8 100644 |
85 | --- a/arch/arm64/mm/init.c |
86 | +++ b/arch/arm64/mm/init.c |
87 | @@ -50,12 +50,6 @@ |
88 | s64 memstart_addr __ro_after_init = -1; |
89 | EXPORT_SYMBOL(memstart_addr); |
90 | |
91 | -s64 physvirt_offset __ro_after_init; |
92 | -EXPORT_SYMBOL(physvirt_offset); |
93 | - |
94 | -struct page *vmemmap __ro_after_init; |
95 | -EXPORT_SYMBOL(vmemmap); |
96 | - |
97 | phys_addr_t arm64_dma_phys_limit __ro_after_init; |
98 | |
99 | #ifdef CONFIG_KEXEC_CORE |
100 | @@ -321,20 +315,6 @@ void __init arm64_memblock_init(void) |
101 | memstart_addr = round_down(memblock_start_of_DRAM(), |
102 | ARM64_MEMSTART_ALIGN); |
103 | |
104 | - physvirt_offset = PHYS_OFFSET - PAGE_OFFSET; |
105 | - |
106 | - vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)); |
107 | - |
108 | - /* |
109 | - * If we are running with a 52-bit kernel VA config on a system that |
110 | - * does not support it, we have to offset our vmemmap and physvirt_offset |
111 | - * s.t. we avoid the 52-bit portion of the direct linear map |
112 | - */ |
113 | - if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) { |
114 | - vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT; |
115 | - physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48); |
116 | - } |
117 | - |
118 | /* |
119 | * Remove the memory that we will not be able to cover with the |
120 | * linear mapping. Take care not to clip the kernel which may be |
121 | @@ -349,6 +329,16 @@ void __init arm64_memblock_init(void) |
122 | memblock_remove(0, memstart_addr); |
123 | } |
124 | |
125 | + /* |
126 | + * If we are running with a 52-bit kernel VA config on a system that |
127 | + * does not support it, we have to place the available physical |
128 | + * memory in the 48-bit addressable part of the linear region, i.e., |
129 | + * we have to move it upward. Since memstart_addr represents the |
130 | + * physical address of PAGE_OFFSET, we have to *subtract* from it. |
131 | + */ |
132 | + if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) |
133 | + memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52); |
134 | + |
135 | /* |
136 | * Apply the memory limit if it was set. Since the kernel may be loaded |
137 | * high up in memory, add back the kernel region that must be accessible |
138 | diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c |
139 | index 3985d6e1c17dc..89a053b1d2799 100644 |
140 | --- a/drivers/gpio/gpio-mvebu.c |
141 | +++ b/drivers/gpio/gpio-mvebu.c |
142 | @@ -657,9 +657,8 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip, |
143 | |
144 | spin_lock_irqsave(&mvpwm->lock, flags); |
145 | |
146 | - val = (unsigned long long) |
147 | - readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm)); |
148 | - val *= NSEC_PER_SEC; |
149 | + u = readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm)); |
150 | + val = (unsigned long long) u * NSEC_PER_SEC; |
151 | do_div(val, mvpwm->clk_rate); |
152 | if (val > UINT_MAX) |
153 | state->duty_cycle = UINT_MAX; |
154 | @@ -668,21 +667,17 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip, |
155 | else |
156 | state->duty_cycle = 1; |
157 | |
158 | - val = (unsigned long long) |
159 | - readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm)); |
160 | + val = (unsigned long long) u; /* on duration */ |
161 | + /* period = on + off duration */ |
162 | + val += readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm)); |
163 | val *= NSEC_PER_SEC; |
164 | do_div(val, mvpwm->clk_rate); |
165 | - if (val < state->duty_cycle) { |
166 | + if (val > UINT_MAX) |
167 | + state->period = UINT_MAX; |
168 | + else if (val) |
169 | + state->period = val; |
170 | + else |
171 | state->period = 1; |
172 | - } else { |
173 | - val -= state->duty_cycle; |
174 | - if (val > UINT_MAX) |
175 | - state->period = UINT_MAX; |
176 | - else if (val) |
177 | - state->period = val; |
178 | - else |
179 | - state->period = 1; |
180 | - } |
181 | |
182 | regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u); |
183 | if (u) |
184 | diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c |
185 | index 9e852b4bbf92b..73dafa60080f1 100644 |
186 | --- a/drivers/hid/wacom_sys.c |
187 | +++ b/drivers/hid/wacom_sys.c |
188 | @@ -147,9 +147,9 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev, |
189 | } |
190 | |
191 | if (flush) |
192 | - wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo); |
193 | + wacom_wac_queue_flush(hdev, wacom_wac->pen_fifo); |
194 | else if (insert) |
195 | - wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, |
196 | + wacom_wac_queue_insert(hdev, wacom_wac->pen_fifo, |
197 | raw_data, report_size); |
198 | |
199 | return insert && !flush; |
200 | @@ -1280,7 +1280,7 @@ static void wacom_devm_kfifo_release(struct device *dev, void *res) |
201 | static int wacom_devm_kfifo_alloc(struct wacom *wacom) |
202 | { |
203 | struct wacom_wac *wacom_wac = &wacom->wacom_wac; |
204 | - struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo; |
205 | + struct kfifo_rec_ptr_2 *pen_fifo; |
206 | int error; |
207 | |
208 | pen_fifo = devres_alloc(wacom_devm_kfifo_release, |
209 | @@ -1297,6 +1297,7 @@ static int wacom_devm_kfifo_alloc(struct wacom *wacom) |
210 | } |
211 | |
212 | devres_add(&wacom->hdev->dev, pen_fifo); |
213 | + wacom_wac->pen_fifo = pen_fifo; |
214 | |
215 | return 0; |
216 | } |
217 | diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h |
218 | index da612b6e9c779..195910dd2154e 100644 |
219 | --- a/drivers/hid/wacom_wac.h |
220 | +++ b/drivers/hid/wacom_wac.h |
221 | @@ -342,7 +342,7 @@ struct wacom_wac { |
222 | struct input_dev *pen_input; |
223 | struct input_dev *touch_input; |
224 | struct input_dev *pad_input; |
225 | - struct kfifo_rec_ptr_2 pen_fifo; |
226 | + struct kfifo_rec_ptr_2 *pen_fifo; |
227 | int pid; |
228 | int num_contacts_left; |
229 | u8 bt_features; |
230 | diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c |
231 | index 57f66f2ad98dc..c967c2cdba870 100644 |
232 | --- a/drivers/md/dm-integrity.c |
233 | +++ b/drivers/md/dm-integrity.c |
234 | @@ -254,6 +254,7 @@ struct dm_integrity_c { |
235 | bool journal_uptodate; |
236 | bool just_formatted; |
237 | bool recalculate_flag; |
238 | + bool legacy_recalculate; |
239 | |
240 | struct alg_spec internal_hash_alg; |
241 | struct alg_spec journal_crypt_alg; |
242 | @@ -381,6 +382,14 @@ static int dm_integrity_failed(struct dm_integrity_c *ic) |
243 | return READ_ONCE(ic->failed); |
244 | } |
245 | |
246 | +static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic) |
247 | +{ |
248 | + if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) && |
249 | + !ic->legacy_recalculate) |
250 | + return true; |
251 | + return false; |
252 | +} |
253 | + |
254 | static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i, |
255 | unsigned j, unsigned char seq) |
256 | { |
257 | @@ -2998,6 +3007,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, |
258 | arg_count += !!ic->internal_hash_alg.alg_string; |
259 | arg_count += !!ic->journal_crypt_alg.alg_string; |
260 | arg_count += !!ic->journal_mac_alg.alg_string; |
261 | + arg_count += ic->legacy_recalculate; |
262 | DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start, |
263 | ic->tag_size, ic->mode, arg_count); |
264 | if (ic->meta_dev) |
265 | @@ -3017,6 +3027,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, |
266 | DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit); |
267 | DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval)); |
268 | } |
269 | + if (ic->legacy_recalculate) |
270 | + DMEMIT(" legacy_recalculate"); |
271 | |
272 | #define EMIT_ALG(a, n) \ |
273 | do { \ |
274 | @@ -3625,7 +3637,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) |
275 | unsigned extra_args; |
276 | struct dm_arg_set as; |
277 | static const struct dm_arg _args[] = { |
278 | - {0, 15, "Invalid number of feature args"}, |
279 | + {0, 14, "Invalid number of feature args"}, |
280 | }; |
281 | unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec; |
282 | bool should_write_sb; |
283 | @@ -3769,6 +3781,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) |
284 | goto bad; |
285 | } else if (!strcmp(opt_string, "recalculate")) { |
286 | ic->recalculate_flag = true; |
287 | + } else if (!strcmp(opt_string, "legacy_recalculate")) { |
288 | + ic->legacy_recalculate = true; |
289 | } else { |
290 | r = -EINVAL; |
291 | ti->error = "Invalid argument"; |
292 | @@ -4067,6 +4081,14 @@ try_smaller_buffer: |
293 | } |
294 | } |
295 | |
296 | + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && |
297 | + le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors && |
298 | + dm_integrity_disable_recalculate(ic)) { |
299 | + ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\""; |
300 | + r = -EOPNOTSUPP; |
301 | + goto bad; |
302 | + } |
303 | + |
304 | ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev, |
305 | 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL); |
306 | if (IS_ERR(ic->bufio)) { |
307 | diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c |
308 | index be06b26d6ca03..7adecfd0c1e99 100644 |
309 | --- a/fs/cifs/smb2pdu.c |
310 | +++ b/fs/cifs/smb2pdu.c |
311 | @@ -490,8 +490,8 @@ build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt) |
312 | pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES; |
313 | pneg_ctxt->DataLength = cpu_to_le16(38); |
314 | pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1); |
315 | - pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE); |
316 | - get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE); |
317 | + pneg_ctxt->SaltLength = cpu_to_le16(SMB311_LINUX_CLIENT_SALT_SIZE); |
318 | + get_random_bytes(pneg_ctxt->Salt, SMB311_LINUX_CLIENT_SALT_SIZE); |
319 | pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512; |
320 | } |
321 | |
322 | @@ -617,6 +617,9 @@ static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt) |
323 | if (len < MIN_PREAUTH_CTXT_DATA_LEN) { |
324 | printk_once(KERN_WARNING "server sent bad preauth context\n"); |
325 | return; |
326 | + } else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) { |
327 | + pr_warn_once("server sent invalid SaltLength\n"); |
328 | + return; |
329 | } |
330 | if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1) |
331 | printk_once(KERN_WARNING "illegal SMB3 hash algorithm count\n"); |
332 | diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h |
333 | index f264e1d36fe16..2482978f09486 100644 |
334 | --- a/fs/cifs/smb2pdu.h |
335 | +++ b/fs/cifs/smb2pdu.h |
336 | @@ -271,12 +271,20 @@ struct smb2_neg_context { |
337 | /* Followed by array of data */ |
338 | } __packed; |
339 | |
340 | -#define SMB311_SALT_SIZE 32 |
341 | +#define SMB311_LINUX_CLIENT_SALT_SIZE 32 |
342 | /* Hash Algorithm Types */ |
343 | #define SMB2_PREAUTH_INTEGRITY_SHA512 cpu_to_le16(0x0001) |
344 | #define SMB2_PREAUTH_HASH_SIZE 64 |
345 | |
346 | -#define MIN_PREAUTH_CTXT_DATA_LEN (SMB311_SALT_SIZE + 6) |
347 | +/* |
348 | + * SaltLength that the server send can be zero, so the only three required |
349 | + * fields (all __le16) end up six bytes total, so the minimum context data len |
350 | + * in the response is six bytes which accounts for |
351 | + * |
352 | + * HashAlgorithmCount, SaltLength, and 1 HashAlgorithm. |
353 | + */ |
354 | +#define MIN_PREAUTH_CTXT_DATA_LEN 6 |
355 | + |
356 | struct smb2_preauth_neg_context { |
357 | __le16 ContextType; /* 1 */ |
358 | __le16 DataLength; |
359 | @@ -284,7 +292,7 @@ struct smb2_preauth_neg_context { |
360 | __le16 HashAlgorithmCount; /* 1 */ |
361 | __le16 SaltLength; |
362 | __le16 HashAlgorithms; /* HashAlgorithms[0] since only one defined */ |
363 | - __u8 Salt[SMB311_SALT_SIZE]; |
364 | + __u8 Salt[SMB311_LINUX_CLIENT_SALT_SIZE]; |
365 | } __packed; |
366 | |
367 | /* Encryption Algorithms Ciphers */ |
368 | diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c |
369 | index 3bac525f0439d..539d95bd364d4 100644 |
370 | --- a/fs/ext4/inode.c |
371 | +++ b/fs/ext4/inode.c |
372 | @@ -5209,7 +5209,7 @@ static int other_inode_match(struct inode * inode, unsigned long ino, |
373 | (inode->i_state & I_DIRTY_TIME)) { |
374 | struct ext4_inode_info *ei = EXT4_I(inode); |
375 | |
376 | - inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); |
377 | + inode->i_state &= ~I_DIRTY_TIME; |
378 | spin_unlock(&inode->i_lock); |
379 | |
380 | spin_lock(&ei->i_raw_lock); |
381 | diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c |
382 | index 5f6400ba82c00..a2cf2db0d3def 100644 |
383 | --- a/fs/fs-writeback.c |
384 | +++ b/fs/fs-writeback.c |
385 | @@ -1238,7 +1238,7 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t) |
386 | */ |
387 | static int move_expired_inodes(struct list_head *delaying_queue, |
388 | struct list_head *dispatch_queue, |
389 | - int flags, unsigned long dirtied_before) |
390 | + unsigned long dirtied_before) |
391 | { |
392 | LIST_HEAD(tmp); |
393 | struct list_head *pos, *node; |
394 | @@ -1254,8 +1254,6 @@ static int move_expired_inodes(struct list_head *delaying_queue, |
395 | list_move(&inode->i_io_list, &tmp); |
396 | moved++; |
397 | spin_lock(&inode->i_lock); |
398 | - if (flags & EXPIRE_DIRTY_ATIME) |
399 | - inode->i_state |= I_DIRTY_TIME_EXPIRED; |
400 | inode->i_state |= I_SYNC_QUEUED; |
401 | spin_unlock(&inode->i_lock); |
402 | if (sb_is_blkdev_sb(inode->i_sb)) |
403 | @@ -1303,11 +1301,11 @@ static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work, |
404 | |
405 | assert_spin_locked(&wb->list_lock); |
406 | list_splice_init(&wb->b_more_io, &wb->b_io); |
407 | - moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, dirtied_before); |
408 | + moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before); |
409 | if (!work->for_sync) |
410 | time_expire_jif = jiffies - dirtytime_expire_interval * HZ; |
411 | moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io, |
412 | - EXPIRE_DIRTY_ATIME, time_expire_jif); |
413 | + time_expire_jif); |
414 | if (moved) |
415 | wb_io_lists_populated(wb); |
416 | trace_writeback_queue_io(wb, work, dirtied_before, moved); |
417 | @@ -1475,26 +1473,26 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) |
418 | ret = err; |
419 | } |
420 | |
421 | + /* |
422 | + * If the inode has dirty timestamps and we need to write them, call |
423 | + * mark_inode_dirty_sync() to notify the filesystem about it and to |
424 | + * change I_DIRTY_TIME into I_DIRTY_SYNC. |
425 | + */ |
426 | + if ((inode->i_state & I_DIRTY_TIME) && |
427 | + (wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync || |
428 | + time_after(jiffies, inode->dirtied_time_when + |
429 | + dirtytime_expire_interval * HZ))) { |
430 | + trace_writeback_lazytime(inode); |
431 | + mark_inode_dirty_sync(inode); |
432 | + } |
433 | + |
434 | /* |
435 | * Some filesystems may redirty the inode during the writeback |
436 | * due to delalloc, clear dirty metadata flags right before |
437 | * write_inode() |
438 | */ |
439 | spin_lock(&inode->i_lock); |
440 | - |
441 | dirty = inode->i_state & I_DIRTY; |
442 | - if (inode->i_state & I_DIRTY_TIME) { |
443 | - if ((dirty & I_DIRTY_INODE) || |
444 | - wbc->sync_mode == WB_SYNC_ALL || |
445 | - unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) || |
446 | - unlikely(time_after(jiffies, |
447 | - (inode->dirtied_time_when + |
448 | - dirtytime_expire_interval * HZ)))) { |
449 | - dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED; |
450 | - trace_writeback_lazytime(inode); |
451 | - } |
452 | - } else |
453 | - inode->i_state &= ~I_DIRTY_TIME_EXPIRED; |
454 | inode->i_state &= ~dirty; |
455 | |
456 | /* |
457 | @@ -1515,8 +1513,6 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) |
458 | |
459 | spin_unlock(&inode->i_lock); |
460 | |
461 | - if (dirty & I_DIRTY_TIME) |
462 | - mark_inode_dirty_sync(inode); |
463 | /* Don't write the inode if only I_DIRTY_PAGES was set */ |
464 | if (dirty & ~I_DIRTY_PAGES) { |
465 | int err = write_inode(inode, wbc); |
466 | diff --git a/fs/io_uring.c b/fs/io_uring.c |
467 | index 4127ea027a14d..478df7e10767a 100644 |
468 | --- a/fs/io_uring.c |
469 | +++ b/fs/io_uring.c |
470 | @@ -2226,7 +2226,8 @@ restart: |
471 | /* Ensure we clear previously set non-block flag */ |
472 | req->rw.ki_flags &= ~IOCB_NOWAIT; |
473 | |
474 | - if (req->fs != current->fs && current->fs != old_fs_struct) { |
475 | + if ((req->fs && req->fs != current->fs) || |
476 | + (!req->fs && current->fs != old_fs_struct)) { |
477 | task_lock(current); |
478 | if (req->fs) |
479 | current->fs = req->fs; |
480 | @@ -2351,7 +2352,7 @@ out: |
481 | mmput(cur_mm); |
482 | } |
483 | revert_creds(old_cred); |
484 | - if (old_fs_struct) { |
485 | + if (old_fs_struct != current->fs) { |
486 | task_lock(current); |
487 | current->fs = old_fs_struct; |
488 | task_unlock(current); |
489 | diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c |
490 | index 6c7354abd0aea..0ba7368b9a5f0 100644 |
491 | --- a/fs/xfs/libxfs/xfs_trans_inode.c |
492 | +++ b/fs/xfs/libxfs/xfs_trans_inode.c |
493 | @@ -100,9 +100,9 @@ xfs_trans_log_inode( |
494 | * to log the timestamps, or will clear already cleared fields in the |
495 | * worst case. |
496 | */ |
497 | - if (inode->i_state & (I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED)) { |
498 | + if (inode->i_state & I_DIRTY_TIME) { |
499 | spin_lock(&inode->i_lock); |
500 | - inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); |
501 | + inode->i_state &= ~I_DIRTY_TIME; |
502 | spin_unlock(&inode->i_lock); |
503 | } |
504 | |
505 | diff --git a/include/linux/fs.h b/include/linux/fs.h |
506 | index 4c82683e034a7..ef118b8ba6993 100644 |
507 | --- a/include/linux/fs.h |
508 | +++ b/include/linux/fs.h |
509 | @@ -2161,7 +2161,6 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) |
510 | #define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP) |
511 | #define I_LINKABLE (1 << 10) |
512 | #define I_DIRTY_TIME (1 << 11) |
513 | -#define I_DIRTY_TIME_EXPIRED (1 << 12) |
514 | #define I_WB_SWITCH (1 << 13) |
515 | #define I_OVL_INUSE (1 << 14) |
516 | #define I_CREATING (1 << 15) |
517 | diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h |
518 | index a8af22e469ce5..011e8faa608b9 100644 |
519 | --- a/include/trace/events/writeback.h |
520 | +++ b/include/trace/events/writeback.h |
521 | @@ -20,7 +20,6 @@ |
522 | {I_CLEAR, "I_CLEAR"}, \ |
523 | {I_SYNC, "I_SYNC"}, \ |
524 | {I_DIRTY_TIME, "I_DIRTY_TIME"}, \ |
525 | - {I_DIRTY_TIME_EXPIRED, "I_DIRTY_TIME_EXPIRED"}, \ |
526 | {I_REFERENCED, "I_REFERENCED"} \ |
527 | ) |
528 | |
529 | diff --git a/kernel/futex.c b/kernel/futex.c |
530 | index b6dec5f79370c..042c2707e9131 100644 |
531 | --- a/kernel/futex.c |
532 | +++ b/kernel/futex.c |
533 | @@ -857,6 +857,29 @@ static struct futex_pi_state *alloc_pi_state(void) |
534 | return pi_state; |
535 | } |
536 | |
537 | +static void pi_state_update_owner(struct futex_pi_state *pi_state, |
538 | + struct task_struct *new_owner) |
539 | +{ |
540 | + struct task_struct *old_owner = pi_state->owner; |
541 | + |
542 | + lockdep_assert_held(&pi_state->pi_mutex.wait_lock); |
543 | + |
544 | + if (old_owner) { |
545 | + raw_spin_lock(&old_owner->pi_lock); |
546 | + WARN_ON(list_empty(&pi_state->list)); |
547 | + list_del_init(&pi_state->list); |
548 | + raw_spin_unlock(&old_owner->pi_lock); |
549 | + } |
550 | + |
551 | + if (new_owner) { |
552 | + raw_spin_lock(&new_owner->pi_lock); |
553 | + WARN_ON(!list_empty(&pi_state->list)); |
554 | + list_add(&pi_state->list, &new_owner->pi_state_list); |
555 | + pi_state->owner = new_owner; |
556 | + raw_spin_unlock(&new_owner->pi_lock); |
557 | + } |
558 | +} |
559 | + |
560 | static void get_pi_state(struct futex_pi_state *pi_state) |
561 | { |
562 | WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount)); |
563 | @@ -879,17 +902,11 @@ static void put_pi_state(struct futex_pi_state *pi_state) |
564 | * and has cleaned up the pi_state already |
565 | */ |
566 | if (pi_state->owner) { |
567 | - struct task_struct *owner; |
568 | unsigned long flags; |
569 | |
570 | raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags); |
571 | - owner = pi_state->owner; |
572 | - if (owner) { |
573 | - raw_spin_lock(&owner->pi_lock); |
574 | - list_del_init(&pi_state->list); |
575 | - raw_spin_unlock(&owner->pi_lock); |
576 | - } |
577 | - rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner); |
578 | + pi_state_update_owner(pi_state, NULL); |
579 | + rt_mutex_proxy_unlock(&pi_state->pi_mutex); |
580 | raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags); |
581 | } |
582 | |
583 | @@ -1035,7 +1052,8 @@ static inline void exit_pi_state_list(struct task_struct *curr) { } |
584 | * FUTEX_OWNER_DIED bit. See [4] |
585 | * |
586 | * [10] There is no transient state which leaves owner and user space |
587 | - * TID out of sync. |
588 | + * TID out of sync. Except one error case where the kernel is denied |
589 | + * write access to the user address, see fixup_pi_state_owner(). |
590 | * |
591 | * |
592 | * Serialization and lifetime rules: |
593 | @@ -1614,26 +1632,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ |
594 | ret = -EINVAL; |
595 | } |
596 | |
597 | - if (ret) |
598 | - goto out_unlock; |
599 | - |
600 | - /* |
601 | - * This is a point of no return; once we modify the uval there is no |
602 | - * going back and subsequent operations must not fail. |
603 | - */ |
604 | - |
605 | - raw_spin_lock(&pi_state->owner->pi_lock); |
606 | - WARN_ON(list_empty(&pi_state->list)); |
607 | - list_del_init(&pi_state->list); |
608 | - raw_spin_unlock(&pi_state->owner->pi_lock); |
609 | - |
610 | - raw_spin_lock(&new_owner->pi_lock); |
611 | - WARN_ON(!list_empty(&pi_state->list)); |
612 | - list_add(&pi_state->list, &new_owner->pi_state_list); |
613 | - pi_state->owner = new_owner; |
614 | - raw_spin_unlock(&new_owner->pi_lock); |
615 | - |
616 | - postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); |
617 | + if (!ret) { |
618 | + /* |
619 | + * This is a point of no return; once we modified the uval |
620 | + * there is no going back and subsequent operations must |
621 | + * not fail. |
622 | + */ |
623 | + pi_state_update_owner(pi_state, new_owner); |
624 | + postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); |
625 | + } |
626 | |
627 | out_unlock: |
628 | raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
629 | @@ -2456,18 +2463,13 @@ static void unqueue_me_pi(struct futex_q *q) |
630 | spin_unlock(q->lock_ptr); |
631 | } |
632 | |
633 | -static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, |
634 | - struct task_struct *argowner) |
635 | +static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, |
636 | + struct task_struct *argowner) |
637 | { |
638 | + u32 uval, uninitialized_var(curval), newval, newtid; |
639 | struct futex_pi_state *pi_state = q->pi_state; |
640 | - u32 uval, uninitialized_var(curval), newval; |
641 | struct task_struct *oldowner, *newowner; |
642 | - u32 newtid; |
643 | - int ret, err = 0; |
644 | - |
645 | - lockdep_assert_held(q->lock_ptr); |
646 | - |
647 | - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
648 | + int err = 0; |
649 | |
650 | oldowner = pi_state->owner; |
651 | |
652 | @@ -2501,14 +2503,12 @@ retry: |
653 | * We raced against a concurrent self; things are |
654 | * already fixed up. Nothing to do. |
655 | */ |
656 | - ret = 0; |
657 | - goto out_unlock; |
658 | + return 0; |
659 | } |
660 | |
661 | if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) { |
662 | - /* We got the lock after all, nothing to fix. */ |
663 | - ret = 0; |
664 | - goto out_unlock; |
665 | + /* We got the lock. pi_state is correct. Tell caller. */ |
666 | + return 1; |
667 | } |
668 | |
669 | /* |
670 | @@ -2535,8 +2535,7 @@ retry: |
671 | * We raced against a concurrent self; things are |
672 | * already fixed up. Nothing to do. |
673 | */ |
674 | - ret = 0; |
675 | - goto out_unlock; |
676 | + return 1; |
677 | } |
678 | newowner = argowner; |
679 | } |
680 | @@ -2566,22 +2565,9 @@ retry: |
681 | * We fixed up user space. Now we need to fix the pi_state |
682 | * itself. |
683 | */ |
684 | - if (pi_state->owner != NULL) { |
685 | - raw_spin_lock(&pi_state->owner->pi_lock); |
686 | - WARN_ON(list_empty(&pi_state->list)); |
687 | - list_del_init(&pi_state->list); |
688 | - raw_spin_unlock(&pi_state->owner->pi_lock); |
689 | - } |
690 | + pi_state_update_owner(pi_state, newowner); |
691 | |
692 | - pi_state->owner = newowner; |
693 | - |
694 | - raw_spin_lock(&newowner->pi_lock); |
695 | - WARN_ON(!list_empty(&pi_state->list)); |
696 | - list_add(&pi_state->list, &newowner->pi_state_list); |
697 | - raw_spin_unlock(&newowner->pi_lock); |
698 | - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
699 | - |
700 | - return 0; |
701 | + return argowner == current; |
702 | |
703 | /* |
704 | * In order to reschedule or handle a page fault, we need to drop the |
705 | @@ -2602,17 +2588,16 @@ handle_err: |
706 | |
707 | switch (err) { |
708 | case -EFAULT: |
709 | - ret = fault_in_user_writeable(uaddr); |
710 | + err = fault_in_user_writeable(uaddr); |
711 | break; |
712 | |
713 | case -EAGAIN: |
714 | cond_resched(); |
715 | - ret = 0; |
716 | + err = 0; |
717 | break; |
718 | |
719 | default: |
720 | WARN_ON_ONCE(1); |
721 | - ret = err; |
722 | break; |
723 | } |
724 | |
725 | @@ -2622,17 +2607,44 @@ handle_err: |
726 | /* |
727 | * Check if someone else fixed it for us: |
728 | */ |
729 | - if (pi_state->owner != oldowner) { |
730 | - ret = 0; |
731 | - goto out_unlock; |
732 | - } |
733 | + if (pi_state->owner != oldowner) |
734 | + return argowner == current; |
735 | |
736 | - if (ret) |
737 | - goto out_unlock; |
738 | + /* Retry if err was -EAGAIN or the fault in succeeded */ |
739 | + if (!err) |
740 | + goto retry; |
741 | |
742 | - goto retry; |
743 | + /* |
744 | + * fault_in_user_writeable() failed so user state is immutable. At |
745 | + * best we can make the kernel state consistent but user state will |
746 | + * be most likely hosed and any subsequent unlock operation will be |
747 | + * rejected due to PI futex rule [10]. |
748 | + * |
749 | + * Ensure that the rtmutex owner is also the pi_state owner despite |
750 | + * the user space value claiming something different. There is no |
751 | + * point in unlocking the rtmutex if current is the owner as it |
752 | + * would need to wait until the next waiter has taken the rtmutex |
753 | + * to guarantee consistent state. Keep it simple. Userspace asked |
754 | + * for this wreckaged state. |
755 | + * |
756 | + * The rtmutex has an owner - either current or some other |
757 | + * task. See the EAGAIN loop above. |
758 | + */ |
759 | + pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex)); |
760 | |
761 | -out_unlock: |
762 | + return err; |
763 | +} |
764 | + |
765 | +static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, |
766 | + struct task_struct *argowner) |
767 | +{ |
768 | + struct futex_pi_state *pi_state = q->pi_state; |
769 | + int ret; |
770 | + |
771 | + lockdep_assert_held(q->lock_ptr); |
772 | + |
773 | + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
774 | + ret = __fixup_pi_state_owner(uaddr, q, argowner); |
775 | raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
776 | return ret; |
777 | } |
778 | @@ -2656,8 +2668,6 @@ static long futex_wait_restart(struct restart_block *restart); |
779 | */ |
780 | static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) |
781 | { |
782 | - int ret = 0; |
783 | - |
784 | if (locked) { |
785 | /* |
786 | * Got the lock. We might not be the anticipated owner if we |
787 | @@ -2668,8 +2678,8 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) |
788 | * stable state, anything else needs more attention. |
789 | */ |
790 | if (q->pi_state->owner != current) |
791 | - ret = fixup_pi_state_owner(uaddr, q, current); |
792 | - goto out; |
793 | + return fixup_pi_state_owner(uaddr, q, current); |
794 | + return 1; |
795 | } |
796 | |
797 | /* |
798 | @@ -2680,24 +2690,17 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) |
799 | * Another speculative read; pi_state->owner == current is unstable |
800 | * but needs our attention. |
801 | */ |
802 | - if (q->pi_state->owner == current) { |
803 | - ret = fixup_pi_state_owner(uaddr, q, NULL); |
804 | - goto out; |
805 | - } |
806 | + if (q->pi_state->owner == current) |
807 | + return fixup_pi_state_owner(uaddr, q, NULL); |
808 | |
809 | /* |
810 | * Paranoia check. If we did not take the lock, then we should not be |
811 | - * the owner of the rt_mutex. |
812 | + * the owner of the rt_mutex. Warn and establish consistent state. |
813 | */ |
814 | - if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) { |
815 | - printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p " |
816 | - "pi-state %p\n", ret, |
817 | - q->pi_state->pi_mutex.owner, |
818 | - q->pi_state->owner); |
819 | - } |
820 | + if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current)) |
821 | + return fixup_pi_state_owner(uaddr, q, current); |
822 | |
823 | -out: |
824 | - return ret ? ret : locked; |
825 | + return 0; |
826 | } |
827 | |
828 | /** |
829 | @@ -2909,7 +2912,6 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, |
830 | ktime_t *time, int trylock) |
831 | { |
832 | struct hrtimer_sleeper timeout, *to; |
833 | - struct futex_pi_state *pi_state = NULL; |
834 | struct task_struct *exiting = NULL; |
835 | struct rt_mutex_waiter rt_waiter; |
836 | struct futex_hash_bucket *hb; |
837 | @@ -3046,23 +3048,9 @@ no_block: |
838 | if (res) |
839 | ret = (res < 0) ? res : 0; |
840 | |
841 | - /* |
842 | - * If fixup_owner() faulted and was unable to handle the fault, unlock |
843 | - * it and return the fault to userspace. |
844 | - */ |
845 | - if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) { |
846 | - pi_state = q.pi_state; |
847 | - get_pi_state(pi_state); |
848 | - } |
849 | - |
850 | /* Unqueue and drop the lock */ |
851 | unqueue_me_pi(&q); |
852 | |
853 | - if (pi_state) { |
854 | - rt_mutex_futex_unlock(&pi_state->pi_mutex); |
855 | - put_pi_state(pi_state); |
856 | - } |
857 | - |
858 | goto out_put_key; |
859 | |
860 | out_unlock_put_key: |
861 | @@ -3328,7 +3316,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
862 | u32 __user *uaddr2) |
863 | { |
864 | struct hrtimer_sleeper timeout, *to; |
865 | - struct futex_pi_state *pi_state = NULL; |
866 | struct rt_mutex_waiter rt_waiter; |
867 | struct futex_hash_bucket *hb; |
868 | union futex_key key2 = FUTEX_KEY_INIT; |
869 | @@ -3406,16 +3393,17 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
870 | if (q.pi_state && (q.pi_state->owner != current)) { |
871 | spin_lock(q.lock_ptr); |
872 | ret = fixup_pi_state_owner(uaddr2, &q, current); |
873 | - if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { |
874 | - pi_state = q.pi_state; |
875 | - get_pi_state(pi_state); |
876 | - } |
877 | /* |
878 | * Drop the reference to the pi state which |
879 | * the requeue_pi() code acquired for us. |
880 | */ |
881 | put_pi_state(q.pi_state); |
882 | spin_unlock(q.lock_ptr); |
883 | + /* |
884 | + * Adjust the return value. It's either -EFAULT or |
885 | + * success (1) but the caller expects 0 for success. |
886 | + */ |
887 | + ret = ret < 0 ? ret : 0; |
888 | } |
889 | } else { |
890 | struct rt_mutex *pi_mutex; |
891 | @@ -3446,25 +3434,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
892 | if (res) |
893 | ret = (res < 0) ? res : 0; |
894 | |
895 | - /* |
896 | - * If fixup_pi_state_owner() faulted and was unable to handle |
897 | - * the fault, unlock the rt_mutex and return the fault to |
898 | - * userspace. |
899 | - */ |
900 | - if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { |
901 | - pi_state = q.pi_state; |
902 | - get_pi_state(pi_state); |
903 | - } |
904 | - |
905 | /* Unqueue and drop the lock. */ |
906 | unqueue_me_pi(&q); |
907 | } |
908 | |
909 | - if (pi_state) { |
910 | - rt_mutex_futex_unlock(&pi_state->pi_mutex); |
911 | - put_pi_state(pi_state); |
912 | - } |
913 | - |
914 | if (ret == -EINTR) { |
915 | /* |
916 | * We've already been requeued, but cannot restart by calling |
917 | diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c |
918 | index 2874bf5561620..734698aec5f9e 100644 |
919 | --- a/kernel/locking/rtmutex.c |
920 | +++ b/kernel/locking/rtmutex.c |
921 | @@ -1718,8 +1718,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, |
922 | * possible because it belongs to the pi_state which is about to be freed |
923 | * and it is not longer visible to other tasks. |
924 | */ |
925 | -void rt_mutex_proxy_unlock(struct rt_mutex *lock, |
926 | - struct task_struct *proxy_owner) |
927 | +void rt_mutex_proxy_unlock(struct rt_mutex *lock) |
928 | { |
929 | debug_rt_mutex_proxy_unlock(lock); |
930 | rt_mutex_set_owner(lock, NULL); |
931 | diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h |
932 | index d1d62f942be22..ca6fb489007b6 100644 |
933 | --- a/kernel/locking/rtmutex_common.h |
934 | +++ b/kernel/locking/rtmutex_common.h |
935 | @@ -133,8 +133,7 @@ enum rtmutex_chainwalk { |
936 | extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); |
937 | extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, |
938 | struct task_struct *proxy_owner); |
939 | -extern void rt_mutex_proxy_unlock(struct rt_mutex *lock, |
940 | - struct task_struct *proxy_owner); |
941 | +extern void rt_mutex_proxy_unlock(struct rt_mutex *lock); |
942 | extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); |
943 | extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, |
944 | struct rt_mutex_waiter *waiter, |
945 | diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c |
946 | index 077877ed54f73..7283741666538 100644 |
947 | --- a/kernel/trace/ring_buffer.c |
948 | +++ b/kernel/trace/ring_buffer.c |
949 | @@ -4448,6 +4448,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) |
950 | |
951 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
952 | return; |
953 | + /* prevent another thread from changing buffer sizes */ |
954 | + mutex_lock(&buffer->mutex); |
955 | |
956 | atomic_inc(&buffer->resize_disabled); |
957 | atomic_inc(&cpu_buffer->record_disabled); |
958 | @@ -4471,6 +4473,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) |
959 | |
960 | atomic_dec(&cpu_buffer->record_disabled); |
961 | atomic_dec(&buffer->resize_disabled); |
962 | + |
963 | + mutex_unlock(&buffer->mutex); |
964 | } |
965 | EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); |
966 | |
967 | diff --git a/mm/slub.c b/mm/slub.c |
968 | index 8b3ef45a0f103..e622e8f4c2ac4 100644 |
969 | --- a/mm/slub.c |
970 | +++ b/mm/slub.c |
971 | @@ -5819,10 +5819,8 @@ static int sysfs_slab_add(struct kmem_cache *s) |
972 | |
973 | s->kobj.kset = kset; |
974 | err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); |
975 | - if (err) { |
976 | - kobject_put(&s->kobj); |
977 | + if (err) |
978 | goto out; |
979 | - } |
980 | |
981 | err = sysfs_create_group(&s->kobj, &slab_attr_group); |
982 | if (err) |
983 | diff --git a/tools/build/Makefile b/tools/build/Makefile |
984 | index 727050c40f096..8a55378e8b7ce 100644 |
985 | --- a/tools/build/Makefile |
986 | +++ b/tools/build/Makefile |
987 | @@ -15,10 +15,6 @@ endef |
988 | $(call allow-override,CC,$(CROSS_COMPILE)gcc) |
989 | $(call allow-override,LD,$(CROSS_COMPILE)ld) |
990 | |
991 | -HOSTCC ?= gcc |
992 | -HOSTLD ?= ld |
993 | -HOSTAR ?= ar |
994 | - |
995 | export HOSTCC HOSTLD HOSTAR |
996 | |
997 | ifeq ($(V),1) |
998 | diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile |
999 | index f591c4d1b6fe2..9ae4a10438ee3 100644 |
1000 | --- a/tools/objtool/Makefile |
1001 | +++ b/tools/objtool/Makefile |
1002 | @@ -3,15 +3,6 @@ include ../scripts/Makefile.include |
1003 | include ../scripts/Makefile.arch |
1004 | |
1005 | # always use the host compiler |
1006 | -ifneq ($(LLVM),) |
1007 | -HOSTAR ?= llvm-ar |
1008 | -HOSTCC ?= clang |
1009 | -HOSTLD ?= ld.lld |
1010 | -else |
1011 | -HOSTAR ?= ar |
1012 | -HOSTCC ?= gcc |
1013 | -HOSTLD ?= ld |
1014 | -endif |
1015 | AR = $(HOSTAR) |
1016 | CC = $(HOSTCC) |
1017 | LD = $(HOSTLD) |
1018 | diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf |
1019 | index 902c792f326a5..961f5e4fd6566 100644 |
1020 | --- a/tools/perf/Makefile.perf |
1021 | +++ b/tools/perf/Makefile.perf |
1022 | @@ -163,10 +163,6 @@ endef |
1023 | |
1024 | LD += $(EXTRA_LDFLAGS) |
1025 | |
1026 | -HOSTCC ?= gcc |
1027 | -HOSTLD ?= ld |
1028 | -HOSTAR ?= ar |
1029 | - |
1030 | PKG_CONFIG = $(CROSS_COMPILE)pkg-config |
1031 | LLVM_CONFIG ?= llvm-config |
1032 | |
1033 | diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config |
1034 | index 54a2857c2510a..331f6d30f4726 100644 |
1035 | --- a/tools/power/acpi/Makefile.config |
1036 | +++ b/tools/power/acpi/Makefile.config |
1037 | @@ -54,7 +54,6 @@ INSTALL_SCRIPT = ${INSTALL_PROGRAM} |
1038 | CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc- |
1039 | CROSS_COMPILE ?= $(CROSS) |
1040 | LD = $(CC) |
1041 | -HOSTCC = gcc |
1042 | |
1043 | # check if compiler option is supported |
1044 | cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;} |
1045 | diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include |
1046 | index 6d2f3a1b22493..812fc97bb1a97 100644 |
1047 | --- a/tools/scripts/Makefile.include |
1048 | +++ b/tools/scripts/Makefile.include |
1049 | @@ -59,6 +59,16 @@ $(call allow-override,LD,$(CROSS_COMPILE)ld) |
1050 | $(call allow-override,CXX,$(CROSS_COMPILE)g++) |
1051 | $(call allow-override,STRIP,$(CROSS_COMPILE)strip) |
1052 | |
1053 | +ifneq ($(LLVM),) |
1054 | +HOSTAR ?= llvm-ar |
1055 | +HOSTCC ?= clang |
1056 | +HOSTLD ?= ld.lld |
1057 | +else |
1058 | +HOSTAR ?= ar |
1059 | +HOSTCC ?= gcc |
1060 | +HOSTLD ?= ld |
1061 | +endif |
1062 | + |
1063 | ifeq ($(CC_NO_CLANG), 1) |
1064 | EXTRA_WARNINGS += -Wstrict-aliasing=3 |
1065 | endif |