Contents of /trunk/kernel-alx/patches-3.10/0163-3.10.64-all-fixes.patch
Revision 2651
Tue Jul 21 16:20:21 2015 UTC (9 years, 3 months ago) by niro
File size: 47383 byte(s)
-linux-3.10.64
1 | diff --git a/Makefile b/Makefile |
2 | index 9383fe24baa9..e5b63fb3d0e1 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 3 |
7 | PATCHLEVEL = 10 |
8 | -SUBLEVEL = 63 |
9 | +SUBLEVEL = 64 |
10 | EXTRAVERSION = |
11 | NAME = TOSSUG Baby Fish |
12 | |
13 | diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c |
14 | index 8b6e4f5288a2..a98afed9348b 100644 |
15 | --- a/arch/s390/kernel/compat_linux.c |
16 | +++ b/arch/s390/kernel/compat_linux.c |
17 | @@ -248,7 +248,7 @@ asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist) |
18 | struct group_info *group_info; |
19 | int retval; |
20 | |
21 | - if (!capable(CAP_SETGID)) |
22 | + if (!may_setgroups()) |
23 | return -EPERM; |
24 | if ((unsigned)gidsetsize > NGROUPS_MAX) |
25 | return -EINVAL; |
26 | diff --git a/arch/x86/include/uapi/asm/ldt.h b/arch/x86/include/uapi/asm/ldt.h |
27 | index 46727eb37bfe..6e1aaf73852a 100644 |
28 | --- a/arch/x86/include/uapi/asm/ldt.h |
29 | +++ b/arch/x86/include/uapi/asm/ldt.h |
30 | @@ -28,6 +28,13 @@ struct user_desc { |
31 | unsigned int seg_not_present:1; |
32 | unsigned int useable:1; |
33 | #ifdef __x86_64__ |
34 | + /* |
35 | + * Because this bit is not present in 32-bit user code, user |
36 | + * programs can pass uninitialized values here. Therefore, in |
37 | + * any context in which a user_desc comes from a 32-bit program, |
38 | + * the kernel must act as though lm == 0, regardless of the |
39 | + * actual value. |
40 | + */ |
41 | unsigned int lm:1; |
42 | #endif |
43 | }; |
44 | diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c |
45 | index cd6d9a5a42f6..c4ff2a916139 100644 |
46 | --- a/arch/x86/kernel/kvm.c |
47 | +++ b/arch/x86/kernel/kvm.c |
48 | @@ -279,7 +279,14 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code) |
49 | static void __init paravirt_ops_setup(void) |
50 | { |
51 | pv_info.name = "KVM"; |
52 | - pv_info.paravirt_enabled = 1; |
53 | + |
54 | + /* |
55 | + * KVM isn't paravirt in the sense of paravirt_enabled. A KVM |
56 | + * guest kernel works like a bare metal kernel with additional |
57 | + * features, and paravirt_enabled is about features that are |
58 | + * missing. |
59 | + */ |
60 | + pv_info.paravirt_enabled = 0; |
61 | |
62 | if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY)) |
63 | pv_cpu_ops.io_delay = kvm_io_delay; |
64 | diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c |
65 | index 3dd37ebd591b..41514f56c241 100644 |
66 | --- a/arch/x86/kernel/kvmclock.c |
67 | +++ b/arch/x86/kernel/kvmclock.c |
68 | @@ -265,7 +265,6 @@ void __init kvmclock_init(void) |
69 | #endif |
70 | kvm_get_preset_lpj(); |
71 | clocksource_register_hz(&kvm_clock, NSEC_PER_SEC); |
72 | - pv_info.paravirt_enabled = 1; |
73 | pv_info.name = "KVM"; |
74 | |
75 | if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT)) |
76 | diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c |
77 | index f99a242730e9..7099ab1e075b 100644 |
78 | --- a/arch/x86/kernel/process_64.c |
79 | +++ b/arch/x86/kernel/process_64.c |
80 | @@ -279,24 +279,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) |
81 | |
82 | fpu = switch_fpu_prepare(prev_p, next_p, cpu); |
83 | |
84 | - /* |
85 | - * Reload esp0, LDT and the page table pointer: |
86 | - */ |
87 | + /* Reload esp0 and ss1. */ |
88 | load_sp0(tss, next); |
89 | |
90 | - /* |
91 | - * Switch DS and ES. |
92 | - * This won't pick up thread selector changes, but I guess that is ok. |
93 | - */ |
94 | - savesegment(es, prev->es); |
95 | - if (unlikely(next->es | prev->es)) |
96 | - loadsegment(es, next->es); |
97 | - |
98 | - savesegment(ds, prev->ds); |
99 | - if (unlikely(next->ds | prev->ds)) |
100 | - loadsegment(ds, next->ds); |
101 | - |
102 | - |
103 | /* We must save %fs and %gs before load_TLS() because |
104 | * %fs and %gs may be cleared by load_TLS(). |
105 | * |
106 | @@ -305,41 +290,101 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) |
107 | savesegment(fs, fsindex); |
108 | savesegment(gs, gsindex); |
109 | |
110 | + /* |
111 | + * Load TLS before restoring any segments so that segment loads |
112 | + * reference the correct GDT entries. |
113 | + */ |
114 | load_TLS(next, cpu); |
115 | |
116 | /* |
117 | - * Leave lazy mode, flushing any hypercalls made here. |
118 | - * This must be done before restoring TLS segments so |
119 | - * the GDT and LDT are properly updated, and must be |
120 | - * done before math_state_restore, so the TS bit is up |
121 | - * to date. |
122 | + * Leave lazy mode, flushing any hypercalls made here. This |
123 | + * must be done after loading TLS entries in the GDT but before |
124 | + * loading segments that might reference them, and it must |
125 | + * be done before math_state_restore, so the TS bit is up to |
126 | + * date. |
127 | */ |
128 | arch_end_context_switch(next_p); |
129 | |
130 | + /* Switch DS and ES. |
131 | + * |
132 | + * Reading them only returns the selectors, but writing them (if |
133 | + * nonzero) loads the full descriptor from the GDT or LDT. The |
134 | + * LDT for next is loaded in switch_mm, and the GDT is loaded |
135 | + * above. |
136 | + * |
137 | + * We therefore need to write new values to the segment |
138 | + * registers on every context switch unless both the new and old |
139 | + * values are zero. |
140 | + * |
141 | + * Note that we don't need to do anything for CS and SS, as |
142 | + * those are saved and restored as part of pt_regs. |
143 | + */ |
144 | + savesegment(es, prev->es); |
145 | + if (unlikely(next->es | prev->es)) |
146 | + loadsegment(es, next->es); |
147 | + |
148 | + savesegment(ds, prev->ds); |
149 | + if (unlikely(next->ds | prev->ds)) |
150 | + loadsegment(ds, next->ds); |
151 | + |
152 | /* |
153 | * Switch FS and GS. |
154 | * |
155 | - * Segment register != 0 always requires a reload. Also |
156 | - * reload when it has changed. When prev process used 64bit |
157 | - * base always reload to avoid an information leak. |
158 | + * These are even more complicated than DS and ES: they have |
159 | + * 64-bit bases that are controlled by arch_prctl. Those bases |
160 | + * only differ from the values in the GDT or LDT if the selector |
161 | + * is 0. |
162 | + * |
163 | + * Loading the segment register resets the hidden base part of |
164 | + * the register to 0 or the value from the GDT / LDT. If the |
165 | + * next base address zero, writing 0 to the segment register is |
166 | + * much faster than using wrmsr to explicitly zero the base. |
167 | + * |
168 | + * The thread_struct.fs and thread_struct.gs values are 0 |
169 | + * if the fs and gs bases respectively are not overridden |
170 | + * from the values implied by fsindex and gsindex. They |
171 | + * are nonzero, and store the nonzero base addresses, if |
172 | + * the bases are overridden. |
173 | + * |
174 | + * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) should |
175 | + * be impossible. |
176 | + * |
177 | + * Therefore we need to reload the segment registers if either |
178 | + * the old or new selector is nonzero, and we need to override |
179 | + * the base address if the next thread expects it to be overridden. |
180 | + * |
181 | + * This code is unnecessarily slow in the case where the old and |
182 | + * new indexes are zero and the new base is nonzero -- it will |
183 | + * unnecessarily write 0 to the selector before writing the new |
184 | + * base address. |
185 | + * |
186 | + * Note: This all depends on arch_prctl being the only way that |
187 | + * user code can override the segment base. Once wrfsbase and |
188 | + * wrgsbase are enabled, most of this code will need to change. |
189 | */ |
190 | if (unlikely(fsindex | next->fsindex | prev->fs)) { |
191 | loadsegment(fs, next->fsindex); |
192 | + |
193 | /* |
194 | - * Check if the user used a selector != 0; if yes |
195 | - * clear 64bit base, since overloaded base is always |
196 | - * mapped to the Null selector |
197 | + * If user code wrote a nonzero value to FS, then it also |
198 | + * cleared the overridden base address. |
199 | + * |
200 | + * XXX: if user code wrote 0 to FS and cleared the base |
201 | + * address itself, we won't notice and we'll incorrectly |
202 | + * restore the prior base address next time we reschedule |
203 | + * the process. |
204 | */ |
205 | if (fsindex) |
206 | prev->fs = 0; |
207 | } |
208 | - /* when next process has a 64bit base use it */ |
209 | if (next->fs) |
210 | wrmsrl(MSR_FS_BASE, next->fs); |
211 | prev->fsindex = fsindex; |
212 | |
213 | if (unlikely(gsindex | next->gsindex | prev->gs)) { |
214 | load_gs_index(next->gsindex); |
215 | + |
216 | + /* This works (and fails) the same way as fsindex above. */ |
217 | if (gsindex) |
218 | prev->gs = 0; |
219 | } |
220 | diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c |
221 | index f7fec09e3e3a..4e942f31b1a7 100644 |
222 | --- a/arch/x86/kernel/tls.c |
223 | +++ b/arch/x86/kernel/tls.c |
224 | @@ -27,6 +27,37 @@ static int get_free_idx(void) |
225 | return -ESRCH; |
226 | } |
227 | |
228 | +static bool tls_desc_okay(const struct user_desc *info) |
229 | +{ |
230 | + if (LDT_empty(info)) |
231 | + return true; |
232 | + |
233 | + /* |
234 | + * espfix is required for 16-bit data segments, but espfix |
235 | + * only works for LDT segments. |
236 | + */ |
237 | + if (!info->seg_32bit) |
238 | + return false; |
239 | + |
240 | + /* Only allow data segments in the TLS array. */ |
241 | + if (info->contents > 1) |
242 | + return false; |
243 | + |
244 | + /* |
245 | + * Non-present segments with DPL 3 present an interesting attack |
246 | + * surface. The kernel should handle such segments correctly, |
247 | + * but TLS is very difficult to protect in a sandbox, so prevent |
248 | + * such segments from being created. |
249 | + * |
250 | + * If userspace needs to remove a TLS entry, it can still delete |
251 | + * it outright. |
252 | + */ |
253 | + if (info->seg_not_present) |
254 | + return false; |
255 | + |
256 | + return true; |
257 | +} |
258 | + |
259 | static void set_tls_desc(struct task_struct *p, int idx, |
260 | const struct user_desc *info, int n) |
261 | { |
262 | @@ -66,6 +97,9 @@ int do_set_thread_area(struct task_struct *p, int idx, |
263 | if (copy_from_user(&info, u_info, sizeof(info))) |
264 | return -EFAULT; |
265 | |
266 | + if (!tls_desc_okay(&info)) |
267 | + return -EINVAL; |
268 | + |
269 | if (idx == -1) |
270 | idx = info.entry_number; |
271 | |
272 | @@ -192,6 +226,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset, |
273 | { |
274 | struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES]; |
275 | const struct user_desc *info; |
276 | + int i; |
277 | |
278 | if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || |
279 | (pos % sizeof(struct user_desc)) != 0 || |
280 | @@ -205,6 +240,10 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset, |
281 | else |
282 | info = infobuf; |
283 | |
284 | + for (i = 0; i < count / sizeof(struct user_desc); i++) |
285 | + if (!tls_desc_okay(info + i)) |
286 | + return -EINVAL; |
287 | + |
288 | set_tls_desc(target, |
289 | GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)), |
290 | info, count / sizeof(struct user_desc)); |
291 | diff --git a/crypto/af_alg.c b/crypto/af_alg.c |
292 | index bf948e134981..6ef6e2ad344e 100644 |
293 | --- a/crypto/af_alg.c |
294 | +++ b/crypto/af_alg.c |
295 | @@ -449,6 +449,9 @@ void af_alg_complete(struct crypto_async_request *req, int err) |
296 | { |
297 | struct af_alg_completion *completion = req->data; |
298 | |
299 | + if (err == -EINPROGRESS) |
300 | + return; |
301 | + |
302 | completion->err = err; |
303 | complete(&completion->completion); |
304 | } |
305 | diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c |
306 | index 5a2c75499824..a79cbd6038f6 100644 |
307 | --- a/drivers/md/bitmap.c |
308 | +++ b/drivers/md/bitmap.c |
309 | @@ -883,7 +883,6 @@ void bitmap_unplug(struct bitmap *bitmap) |
310 | { |
311 | unsigned long i; |
312 | int dirty, need_write; |
313 | - int wait = 0; |
314 | |
315 | if (!bitmap || !bitmap->storage.filemap || |
316 | test_bit(BITMAP_STALE, &bitmap->flags)) |
317 | @@ -901,16 +900,13 @@ void bitmap_unplug(struct bitmap *bitmap) |
318 | clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING); |
319 | write_page(bitmap, bitmap->storage.filemap[i], 0); |
320 | } |
321 | - if (dirty) |
322 | - wait = 1; |
323 | - } |
324 | - if (wait) { /* if any writes were performed, we need to wait on them */ |
325 | - if (bitmap->storage.file) |
326 | - wait_event(bitmap->write_wait, |
327 | - atomic_read(&bitmap->pending_writes)==0); |
328 | - else |
329 | - md_super_wait(bitmap->mddev); |
330 | } |
331 | + if (bitmap->storage.file) |
332 | + wait_event(bitmap->write_wait, |
333 | + atomic_read(&bitmap->pending_writes)==0); |
334 | + else |
335 | + md_super_wait(bitmap->mddev); |
336 | + |
337 | if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) |
338 | bitmap_file_kick(bitmap); |
339 | } |
340 | diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c |
341 | index c9b4ca9e0696..e855a190270d 100644 |
342 | --- a/drivers/md/dm-bufio.c |
343 | +++ b/drivers/md/dm-bufio.c |
344 | @@ -529,6 +529,19 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block, |
345 | end_io(&b->bio, r); |
346 | } |
347 | |
348 | +static void inline_endio(struct bio *bio, int error) |
349 | +{ |
350 | + bio_end_io_t *end_fn = bio->bi_private; |
351 | + |
352 | + /* |
353 | + * Reset the bio to free any attached resources |
354 | + * (e.g. bio integrity profiles). |
355 | + */ |
356 | + bio_reset(bio); |
357 | + |
358 | + end_fn(bio, error); |
359 | +} |
360 | + |
361 | static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, |
362 | bio_end_io_t *end_io) |
363 | { |
364 | @@ -540,7 +553,12 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, |
365 | b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS; |
366 | b->bio.bi_sector = block << b->c->sectors_per_block_bits; |
367 | b->bio.bi_bdev = b->c->bdev; |
368 | - b->bio.bi_end_io = end_io; |
369 | + b->bio.bi_end_io = inline_endio; |
370 | + /* |
371 | + * Use of .bi_private isn't a problem here because |
372 | + * the dm_buffer's inline bio is local to bufio. |
373 | + */ |
374 | + b->bio.bi_private = end_io; |
375 | |
376 | /* |
377 | * We assume that if len >= PAGE_SIZE ptr is page-aligned. |
378 | diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c |
379 | index afb419e514bf..056d09c33af1 100644 |
380 | --- a/drivers/md/persistent-data/dm-space-map-metadata.c |
381 | +++ b/drivers/md/persistent-data/dm-space-map-metadata.c |
382 | @@ -493,7 +493,9 @@ static int sm_bootstrap_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count |
383 | { |
384 | struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); |
385 | |
386 | - return smm->ll.nr_blocks; |
387 | + *count = smm->ll.nr_blocks; |
388 | + |
389 | + return 0; |
390 | } |
391 | |
392 | static int sm_bootstrap_get_nr_free(struct dm_space_map *sm, dm_block_t *count) |
393 | diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c |
394 | index 15e1463e5e13..17fe83e81ea4 100644 |
395 | --- a/drivers/mfd/tc6393xb.c |
396 | +++ b/drivers/mfd/tc6393xb.c |
397 | @@ -263,6 +263,17 @@ static int tc6393xb_ohci_disable(struct platform_device *dev) |
398 | return 0; |
399 | } |
400 | |
401 | +static int tc6393xb_ohci_suspend(struct platform_device *dev) |
402 | +{ |
403 | + struct tc6393xb_platform_data *tcpd = dev_get_platdata(dev->dev.parent); |
404 | + |
405 | + /* We can't properly store/restore OHCI state, so fail here */ |
406 | + if (tcpd->resume_restore) |
407 | + return -EBUSY; |
408 | + |
409 | + return tc6393xb_ohci_disable(dev); |
410 | +} |
411 | + |
412 | static int tc6393xb_fb_enable(struct platform_device *dev) |
413 | { |
414 | struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent); |
415 | @@ -403,7 +414,7 @@ static struct mfd_cell tc6393xb_cells[] = { |
416 | .num_resources = ARRAY_SIZE(tc6393xb_ohci_resources), |
417 | .resources = tc6393xb_ohci_resources, |
418 | .enable = tc6393xb_ohci_enable, |
419 | - .suspend = tc6393xb_ohci_disable, |
420 | + .suspend = tc6393xb_ohci_suspend, |
421 | .resume = tc6393xb_ohci_enable, |
422 | .disable = tc6393xb_ohci_disable, |
423 | }, |
424 | diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c |
425 | index 9aca9462a12f..7ad66823d022 100644 |
426 | --- a/drivers/mmc/card/block.c |
427 | +++ b/drivers/mmc/card/block.c |
428 | @@ -257,7 +257,7 @@ static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, |
429 | int ret; |
430 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); |
431 | |
432 | - ret = snprintf(buf, PAGE_SIZE, "%d", |
433 | + ret = snprintf(buf, PAGE_SIZE, "%d\n", |
434 | get_disk_ro(dev_to_disk(dev)) ^ |
435 | md->read_only); |
436 | mmc_blk_put(md); |
437 | diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c |
438 | index 4956c99ed90e..78b4fe845245 100644 |
439 | --- a/drivers/scsi/megaraid/megaraid_sas_base.c |
440 | +++ b/drivers/scsi/megaraid/megaraid_sas_base.c |
441 | @@ -933,7 +933,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, |
442 | abort_fr->abort_mfi_phys_addr_hi = 0; |
443 | |
444 | cmd->sync_cmd = 1; |
445 | - cmd->cmd_status = 0xFF; |
446 | + cmd->cmd_status = ENODATA; |
447 | |
448 | instance->instancet->issue_dcmd(instance, cmd); |
449 | |
450 | diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c |
451 | index abecce399354..7360f03ddbe1 100644 |
452 | --- a/fs/btrfs/disk-io.c |
453 | +++ b/fs/btrfs/disk-io.c |
454 | @@ -3857,12 +3857,6 @@ again: |
455 | if (ret) |
456 | break; |
457 | |
458 | - /* opt_discard */ |
459 | - if (btrfs_test_opt(root, DISCARD)) |
460 | - ret = btrfs_error_discard_extent(root, start, |
461 | - end + 1 - start, |
462 | - NULL); |
463 | - |
464 | clear_extent_dirty(unpin, start, end, GFP_NOFS); |
465 | btrfs_error_unpin_extent_range(root, start, end); |
466 | cond_resched(); |
467 | diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c |
468 | index bbafa05519da..f99c71e40f8b 100644 |
469 | --- a/fs/btrfs/extent-tree.c |
470 | +++ b/fs/btrfs/extent-tree.c |
471 | @@ -5277,7 +5277,8 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, |
472 | update_global_block_rsv(fs_info); |
473 | } |
474 | |
475 | -static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) |
476 | +static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end, |
477 | + const bool return_free_space) |
478 | { |
479 | struct btrfs_fs_info *fs_info = root->fs_info; |
480 | struct btrfs_block_group_cache *cache = NULL; |
481 | @@ -5301,7 +5302,8 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) |
482 | |
483 | if (start < cache->last_byte_to_unpin) { |
484 | len = min(len, cache->last_byte_to_unpin - start); |
485 | - btrfs_add_free_space(cache, start, len); |
486 | + if (return_free_space) |
487 | + btrfs_add_free_space(cache, start, len); |
488 | } |
489 | |
490 | start += len; |
491 | @@ -5364,7 +5366,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, |
492 | end + 1 - start, NULL); |
493 | |
494 | clear_extent_dirty(unpin, start, end, GFP_NOFS); |
495 | - unpin_extent_range(root, start, end); |
496 | + unpin_extent_range(root, start, end, true); |
497 | cond_resched(); |
498 | } |
499 | |
500 | @@ -8564,7 +8566,7 @@ out: |
501 | |
502 | int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) |
503 | { |
504 | - return unpin_extent_range(root, start, end); |
505 | + return unpin_extent_range(root, start, end, false); |
506 | } |
507 | |
508 | int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, |
509 | diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c |
510 | index a4a7a1a8da95..0a3809500599 100644 |
511 | --- a/fs/btrfs/extent_map.c |
512 | +++ b/fs/btrfs/extent_map.c |
513 | @@ -263,8 +263,6 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, |
514 | if (!em) |
515 | goto out; |
516 | |
517 | - if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags)) |
518 | - list_move(&em->list, &tree->modified_extents); |
519 | em->generation = gen; |
520 | clear_bit(EXTENT_FLAG_PINNED, &em->flags); |
521 | em->mod_start = em->start; |
522 | diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c |
523 | index f71ec125290d..1da2446bf6b0 100644 |
524 | --- a/fs/ecryptfs/crypto.c |
525 | +++ b/fs/ecryptfs/crypto.c |
526 | @@ -2102,7 +2102,6 @@ ecryptfs_decode_from_filename(unsigned char *dst, size_t *dst_size, |
527 | break; |
528 | case 2: |
529 | dst[dst_byte_offset++] |= (src_byte); |
530 | - dst[dst_byte_offset] = 0; |
531 | current_bit_offset = 0; |
532 | break; |
533 | } |
534 | diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c |
535 | index a7abbea2c096..9ff3664bb3ea 100644 |
536 | --- a/fs/ecryptfs/file.c |
537 | +++ b/fs/ecryptfs/file.c |
538 | @@ -196,23 +196,11 @@ static int ecryptfs_open(struct inode *inode, struct file *file) |
539 | { |
540 | int rc = 0; |
541 | struct ecryptfs_crypt_stat *crypt_stat = NULL; |
542 | - struct ecryptfs_mount_crypt_stat *mount_crypt_stat; |
543 | struct dentry *ecryptfs_dentry = file->f_path.dentry; |
544 | /* Private value of ecryptfs_dentry allocated in |
545 | * ecryptfs_lookup() */ |
546 | struct ecryptfs_file_info *file_info; |
547 | |
548 | - mount_crypt_stat = &ecryptfs_superblock_to_private( |
549 | - ecryptfs_dentry->d_sb)->mount_crypt_stat; |
550 | - if ((mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) |
551 | - && ((file->f_flags & O_WRONLY) || (file->f_flags & O_RDWR) |
552 | - || (file->f_flags & O_CREAT) || (file->f_flags & O_TRUNC) |
553 | - || (file->f_flags & O_APPEND))) { |
554 | - printk(KERN_WARNING "Mount has encrypted view enabled; " |
555 | - "files may only be read\n"); |
556 | - rc = -EPERM; |
557 | - goto out; |
558 | - } |
559 | /* Released in ecryptfs_release or end of function if failure */ |
560 | file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL); |
561 | ecryptfs_set_file_private(file, file_info); |
562 | diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c |
563 | index e924cf45aad9..329a9cc2b2eb 100644 |
564 | --- a/fs/ecryptfs/main.c |
565 | +++ b/fs/ecryptfs/main.c |
566 | @@ -494,6 +494,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags |
567 | { |
568 | struct super_block *s; |
569 | struct ecryptfs_sb_info *sbi; |
570 | + struct ecryptfs_mount_crypt_stat *mount_crypt_stat; |
571 | struct ecryptfs_dentry_info *root_info; |
572 | const char *err = "Getting sb failed"; |
573 | struct inode *inode; |
574 | @@ -512,6 +513,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags |
575 | err = "Error parsing options"; |
576 | goto out; |
577 | } |
578 | + mount_crypt_stat = &sbi->mount_crypt_stat; |
579 | |
580 | s = sget(fs_type, NULL, set_anon_super, flags, NULL); |
581 | if (IS_ERR(s)) { |
582 | @@ -558,11 +560,19 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags |
583 | |
584 | /** |
585 | * Set the POSIX ACL flag based on whether they're enabled in the lower |
586 | - * mount. Force a read-only eCryptfs mount if the lower mount is ro. |
587 | - * Allow a ro eCryptfs mount even when the lower mount is rw. |
588 | + * mount. |
589 | */ |
590 | s->s_flags = flags & ~MS_POSIXACL; |
591 | - s->s_flags |= path.dentry->d_sb->s_flags & (MS_RDONLY | MS_POSIXACL); |
592 | + s->s_flags |= path.dentry->d_sb->s_flags & MS_POSIXACL; |
593 | + |
594 | + /** |
595 | + * Force a read-only eCryptfs mount when: |
596 | + * 1) The lower mount is ro |
597 | + * 2) The ecryptfs_encrypted_view mount option is specified |
598 | + */ |
599 | + if (path.dentry->d_sb->s_flags & MS_RDONLY || |
600 | + mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) |
601 | + s->s_flags |= MS_RDONLY; |
602 | |
603 | s->s_maxbytes = path.dentry->d_sb->s_maxbytes; |
604 | s->s_blocksize = path.dentry->d_sb->s_blocksize; |
605 | diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c |
606 | index f488bbae541a..735d7522a3a9 100644 |
607 | --- a/fs/isofs/rock.c |
608 | +++ b/fs/isofs/rock.c |
609 | @@ -30,6 +30,7 @@ struct rock_state { |
610 | int cont_size; |
611 | int cont_extent; |
612 | int cont_offset; |
613 | + int cont_loops; |
614 | struct inode *inode; |
615 | }; |
616 | |
617 | @@ -73,6 +74,9 @@ static void init_rock_state(struct rock_state *rs, struct inode *inode) |
618 | rs->inode = inode; |
619 | } |
620 | |
621 | +/* Maximum number of Rock Ridge continuation entries */ |
622 | +#define RR_MAX_CE_ENTRIES 32 |
623 | + |
624 | /* |
625 | * Returns 0 if the caller should continue scanning, 1 if the scan must end |
626 | * and -ve on error. |
627 | @@ -105,6 +109,8 @@ static int rock_continue(struct rock_state *rs) |
628 | goto out; |
629 | } |
630 | ret = -EIO; |
631 | + if (++rs->cont_loops >= RR_MAX_CE_ENTRIES) |
632 | + goto out; |
633 | bh = sb_bread(rs->inode->i_sb, rs->cont_extent); |
634 | if (bh) { |
635 | memcpy(rs->buffer, bh->b_data + rs->cont_offset, |
636 | @@ -356,6 +362,9 @@ repeat: |
637 | rs.cont_size = isonum_733(rr->u.CE.size); |
638 | break; |
639 | case SIG('E', 'R'): |
640 | + /* Invalid length of ER tag id? */ |
641 | + if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len) |
642 | + goto out; |
643 | ISOFS_SB(inode->i_sb)->s_rock = 1; |
644 | printk(KERN_DEBUG "ISO 9660 Extensions: "); |
645 | { |
646 | diff --git a/fs/namespace.c b/fs/namespace.c |
647 | index 154822397780..d0244c8ba09c 100644 |
648 | --- a/fs/namespace.c |
649 | +++ b/fs/namespace.c |
650 | @@ -1342,6 +1342,9 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags) |
651 | goto dput_and_out; |
652 | if (!check_mnt(mnt)) |
653 | goto dput_and_out; |
654 | + retval = -EPERM; |
655 | + if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN)) |
656 | + goto dput_and_out; |
657 | |
658 | retval = do_umount(mnt, flags); |
659 | dput_and_out: |
660 | @@ -1816,7 +1819,13 @@ static int do_remount(struct path *path, int flags, int mnt_flags, |
661 | } |
662 | if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) && |
663 | !(mnt_flags & MNT_NODEV)) { |
664 | - return -EPERM; |
665 | + /* Was the nodev implicitly added in mount? */ |
666 | + if ((mnt->mnt_ns->user_ns != &init_user_ns) && |
667 | + !(sb->s_type->fs_flags & FS_USERNS_DEV_MOUNT)) { |
668 | + mnt_flags |= MNT_NODEV; |
669 | + } else { |
670 | + return -EPERM; |
671 | + } |
672 | } |
673 | if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) && |
674 | !(mnt_flags & MNT_NOSUID)) { |
675 | diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c |
676 | index 60426ccb3b65..2f970de02b16 100644 |
677 | --- a/fs/ncpfs/ioctl.c |
678 | +++ b/fs/ncpfs/ioctl.c |
679 | @@ -448,7 +448,6 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg |
680 | result = -EIO; |
681 | } |
682 | } |
683 | - result = 0; |
684 | } |
685 | mutex_unlock(&server->root_setup_lock); |
686 | |
687 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
688 | index 78787948f69d..20ebcfa3c92e 100644 |
689 | --- a/fs/nfs/nfs4proc.c |
690 | +++ b/fs/nfs/nfs4proc.c |
691 | @@ -6418,6 +6418,9 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags) |
692 | |
693 | dprintk("--> %s\n", __func__); |
694 | |
695 | + /* nfs4_layoutget_release calls pnfs_put_layout_hdr */ |
696 | + pnfs_get_layout_hdr(NFS_I(inode)->layout); |
697 | + |
698 | lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags); |
699 | if (!lgp->args.layout.pages) { |
700 | nfs4_layoutget_release(lgp); |
701 | @@ -6430,9 +6433,6 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags) |
702 | lgp->res.seq_res.sr_slot = NULL; |
703 | nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); |
704 | |
705 | - /* nfs4_layoutget_release calls pnfs_put_layout_hdr */ |
706 | - pnfs_get_layout_hdr(NFS_I(inode)->layout); |
707 | - |
708 | task = rpc_run_task(&task_setup_data); |
709 | if (IS_ERR(task)) |
710 | return ERR_CAST(task); |
711 | diff --git a/fs/proc/base.c b/fs/proc/base.c |
712 | index de12b8128b95..8fc784aef0b8 100644 |
713 | --- a/fs/proc/base.c |
714 | +++ b/fs/proc/base.c |
715 | @@ -2612,6 +2612,57 @@ static const struct file_operations proc_projid_map_operations = { |
716 | .llseek = seq_lseek, |
717 | .release = proc_id_map_release, |
718 | }; |
719 | + |
720 | +static int proc_setgroups_open(struct inode *inode, struct file *file) |
721 | +{ |
722 | + struct user_namespace *ns = NULL; |
723 | + struct task_struct *task; |
724 | + int ret; |
725 | + |
726 | + ret = -ESRCH; |
727 | + task = get_proc_task(inode); |
728 | + if (task) { |
729 | + rcu_read_lock(); |
730 | + ns = get_user_ns(task_cred_xxx(task, user_ns)); |
731 | + rcu_read_unlock(); |
732 | + put_task_struct(task); |
733 | + } |
734 | + if (!ns) |
735 | + goto err; |
736 | + |
737 | + if (file->f_mode & FMODE_WRITE) { |
738 | + ret = -EACCES; |
739 | + if (!ns_capable(ns, CAP_SYS_ADMIN)) |
740 | + goto err_put_ns; |
741 | + } |
742 | + |
743 | + ret = single_open(file, &proc_setgroups_show, ns); |
744 | + if (ret) |
745 | + goto err_put_ns; |
746 | + |
747 | + return 0; |
748 | +err_put_ns: |
749 | + put_user_ns(ns); |
750 | +err: |
751 | + return ret; |
752 | +} |
753 | + |
754 | +static int proc_setgroups_release(struct inode *inode, struct file *file) |
755 | +{ |
756 | + struct seq_file *seq = file->private_data; |
757 | + struct user_namespace *ns = seq->private; |
758 | + int ret = single_release(inode, file); |
759 | + put_user_ns(ns); |
760 | + return ret; |
761 | +} |
762 | + |
763 | +static const struct file_operations proc_setgroups_operations = { |
764 | + .open = proc_setgroups_open, |
765 | + .write = proc_setgroups_write, |
766 | + .read = seq_read, |
767 | + .llseek = seq_lseek, |
768 | + .release = proc_setgroups_release, |
769 | +}; |
770 | #endif /* CONFIG_USER_NS */ |
771 | |
772 | static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns, |
773 | @@ -2720,6 +2771,7 @@ static const struct pid_entry tgid_base_stuff[] = { |
774 | REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations), |
775 | REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations), |
776 | REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations), |
777 | + REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations), |
778 | #endif |
779 | #ifdef CONFIG_CHECKPOINT_RESTORE |
780 | REG("timers", S_IRUGO, proc_timers_operations), |
781 | @@ -3073,6 +3125,7 @@ static const struct pid_entry tid_base_stuff[] = { |
782 | REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations), |
783 | REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations), |
784 | REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations), |
785 | + REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations), |
786 | #endif |
787 | }; |
788 | |
789 | diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c |
790 | index d7c6dbe4194b..d89f324bc387 100644 |
791 | --- a/fs/udf/symlink.c |
792 | +++ b/fs/udf/symlink.c |
793 | @@ -80,11 +80,17 @@ static int udf_symlink_filler(struct file *file, struct page *page) |
794 | struct inode *inode = page->mapping->host; |
795 | struct buffer_head *bh = NULL; |
796 | unsigned char *symlink; |
797 | - int err = -EIO; |
798 | + int err; |
799 | unsigned char *p = kmap(page); |
800 | struct udf_inode_info *iinfo; |
801 | uint32_t pos; |
802 | |
803 | + /* We don't support symlinks longer than one block */ |
804 | + if (inode->i_size > inode->i_sb->s_blocksize) { |
805 | + err = -ENAMETOOLONG; |
806 | + goto out_unmap; |
807 | + } |
808 | + |
809 | iinfo = UDF_I(inode); |
810 | pos = udf_block_map(inode, 0); |
811 | |
812 | @@ -94,8 +100,10 @@ static int udf_symlink_filler(struct file *file, struct page *page) |
813 | } else { |
814 | bh = sb_bread(inode->i_sb, pos); |
815 | |
816 | - if (!bh) |
817 | - goto out; |
818 | + if (!bh) { |
819 | + err = -EIO; |
820 | + goto out_unlock_inode; |
821 | + } |
822 | |
823 | symlink = bh->b_data; |
824 | } |
825 | @@ -109,9 +117,10 @@ static int udf_symlink_filler(struct file *file, struct page *page) |
826 | unlock_page(page); |
827 | return 0; |
828 | |
829 | -out: |
830 | +out_unlock_inode: |
831 | up_read(&iinfo->i_data_sem); |
832 | SetPageError(page); |
833 | +out_unmap: |
834 | kunmap(page); |
835 | unlock_page(page); |
836 | return err; |
837 | diff --git a/include/linux/cred.h b/include/linux/cred.h |
838 | index 04421e825365..6c58dd7cb9ac 100644 |
839 | --- a/include/linux/cred.h |
840 | +++ b/include/linux/cred.h |
841 | @@ -68,6 +68,7 @@ extern void groups_free(struct group_info *); |
842 | extern int set_current_groups(struct group_info *); |
843 | extern int set_groups(struct cred *, struct group_info *); |
844 | extern int groups_search(const struct group_info *, kgid_t); |
845 | +extern bool may_setgroups(void); |
846 | |
847 | /* access the groups "array" with this macro */ |
848 | #define GROUP_AT(gi, i) \ |
849 | diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h |
850 | index 14105c26a836..a37081cf59da 100644 |
851 | --- a/include/linux/user_namespace.h |
852 | +++ b/include/linux/user_namespace.h |
853 | @@ -17,6 +17,10 @@ struct uid_gid_map { /* 64 bytes -- 1 cache line */ |
854 | } extent[UID_GID_MAP_MAX_EXTENTS]; |
855 | }; |
856 | |
857 | +#define USERNS_SETGROUPS_ALLOWED 1UL |
858 | + |
859 | +#define USERNS_INIT_FLAGS USERNS_SETGROUPS_ALLOWED |
860 | + |
861 | struct user_namespace { |
862 | struct uid_gid_map uid_map; |
863 | struct uid_gid_map gid_map; |
864 | @@ -27,6 +31,7 @@ struct user_namespace { |
865 | kuid_t owner; |
866 | kgid_t group; |
867 | unsigned int proc_inum; |
868 | + unsigned long flags; |
869 | bool may_mount_sysfs; |
870 | bool may_mount_proc; |
871 | }; |
872 | @@ -59,6 +64,9 @@ extern struct seq_operations proc_projid_seq_operations; |
873 | extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *); |
874 | extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *); |
875 | extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *); |
876 | +extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *); |
877 | +extern int proc_setgroups_show(struct seq_file *m, void *v); |
878 | +extern bool userns_may_setgroups(const struct user_namespace *ns); |
879 | #else |
880 | |
881 | static inline struct user_namespace *get_user_ns(struct user_namespace *ns) |
882 | @@ -83,6 +91,10 @@ static inline void put_user_ns(struct user_namespace *ns) |
883 | { |
884 | } |
885 | |
886 | +static inline bool userns_may_setgroups(const struct user_namespace *ns) |
887 | +{ |
888 | + return true; |
889 | +} |
890 | #endif |
891 | |
892 | void update_mnt_policy(struct user_namespace *userns); |
893 | diff --git a/kernel/groups.c b/kernel/groups.c |
894 | index 6b2588dd04ff..67b4ba30475f 100644 |
895 | --- a/kernel/groups.c |
896 | +++ b/kernel/groups.c |
897 | @@ -6,6 +6,7 @@ |
898 | #include <linux/slab.h> |
899 | #include <linux/security.h> |
900 | #include <linux/syscalls.h> |
901 | +#include <linux/user_namespace.h> |
902 | #include <asm/uaccess.h> |
903 | |
904 | /* init to 2 - one for init_task, one to ensure it is never freed */ |
905 | @@ -223,6 +224,14 @@ out: |
906 | return i; |
907 | } |
908 | |
909 | +bool may_setgroups(void) |
910 | +{ |
911 | + struct user_namespace *user_ns = current_user_ns(); |
912 | + |
913 | + return ns_capable(user_ns, CAP_SETGID) && |
914 | + userns_may_setgroups(user_ns); |
915 | +} |
916 | + |
917 | /* |
918 | * SMP: Our groups are copy-on-write. We can set them safely |
919 | * without another task interfering. |
920 | @@ -233,7 +242,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist) |
921 | struct group_info *group_info; |
922 | int retval; |
923 | |
924 | - if (!nsown_capable(CAP_SETGID)) |
925 | + if (!may_setgroups()) |
926 | return -EPERM; |
927 | if ((unsigned)gidsetsize > NGROUPS_MAX) |
928 | return -EINVAL; |
929 | diff --git a/kernel/pid.c b/kernel/pid.c |
930 | index 0eb6d8e8b1da..3cdba5173600 100644 |
931 | --- a/kernel/pid.c |
932 | +++ b/kernel/pid.c |
933 | @@ -335,6 +335,8 @@ out: |
934 | |
935 | out_unlock: |
936 | spin_unlock_irq(&pidmap_lock); |
937 | + put_pid_ns(ns); |
938 | + |
939 | out_free: |
940 | while (++i <= ns->level) |
941 | free_pidmap(pid->numbers + i); |
942 | diff --git a/kernel/uid16.c b/kernel/uid16.c |
943 | index f6c83d7ef000..d58cc4d8f0d1 100644 |
944 | --- a/kernel/uid16.c |
945 | +++ b/kernel/uid16.c |
946 | @@ -176,7 +176,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist) |
947 | struct group_info *group_info; |
948 | int retval; |
949 | |
950 | - if (!nsown_capable(CAP_SETGID)) |
951 | + if (!may_setgroups()) |
952 | return -EPERM; |
953 | if ((unsigned)gidsetsize > NGROUPS_MAX) |
954 | return -EINVAL; |
955 | diff --git a/kernel/user.c b/kernel/user.c |
956 | index 69b4c3d48cde..6bbef5604101 100644 |
957 | --- a/kernel/user.c |
958 | +++ b/kernel/user.c |
959 | @@ -51,6 +51,7 @@ struct user_namespace init_user_ns = { |
960 | .owner = GLOBAL_ROOT_UID, |
961 | .group = GLOBAL_ROOT_GID, |
962 | .proc_inum = PROC_USER_INIT_INO, |
963 | + .flags = USERNS_INIT_FLAGS, |
964 | .may_mount_sysfs = true, |
965 | .may_mount_proc = true, |
966 | }; |
967 | diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c |
968 | index 9bea1d7dd21f..3f2fb33d291a 100644 |
969 | --- a/kernel/user_namespace.c |
970 | +++ b/kernel/user_namespace.c |
971 | @@ -24,6 +24,7 @@ |
972 | #include <linux/fs_struct.h> |
973 | |
974 | static struct kmem_cache *user_ns_cachep __read_mostly; |
975 | +static DEFINE_MUTEX(userns_state_mutex); |
976 | |
977 | static bool new_idmap_permitted(const struct file *file, |
978 | struct user_namespace *ns, int cap_setid, |
979 | @@ -99,6 +100,11 @@ int create_user_ns(struct cred *new) |
980 | ns->owner = owner; |
981 | ns->group = group; |
982 | |
983 | + /* Inherit USERNS_SETGROUPS_ALLOWED from our parent */ |
984 | + mutex_lock(&userns_state_mutex); |
985 | + ns->flags = parent_ns->flags; |
986 | + mutex_unlock(&userns_state_mutex); |
987 | + |
988 | set_cred_user_ns(new, ns); |
989 | |
990 | update_mnt_policy(ns); |
991 | @@ -577,9 +583,6 @@ static bool mappings_overlap(struct uid_gid_map *new_map, struct uid_gid_extent |
992 | return false; |
993 | } |
994 | |
995 | - |
996 | -static DEFINE_MUTEX(id_map_mutex); |
997 | - |
998 | static ssize_t map_write(struct file *file, const char __user *buf, |
999 | size_t count, loff_t *ppos, |
1000 | int cap_setid, |
1001 | @@ -596,7 +599,7 @@ static ssize_t map_write(struct file *file, const char __user *buf, |
1002 | ssize_t ret = -EINVAL; |
1003 | |
1004 | /* |
1005 | - * The id_map_mutex serializes all writes to any given map. |
1006 | + * The userns_state_mutex serializes all writes to any given map. |
1007 | * |
1008 | * Any map is only ever written once. |
1009 | * |
1010 | @@ -614,7 +617,7 @@ static ssize_t map_write(struct file *file, const char __user *buf, |
1011 | * order and smp_rmb() is guaranteed that we don't have crazy |
1012 | * architectures returning stale data. |
1013 | */ |
1014 | - mutex_lock(&id_map_mutex); |
1015 | + mutex_lock(&userns_state_mutex); |
1016 | |
1017 | ret = -EPERM; |
1018 | /* Only allow one successful write to the map */ |
1019 | @@ -741,7 +744,7 @@ static ssize_t map_write(struct file *file, const char __user *buf, |
1020 | *ppos = count; |
1021 | ret = count; |
1022 | out: |
1023 | - mutex_unlock(&id_map_mutex); |
1024 | + mutex_unlock(&userns_state_mutex); |
1025 | if (page) |
1026 | free_page(page); |
1027 | return ret; |
1028 | @@ -800,17 +803,21 @@ static bool new_idmap_permitted(const struct file *file, |
1029 | struct user_namespace *ns, int cap_setid, |
1030 | struct uid_gid_map *new_map) |
1031 | { |
1032 | - /* Allow mapping to your own filesystem ids */ |
1033 | - if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1)) { |
1034 | + const struct cred *cred = file->f_cred; |
1035 | + /* Don't allow mappings that would allow anything that wouldn't |
1036 | + * be allowed without the establishment of unprivileged mappings. |
1037 | + */ |
1038 | + if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) && |
1039 | + uid_eq(ns->owner, cred->euid)) { |
1040 | u32 id = new_map->extent[0].lower_first; |
1041 | if (cap_setid == CAP_SETUID) { |
1042 | kuid_t uid = make_kuid(ns->parent, id); |
1043 | - if (uid_eq(uid, file->f_cred->fsuid)) |
1044 | + if (uid_eq(uid, cred->euid)) |
1045 | return true; |
1046 | - } |
1047 | - else if (cap_setid == CAP_SETGID) { |
1048 | + } else if (cap_setid == CAP_SETGID) { |
1049 | kgid_t gid = make_kgid(ns->parent, id); |
1050 | - if (gid_eq(gid, file->f_cred->fsgid)) |
1051 | + if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) && |
1052 | + gid_eq(gid, cred->egid)) |
1053 | return true; |
1054 | } |
1055 | } |
1056 | @@ -830,6 +837,100 @@ static bool new_idmap_permitted(const struct file *file, |
1057 | return false; |
1058 | } |
1059 | |
1060 | +int proc_setgroups_show(struct seq_file *seq, void *v) |
1061 | +{ |
1062 | + struct user_namespace *ns = seq->private; |
1063 | + unsigned long userns_flags = ACCESS_ONCE(ns->flags); |
1064 | + |
1065 | + seq_printf(seq, "%s\n", |
1066 | + (userns_flags & USERNS_SETGROUPS_ALLOWED) ? |
1067 | + "allow" : "deny"); |
1068 | + return 0; |
1069 | +} |
1070 | + |
1071 | +ssize_t proc_setgroups_write(struct file *file, const char __user *buf, |
1072 | + size_t count, loff_t *ppos) |
1073 | +{ |
1074 | + struct seq_file *seq = file->private_data; |
1075 | + struct user_namespace *ns = seq->private; |
1076 | + char kbuf[8], *pos; |
1077 | + bool setgroups_allowed; |
1078 | + ssize_t ret; |
1079 | + |
1080 | + /* Only allow a very narrow range of strings to be written */ |
1081 | + ret = -EINVAL; |
1082 | + if ((*ppos != 0) || (count >= sizeof(kbuf))) |
1083 | + goto out; |
1084 | + |
1085 | + /* What was written? */ |
1086 | + ret = -EFAULT; |
1087 | + if (copy_from_user(kbuf, buf, count)) |
1088 | + goto out; |
1089 | + kbuf[count] = '\0'; |
1090 | + pos = kbuf; |
1091 | + |
1092 | + /* What is being requested? */ |
1093 | + ret = -EINVAL; |
1094 | + if (strncmp(pos, "allow", 5) == 0) { |
1095 | + pos += 5; |
1096 | + setgroups_allowed = true; |
1097 | + } |
1098 | + else if (strncmp(pos, "deny", 4) == 0) { |
1099 | + pos += 4; |
1100 | + setgroups_allowed = false; |
1101 | + } |
1102 | + else |
1103 | + goto out; |
1104 | + |
1105 | + /* Verify there is not trailing junk on the line */ |
1106 | + pos = skip_spaces(pos); |
1107 | + if (*pos != '\0') |
1108 | + goto out; |
1109 | + |
1110 | + ret = -EPERM; |
1111 | + mutex_lock(&userns_state_mutex); |
1112 | + if (setgroups_allowed) { |
1113 | + /* Enabling setgroups after setgroups has been disabled |
1114 | + * is not allowed. |
1115 | + */ |
1116 | + if (!(ns->flags & USERNS_SETGROUPS_ALLOWED)) |
1117 | + goto out_unlock; |
1118 | + } else { |
1119 | + /* Permanently disabling setgroups after setgroups has |
1120 | + * been enabled by writing the gid_map is not allowed. |
1121 | + */ |
1122 | + if (ns->gid_map.nr_extents != 0) |
1123 | + goto out_unlock; |
1124 | + ns->flags &= ~USERNS_SETGROUPS_ALLOWED; |
1125 | + } |
1126 | + mutex_unlock(&userns_state_mutex); |
1127 | + |
1128 | + /* Report a successful write */ |
1129 | + *ppos = count; |
1130 | + ret = count; |
1131 | +out: |
1132 | + return ret; |
1133 | +out_unlock: |
1134 | + mutex_unlock(&userns_state_mutex); |
1135 | + goto out; |
1136 | +} |
1137 | + |
1138 | +bool userns_may_setgroups(const struct user_namespace *ns) |
1139 | +{ |
1140 | + bool allowed; |
1141 | + |
1142 | + mutex_lock(&userns_state_mutex); |
1143 | + /* It is not safe to use setgroups until a gid mapping in |
1144 | + * the user namespace has been established. |
1145 | + */ |
1146 | + allowed = ns->gid_map.nr_extents != 0; |
1147 | + /* Is setgroups allowed? */ |
1148 | + allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED); |
1149 | + mutex_unlock(&userns_state_mutex); |
1150 | + |
1151 | + return allowed; |
1152 | +} |
1153 | + |
1154 | static void *userns_get(struct task_struct *task) |
1155 | { |
1156 | struct user_namespace *user_ns; |
1157 | diff --git a/net/mac80211/key.c b/net/mac80211/key.c |
1158 | index 67059b88fea5..635d0972b688 100644 |
1159 | --- a/net/mac80211/key.c |
1160 | +++ b/net/mac80211/key.c |
1161 | @@ -607,7 +607,7 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local, |
1162 | int i; |
1163 | |
1164 | mutex_lock(&local->key_mtx); |
1165 | - for (i = 0; i < NUM_DEFAULT_KEYS; i++) { |
1166 | + for (i = 0; i < ARRAY_SIZE(sta->gtk); i++) { |
1167 | key = key_mtx_dereference(local, sta->gtk[i]); |
1168 | if (!key) |
1169 | continue; |
1170 | diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c |
1171 | index 85bc6d498b46..9299a38c372e 100644 |
1172 | --- a/net/mac80211/rx.c |
1173 | +++ b/net/mac80211/rx.c |
1174 | @@ -1585,14 +1585,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) |
1175 | sc = le16_to_cpu(hdr->seq_ctrl); |
1176 | frag = sc & IEEE80211_SCTL_FRAG; |
1177 | |
1178 | - if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) |
1179 | - goto out; |
1180 | - |
1181 | if (is_multicast_ether_addr(hdr->addr1)) { |
1182 | rx->local->dot11MulticastReceivedFrameCount++; |
1183 | - goto out; |
1184 | + goto out_no_led; |
1185 | } |
1186 | |
1187 | + if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) |
1188 | + goto out; |
1189 | + |
1190 | I802_DEBUG_INC(rx->local->rx_handlers_fragments); |
1191 | |
1192 | if (skb_linearize(rx->skb)) |
1193 | @@ -1683,9 +1683,10 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) |
1194 | status->rx_flags |= IEEE80211_RX_FRAGMENTED; |
1195 | |
1196 | out: |
1197 | + ieee80211_led_rx(rx->local); |
1198 | + out_no_led: |
1199 | if (rx->sta) |
1200 | rx->sta->rx_packets++; |
1201 | - ieee80211_led_rx(rx->local); |
1202 | return RX_CONTINUE; |
1203 | } |
1204 | |
1205 | diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c |
1206 | index 9e1e005c7596..c4c8df4b214d 100644 |
1207 | --- a/security/keys/encrypted-keys/encrypted.c |
1208 | +++ b/security/keys/encrypted-keys/encrypted.c |
1209 | @@ -1018,10 +1018,13 @@ static int __init init_encrypted(void) |
1210 | ret = encrypted_shash_alloc(); |
1211 | if (ret < 0) |
1212 | return ret; |
1213 | + ret = aes_get_sizes(); |
1214 | + if (ret < 0) |
1215 | + goto out; |
1216 | ret = register_key_type(&key_type_encrypted); |
1217 | if (ret < 0) |
1218 | goto out; |
1219 | - return aes_get_sizes(); |
1220 | + return 0; |
1221 | out: |
1222 | encrypted_shash_release(); |
1223 | return ret; |
1224 | diff --git a/tools/testing/selftests/mount/unprivileged-remount-test.c b/tools/testing/selftests/mount/unprivileged-remount-test.c |
1225 | index 1b3ff2fda4d0..517785052f1c 100644 |
1226 | --- a/tools/testing/selftests/mount/unprivileged-remount-test.c |
1227 | +++ b/tools/testing/selftests/mount/unprivileged-remount-test.c |
1228 | @@ -6,6 +6,8 @@ |
1229 | #include <sys/types.h> |
1230 | #include <sys/mount.h> |
1231 | #include <sys/wait.h> |
1232 | +#include <sys/vfs.h> |
1233 | +#include <sys/statvfs.h> |
1234 | #include <stdlib.h> |
1235 | #include <unistd.h> |
1236 | #include <fcntl.h> |
1237 | @@ -32,11 +34,14 @@ |
1238 | # define CLONE_NEWPID 0x20000000 |
1239 | #endif |
1240 | |
1241 | +#ifndef MS_REC |
1242 | +# define MS_REC 16384 |
1243 | +#endif |
1244 | #ifndef MS_RELATIME |
1245 | -#define MS_RELATIME (1 << 21) |
1246 | +# define MS_RELATIME (1 << 21) |
1247 | #endif |
1248 | #ifndef MS_STRICTATIME |
1249 | -#define MS_STRICTATIME (1 << 24) |
1250 | +# define MS_STRICTATIME (1 << 24) |
1251 | #endif |
1252 | |
1253 | static void die(char *fmt, ...) |
1254 | @@ -48,17 +53,14 @@ static void die(char *fmt, ...) |
1255 | exit(EXIT_FAILURE); |
1256 | } |
1257 | |
1258 | -static void write_file(char *filename, char *fmt, ...) |
1259 | +static void vmaybe_write_file(bool enoent_ok, char *filename, char *fmt, va_list ap) |
1260 | { |
1261 | char buf[4096]; |
1262 | int fd; |
1263 | ssize_t written; |
1264 | int buf_len; |
1265 | - va_list ap; |
1266 | |
1267 | - va_start(ap, fmt); |
1268 | buf_len = vsnprintf(buf, sizeof(buf), fmt, ap); |
1269 | - va_end(ap); |
1270 | if (buf_len < 0) { |
1271 | die("vsnprintf failed: %s\n", |
1272 | strerror(errno)); |
1273 | @@ -69,6 +71,8 @@ static void write_file(char *filename, char *fmt, ...) |
1274 | |
1275 | fd = open(filename, O_WRONLY); |
1276 | if (fd < 0) { |
1277 | + if ((errno == ENOENT) && enoent_ok) |
1278 | + return; |
1279 | die("open of %s failed: %s\n", |
1280 | filename, strerror(errno)); |
1281 | } |
1282 | @@ -87,6 +91,65 @@ static void write_file(char *filename, char *fmt, ...) |
1283 | } |
1284 | } |
1285 | |
1286 | +static void maybe_write_file(char *filename, char *fmt, ...) |
1287 | +{ |
1288 | + va_list ap; |
1289 | + |
1290 | + va_start(ap, fmt); |
1291 | + vmaybe_write_file(true, filename, fmt, ap); |
1292 | + va_end(ap); |
1293 | + |
1294 | +} |
1295 | + |
1296 | +static void write_file(char *filename, char *fmt, ...) |
1297 | +{ |
1298 | + va_list ap; |
1299 | + |
1300 | + va_start(ap, fmt); |
1301 | + vmaybe_write_file(false, filename, fmt, ap); |
1302 | + va_end(ap); |
1303 | + |
1304 | +} |
1305 | + |
1306 | +static int read_mnt_flags(const char *path) |
1307 | +{ |
1308 | + int ret; |
1309 | + struct statvfs stat; |
1310 | + int mnt_flags; |
1311 | + |
1312 | + ret = statvfs(path, &stat); |
1313 | + if (ret != 0) { |
1314 | + die("statvfs of %s failed: %s\n", |
1315 | + path, strerror(errno)); |
1316 | + } |
1317 | + if (stat.f_flag & ~(ST_RDONLY | ST_NOSUID | ST_NODEV | \ |
1318 | + ST_NOEXEC | ST_NOATIME | ST_NODIRATIME | ST_RELATIME | \ |
1319 | + ST_SYNCHRONOUS | ST_MANDLOCK)) { |
1320 | + die("Unrecognized mount flags\n"); |
1321 | + } |
1322 | + mnt_flags = 0; |
1323 | + if (stat.f_flag & ST_RDONLY) |
1324 | + mnt_flags |= MS_RDONLY; |
1325 | + if (stat.f_flag & ST_NOSUID) |
1326 | + mnt_flags |= MS_NOSUID; |
1327 | + if (stat.f_flag & ST_NODEV) |
1328 | + mnt_flags |= MS_NODEV; |
1329 | + if (stat.f_flag & ST_NOEXEC) |
1330 | + mnt_flags |= MS_NOEXEC; |
1331 | + if (stat.f_flag & ST_NOATIME) |
1332 | + mnt_flags |= MS_NOATIME; |
1333 | + if (stat.f_flag & ST_NODIRATIME) |
1334 | + mnt_flags |= MS_NODIRATIME; |
1335 | + if (stat.f_flag & ST_RELATIME) |
1336 | + mnt_flags |= MS_RELATIME; |
1337 | + if (stat.f_flag & ST_SYNCHRONOUS) |
1338 | + mnt_flags |= MS_SYNCHRONOUS; |
1339 | + if (stat.f_flag & ST_MANDLOCK) |
1340 | + mnt_flags |= ST_MANDLOCK; |
1341 | + |
1342 | + return mnt_flags; |
1343 | +} |
1344 | + |
1345 | static void create_and_enter_userns(void) |
1346 | { |
1347 | uid_t uid; |
1348 | @@ -100,13 +163,10 @@ static void create_and_enter_userns(void) |
1349 | strerror(errno)); |
1350 | } |
1351 | |
1352 | + maybe_write_file("/proc/self/setgroups", "deny"); |
1353 | write_file("/proc/self/uid_map", "0 %d 1", uid); |
1354 | write_file("/proc/self/gid_map", "0 %d 1", gid); |
1355 | |
1356 | - if (setgroups(0, NULL) != 0) { |
1357 | - die("setgroups failed: %s\n", |
1358 | - strerror(errno)); |
1359 | - } |
1360 | if (setgid(0) != 0) { |
1361 | die ("setgid(0) failed %s\n", |
1362 | strerror(errno)); |
1363 | @@ -118,7 +178,8 @@ static void create_and_enter_userns(void) |
1364 | } |
1365 | |
1366 | static |
1367 | -bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags) |
1368 | +bool test_unpriv_remount(const char *fstype, const char *mount_options, |
1369 | + int mount_flags, int remount_flags, int invalid_flags) |
1370 | { |
1371 | pid_t child; |
1372 | |
1373 | @@ -151,9 +212,11 @@ bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags) |
1374 | strerror(errno)); |
1375 | } |
1376 | |
1377 | - if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) { |
1378 | - die("mount of /tmp failed: %s\n", |
1379 | - strerror(errno)); |
1380 | + if (mount("testing", "/tmp", fstype, mount_flags, mount_options) != 0) { |
1381 | + die("mount of %s with options '%s' on /tmp failed: %s\n", |
1382 | + fstype, |
1383 | + mount_options? mount_options : "", |
1384 | + strerror(errno)); |
1385 | } |
1386 | |
1387 | create_and_enter_userns(); |
1388 | @@ -181,62 +244,127 @@ bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags) |
1389 | |
1390 | static bool test_unpriv_remount_simple(int mount_flags) |
1391 | { |
1392 | - return test_unpriv_remount(mount_flags, mount_flags, 0); |
1393 | + return test_unpriv_remount("ramfs", NULL, mount_flags, mount_flags, 0); |
1394 | } |
1395 | |
1396 | static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags) |
1397 | { |
1398 | - return test_unpriv_remount(mount_flags, mount_flags, invalid_flags); |
1399 | + return test_unpriv_remount("ramfs", NULL, mount_flags, mount_flags, |
1400 | + invalid_flags); |
1401 | +} |
1402 | + |
1403 | +static bool test_priv_mount_unpriv_remount(void) |
1404 | +{ |
1405 | + pid_t child; |
1406 | + int ret; |
1407 | + const char *orig_path = "/dev"; |
1408 | + const char *dest_path = "/tmp"; |
1409 | + int orig_mnt_flags, remount_mnt_flags; |
1410 | + |
1411 | + child = fork(); |
1412 | + if (child == -1) { |
1413 | + die("fork failed: %s\n", |
1414 | + strerror(errno)); |
1415 | + } |
1416 | + if (child != 0) { /* parent */ |
1417 | + pid_t pid; |
1418 | + int status; |
1419 | + pid = waitpid(child, &status, 0); |
1420 | + if (pid == -1) { |
1421 | + die("waitpid failed: %s\n", |
1422 | + strerror(errno)); |
1423 | + } |
1424 | + if (pid != child) { |
1425 | + die("waited for %d got %d\n", |
1426 | + child, pid); |
1427 | + } |
1428 | + if (!WIFEXITED(status)) { |
1429 | + die("child did not terminate cleanly\n"); |
1430 | + } |
1431 | + return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false; |
1432 | + } |
1433 | + |
1434 | + orig_mnt_flags = read_mnt_flags(orig_path); |
1435 | + |
1436 | + create_and_enter_userns(); |
1437 | + ret = unshare(CLONE_NEWNS); |
1438 | + if (ret != 0) { |
1439 | + die("unshare(CLONE_NEWNS) failed: %s\n", |
1440 | + strerror(errno)); |
1441 | + } |
1442 | + |
1443 | + ret = mount(orig_path, dest_path, "bind", MS_BIND | MS_REC, NULL); |
1444 | + if (ret != 0) { |
1445 | + die("recursive bind mount of %s onto %s failed: %s\n", |
1446 | + orig_path, dest_path, strerror(errno)); |
1447 | + } |
1448 | + |
1449 | + ret = mount(dest_path, dest_path, "none", |
1450 | + MS_REMOUNT | MS_BIND | orig_mnt_flags , NULL); |
1451 | + if (ret != 0) { |
1452 | + /* system("cat /proc/self/mounts"); */ |
1453 | + die("remount of /tmp failed: %s\n", |
1454 | + strerror(errno)); |
1455 | + } |
1456 | + |
1457 | + remount_mnt_flags = read_mnt_flags(dest_path); |
1458 | + if (orig_mnt_flags != remount_mnt_flags) { |
1459 | + die("Mount flags unexpectedly changed during remount of %s originally mounted on %s\n", |
1460 | + dest_path, orig_path); |
1461 | + } |
1462 | + exit(EXIT_SUCCESS); |
1463 | } |
1464 | |
1465 | int main(int argc, char **argv) |
1466 | { |
1467 | - if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) { |
1468 | + if (!test_unpriv_remount_simple(MS_RDONLY)) { |
1469 | die("MS_RDONLY malfunctions\n"); |
1470 | } |
1471 | - if (!test_unpriv_remount_simple(MS_NODEV)) { |
1472 | + if (!test_unpriv_remount("devpts", "newinstance", MS_NODEV, MS_NODEV, 0)) { |
1473 | die("MS_NODEV malfunctions\n"); |
1474 | } |
1475 | - if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) { |
1476 | + if (!test_unpriv_remount_simple(MS_NOSUID)) { |
1477 | die("MS_NOSUID malfunctions\n"); |
1478 | } |
1479 | - if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) { |
1480 | + if (!test_unpriv_remount_simple(MS_NOEXEC)) { |
1481 | die("MS_NOEXEC malfunctions\n"); |
1482 | } |
1483 | - if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV, |
1484 | - MS_NOATIME|MS_NODEV)) |
1485 | + if (!test_unpriv_remount_atime(MS_RELATIME, |
1486 | + MS_NOATIME)) |
1487 | { |
1488 | die("MS_RELATIME malfunctions\n"); |
1489 | } |
1490 | - if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV, |
1491 | - MS_NOATIME|MS_NODEV)) |
1492 | + if (!test_unpriv_remount_atime(MS_STRICTATIME, |
1493 | + MS_NOATIME)) |
1494 | { |
1495 | die("MS_STRICTATIME malfunctions\n"); |
1496 | } |
1497 | - if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV, |
1498 | - MS_STRICTATIME|MS_NODEV)) |
1499 | + if (!test_unpriv_remount_atime(MS_NOATIME, |
1500 | + MS_STRICTATIME)) |
1501 | { |
1502 | - die("MS_RELATIME malfunctions\n"); |
1503 | + die("MS_NOATIME malfunctions\n"); |
1504 | } |
1505 | - if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV, |
1506 | - MS_NOATIME|MS_NODEV)) |
1507 | + if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME, |
1508 | + MS_NOATIME)) |
1509 | { |
1510 | - die("MS_RELATIME malfunctions\n"); |
1511 | + die("MS_RELATIME|MS_NODIRATIME malfunctions\n"); |
1512 | } |
1513 | - if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV, |
1514 | - MS_NOATIME|MS_NODEV)) |
1515 | + if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME, |
1516 | + MS_NOATIME)) |
1517 | { |
1518 | - die("MS_RELATIME malfunctions\n"); |
1519 | + die("MS_STRICTATIME|MS_NODIRATIME malfunctions\n"); |
1520 | } |
1521 | - if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV, |
1522 | - MS_STRICTATIME|MS_NODEV)) |
1523 | + if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME, |
1524 | + MS_STRICTATIME)) |
1525 | { |
1526 | - die("MS_RELATIME malfunctions\n"); |
1527 | + die("MS_NOATIME|MS_DIRATIME malfunctions\n"); |
1528 | } |
1529 | - if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV, |
1530 | - MS_NOATIME|MS_NODEV)) |
1531 | + if (!test_unpriv_remount("ramfs", NULL, MS_STRICTATIME, 0, MS_NOATIME)) |
1532 | { |
1533 | die("Default atime malfunctions\n"); |
1534 | } |
1535 | + if (!test_priv_mount_unpriv_remount()) { |
1536 | + die("Mount flags unexpectedly changed after remount\n"); |
1537 | + } |
1538 | return EXIT_SUCCESS; |
1539 | } |
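
The selftest changes above show the user-visible side of the setgroups hardening in this release: after unsharing a user namespace, an unprivileged process now writes "deny" to /proc/self/setgroups before it writes gid_map. The following standalone C sketch (an illustration appended here, not part of the patch) mirrors that sequence from the updated unprivileged-remount-test.c; the helper name write_proc and its error handling are this example's own.

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Write a string to a /proc file; optionally tolerate ENOENT so the
 * same sequence still works on kernels without /proc/self/setgroups. */
static void write_proc(const char *path, const char *buf, int enoent_ok)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		if (errno == ENOENT && enoent_ok)
			return;
		perror(path);
		exit(EXIT_FAILURE);
	}
	if (write(fd, buf, strlen(buf)) != (ssize_t)strlen(buf)) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	close(fd);
}

int main(void)
{
	char map[64];
	int uid = getuid();
	int gid = getgid();

	if (unshare(CLONE_NEWUSER) != 0) {
		perror("unshare(CLONE_NEWUSER)");
		return EXIT_FAILURE;
	}

	/* Must happen before writing gid_map: once setgroups is denied,
	 * an unprivileged process may establish its own gid mapping. */
	write_proc("/proc/self/setgroups", "deny", 1);

	snprintf(map, sizeof(map), "0 %d 1", uid);
	write_proc("/proc/self/uid_map", map, 0);
	snprintf(map, sizeof(map), "0 %d 1", gid);
	write_proc("/proc/self/gid_map", map, 0);

	/* setgroups(2) now fails with EPERM inside this namespace. */
	return EXIT_SUCCESS;
}

Tolerating ENOENT on the setgroups write keeps the same binary working on kernels that predate this change, which is the reason the selftest gained maybe_write_file() alongside write_file().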