Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.14/0111-4.14.12-all-fixes.patch



Revision 3072
Wed Jan 17 13:27:05 2018 UTC by niro
File size: 16563 byte(s)
-linux-4.14.12
diff --git a/Makefile b/Makefile
index 655887067dc7..20f7d4de0f1c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 11
+SUBLEVEL = 12
 EXTRAVERSION =
 NAME = Petit Gorille
 
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 40f17009ec20..98d5358e4041 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -190,8 +190,13 @@ ENTRY(entry_SYSCALL_compat)
     /* Interrupts are off on entry. */
     swapgs
 
-    /* Stash user ESP and switch to the kernel stack. */
+    /* Stash user ESP */
     movl    %esp, %r8d
+
+    /* Use %rsp as scratch reg. User ESP is stashed in r8 */
+    SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
+
+    /* Switch to the kernel stack */
     movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
     /* Construct struct pt_regs on stack */
@@ -219,12 +224,6 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
     pushq   $0          /* pt_regs->r14 = 0 */
     pushq   $0          /* pt_regs->r15 = 0 */
 
-    /*
-     * We just saved %rdi so it is safe to clobber. It is not
-     * preserved during the C calls inside TRACE_IRQS_OFF anyway.
-     */
-    SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
-
     /*
     * User mode is traced as though IRQs are on, and SYSENTER
     * turned them off.
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index c1688c2d0a12..1f86e1b0a5cd 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -56,18 +56,27 @@ void unwind_start(struct unwind_state *state, struct task_struct *task,
 
 #if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER)
 /*
- * WARNING: The entire pt_regs may not be safe to dereference. In some cases,
- * only the iret frame registers are accessible. Use with caution!
+ * If 'partial' returns true, only the iret frame registers are valid.
  */
-static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
+static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state,
+                                                    bool *partial)
 {
     if (unwind_done(state))
         return NULL;
 
+    if (partial) {
+#ifdef CONFIG_UNWINDER_ORC
+        *partial = !state->full_regs;
+#else
+        *partial = false;
+#endif
+    }
+
     return state->regs;
 }
 #else
-static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
+static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state,
+                                                    bool *partial)
 {
     return NULL;
 }
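
Editorial note, not part of the patch: the call sites updated further down show both modes of the widened API; dumpstack.c passes a bool to learn whether only the iret frame is valid, while stacktrace.c passes NULL because it never dereferences beyond the regs pointer. A minimal sketch of a caller, assuming kernel context (my_dump_entry_regs is a hypothetical name):

#include <linux/sched.h>
#include <linux/printk.h>
#include <asm/unwind.h>

/* Sketch only: walk a task's frames and flag partial entry regs. */
static void my_dump_entry_regs(struct task_struct *task)
{
    struct unwind_state state;
    bool partial;

    for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
         unwind_next_frame(&state)) {
        struct pt_regs *regs = unwind_get_entry_regs(&state, &partial);

        if (regs)
            pr_info("entry regs at %p (%s)\n", regs,
                    partial ? "iret frame only" : "full");
    }
}
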
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index f2a94dfb434e..b1be494ab4e8 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -899,8 +899,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
     setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 
-    /* Assume for now that ALL x86 CPUs are insecure */
-    setup_force_cpu_bug(X86_BUG_CPU_INSECURE);
+    if (c->x86_vendor != X86_VENDOR_AMD)
+        setup_force_cpu_bug(X86_BUG_CPU_INSECURE);
 
     fpu__init_system(c);
 
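Editorial aside, not part of the patch: X86_BUG_CPU_INSECURE is the bit that gates page-table isolation in this kernel series, so no longer forcing it on AMD keeps PTI (and its overhead) off there. A hedged sketch of the kind of consumer check, assuming kernel context (the function name is illustrative; boot_cpu_has_bug() is the standard accessor):

#include <asm/cpufeature.h>

/* Sketch: how init code can key off the forced bug bit. */
static void __init my_pti_check(void)
{
    if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
        return; /* CPU not affected, e.g. AMD after this change */
    /* ... enable kernel page-table isolation ... */
}
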
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 5fa110699ed2..afbecff161d1 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -76,12 +76,23 @@ void show_iret_regs(struct pt_regs *regs)
         regs->sp, regs->flags);
 }
 
-static void show_regs_safe(struct stack_info *info, struct pt_regs *regs)
+static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
+                                  bool partial)
 {
-    if (on_stack(info, regs, sizeof(*regs)))
+    /*
+     * These on_stack() checks aren't strictly necessary: the unwind code
+     * has already validated the 'regs' pointer. The checks are done for
+     * ordering reasons: if the registers are on the next stack, we don't
+     * want to print them out yet. Otherwise they'll be shown as part of
+     * the wrong stack. Later, when show_trace_log_lvl() switches to the
+     * next stack, this function will be called again with the same regs so
+     * they can be printed in the right context.
+     */
+    if (!partial && on_stack(info, regs, sizeof(*regs))) {
         __show_regs(regs, 0);
-    else if (on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
-              IRET_FRAME_SIZE)) {
+
+    } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
+                                   IRET_FRAME_SIZE)) {
         /*
         * When an interrupt or exception occurs in entry code, the
         * full pt_regs might not have been saved yet. In that case
@@ -98,11 +109,13 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
     struct stack_info stack_info = {0};
     unsigned long visit_mask = 0;
     int graph_idx = 0;
+    bool partial;
 
     printk("%sCall Trace:\n", log_lvl);
 
     unwind_start(&state, task, regs, stack);
     stack = stack ? : get_stack_pointer(task, regs);
+    regs = unwind_get_entry_regs(&state, &partial);
 
     /*
     * Iterate through the stacks, starting with the current stack pointer.
@@ -120,7 +133,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
     * - hardirq stack
     * - entry stack
     */
-    for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
+    for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
         const char *stack_name;
 
         if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
@@ -140,7 +153,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
             printk("%s <%s>\n", log_lvl, stack_name);
 
         if (regs)
-            show_regs_safe(&stack_info, regs);
+            show_regs_if_on_stack(&stack_info, regs, partial);
 
         /*
         * Scan the stack, printing any text addresses we find. At the
@@ -164,7 +177,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 
             /*
             * Don't print regs->ip again if it was already printed
-            * by show_regs_safe() below.
+            * by show_regs_if_on_stack().
             */
             if (regs && stack == &regs->ip)
                 goto next;
@@ -199,9 +212,9 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
             unwind_next_frame(&state);
 
             /* if the frame has entry regs, print them */
-            regs = unwind_get_entry_regs(&state);
+            regs = unwind_get_entry_regs(&state, &partial);
             if (regs)
-                show_regs_safe(&stack_info, regs);
+                show_regs_if_on_stack(&stack_info, regs, partial);
         }
 
         if (stack_name)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 517415978409..3cb2486c47e4 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -47,7 +47,7 @@
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss_rw) = {
+__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
     .x86_tss = {
         /*
         * .sp0 is only used when entering ring 0 from a lower
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 8dabd7bf1673..60244bfaf88f 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -98,7 +98,7 @@ static int __save_stack_trace_reliable(struct stack_trace *trace,
     for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
          unwind_next_frame(&state)) {
 
-        regs = unwind_get_entry_regs(&state);
+        regs = unwind_get_entry_regs(&state, NULL);
         if (regs) {
             /*
             * Kernel mode registers on the stack indicate an
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index bce8aea65606..2da28ba97508 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -367,7 +367,8 @@ static void __init pti_setup_espfix64(void)
 static void __init pti_clone_entry_text(void)
 {
     pti_clone_pmds((unsigned long) __entry_text_start,
-                   (unsigned long) __irqentry_text_end, _PAGE_RW);
+                   (unsigned long) __irqentry_text_end,
+                   _PAGE_RW | _PAGE_GLOBAL);
 }
 
 /*
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index f4c070ea8384..c90fba3ed861 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -154,6 +154,8 @@ struct m41t80_data {
     struct rtc_device *rtc;
 #ifdef CONFIG_COMMON_CLK
     struct clk_hw sqw;
+    unsigned long freq;
+    unsigned int sqwe;
 #endif
 };
 
@@ -443,43 +445,40 @@ static SIMPLE_DEV_PM_OPS(m41t80_pm, m41t80_suspend, m41t80_resume);
 #ifdef CONFIG_COMMON_CLK
 #define sqw_to_m41t80_data(_hw) container_of(_hw, struct m41t80_data, sqw)
 
-static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw,
-                                            unsigned long parent_rate)
+static unsigned long m41t80_decode_freq(int setting)
+{
+    return (setting == 0) ? 0 : (setting == 1) ? M41T80_SQW_MAX_FREQ :
+        M41T80_SQW_MAX_FREQ >> setting;
+}
+
+static unsigned long m41t80_get_freq(struct m41t80_data *m41t80)
 {
-    struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
     struct i2c_client *client = m41t80->client;
     int reg_sqw = (m41t80->features & M41T80_FEATURE_SQ_ALT) ?
         M41T80_REG_WDAY : M41T80_REG_SQW;
     int ret = i2c_smbus_read_byte_data(client, reg_sqw);
-    unsigned long val = M41T80_SQW_MAX_FREQ;
 
     if (ret < 0)
         return 0;
+    return m41t80_decode_freq(ret >> 4);
+}
 
-    ret >>= 4;
-    if (ret == 0)
-        val = 0;
-    else if (ret > 1)
-        val = val / (1 << ret);
-
-    return val;
+static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw,
+                                            unsigned long parent_rate)
+{
+    return sqw_to_m41t80_data(hw)->freq;
 }
 
 static long m41t80_sqw_round_rate(struct clk_hw *hw, unsigned long rate,
                                   unsigned long *prate)
 {
-    int i, freq = M41T80_SQW_MAX_FREQ;
-
-    if (freq <= rate)
-        return freq;
-
-    for (i = 2; i <= ilog2(M41T80_SQW_MAX_FREQ); i++) {
-        freq /= 1 << i;
-        if (freq <= rate)
-            return freq;
-    }
-
-    return 0;
+    if (rate >= M41T80_SQW_MAX_FREQ)
+        return M41T80_SQW_MAX_FREQ;
+    if (rate >= M41T80_SQW_MAX_FREQ / 4)
+        return M41T80_SQW_MAX_FREQ / 4;
+    if (!rate)
+        return 0;
+    return 1 << ilog2(rate);
 }
 
 static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -491,17 +490,12 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
         M41T80_REG_WDAY : M41T80_REG_SQW;
     int reg, ret, val = 0;
 
-    if (rate) {
-        if (!is_power_of_2(rate))
-            return -EINVAL;
-        val = ilog2(rate);
-        if (val == ilog2(M41T80_SQW_MAX_FREQ))
-            val = 1;
-        else if (val < (ilog2(M41T80_SQW_MAX_FREQ) - 1))
-            val = ilog2(M41T80_SQW_MAX_FREQ) - val;
-        else
-            return -EINVAL;
-    }
+    if (rate >= M41T80_SQW_MAX_FREQ)
+        val = 1;
+    else if (rate >= M41T80_SQW_MAX_FREQ / 4)
+        val = 2;
+    else if (rate)
+        val = 15 - ilog2(rate);
 
     reg = i2c_smbus_read_byte_data(client, reg_sqw);
     if (reg < 0)
@@ -510,10 +504,9 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
     reg = (reg & 0x0f) | (val << 4);
 
     ret = i2c_smbus_write_byte_data(client, reg_sqw, reg);
-    if (ret < 0)
-        return ret;
-
-    return -EINVAL;
+    if (!ret)
+        m41t80->freq = m41t80_decode_freq(val);
+    return ret;
 }
 
 static int m41t80_sqw_control(struct clk_hw *hw, bool enable)
@@ -530,7 +523,10 @@ static int m41t80_sqw_control(struct clk_hw *hw, bool enable)
     else
         ret &= ~M41T80_ALMON_SQWE;
 
-    return i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret);
+    ret = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret);
+    if (!ret)
+        m41t80->sqwe = enable;
+    return ret;
 }
 
 static int m41t80_sqw_prepare(struct clk_hw *hw)
@@ -545,14 +541,7 @@ static void m41t80_sqw_unprepare(struct clk_hw *hw)
 
 static int m41t80_sqw_is_prepared(struct clk_hw *hw)
 {
-    struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
-    struct i2c_client *client = m41t80->client;
-    int ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
-
-    if (ret < 0)
-        return ret;
-
-    return !!(ret & M41T80_ALMON_SQWE);
+    return sqw_to_m41t80_data(hw)->sqwe;
 }
 
 static const struct clk_ops m41t80_sqw_ops = {
@@ -587,6 +576,7 @@ static struct clk *m41t80_sqw_register_clk(struct m41t80_data *m41t80)
     init.parent_names = NULL;
     init.num_parents = 0;
     m41t80->sqw.init = &init;
+    m41t80->freq = m41t80_get_freq(m41t80);
 
     /* optional override of the clockname */
     of_property_read_string(node, "clock-output-names", &init.name);
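
Worked example (editorial, not from the patch): with M41T80_SQW_MAX_FREQ at 32768 Hz as in this driver, register setting 0 means the square wave is off, setting 1 selects 32768 Hz, and settings 2-15 select 32768 >> setting, i.e. 8192 Hz down to 1 Hz; the hardware has no 16384 Hz step, which is why round_rate clamps to MAX_FREQ / 4. A standalone userspace sketch of the decode table:

#include <stdio.h>

#define M41T80_SQW_MAX_FREQ 32768   /* as in the driver */

/* Mirrors m41t80_decode_freq() from the patch. */
static unsigned long decode_freq(int setting)
{
    return (setting == 0) ? 0 : (setting == 1) ? M41T80_SQW_MAX_FREQ :
        M41T80_SQW_MAX_FREQ >> setting;
}

int main(void)
{
    int s;

    /* RS=0 -> 0 Hz, RS=1 -> 32768 Hz, RS=2..15 -> 8192..1 Hz */
    for (s = 0; s < 16; s++)
        printf("RS=%2d -> %5lu Hz\n", s, decode_freq(s));
    return 0;
}
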
diff --git a/fs/exec.c b/fs/exec.c
index 3e14ba25f678..acec119fcc31 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1350,9 +1350,14 @@ void setup_new_exec(struct linux_binprm * bprm)
 
     current->sas_ss_sp = current->sas_ss_size = 0;
 
-    /* Figure out dumpability. */
+    /*
+     * Figure out dumpability. Note that this checking only of current
+     * is wrong, but userspace depends on it. This should be testing
+     * bprm->secureexec instead.
+     */
     if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
-        bprm->secureexec)
+        !(uid_eq(current_euid(), current_uid()) &&
+          gid_eq(current_egid(), current_gid())))
         set_dumpable(current->mm, suid_dumpable);
     else
         set_dumpable(current->mm, SUID_DUMP_USER);
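
For illustration (editorial, not part of the patch): the restored check makes dumpability depend only on whether the task's effective IDs diverge from its real IDs. A runnable userspace model of the predicate:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Models the kernel predicate: euid/egid differing from uid/gid
 * routes the task to the suid_dumpable policy. */
int main(void)
{
    bool id_changed = (geteuid() != getuid()) || (getegid() != getgid());

    printf("dumpable policy: %s\n",
           id_changed ? "suid_dumpable" : "SUID_DUMP_USER");
    return 0;
}
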
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 2a6093840e7e..6bc16bb61b55 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1362,29 +1362,36 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
     struct net *net = xp_net(policy);
     int nx;
     int i, error;
+    xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
+    xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
     xfrm_address_t tmp;
 
     for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
         struct xfrm_state *x;
-        xfrm_address_t *local;
-        xfrm_address_t *remote;
+        xfrm_address_t *remote = daddr;
+        xfrm_address_t *local = saddr;
         struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
 
-        remote = &tmpl->id.daddr;
-        local = &tmpl->saddr;
-        if (xfrm_addr_any(local, tmpl->encap_family)) {
-            error = xfrm_get_saddr(net, fl->flowi_oif,
-                                   &tmp, remote,
-                                   tmpl->encap_family, 0);
-            if (error)
-                goto fail;
-            local = &tmp;
+        if (tmpl->mode == XFRM_MODE_TUNNEL ||
+            tmpl->mode == XFRM_MODE_BEET) {
+            remote = &tmpl->id.daddr;
+            local = &tmpl->saddr;
+            if (xfrm_addr_any(local, tmpl->encap_family)) {
+                error = xfrm_get_saddr(net, fl->flowi_oif,
+                                       &tmp, remote,
+                                       tmpl->encap_family, 0);
+                if (error)
+                    goto fail;
+                local = &tmp;
+            }
         }
 
         x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
 
         if (x && x->km.state == XFRM_STATE_VALID) {
             xfrm[nx++] = x;
+            daddr = remote;
+            saddr = local;
             continue;
         }
         if (x) {
diff --git a/security/commoncap.c b/security/commoncap.c
index fc46f5b85251..7b01431d1e19 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -348,21 +348,18 @@ static __u32 sansflags(__u32 m)
     return m & ~VFS_CAP_FLAGS_EFFECTIVE;
 }
 
-static bool is_v2header(size_t size, __le32 magic)
+static bool is_v2header(size_t size, const struct vfs_cap_data *cap)
 {
-    __u32 m = le32_to_cpu(magic);
     if (size != XATTR_CAPS_SZ_2)
         return false;
-    return sansflags(m) == VFS_CAP_REVISION_2;
+    return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_2;
 }
 
-static bool is_v3header(size_t size, __le32 magic)
+static bool is_v3header(size_t size, const struct vfs_cap_data *cap)
 {
-    __u32 m = le32_to_cpu(magic);
-
     if (size != XATTR_CAPS_SZ_3)
         return false;
-    return sansflags(m) == VFS_CAP_REVISION_3;
+    return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_3;
 }
 
 /*
@@ -405,7 +402,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
 
     fs_ns = inode->i_sb->s_user_ns;
     cap = (struct vfs_cap_data *) tmpbuf;
-    if (is_v2header((size_t) ret, cap->magic_etc)) {
+    if (is_v2header((size_t) ret, cap)) {
         /* If this is sizeof(vfs_cap_data) then we're ok with the
         * on-disk value, so return that. */
         if (alloc)
@@ -413,7 +410,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
         else
             kfree(tmpbuf);
         return ret;
-    } else if (!is_v3header((size_t) ret, cap->magic_etc)) {
+    } else if (!is_v3header((size_t) ret, cap)) {
         kfree(tmpbuf);
         return -EINVAL;
     }
@@ -470,9 +467,9 @@ static kuid_t rootid_from_xattr(const void *value, size_t size,
     return make_kuid(task_ns, rootid);
 }
 
-static bool validheader(size_t size, __le32 magic)
+static bool validheader(size_t size, const struct vfs_cap_data *cap)
 {
-    return is_v2header(size, magic) || is_v3header(size, magic);
+    return is_v2header(size, cap) || is_v3header(size, cap);
 }
 
 /*
@@ -495,7 +492,7 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size)
 
     if (!*ivalue)
         return -EINVAL;
-    if (!validheader(size, cap->magic_etc))
+    if (!validheader(size, cap))
         return -EINVAL;
     if (!capable_wrt_inode_uidgid(inode, CAP_SETFCAP))
         return -EPERM;
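
Editorial sketch of why passing the whole header matters: the helpers now gate on the buffer size before touching magic_etc, so callers can hand over the raw xattr blob and let the helper reject short data instead of reading past it. An illustrative userspace check using the uapi definitions (assumes a little-endian host, so le32_to_cpu() is elided; my_is_v2header is a hypothetical name):

#include <stdbool.h>
#include <stddef.h>
#include <linux/capability.h>   /* struct vfs_cap_data, XATTR_CAPS_SZ_2, VFS_CAP_* */

/* Mirrors the patched is_v2header(): size gate first, then magic. */
static bool my_is_v2header(size_t size, const struct vfs_cap_data *cap)
{
    if (size != XATTR_CAPS_SZ_2)
        return false;
    return (cap->magic_etc & ~VFS_CAP_FLAGS_EFFECTIVE) == VFS_CAP_REVISION_2;
}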