Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0156-5.4.57-all-fixes.patch

Parent Directory | Revision Log


Revision 3558 - (hide annotations) (download)
Tue Aug 11 08:35:00 2020 UTC (3 years, 10 months ago) by niro
File size: 14501 byte(s)
-linux-5.4.57
1 niro 3558 diff --git a/Makefile b/Makefile
2     index c33fb4eebd4d..dd753ef637fd 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 5
8     PATCHLEVEL = 4
9     -SUBLEVEL = 56
10     +SUBLEVEL = 57
11     EXTRAVERSION =
12     NAME = Kleptomaniac Octopus
13    
14     diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
15     index f44f448537f2..1a3eedbac4a2 100644
16     --- a/arch/arm/include/asm/percpu.h
17     +++ b/arch/arm/include/asm/percpu.h
18     @@ -5,6 +5,8 @@
19     #ifndef _ASM_ARM_PERCPU_H_
20     #define _ASM_ARM_PERCPU_H_
21    
22     +#include <asm/thread_info.h>
23     +
24     /*
25     * Same as asm-generic/percpu.h, except that we store the per cpu offset
26     * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
27     diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
28     index 7a24bad1a58b..076a4157a74f 100644
29     --- a/arch/arm64/include/asm/pointer_auth.h
30     +++ b/arch/arm64/include/asm/pointer_auth.h
31     @@ -3,7 +3,6 @@
32     #define __ASM_POINTER_AUTH_H
33    
34     #include <linux/bitops.h>
35     -#include <linux/random.h>
36    
37     #include <asm/cpufeature.h>
38     #include <asm/memory.h>
39     @@ -30,6 +29,13 @@ struct ptrauth_keys {
40     struct ptrauth_key apga;
41     };
42    
43     +/*
44     + * Only include random.h once ptrauth_keys_* structures are defined
45     + * to avoid yet another circular include hell (random.h * ends up
46     + * including asm/smp.h, which requires ptrauth_keys_kernel).
47     + */
48     +#include <linux/random.h>
49     +
50     static inline void ptrauth_keys_init(struct ptrauth_keys *keys)
51     {
52     if (system_supports_address_auth()) {
53     diff --git a/drivers/char/random.c b/drivers/char/random.c
54     index 8ff28c14af7e..e877c20e0ee0 100644
55     --- a/drivers/char/random.c
56     +++ b/drivers/char/random.c
57     @@ -1330,6 +1330,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
58    
59     fast_mix(fast_pool);
60     add_interrupt_bench(cycles);
61     + this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
62    
63     if (unlikely(crng_init == 0)) {
64     if ((fast_pool->count >= 64) &&
65     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
66     index 7e0c77de551b..a284d99a1ee5 100644
67     --- a/fs/ext4/inode.c
68     +++ b/fs/ext4/inode.c
69     @@ -3836,6 +3836,11 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
70     struct inode *inode = mapping->host;
71     size_t count = iov_iter_count(iter);
72     ssize_t ret;
73     + loff_t offset = iocb->ki_pos;
74     + loff_t size = i_size_read(inode);
75     +
76     + if (offset >= size)
77     + return 0;
78    
79     /*
80     * Shared inode_lock is enough for us - it protects against concurrent
81     diff --git a/include/linux/bpf.h b/include/linux/bpf.h
82     index 3bf3835d0e86..7aa0d8b5aaf0 100644
83     --- a/include/linux/bpf.h
84     +++ b/include/linux/bpf.h
85     @@ -956,11 +956,14 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
86     #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
87    
88     #if defined(CONFIG_BPF_STREAM_PARSER)
89     -int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
90     +int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
91     + struct bpf_prog *old, u32 which);
92     int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
93     +int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
94     #else
95     static inline int sock_map_prog_update(struct bpf_map *map,
96     - struct bpf_prog *prog, u32 which)
97     + struct bpf_prog *prog,
98     + struct bpf_prog *old, u32 which)
99     {
100     return -EOPNOTSUPP;
101     }
102     @@ -970,6 +973,12 @@ static inline int sock_map_get_from_fd(const union bpf_attr *attr,
103     {
104     return -EINVAL;
105     }
106     +
107     +static inline int sock_map_prog_detach(const union bpf_attr *attr,
108     + enum bpf_prog_type ptype)
109     +{
110     + return -EOPNOTSUPP;
111     +}
112     #endif
113    
114     #if defined(CONFIG_XDP_SOCKETS)
115     diff --git a/include/linux/prandom.h b/include/linux/prandom.h
116     new file mode 100644
117     index 000000000000..aa16e6468f91
118     --- /dev/null
119     +++ b/include/linux/prandom.h
120     @@ -0,0 +1,78 @@
121     +/* SPDX-License-Identifier: GPL-2.0 */
122     +/*
123     + * include/linux/prandom.h
124     + *
125     + * Include file for the fast pseudo-random 32-bit
126     + * generation.
127     + */
128     +#ifndef _LINUX_PRANDOM_H
129     +#define _LINUX_PRANDOM_H
130     +
131     +#include <linux/types.h>
132     +#include <linux/percpu.h>
133     +
134     +u32 prandom_u32(void);
135     +void prandom_bytes(void *buf, size_t nbytes);
136     +void prandom_seed(u32 seed);
137     +void prandom_reseed_late(void);
138     +
139     +struct rnd_state {
140     + __u32 s1, s2, s3, s4;
141     +};
142     +
143     +DECLARE_PER_CPU(struct rnd_state, net_rand_state);
144     +
145     +u32 prandom_u32_state(struct rnd_state *state);
146     +void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
147     +void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
148     +
149     +#define prandom_init_once(pcpu_state) \
150     + DO_ONCE(prandom_seed_full_state, (pcpu_state))
151     +
152     +/**
153     + * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
154     + * @ep_ro: right open interval endpoint
155     + *
156     + * Returns a pseudo-random number that is in interval [0, ep_ro). Note
157     + * that the result depends on PRNG being well distributed in [0, ~0U]
158     + * u32 space. Here we use maximally equidistributed combined Tausworthe
159     + * generator, that is, prandom_u32(). This is useful when requesting a
160     + * random index of an array containing ep_ro elements, for example.
161     + *
162     + * Returns: pseudo-random number in interval [0, ep_ro)
163     + */
164     +static inline u32 prandom_u32_max(u32 ep_ro)
165     +{
166     + return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
167     +}
168     +
169     +/*
170     + * Handle minimum values for seeds
171     + */
172     +static inline u32 __seed(u32 x, u32 m)
173     +{
174     + return (x < m) ? x + m : x;
175     +}
176     +
177     +/**
178     + * prandom_seed_state - set seed for prandom_u32_state().
179     + * @state: pointer to state structure to receive the seed.
180     + * @seed: arbitrary 64-bit value to use as a seed.
181     + */
182     +static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
183     +{
184     + u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
185     +
186     + state->s1 = __seed(i, 2U);
187     + state->s2 = __seed(i, 8U);
188     + state->s3 = __seed(i, 16U);
189     + state->s4 = __seed(i, 128U);
190     +}
191     +
192     +/* Pseudo random number generator from numerical recipes. */
193     +static inline u32 next_pseudo_random32(u32 seed)
194     +{
195     + return seed * 1664525 + 1013904223;
196     +}
197     +
198     +#endif
199     diff --git a/include/linux/random.h b/include/linux/random.h
200     index f189c927fdea..5b3ec7d2791f 100644
201     --- a/include/linux/random.h
202     +++ b/include/linux/random.h
203     @@ -108,61 +108,12 @@ declare_get_random_var_wait(long)
204    
205     unsigned long randomize_page(unsigned long start, unsigned long range);
206    
207     -u32 prandom_u32(void);
208     -void prandom_bytes(void *buf, size_t nbytes);
209     -void prandom_seed(u32 seed);
210     -void prandom_reseed_late(void);
211     -
212     -struct rnd_state {
213     - __u32 s1, s2, s3, s4;
214     -};
215     -
216     -u32 prandom_u32_state(struct rnd_state *state);
217     -void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
218     -void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
219     -
220     -#define prandom_init_once(pcpu_state) \
221     - DO_ONCE(prandom_seed_full_state, (pcpu_state))
222     -
223     -/**
224     - * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
225     - * @ep_ro: right open interval endpoint
226     - *
227     - * Returns a pseudo-random number that is in interval [0, ep_ro). Note
228     - * that the result depends on PRNG being well distributed in [0, ~0U]
229     - * u32 space. Here we use maximally equidistributed combined Tausworthe
230     - * generator, that is, prandom_u32(). This is useful when requesting a
231     - * random index of an array containing ep_ro elements, for example.
232     - *
233     - * Returns: pseudo-random number in interval [0, ep_ro)
234     - */
235     -static inline u32 prandom_u32_max(u32 ep_ro)
236     -{
237     - return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
238     -}
239     -
240     /*
241     - * Handle minimum values for seeds
242     + * This is designed to be standalone for just prandom
243     + * users, but for now we include it from <linux/random.h>
244     + * for legacy reasons.
245     */
246     -static inline u32 __seed(u32 x, u32 m)
247     -{
248     - return (x < m) ? x + m : x;
249     -}
250     -
251     -/**
252     - * prandom_seed_state - set seed for prandom_u32_state().
253     - * @state: pointer to state structure to receive the seed.
254     - * @seed: arbitrary 64-bit value to use as a seed.
255     - */
256     -static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
257     -{
258     - u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
259     -
260     - state->s1 = __seed(i, 2U);
261     - state->s2 = __seed(i, 8U);
262     - state->s3 = __seed(i, 16U);
263     - state->s4 = __seed(i, 128U);
264     -}
265     +#include <linux/prandom.h>
266    
267     #ifdef CONFIG_ARCH_RANDOM
268     # include <asm/archrandom.h>
269     @@ -193,10 +144,4 @@ static inline bool arch_has_random_seed(void)
270     }
271     #endif
272    
273     -/* Pseudo random number generator from numerical recipes. */
274     -static inline u32 next_pseudo_random32(u32 seed)
275     -{
276     - return seed * 1664525 + 1013904223;
277     -}
278     -
279     #endif /* _LINUX_RANDOM_H */
280     diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
281     index 4bdb5e4bbd6a..20f3550b0b11 100644
282     --- a/include/linux/skmsg.h
283     +++ b/include/linux/skmsg.h
284     @@ -450,6 +450,19 @@ static inline void psock_set_prog(struct bpf_prog **pprog,
285     bpf_prog_put(prog);
286     }
287    
288     +static inline int psock_replace_prog(struct bpf_prog **pprog,
289     + struct bpf_prog *prog,
290     + struct bpf_prog *old)
291     +{
292     + if (cmpxchg(pprog, old, prog) != old)
293     + return -ENOENT;
294     +
295     + if (old)
296     + bpf_prog_put(old);
297     +
298     + return 0;
299     +}
300     +
301     static inline void psock_progs_drop(struct sk_psock_progs *progs)
302     {
303     psock_set_prog(&progs->msg_parser, NULL);
304     diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
305     index 8bc904f9badb..bf03d04a9e2f 100644
306     --- a/kernel/bpf/syscall.c
307     +++ b/kernel/bpf/syscall.c
308     @@ -2029,10 +2029,10 @@ static int bpf_prog_detach(const union bpf_attr *attr)
309     ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
310     break;
311     case BPF_SK_MSG_VERDICT:
312     - return sock_map_get_from_fd(attr, NULL);
313     + return sock_map_prog_detach(attr, BPF_PROG_TYPE_SK_MSG);
314     case BPF_SK_SKB_STREAM_PARSER:
315     case BPF_SK_SKB_STREAM_VERDICT:
316     - return sock_map_get_from_fd(attr, NULL);
317     + return sock_map_prog_detach(attr, BPF_PROG_TYPE_SK_SKB);
318     case BPF_LIRC_MODE2:
319     return lirc_prog_detach(attr);
320     case BPF_FLOW_DISSECTOR:
321     diff --git a/kernel/time/timer.c b/kernel/time/timer.c
322     index 1e9b81a930c0..a3ae244b1bcd 100644
323     --- a/kernel/time/timer.c
324     +++ b/kernel/time/timer.c
325     @@ -43,6 +43,7 @@
326     #include <linux/sched/debug.h>
327     #include <linux/slab.h>
328     #include <linux/compat.h>
329     +#include <linux/random.h>
330    
331     #include <linux/uaccess.h>
332     #include <asm/unistd.h>
333     @@ -1742,6 +1743,13 @@ void update_process_times(int user_tick)
334     scheduler_tick();
335     if (IS_ENABLED(CONFIG_POSIX_TIMERS))
336     run_posix_cpu_timers();
337     +
338     + /* The current CPU might make use of net randoms without receiving IRQs
339     + * to renew them often enough. Let's update the net_rand_state from a
340     + * non-constant value that's not affine to the number of calls to make
341     + * sure it's updated when there's some activity (we don't care in idle).
342     + */
343     + this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick);
344     }
345    
346     /**
347     diff --git a/lib/random32.c b/lib/random32.c
348     index 763b920a6206..3d749abb9e80 100644
349     --- a/lib/random32.c
350     +++ b/lib/random32.c
351     @@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void)
352     }
353     #endif
354    
355     -static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
356     +DEFINE_PER_CPU(struct rnd_state, net_rand_state);
357    
358     /**
359     * prandom_u32_state - seeded pseudo-random number generator.
360     diff --git a/net/core/sock_map.c b/net/core/sock_map.c
361     index 6bbc118bf00e..df52061f99f7 100644
362     --- a/net/core/sock_map.c
363     +++ b/net/core/sock_map.c
364     @@ -71,7 +71,42 @@ int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
365     map = __bpf_map_get(f);
366     if (IS_ERR(map))
367     return PTR_ERR(map);
368     - ret = sock_map_prog_update(map, prog, attr->attach_type);
369     + ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
370     + fdput(f);
371     + return ret;
372     +}
373     +
374     +int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
375     +{
376     + u32 ufd = attr->target_fd;
377     + struct bpf_prog *prog;
378     + struct bpf_map *map;
379     + struct fd f;
380     + int ret;
381     +
382     + if (attr->attach_flags)
383     + return -EINVAL;
384     +
385     + f = fdget(ufd);
386     + map = __bpf_map_get(f);
387     + if (IS_ERR(map))
388     + return PTR_ERR(map);
389     +
390     + prog = bpf_prog_get(attr->attach_bpf_fd);
391     + if (IS_ERR(prog)) {
392     + ret = PTR_ERR(prog);
393     + goto put_map;
394     + }
395     +
396     + if (prog->type != ptype) {
397     + ret = -EINVAL;
398     + goto put_prog;
399     + }
400     +
401     + ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
402     +put_prog:
403     + bpf_prog_put(prog);
404     +put_map:
405     fdput(f);
406     return ret;
407     }
408     @@ -1015,27 +1050,32 @@ static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
409     }
410    
411     int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
412     - u32 which)
413     + struct bpf_prog *old, u32 which)
414     {
415     struct sk_psock_progs *progs = sock_map_progs(map);
416     + struct bpf_prog **pprog;
417    
418     if (!progs)
419     return -EOPNOTSUPP;
420    
421     switch (which) {
422     case BPF_SK_MSG_VERDICT:
423     - psock_set_prog(&progs->msg_parser, prog);
424     + pprog = &progs->msg_parser;
425     break;
426     case BPF_SK_SKB_STREAM_PARSER:
427     - psock_set_prog(&progs->skb_parser, prog);
428     + pprog = &progs->skb_parser;
429     break;
430     case BPF_SK_SKB_STREAM_VERDICT:
431     - psock_set_prog(&progs->skb_verdict, prog);
432     + pprog = &progs->skb_verdict;
433     break;
434     default:
435     return -EOPNOTSUPP;
436     }
437    
438     + if (old)
439     + return psock_replace_prog(pprog, prog, old);
440     +
441     + psock_set_prog(pprog, prog);
442     return 0;
443     }
444    
445     diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
446     index e1f1becda529..c812f0178b64 100644
447     --- a/tools/testing/selftests/bpf/test_maps.c
448     +++ b/tools/testing/selftests/bpf/test_maps.c
449     @@ -793,19 +793,19 @@ static void test_sockmap(unsigned int tasks, void *data)
450     }
451    
452     err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_PARSER);
453     - if (err) {
454     + if (!err) {
455     printf("Failed empty parser prog detach\n");
456     goto out_sockmap;
457     }
458    
459     err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_VERDICT);
460     - if (err) {
461     + if (!err) {
462     printf("Failed empty verdict prog detach\n");
463     goto out_sockmap;
464     }
465    
466     err = bpf_prog_detach(fd, BPF_SK_MSG_VERDICT);
467     - if (err) {
468     + if (!err) {
469     printf("Failed empty msg verdict prog detach\n");
470     goto out_sockmap;
471     }
472     @@ -1094,19 +1094,19 @@ static void test_sockmap(unsigned int tasks, void *data)
473     assert(status == 0);
474     }
475    
476     - err = bpf_prog_detach(map_fd_rx, __MAX_BPF_ATTACH_TYPE);
477     + err = bpf_prog_detach2(parse_prog, map_fd_rx, __MAX_BPF_ATTACH_TYPE);
478     if (!err) {
479     printf("Detached an invalid prog type.\n");
480     goto out_sockmap;
481     }
482    
483     - err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
484     + err = bpf_prog_detach2(parse_prog, map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
485     if (err) {
486     printf("Failed parser prog detach\n");
487     goto out_sockmap;
488     }
489    
490     - err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
491     + err = bpf_prog_detach2(verdict_prog, map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
492     if (err) {
493     printf("Failed parser prog detach\n");
494     goto out_sockmap;