Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.14/0125-4.14.26-all-fixes.patch

Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 19823 byte(s)
-added up to patches-4.14.79
1 niro 3238 diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt
2     index 3c65feb83010..a81c97a4b4a5 100644
3     --- a/Documentation/virtual/kvm/cpuid.txt
4     +++ b/Documentation/virtual/kvm/cpuid.txt
5     @@ -54,6 +54,10 @@ KVM_FEATURE_PV_UNHALT || 7 || guest checks this feature bit
6     || || before enabling paravirtualized
7     || || spinlock support.
8     ------------------------------------------------------------------------------
9     +KVM_FEATURE_ASYNC_PF_VMEXIT || 10 || paravirtualized async PF VM exit
10     + || || can be enabled by setting bit 2
11     + || || when writing to msr 0x4b564d02
12     +------------------------------------------------------------------------------
13     KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side
14     || || per-cpu warps are expected in
15     || || kvmclock.
16     diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt
17     index 1ebecc115dc6..f3f0d57ced8e 100644
18     --- a/Documentation/virtual/kvm/msr.txt
19     +++ b/Documentation/virtual/kvm/msr.txt
20     @@ -170,7 +170,8 @@ MSR_KVM_ASYNC_PF_EN: 0x4b564d02
21     when asynchronous page faults are enabled on the vcpu 0 when
22     disabled. Bit 1 is 1 if asynchronous page faults can be injected
23     when vcpu is in cpl == 0. Bit 2 is 1 if asynchronous page faults
24     - are delivered to L1 as #PF vmexits.
25     + are delivered to L1 as #PF vmexits. Bit 2 can be set only if
26     + KVM_FEATURE_ASYNC_PF_VMEXIT is present in CPUID.
27    
28     First 4 byte of 64 byte memory location will be written to by
29     the hypervisor at the time of asynchronous page fault (APF)
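
The two documentation hunks above describe the contract that the arch/x86/kernel/kvm.c and arch/x86/kvm/cpuid.c hunks later in this patch implement: a guest may set bit 2 of MSR 0x4b564d02 only once the host has advertised KVM_FEATURE_ASYNC_PF_VMEXIT. A minimal guest-side sketch of that sequence (identifiers as defined in the kernel headers; everything apart from the feature check and the MSR write is simplified for illustration):

    /* As in kvm_guest_cpu_init(): pa holds the physical address of this
     * CPU's apf_reason area, with the enable bit set.
     */
    u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

    pa |= KVM_ASYNC_PF_ENABLED;
    if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
        pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;   /* bit 2 */

    wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);                /* MSR 0x4b564d02 */
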
30     diff --git a/Makefile b/Makefile
31     index 0fdae0f455ef..666182dda187 100644
32     --- a/Makefile
33     +++ b/Makefile
34     @@ -1,7 +1,7 @@
35     # SPDX-License-Identifier: GPL-2.0
36     VERSION = 4
37     PATCHLEVEL = 14
38     -SUBLEVEL = 25
39     +SUBLEVEL = 26
40     EXTRAVERSION =
41     NAME = Petit Gorille
42    
43     diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
44     index bb32f7f6dd0f..be155f70f108 100644
45     --- a/arch/arm64/net/bpf_jit_comp.c
46     +++ b/arch/arm64/net/bpf_jit_comp.c
47     @@ -238,8 +238,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
48     off = offsetof(struct bpf_array, map.max_entries);
49     emit_a64_mov_i64(tmp, off, ctx);
50     emit(A64_LDR32(tmp, r2, tmp), ctx);
51     + emit(A64_MOV(0, r3, r3), ctx);
52     emit(A64_CMP(0, r3, tmp), ctx);
53     - emit(A64_B_(A64_COND_GE, jmp_offset), ctx);
54     + emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
55    
56     /* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
57     * goto out;
58     @@ -247,7 +248,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
59     */
60     emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
61     emit(A64_CMP(1, tcc, tmp), ctx);
62     - emit(A64_B_(A64_COND_GT, jmp_offset), ctx);
63     + emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
64     emit(A64_ADD_I(1, tcc, tcc, 1), ctx);
65    
66     /* prog = array->ptrs[index];
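
The arm64 change above pairs a 32-bit register move, which zero-extends the tail-call index, with unsigned condition codes (CS instead of GE, HI instead of GT), so a negative or wider-than-32-bit index can no longer slip past the bounds check. In C terms the JIT now enforces the following semantics (a sketch of the intended behaviour, not kernel code); the powerpc hunk that follows applies the same idea with a rotate-and-mask (PPC_RLWINM ... 0, 0, 31) to clear the upper 32 bits before its existing unsigned compare:

    u32 index = (u32)regs[BPF_REG_3];      /* upper 32 bits discarded        */

    if (index >= array->map.max_entries)   /* unsigned: -1 wraps to          */
        goto out;                          /* 0xffffffff and fails the check */
    if (tail_call_cnt > MAX_TAIL_CALL_CNT)
        goto out;
    tail_call_cnt++;
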
67     diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
68     index 5d115bd32539..bd0786c23109 100644
69     --- a/arch/powerpc/net/bpf_jit_comp64.c
70     +++ b/arch/powerpc/net/bpf_jit_comp64.c
71     @@ -241,6 +241,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
72     * goto out;
73     */
74     PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
75     + PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
76     PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
77     PPC_BCC(COND_GE, out);
78    
79     diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
80     index 76b058533e47..81a1be326571 100644
81     --- a/arch/x86/include/asm/nospec-branch.h
82     +++ b/arch/x86/include/asm/nospec-branch.h
83     @@ -177,4 +177,41 @@ static inline void indirect_branch_prediction_barrier(void)
84     }
85    
86     #endif /* __ASSEMBLY__ */
87     +
88     +/*
89     + * Below is used in the eBPF JIT compiler and emits the byte sequence
90     + * for the following assembly:
91     + *
92     + * With retpolines configured:
93     + *
94     + * callq do_rop
95     + * spec_trap:
96     + * pause
97     + * lfence
98     + * jmp spec_trap
99     + * do_rop:
100     + * mov %rax,(%rsp)
101     + * retq
102     + *
103     + * Without retpolines configured:
104     + *
105     + * jmp *%rax
106     + */
107     +#ifdef CONFIG_RETPOLINE
108     +# define RETPOLINE_RAX_BPF_JIT_SIZE 17
109     +# define RETPOLINE_RAX_BPF_JIT() \
110     + EMIT1_off32(0xE8, 7); /* callq do_rop */ \
111     + /* spec_trap: */ \
112     + EMIT2(0xF3, 0x90); /* pause */ \
113     + EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \
114     + EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \
115     + /* do_rop: */ \
116     + EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */ \
117     + EMIT1(0xC3); /* retq */
118     +#else
119     +# define RETPOLINE_RAX_BPF_JIT_SIZE 2
120     +# define RETPOLINE_RAX_BPF_JIT() \
121     + EMIT2(0xFF, 0xE0); /* jmp *%rax */
122     +#endif
123     +
124     #endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
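
As a cross-check of the constants above, the byte counts of the emitted retpoline sequence add up as follows (standard x86-64 encodings; a reading aid, not part of the patch):

    E8 07 00 00 00    callq  do_rop        5 bytes (rel32 = 7 skips the trap)
    F3 90             pause                2 bytes   <- spec_trap:
    0F AE E8          lfence               3 bytes
    EB F9             jmp    spec_trap     2 bytes (rel8 = -7)
    48 89 04 24       mov    %rax,(%rsp)   4 bytes   <- do_rop:
    C3                retq                 1 byte
                                          --------
                                          17 bytes = RETPOLINE_RAX_BPF_JIT_SIZE

Without retpolines only the plain 2-byte FF E0 (jmp *%rax) is emitted, which is where the size of 2 comes from.
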
125     diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
126     index 554aa8f24f91..341db0462b85 100644
127     --- a/arch/x86/include/uapi/asm/kvm_para.h
128     +++ b/arch/x86/include/uapi/asm/kvm_para.h
129     @@ -25,6 +25,7 @@
130     #define KVM_FEATURE_STEAL_TIME 5
131     #define KVM_FEATURE_PV_EOI 6
132     #define KVM_FEATURE_PV_UNHALT 7
133     +#define KVM_FEATURE_ASYNC_PF_VMEXIT 10
134    
135     /* The last 8 bits are used to indicate how to interpret the flags field
136     * in pvclock structure. If no bits are set, all flags are ignored.
137     diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
138     index a94de09edbed..652bdd867782 100644
139     --- a/arch/x86/kernel/kvm.c
140     +++ b/arch/x86/kernel/kvm.c
141     @@ -341,10 +341,10 @@ static void kvm_guest_cpu_init(void)
142     #endif
143     pa |= KVM_ASYNC_PF_ENABLED;
144    
145     - /* Async page fault support for L1 hypervisor is optional */
146     - if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN,
147     - (pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0)
148     - wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
149     + if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
150     + pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
151     +
152     + wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
153     __this_cpu_write(apf_reason.enabled, 1);
154     printk(KERN_INFO"KVM setup async PF for cpu %d\n",
155     smp_processor_id());
156     diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
157     index 13f5d4217e4f..4f544f2a7b06 100644
158     --- a/arch/x86/kvm/cpuid.c
159     +++ b/arch/x86/kvm/cpuid.c
160     @@ -597,7 +597,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
161     (1 << KVM_FEATURE_ASYNC_PF) |
162     (1 << KVM_FEATURE_PV_EOI) |
163     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
164     - (1 << KVM_FEATURE_PV_UNHALT);
165     + (1 << KVM_FEATURE_PV_UNHALT) |
166     + (1 << KVM_FEATURE_ASYNC_PF_VMEXIT);
167    
168     if (sched_info_on())
169     entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
170     diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
171     index 0554e8aef4d5..940aac70b4da 100644
172     --- a/arch/x86/net/bpf_jit_comp.c
173     +++ b/arch/x86/net/bpf_jit_comp.c
174     @@ -13,6 +13,7 @@
175     #include <linux/if_vlan.h>
176     #include <asm/cacheflush.h>
177     #include <asm/set_memory.h>
178     +#include <asm/nospec-branch.h>
179     #include <linux/bpf.h>
180    
181     int bpf_jit_enable __read_mostly;
182     @@ -287,7 +288,7 @@ static void emit_bpf_tail_call(u8 **pprog)
183     EMIT2(0x89, 0xD2); /* mov edx, edx */
184     EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
185     offsetof(struct bpf_array, map.max_entries));
186     -#define OFFSET1 43 /* number of bytes to jump */
187     +#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */
188     EMIT2(X86_JBE, OFFSET1); /* jbe out */
189     label1 = cnt;
190    
191     @@ -296,7 +297,7 @@ static void emit_bpf_tail_call(u8 **pprog)
192     */
193     EMIT2_off32(0x8B, 0x85, 36); /* mov eax, dword ptr [rbp + 36] */
194     EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
195     -#define OFFSET2 32
196     +#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
197     EMIT2(X86_JA, OFFSET2); /* ja out */
198     label2 = cnt;
199     EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
200     @@ -310,7 +311,7 @@ static void emit_bpf_tail_call(u8 **pprog)
201     * goto out;
202     */
203     EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
204     -#define OFFSET3 10
205     +#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
206     EMIT2(X86_JE, OFFSET3); /* je out */
207     label3 = cnt;
208    
209     @@ -323,7 +324,7 @@ static void emit_bpf_tail_call(u8 **pprog)
210     * rdi == ctx (1st arg)
211     * rax == prog->bpf_func + prologue_size
212     */
213     - EMIT2(0xFF, 0xE0); /* jmp rax */
214     + RETPOLINE_RAX_BPF_JIT();
215    
216     /* out: */
217     BUILD_BUG_ON(cnt - label1 != OFFSET1);
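
The OFFSET changes above are bookkeeping for that larger indirect-jump sequence: the final jump used to be the 2-byte FF E0, so the forward jumps to the out: label could be hard-coded; expressing them as a base plus RETPOLINE_RAX_BPF_JIT_SIZE keeps both configurations correct. A quick consistency check:

    OFFSET1 = 41 + RETPOLINE_RAX_BPF_JIT_SIZE   /* 41 + 2 = 43 (old value), 41 + 17 = 58 */
    OFFSET2 = 30 + RETPOLINE_RAX_BPF_JIT_SIZE   /* 30 + 2 = 32 (old value), 30 + 17 = 47 */
    OFFSET3 =  8 + RETPOLINE_RAX_BPF_JIT_SIZE   /*  8 + 2 = 10 (old value),  8 + 17 = 25 */

The BUILD_BUG_ON(cnt - label1 != OFFSET1) visible in the context verifies the first of these at build time.
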
218     diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
219     index a4ae1ca44a57..f57d0bdf3c9e 100644
220     --- a/kernel/bpf/arraymap.c
221     +++ b/kernel/bpf/arraymap.c
222     @@ -23,8 +23,10 @@ static void bpf_array_free_percpu(struct bpf_array *array)
223     {
224     int i;
225    
226     - for (i = 0; i < array->map.max_entries; i++)
227     + for (i = 0; i < array->map.max_entries; i++) {
228     free_percpu(array->pptrs[i]);
229     + cond_resched();
230     + }
231     }
232    
233     static int bpf_array_alloc_percpu(struct bpf_array *array)
234     @@ -40,6 +42,7 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
235     return -ENOMEM;
236     }
237     array->pptrs[i] = ptr;
238     + cond_resched();
239     }
240    
241     return 0;
242     @@ -49,11 +52,11 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
243     static struct bpf_map *array_map_alloc(union bpf_attr *attr)
244     {
245     bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
246     - int numa_node = bpf_map_attr_numa_node(attr);
247     + int ret, numa_node = bpf_map_attr_numa_node(attr);
248     u32 elem_size, index_mask, max_entries;
249     bool unpriv = !capable(CAP_SYS_ADMIN);
250     + u64 cost, array_size, mask64;
251     struct bpf_array *array;
252     - u64 array_size, mask64;
253    
254     /* check sanity of attributes */
255     if (attr->max_entries == 0 || attr->key_size != 4 ||
256     @@ -97,8 +100,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
257     array_size += (u64) max_entries * elem_size;
258    
259     /* make sure there is no u32 overflow later in round_up() */
260     - if (array_size >= U32_MAX - PAGE_SIZE)
261     + cost = array_size;
262     + if (cost >= U32_MAX - PAGE_SIZE)
263     return ERR_PTR(-ENOMEM);
264     + if (percpu) {
265     + cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
266     + if (cost >= U32_MAX - PAGE_SIZE)
267     + return ERR_PTR(-ENOMEM);
268     + }
269     + cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
270     +
271     + ret = bpf_map_precharge_memlock(cost);
272     + if (ret < 0)
273     + return ERR_PTR(ret);
274    
275     /* allocate all map elements and zero-initialize them */
276     array = bpf_map_area_alloc(array_size, numa_node);
277     @@ -114,20 +128,13 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
278     array->map.max_entries = attr->max_entries;
279     array->map.map_flags = attr->map_flags;
280     array->map.numa_node = numa_node;
281     + array->map.pages = cost;
282     array->elem_size = elem_size;
283    
284     - if (!percpu)
285     - goto out;
286     -
287     - array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
288     -
289     - if (array_size >= U32_MAX - PAGE_SIZE ||
290     - bpf_array_alloc_percpu(array)) {
291     + if (percpu && bpf_array_alloc_percpu(array)) {
292     bpf_map_area_free(array);
293     return ERR_PTR(-ENOMEM);
294     }
295     -out:
296     - array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
297    
298     return &array->map;
299     }
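
Besides the cond_resched() calls, which let the scheduler run while a large per-cpu array is allocated or freed element by element, the allocation path now charges the full map cost up front. A back-of-the-envelope example with made-up numbers:

    /* Hypothetical BPF_MAP_TYPE_PERCPU_ARRAY, purely for illustration:
     *   max_entries = 1,048,576, elem_size = 8, 64 possible CPUs
     *
     *   per-cpu data: 1,048,576 * 8 * 64 = 512 MiB, on top of array_size
     *
     * round_up(cost, PAGE_SIZE) >> PAGE_SHIFT pages are charged against
     * RLIMIT_MEMLOCK via bpf_map_precharge_memlock() before anything is
     * allocated, so an oversized request now fails early rather than
     * being accounted only after the allocations have succeeded.
     */
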
300     diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
301     index 1b767844a76f..c28c584b734e 100644
302     --- a/kernel/bpf/lpm_trie.c
303     +++ b/kernel/bpf/lpm_trie.c
304     @@ -470,7 +470,10 @@ static void trie_free(struct bpf_map *map)
305     struct lpm_trie_node __rcu **slot;
306     struct lpm_trie_node *node;
307    
308     - raw_spin_lock(&trie->lock);
309     + /* Wait for outstanding programs to complete
310     + * update/lookup/delete/get_next_key and free the trie.
311     + */
312     + synchronize_rcu();
313    
314     /* Always start at the root and walk down to a node that has no
315     * children. Then free that node, nullify its reference in the parent
316     @@ -481,10 +484,9 @@ static void trie_free(struct bpf_map *map)
317     slot = &trie->root;
318    
319     for (;;) {
320     - node = rcu_dereference_protected(*slot,
321     - lockdep_is_held(&trie->lock));
322     + node = rcu_dereference_protected(*slot, 1);
323     if (!node)
324     - goto unlock;
325     + goto out;
326    
327     if (rcu_access_pointer(node->child[0])) {
328     slot = &node->child[0];
329     @@ -502,8 +504,8 @@ static void trie_free(struct bpf_map *map)
330     }
331     }
332    
333     -unlock:
334     - raw_spin_unlock(&trie->lock);
335     +out:
336     + kfree(trie);
337     }
338    
339     static int trie_get_next_key(struct bpf_map *map, void *key, void *next_key)
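
The trie_free() change swaps lock-based teardown for the usual RCU pattern: ->map_free() runs only after the map's last user is gone, and BPF programs access the trie under rcu_read_lock(), so a single grace period is enough to make the walk safe without trie->lock. The shape of the pattern, with free_remaining_nodes() as a hypothetical stand-in for the root-to-leaf walk in the hunk above:

    synchronize_rcu();              /* wait out in-flight lookup/update/delete */
    free_remaining_nodes(trie);     /* walk from trie->root, kfree() each node */
    kfree(trie);                    /* finally free the trie itself            */
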
340     diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
341     index b5ae6488b890..3ceb269c0ebd 100644
342     --- a/kernel/bpf/verifier.c
343     +++ b/kernel/bpf/verifier.c
344     @@ -993,6 +993,13 @@ static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
345     return reg->type == PTR_TO_CTX;
346     }
347    
348     +static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
349     +{
350     + const struct bpf_reg_state *reg = &env->cur_state.regs[regno];
351     +
352     + return reg->type == PTR_TO_PACKET;
353     +}
354     +
355     static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
356     int off, int size, bool strict)
357     {
358     @@ -1050,10 +1057,10 @@ static int check_generic_ptr_alignment(const struct bpf_reg_state *reg,
359     }
360    
361     static int check_ptr_alignment(struct bpf_verifier_env *env,
362     - const struct bpf_reg_state *reg,
363     - int off, int size)
364     + const struct bpf_reg_state *reg, int off,
365     + int size, bool strict_alignment_once)
366     {
367     - bool strict = env->strict_alignment;
368     + bool strict = env->strict_alignment || strict_alignment_once;
369     const char *pointer_desc = "";
370    
371     switch (reg->type) {
372     @@ -1109,9 +1116,9 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
373     * if t==write && value_regno==-1, some unknown value is stored into memory
374     * if t==read && value_regno==-1, don't care what we read from memory
375     */
376     -static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
377     - int bpf_size, enum bpf_access_type t,
378     - int value_regno)
379     +static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
380     + int off, int bpf_size, enum bpf_access_type t,
381     + int value_regno, bool strict_alignment_once)
382     {
383     struct bpf_verifier_state *state = &env->cur_state;
384     struct bpf_reg_state *reg = &state->regs[regno];
385     @@ -1122,7 +1129,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
386     return size;
387    
388     /* alignment checks will add in reg->off themselves */
389     - err = check_ptr_alignment(env, reg, off, size);
390     + err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
391     if (err)
392     return err;
393    
394     @@ -1265,21 +1272,23 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
395     return -EACCES;
396     }
397    
398     - if (is_ctx_reg(env, insn->dst_reg)) {
399     - verbose("BPF_XADD stores into R%d context is not allowed\n",
400     - insn->dst_reg);
401     + if (is_ctx_reg(env, insn->dst_reg) ||
402     + is_pkt_reg(env, insn->dst_reg)) {
403     + verbose("BPF_XADD stores into R%d %s is not allowed\n",
404     + insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ?
405     + "context" : "packet");
406     return -EACCES;
407     }
408    
409     /* check whether atomic_add can read the memory */
410     err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
411     - BPF_SIZE(insn->code), BPF_READ, -1);
412     + BPF_SIZE(insn->code), BPF_READ, -1, true);
413     if (err)
414     return err;
415    
416     /* check whether atomic_add can write into the same memory */
417     return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
418     - BPF_SIZE(insn->code), BPF_WRITE, -1);
419     + BPF_SIZE(insn->code), BPF_WRITE, -1, true);
420     }
421    
422     /* Does this register contain a constant zero? */
423     @@ -1735,7 +1744,8 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
424     * is inferred from register state.
425     */
426     for (i = 0; i < meta.access_size; i++) {
427     - err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
428     + err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
429     + BPF_WRITE, -1, false);
430     if (err)
431     return err;
432     }
433     @@ -3801,7 +3811,7 @@ static int do_check(struct bpf_verifier_env *env)
434     */
435     err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
436     BPF_SIZE(insn->code), BPF_READ,
437     - insn->dst_reg);
438     + insn->dst_reg, false);
439     if (err)
440     return err;
441    
442     @@ -3853,7 +3863,7 @@ static int do_check(struct bpf_verifier_env *env)
443     /* check that memory (dst_reg + off) is writeable */
444     err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
445     BPF_SIZE(insn->code), BPF_WRITE,
446     - insn->src_reg);
447     + insn->src_reg, false);
448     if (err)
449     return err;
450    
451     @@ -3888,7 +3898,7 @@ static int do_check(struct bpf_verifier_env *env)
452     /* check that memory (dst_reg + off) is writeable */
453     err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
454     BPF_SIZE(insn->code), BPF_WRITE,
455     - -1);
456     + -1, false);
457     if (err)
458     return err;
459    
460     diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
461     index c55d265489ca..9167ee976314 100644
462     --- a/tools/testing/selftests/bpf/test_verifier.c
463     +++ b/tools/testing/selftests/bpf/test_verifier.c
464     @@ -2257,6 +2257,32 @@ static struct bpf_test tests[] = {
465     .result_unpriv = REJECT,
466     .result = ACCEPT,
467     },
468     + {
469     + "runtime/jit: pass negative index to tail_call",
470     + .insns = {
471     + BPF_MOV64_IMM(BPF_REG_3, -1),
472     + BPF_LD_MAP_FD(BPF_REG_2, 0),
473     + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
474     + BPF_FUNC_tail_call),
475     + BPF_MOV64_IMM(BPF_REG_0, 0),
476     + BPF_EXIT_INSN(),
477     + },
478     + .fixup_prog = { 1 },
479     + .result = ACCEPT,
480     + },
481     + {
482     + "runtime/jit: pass > 32bit index to tail_call",
483     + .insns = {
484     + BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
485     + BPF_LD_MAP_FD(BPF_REG_2, 0),
486     + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
487     + BPF_FUNC_tail_call),
488     + BPF_MOV64_IMM(BPF_REG_0, 0),
489     + BPF_EXIT_INSN(),
490     + },
491     + .fixup_prog = { 2 },
492     + .result = ACCEPT,
493     + },
494     {
495     "stack pointer arithmetic",
496     .insns = {
497     @@ -7854,6 +7880,64 @@ static struct bpf_test tests[] = {
498     .prog_type = BPF_PROG_TYPE_XDP,
499     .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
500     },
501     + {
502     + "xadd/w check unaligned stack",
503     + .insns = {
504     + BPF_MOV64_IMM(BPF_REG_0, 1),
505     + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
506     + BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
507     + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
508     + BPF_EXIT_INSN(),
509     + },
510     + .result = REJECT,
511     + .errstr = "misaligned stack access off",
512     + .prog_type = BPF_PROG_TYPE_SCHED_CLS,
513     + },
514     + {
515     + "xadd/w check unaligned map",
516     + .insns = {
517     + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
518     + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
519     + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
520     + BPF_LD_MAP_FD(BPF_REG_1, 0),
521     + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
522     + BPF_FUNC_map_lookup_elem),
523     + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
524     + BPF_EXIT_INSN(),
525     + BPF_MOV64_IMM(BPF_REG_1, 1),
526     + BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
527     + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
528     + BPF_EXIT_INSN(),
529     + },
530     + .fixup_map1 = { 3 },
531     + .result = REJECT,
532     + .errstr = "misaligned value access off",
533     + .prog_type = BPF_PROG_TYPE_SCHED_CLS,
534     + },
535     + {
536     + "xadd/w check unaligned pkt",
537     + .insns = {
538     + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
539     + offsetof(struct xdp_md, data)),
540     + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
541     + offsetof(struct xdp_md, data_end)),
542     + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
543     + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
544     + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
545     + BPF_MOV64_IMM(BPF_REG_0, 99),
546     + BPF_JMP_IMM(BPF_JA, 0, 0, 6),
547     + BPF_MOV64_IMM(BPF_REG_0, 1),
548     + BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
549     + BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
550     + BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
551     + BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
552     + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
553     + BPF_EXIT_INSN(),
554     + },
555     + .result = REJECT,
556     + .errstr = "BPF_XADD stores into R2 packet",
557     + .prog_type = BPF_PROG_TYPE_XDP,
558     + },
559     };
560    
561     static int probe_filter_length(const struct bpf_insn *fp)