Contents of /trunk/kernel-magellan/patches-4.11/0100-4.11.1-all-fixes.patch
Parent Directory | Revision Log
Revision 2920 -
(show annotations)
(download)
Fri May 19 09:22:52 2017 UTC (7 years, 4 months ago) by niro
File size: 54861 byte(s)
-added 4.11.1 patch
1 | diff --git a/Makefile b/Makefile |
2 | index 4b074a904106..9dc2aec1c2e5 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 11 |
8 | -SUBLEVEL = 0 |
9 | +SUBLEVEL = 1 |
10 | EXTRAVERSION = |
11 | NAME = Fearless Coyote |
12 | |
13 | diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h |
14 | index 220ba207be91..36ec9c8f6e16 100644 |
15 | --- a/arch/arm/include/asm/device.h |
16 | +++ b/arch/arm/include/asm/device.h |
17 | @@ -16,6 +16,9 @@ struct dev_archdata { |
18 | #ifdef CONFIG_ARM_DMA_USE_IOMMU |
19 | struct dma_iommu_mapping *mapping; |
20 | #endif |
21 | +#ifdef CONFIG_XEN |
22 | + const struct dma_map_ops *dev_dma_ops; |
23 | +#endif |
24 | bool dma_coherent; |
25 | }; |
26 | |
27 | diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h |
28 | index 716656925975..680d3f3889e7 100644 |
29 | --- a/arch/arm/include/asm/dma-mapping.h |
30 | +++ b/arch/arm/include/asm/dma-mapping.h |
31 | @@ -16,19 +16,9 @@ |
32 | extern const struct dma_map_ops arm_dma_ops; |
33 | extern const struct dma_map_ops arm_coherent_dma_ops; |
34 | |
35 | -static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev) |
36 | -{ |
37 | - if (dev && dev->dma_ops) |
38 | - return dev->dma_ops; |
39 | - return &arm_dma_ops; |
40 | -} |
41 | - |
42 | static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) |
43 | { |
44 | - if (xen_initial_domain()) |
45 | - return xen_dma_ops; |
46 | - else |
47 | - return __generic_dma_ops(NULL); |
48 | + return &arm_dma_ops; |
49 | } |
50 | |
51 | #define HAVE_ARCH_DMA_SUPPORTED 1 |
52 | diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c |
53 | index 475811f5383a..0268584f1fa0 100644 |
54 | --- a/arch/arm/mm/dma-mapping.c |
55 | +++ b/arch/arm/mm/dma-mapping.c |
56 | @@ -2414,6 +2414,13 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, |
57 | dma_ops = arm_get_dma_map_ops(coherent); |
58 | |
59 | set_dma_ops(dev, dma_ops); |
60 | + |
61 | +#ifdef CONFIG_XEN |
62 | + if (xen_initial_domain()) { |
63 | + dev->archdata.dev_dma_ops = dev->dma_ops; |
64 | + dev->dma_ops = xen_dma_ops; |
65 | + } |
66 | +#endif |
67 | } |
68 | |
69 | void arch_teardown_dma_ops(struct device *dev) |
70 | diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h |
71 | index 73d5bab015eb..5a5fa47a6b18 100644 |
72 | --- a/arch/arm64/include/asm/device.h |
73 | +++ b/arch/arm64/include/asm/device.h |
74 | @@ -20,6 +20,9 @@ struct dev_archdata { |
75 | #ifdef CONFIG_IOMMU_API |
76 | void *iommu; /* private IOMMU data */ |
77 | #endif |
78 | +#ifdef CONFIG_XEN |
79 | + const struct dma_map_ops *dev_dma_ops; |
80 | +#endif |
81 | bool dma_coherent; |
82 | }; |
83 | |
84 | diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h |
85 | index 505756cdc67a..5392dbeffa45 100644 |
86 | --- a/arch/arm64/include/asm/dma-mapping.h |
87 | +++ b/arch/arm64/include/asm/dma-mapping.h |
88 | @@ -27,11 +27,8 @@ |
89 | #define DMA_ERROR_CODE (~(dma_addr_t)0) |
90 | extern const struct dma_map_ops dummy_dma_ops; |
91 | |
92 | -static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev) |
93 | +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) |
94 | { |
95 | - if (dev && dev->dma_ops) |
96 | - return dev->dma_ops; |
97 | - |
98 | /* |
99 | * We expect no ISA devices, and all other DMA masters are expected to |
100 | * have someone call arch_setup_dma_ops at device creation time. |
101 | @@ -39,14 +36,6 @@ static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev) |
102 | return &dummy_dma_ops; |
103 | } |
104 | |
105 | -static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) |
106 | -{ |
107 | - if (xen_initial_domain()) |
108 | - return xen_dma_ops; |
109 | - else |
110 | - return __generic_dma_ops(NULL); |
111 | -} |
112 | - |
113 | void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, |
114 | const struct iommu_ops *iommu, bool coherent); |
115 | #define arch_setup_dma_ops arch_setup_dma_ops |
116 | diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c |
117 | index 81cdb2e844ed..7f8b37e85a2b 100644 |
118 | --- a/arch/arm64/mm/dma-mapping.c |
119 | +++ b/arch/arm64/mm/dma-mapping.c |
120 | @@ -977,4 +977,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, |
121 | |
122 | dev->archdata.dma_coherent = coherent; |
123 | __iommu_setup_dma_ops(dev, dma_base, size, iommu); |
124 | + |
125 | +#ifdef CONFIG_XEN |
126 | + if (xen_initial_domain()) { |
127 | + dev->archdata.dev_dma_ops = dev->dma_ops; |
128 | + dev->dma_ops = xen_dma_ops; |
129 | + } |
130 | +#endif |
131 | } |
132 | diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c |
133 | index a785554916c0..ce8ab0409deb 100644 |
134 | --- a/arch/arm64/net/bpf_jit_comp.c |
135 | +++ b/arch/arm64/net/bpf_jit_comp.c |
136 | @@ -779,14 +779,14 @@ static int build_body(struct jit_ctx *ctx) |
137 | int ret; |
138 | |
139 | ret = build_insn(insn, ctx); |
140 | - |
141 | - if (ctx->image == NULL) |
142 | - ctx->offset[i] = ctx->idx; |
143 | - |
144 | if (ret > 0) { |
145 | i++; |
146 | + if (ctx->image == NULL) |
147 | + ctx->offset[i] = ctx->idx; |
148 | continue; |
149 | } |
150 | + if (ctx->image == NULL) |
151 | + ctx->offset[i] = ctx->idx; |
152 | if (ret) |
153 | return ret; |
154 | } |
155 | diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S |
156 | index 44101196d02b..41a407328667 100644 |
157 | --- a/arch/sparc/kernel/head_64.S |
158 | +++ b/arch/sparc/kernel/head_64.S |
159 | @@ -939,3 +939,9 @@ ENTRY(__retl_o1) |
160 | retl |
161 | mov %o1, %o0 |
162 | ENDPROC(__retl_o1) |
163 | + |
164 | +ENTRY(__retl_o1_asi) |
165 | + wr %o5, 0x0, %asi |
166 | + retl |
167 | + mov %o1, %o0 |
168 | +ENDPROC(__retl_o1_asi) |
169 | diff --git a/arch/sparc/lib/GENbzero.S b/arch/sparc/lib/GENbzero.S |
170 | index 8e7a843ddd88..2fbf6297d57c 100644 |
171 | --- a/arch/sparc/lib/GENbzero.S |
172 | +++ b/arch/sparc/lib/GENbzero.S |
173 | @@ -8,7 +8,7 @@ |
174 | 98: x,y; \ |
175 | .section __ex_table,"a";\ |
176 | .align 4; \ |
177 | - .word 98b, __retl_o1; \ |
178 | + .word 98b, __retl_o1_asi;\ |
179 | .text; \ |
180 | .align 4; |
181 | |
182 | diff --git a/arch/sparc/lib/NGbzero.S b/arch/sparc/lib/NGbzero.S |
183 | index beab29bf419b..33053bdf3766 100644 |
184 | --- a/arch/sparc/lib/NGbzero.S |
185 | +++ b/arch/sparc/lib/NGbzero.S |
186 | @@ -8,7 +8,7 @@ |
187 | 98: x,y; \ |
188 | .section __ex_table,"a";\ |
189 | .align 4; \ |
190 | - .word 98b, __retl_o1; \ |
191 | + .word 98b, __retl_o1_asi;\ |
192 | .text; \ |
193 | .align 4; |
194 | |
195 | diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h |
196 | index 608a79d5a466..e6911caf5bbf 100644 |
197 | --- a/arch/x86/include/asm/xen/events.h |
198 | +++ b/arch/x86/include/asm/xen/events.h |
199 | @@ -20,4 +20,15 @@ static inline int xen_irqs_disabled(struct pt_regs *regs) |
200 | /* No need for a barrier -- XCHG is a barrier on x86. */ |
201 | #define xchg_xen_ulong(ptr, val) xchg((ptr), (val)) |
202 | |
203 | +extern int xen_have_vector_callback; |
204 | + |
205 | +/* |
206 | + * Events delivered via platform PCI interrupts are always |
207 | + * routed to vcpu 0 and hence cannot be rebound. |
208 | + */ |
209 | +static inline bool xen_support_evtchn_rebind(void) |
210 | +{ |
211 | + return (!xen_hvm_domain() || xen_have_vector_callback); |
212 | +} |
213 | + |
214 | #endif /* _ASM_X86_XEN_EVENTS_H */ |
215 | diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c |
216 | index 292ab0364a89..c4b3646bd04c 100644 |
217 | --- a/arch/x86/pci/xen.c |
218 | +++ b/arch/x86/pci/xen.c |
219 | @@ -447,7 +447,7 @@ void __init xen_msi_init(void) |
220 | |
221 | int __init pci_xen_hvm_init(void) |
222 | { |
223 | - if (!xen_feature(XENFEAT_hvm_pirqs)) |
224 | + if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs)) |
225 | return 0; |
226 | |
227 | #ifdef CONFIG_ACPI |
228 | diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c |
229 | index ec1d5c46e58f..29b239025b57 100644 |
230 | --- a/arch/x86/xen/enlighten.c |
231 | +++ b/arch/x86/xen/enlighten.c |
232 | @@ -138,6 +138,8 @@ struct shared_info xen_dummy_shared_info; |
233 | void *xen_initial_gdt; |
234 | |
235 | RESERVE_BRK(shared_info_page_brk, PAGE_SIZE); |
236 | +__read_mostly int xen_have_vector_callback; |
237 | +EXPORT_SYMBOL_GPL(xen_have_vector_callback); |
238 | |
239 | static int xen_cpu_up_prepare(unsigned int cpu); |
240 | static int xen_cpu_up_online(unsigned int cpu); |
241 | @@ -1861,7 +1863,9 @@ static int xen_cpu_up_prepare(unsigned int cpu) |
242 | xen_vcpu_setup(cpu); |
243 | } |
244 | |
245 | - if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock)) |
246 | + if (xen_pv_domain() || |
247 | + (xen_have_vector_callback && |
248 | + xen_feature(XENFEAT_hvm_safe_pvclock))) |
249 | xen_setup_timer(cpu); |
250 | |
251 | rc = xen_smp_intr_init(cpu); |
252 | @@ -1877,7 +1881,9 @@ static int xen_cpu_dead(unsigned int cpu) |
253 | { |
254 | xen_smp_intr_free(cpu); |
255 | |
256 | - if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock)) |
257 | + if (xen_pv_domain() || |
258 | + (xen_have_vector_callback && |
259 | + xen_feature(XENFEAT_hvm_safe_pvclock))) |
260 | xen_teardown_timer(cpu); |
261 | |
262 | return 0; |
263 | @@ -1916,8 +1922,8 @@ static void __init xen_hvm_guest_init(void) |
264 | |
265 | xen_panic_handler_init(); |
266 | |
267 | - BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector)); |
268 | - |
269 | + if (xen_feature(XENFEAT_hvm_callback_vector)) |
270 | + xen_have_vector_callback = 1; |
271 | xen_hvm_smp_init(); |
272 | WARN_ON(xen_cpuhp_setup()); |
273 | xen_unplug_emulated_devices(); |
274 | @@ -1958,7 +1964,7 @@ bool xen_hvm_need_lapic(void) |
275 | return false; |
276 | if (!xen_hvm_domain()) |
277 | return false; |
278 | - if (xen_feature(XENFEAT_hvm_pirqs)) |
279 | + if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback) |
280 | return false; |
281 | return true; |
282 | } |
283 | diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c |
284 | index 7ff2f1bfb7ec..4e6b65baf8e2 100644 |
285 | --- a/arch/x86/xen/smp.c |
286 | +++ b/arch/x86/xen/smp.c |
287 | @@ -742,6 +742,8 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) |
288 | |
289 | void __init xen_hvm_smp_init(void) |
290 | { |
291 | + if (!xen_have_vector_callback) |
292 | + return; |
293 | smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus; |
294 | smp_ops.smp_send_reschedule = xen_smp_send_reschedule; |
295 | smp_ops.cpu_die = xen_cpu_die; |
296 | diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c |
297 | index 1e69956d7852..4535627cf532 100644 |
298 | --- a/arch/x86/xen/time.c |
299 | +++ b/arch/x86/xen/time.c |
300 | @@ -432,6 +432,11 @@ static void xen_hvm_setup_cpu_clockevents(void) |
301 | |
302 | void __init xen_hvm_init_time_ops(void) |
303 | { |
304 | + /* vector callback is needed otherwise we cannot receive interrupts |
305 | + * on cpu > 0 and at this point we don't know how many cpus are |
306 | + * available */ |
307 | + if (!xen_have_vector_callback) |
308 | + return; |
309 | if (!xen_feature(XENFEAT_hvm_safe_pvclock)) { |
310 | printk(KERN_INFO "Xen doesn't support pvclock on HVM," |
311 | "disable pv timer\n"); |
312 | diff --git a/block/blk-integrity.c b/block/blk-integrity.c |
313 | index 9f0ff5ba4f84..35c5af1ea068 100644 |
314 | --- a/block/blk-integrity.c |
315 | +++ b/block/blk-integrity.c |
316 | @@ -417,7 +417,7 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template |
317 | bi->tuple_size = template->tuple_size; |
318 | bi->tag_size = template->tag_size; |
319 | |
320 | - blk_integrity_revalidate(disk); |
321 | + disk->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; |
322 | } |
323 | EXPORT_SYMBOL(blk_integrity_register); |
324 | |
325 | @@ -430,26 +430,11 @@ EXPORT_SYMBOL(blk_integrity_register); |
326 | */ |
327 | void blk_integrity_unregister(struct gendisk *disk) |
328 | { |
329 | - blk_integrity_revalidate(disk); |
330 | + disk->queue->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES; |
331 | memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity)); |
332 | } |
333 | EXPORT_SYMBOL(blk_integrity_unregister); |
334 | |
335 | -void blk_integrity_revalidate(struct gendisk *disk) |
336 | -{ |
337 | - struct blk_integrity *bi = &disk->queue->integrity; |
338 | - |
339 | - if (!(disk->flags & GENHD_FL_UP)) |
340 | - return; |
341 | - |
342 | - if (bi->profile) |
343 | - disk->queue->backing_dev_info->capabilities |= |
344 | - BDI_CAP_STABLE_WRITES; |
345 | - else |
346 | - disk->queue->backing_dev_info->capabilities &= |
347 | - ~BDI_CAP_STABLE_WRITES; |
348 | -} |
349 | - |
350 | void blk_integrity_add(struct gendisk *disk) |
351 | { |
352 | if (kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype, |
353 | diff --git a/block/partition-generic.c b/block/partition-generic.c |
354 | index 7afb9907821f..0171a2faad68 100644 |
355 | --- a/block/partition-generic.c |
356 | +++ b/block/partition-generic.c |
357 | @@ -497,7 +497,6 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev) |
358 | |
359 | if (disk->fops->revalidate_disk) |
360 | disk->fops->revalidate_disk(disk); |
361 | - blk_integrity_revalidate(disk); |
362 | check_disk_size_change(disk, bdev); |
363 | bdev->bd_invalidated = 0; |
364 | if (!get_capacity(disk) || !(state = check_partition(disk, bdev))) |
365 | diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c |
366 | index 86279f5022c2..88f16cdf6a4b 100644 |
367 | --- a/drivers/gpu/drm/sti/sti_gdp.c |
368 | +++ b/drivers/gpu/drm/sti/sti_gdp.c |
369 | @@ -66,7 +66,9 @@ static struct gdp_format_to_str { |
370 | #define GAM_GDP_ALPHARANGE_255 BIT(5) |
371 | #define GAM_GDP_AGC_FULL_RANGE 0x00808080 |
372 | #define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0)) |
373 | -#define GAM_GDP_SIZE_MAX 0x7FF |
374 | + |
375 | +#define GAM_GDP_SIZE_MAX_WIDTH 3840 |
376 | +#define GAM_GDP_SIZE_MAX_HEIGHT 2160 |
377 | |
378 | #define GDP_NODE_NB_BANK 2 |
379 | #define GDP_NODE_PER_FIELD 2 |
380 | @@ -632,8 +634,8 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane, |
381 | /* src_x are in 16.16 format */ |
382 | src_x = state->src_x >> 16; |
383 | src_y = state->src_y >> 16; |
384 | - src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX); |
385 | - src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX); |
386 | + src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH); |
387 | + src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT); |
388 | |
389 | format = sti_gdp_fourcc2format(fb->format->format); |
390 | if (format == -1) { |
391 | @@ -741,8 +743,8 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane, |
392 | /* src_x are in 16.16 format */ |
393 | src_x = state->src_x >> 16; |
394 | src_y = state->src_y >> 16; |
395 | - src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX); |
396 | - src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX); |
397 | + src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH); |
398 | + src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT); |
399 | |
400 | list = sti_gdp_get_free_nodes(gdp); |
401 | top_field = list->top_field; |
402 | diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c |
403 | index 4da6fc6b1ffd..3d040f52539c 100644 |
404 | --- a/drivers/md/dm-ioctl.c |
405 | +++ b/drivers/md/dm-ioctl.c |
406 | @@ -1848,7 +1848,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user) |
407 | if (r) |
408 | goto out; |
409 | |
410 | - param->data_size = sizeof(*param); |
411 | + param->data_size = offsetof(struct dm_ioctl, data); |
412 | r = fn(param, input_param_size); |
413 | |
414 | if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) && |
415 | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c |
416 | index 1f1e54ba0ecb..2c02a4cebc24 100644 |
417 | --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c |
418 | +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c |
419 | @@ -3000,7 +3000,8 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) |
420 | INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); |
421 | |
422 | bp->ntp_fltr_count = 0; |
423 | - bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), |
424 | + bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), |
425 | + sizeof(long), |
426 | GFP_KERNEL); |
427 | |
428 | if (!bp->ntp_fltr_bmap) |
429 | diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c |
430 | index 30606b11b128..377fb0f22a5b 100644 |
431 | --- a/drivers/net/ethernet/cadence/macb.c |
432 | +++ b/drivers/net/ethernet/cadence/macb.c |
433 | @@ -432,15 +432,17 @@ static int macb_mii_probe(struct net_device *dev) |
434 | } |
435 | |
436 | pdata = dev_get_platdata(&bp->pdev->dev); |
437 | - if (pdata && gpio_is_valid(pdata->phy_irq_pin)) { |
438 | - ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, |
439 | - "phy int"); |
440 | - if (!ret) { |
441 | - phy_irq = gpio_to_irq(pdata->phy_irq_pin); |
442 | - phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq; |
443 | + if (pdata) { |
444 | + if (gpio_is_valid(pdata->phy_irq_pin)) { |
445 | + ret = devm_gpio_request(&bp->pdev->dev, |
446 | + pdata->phy_irq_pin, "phy int"); |
447 | + if (!ret) { |
448 | + phy_irq = gpio_to_irq(pdata->phy_irq_pin); |
449 | + phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq; |
450 | + } |
451 | + } else { |
452 | + phydev->irq = PHY_POLL; |
453 | } |
454 | - } else { |
455 | - phydev->irq = PHY_POLL; |
456 | } |
457 | |
458 | /* attach the mac to the phy */ |
459 | diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c |
460 | index 7074b40ebd7f..dec5d563ab19 100644 |
461 | --- a/drivers/net/geneve.c |
462 | +++ b/drivers/net/geneve.c |
463 | @@ -1244,7 +1244,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev, |
464 | metadata = true; |
465 | |
466 | if (data[IFLA_GENEVE_UDP_CSUM] && |
467 | - !nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) |
468 | + nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) |
469 | info.key.tun_flags |= TUNNEL_CSUM; |
470 | |
471 | if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] && |
472 | diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c |
473 | index 0a0412524cec..0a5f62e0efcc 100644 |
474 | --- a/drivers/net/phy/mdio-mux-bcm-iproc.c |
475 | +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c |
476 | @@ -203,11 +203,14 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev) |
477 | &md->mux_handle, md, md->mii_bus); |
478 | if (rc) { |
479 | dev_info(md->dev, "mdiomux initialization failed\n"); |
480 | - goto out; |
481 | + goto out_register; |
482 | } |
483 | |
484 | dev_info(md->dev, "iProc mdiomux registered\n"); |
485 | return 0; |
486 | + |
487 | +out_register: |
488 | + mdiobus_unregister(bus); |
489 | out: |
490 | mdiobus_free(bus); |
491 | return rc; |
492 | diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c |
493 | index 2474618404f5..4e34568db64f 100644 |
494 | --- a/drivers/net/usb/qmi_wwan.c |
495 | +++ b/drivers/net/usb/qmi_wwan.c |
496 | @@ -907,6 +907,7 @@ static const struct usb_device_id products[] = { |
497 | {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ |
498 | {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ |
499 | {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ |
500 | + {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ |
501 | {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ |
502 | {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ |
503 | {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ |
504 | diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c |
505 | index 60da86a8d95b..f6b17fb58877 100644 |
506 | --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c |
507 | +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c |
508 | @@ -198,7 +198,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, |
509 | int ret; |
510 | struct brcmf_if *ifp = netdev_priv(ndev); |
511 | struct brcmf_pub *drvr = ifp->drvr; |
512 | - struct ethhdr *eh = (struct ethhdr *)(skb->data); |
513 | + struct ethhdr *eh; |
514 | |
515 | brcmf_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); |
516 | |
517 | @@ -211,22 +211,13 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, |
518 | goto done; |
519 | } |
520 | |
521 | - /* Make sure there's enough room for any header */ |
522 | - if (skb_headroom(skb) < drvr->hdrlen) { |
523 | - struct sk_buff *skb2; |
524 | - |
525 | - brcmf_dbg(INFO, "%s: insufficient headroom\n", |
526 | + /* Make sure there's enough writable headroom*/ |
527 | + ret = skb_cow_head(skb, drvr->hdrlen); |
528 | + if (ret < 0) { |
529 | + brcmf_err("%s: skb_cow_head failed\n", |
530 | brcmf_ifname(ifp)); |
531 | - drvr->bus_if->tx_realloc++; |
532 | - skb2 = skb_realloc_headroom(skb, drvr->hdrlen); |
533 | dev_kfree_skb(skb); |
534 | - skb = skb2; |
535 | - if (skb == NULL) { |
536 | - brcmf_err("%s: skb_realloc_headroom failed\n", |
537 | - brcmf_ifname(ifp)); |
538 | - ret = -ENOMEM; |
539 | - goto done; |
540 | - } |
541 | + goto done; |
542 | } |
543 | |
544 | /* validate length for ether packet */ |
545 | @@ -236,6 +227,8 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, |
546 | goto done; |
547 | } |
548 | |
549 | + eh = (struct ethhdr *)(skb->data); |
550 | + |
551 | if (eh->h_proto == htons(ETH_P_PAE)) |
552 | atomic_inc(&ifp->pend_8021x_cnt); |
553 | |
554 | diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c |
555 | index 509e2b341bd6..677f7c40b25a 100644 |
556 | --- a/drivers/power/supply/lp8788-charger.c |
557 | +++ b/drivers/power/supply/lp8788-charger.c |
558 | @@ -651,7 +651,7 @@ static ssize_t lp8788_show_eoc_time(struct device *dev, |
559 | { |
560 | struct lp8788_charger *pchg = dev_get_drvdata(dev); |
561 | char *stime[] = { "400ms", "5min", "10min", "15min", |
562 | - "20min", "25min", "30min" "No timeout" }; |
563 | + "20min", "25min", "30min", "No timeout" }; |
564 | u8 val; |
565 | |
566 | lp8788_read_byte(pchg->lp, LP8788_CHG_EOC, &val); |
567 | diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c |
568 | index 6a53577772c9..42807ce11c42 100644 |
569 | --- a/drivers/xen/events/events_base.c |
570 | +++ b/drivers/xen/events/events_base.c |
571 | @@ -1312,6 +1312,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) |
572 | if (!VALID_EVTCHN(evtchn)) |
573 | return -1; |
574 | |
575 | + if (!xen_support_evtchn_rebind()) |
576 | + return -1; |
577 | + |
578 | /* Send future instances of this interrupt to other vcpu. */ |
579 | bind_vcpu.port = evtchn; |
580 | bind_vcpu.vcpu = xen_vcpu_nr(tcpu); |
581 | @@ -1645,15 +1648,20 @@ void xen_callback_vector(void) |
582 | { |
583 | int rc; |
584 | uint64_t callback_via; |
585 | - |
586 | - callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR); |
587 | - rc = xen_set_callback_via(callback_via); |
588 | - BUG_ON(rc); |
589 | - pr_info("Xen HVM callback vector for event delivery is enabled\n"); |
590 | - /* in the restore case the vector has already been allocated */ |
591 | - if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors)) |
592 | - alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, |
593 | - xen_hvm_callback_vector); |
594 | + if (xen_have_vector_callback) { |
595 | + callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR); |
596 | + rc = xen_set_callback_via(callback_via); |
597 | + if (rc) { |
598 | + pr_err("Request for Xen HVM callback vector failed\n"); |
599 | + xen_have_vector_callback = 0; |
600 | + return; |
601 | + } |
602 | + pr_info("Xen HVM callback vector for event delivery is enabled\n"); |
603 | + /* in the restore case the vector has already been allocated */ |
604 | + if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors)) |
605 | + alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, |
606 | + xen_hvm_callback_vector); |
607 | + } |
608 | } |
609 | #else |
610 | void xen_callback_vector(void) {} |
611 | diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c |
612 | index 2a165cc8a43c..1c4deac9b0f8 100644 |
613 | --- a/drivers/xen/platform-pci.c |
614 | +++ b/drivers/xen/platform-pci.c |
615 | @@ -67,7 +67,7 @@ static uint64_t get_callback_via(struct pci_dev *pdev) |
616 | pin = pdev->pin; |
617 | |
618 | /* We don't know the GSI. Specify the PCI INTx line instead. */ |
619 | - return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */ |
620 | + return ((uint64_t)0x01 << 56) | /* PCI INTx identifier */ |
621 | ((uint64_t)pci_domain_nr(pdev->bus) << 32) | |
622 | ((uint64_t)pdev->bus->number << 16) | |
623 | ((uint64_t)(pdev->devfn & 0xff) << 8) | |
624 | @@ -90,7 +90,7 @@ static int xen_allocate_irq(struct pci_dev *pdev) |
625 | static int platform_pci_resume(struct pci_dev *pdev) |
626 | { |
627 | int err; |
628 | - if (!xen_pv_domain()) |
629 | + if (xen_have_vector_callback) |
630 | return 0; |
631 | err = xen_set_callback_via(callback_via); |
632 | if (err) { |
633 | @@ -138,14 +138,7 @@ static int platform_pci_probe(struct pci_dev *pdev, |
634 | platform_mmio = mmio_addr; |
635 | platform_mmiolen = mmio_len; |
636 | |
637 | - /* |
638 | - * Xen HVM guests always use the vector callback mechanism. |
639 | - * L1 Dom0 in a nested Xen environment is a PV guest inside in an |
640 | - * HVM environment. It needs the platform-pci driver to get |
641 | - * notifications from L0 Xen, but it cannot use the vector callback |
642 | - * as it is not exported by L1 Xen. |
643 | - */ |
644 | - if (xen_pv_domain()) { |
645 | + if (!xen_have_vector_callback) { |
646 | ret = xen_allocate_irq(pdev); |
647 | if (ret) { |
648 | dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret); |
649 | diff --git a/fs/block_dev.c b/fs/block_dev.c |
650 | index 2eca00ec4370..56039dfbc674 100644 |
651 | --- a/fs/block_dev.c |
652 | +++ b/fs/block_dev.c |
653 | @@ -1451,7 +1451,6 @@ int revalidate_disk(struct gendisk *disk) |
654 | |
655 | if (disk->fops->revalidate_disk) |
656 | ret = disk->fops->revalidate_disk(disk); |
657 | - blk_integrity_revalidate(disk); |
658 | bdev = bdget_disk(disk, 0); |
659 | if (!bdev) |
660 | return ret; |
661 | diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c |
662 | index 96fe8ed73100..858aef564a58 100644 |
663 | --- a/fs/f2fs/super.c |
664 | +++ b/fs/f2fs/super.c |
665 | @@ -1483,6 +1483,13 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi, |
666 | return 1; |
667 | } |
668 | |
669 | + if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) { |
670 | + f2fs_msg(sb, KERN_INFO, |
671 | + "Invalid segment count (%u)", |
672 | + le32_to_cpu(raw_super->segment_count)); |
673 | + return 1; |
674 | + } |
675 | + |
676 | /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */ |
677 | if (sanity_check_area_boundary(sbi, bh)) |
678 | return 1; |
679 | diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h |
680 | index e2d239ed4c60..661200e6d281 100644 |
681 | --- a/include/linux/f2fs_fs.h |
682 | +++ b/include/linux/f2fs_fs.h |
683 | @@ -302,6 +302,12 @@ struct f2fs_nat_block { |
684 | #define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry)) |
685 | |
686 | /* |
687 | + * F2FS uses 4 bytes to represent block address. As a result, supported size of |
688 | + * disk is 16 TB and it equals to 16 * 1024 * 1024 / 2 segments. |
689 | + */ |
690 | +#define F2FS_MAX_SEGMENT ((16 * 1024 * 1024) / 2) |
691 | + |
692 | +/* |
693 | * Note that f2fs_sit_entry->vblocks has the following bit-field information. |
694 | * [15:10] : allocation type such as CURSEG_XXXX_TYPE |
695 | * [9:0] : valid block count |
696 | diff --git a/include/linux/genhd.h b/include/linux/genhd.h |
697 | index 76f39754e7b0..76d6a1cd4153 100644 |
698 | --- a/include/linux/genhd.h |
699 | +++ b/include/linux/genhd.h |
700 | @@ -722,11 +722,9 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) |
701 | #if defined(CONFIG_BLK_DEV_INTEGRITY) |
702 | extern void blk_integrity_add(struct gendisk *); |
703 | extern void blk_integrity_del(struct gendisk *); |
704 | -extern void blk_integrity_revalidate(struct gendisk *); |
705 | #else /* CONFIG_BLK_DEV_INTEGRITY */ |
706 | static inline void blk_integrity_add(struct gendisk *disk) { } |
707 | static inline void blk_integrity_del(struct gendisk *disk) { } |
708 | -static inline void blk_integrity_revalidate(struct gendisk *disk) { } |
709 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ |
710 | |
711 | #else /* CONFIG_BLOCK */ |
712 | diff --git a/include/net/addrconf.h b/include/net/addrconf.h |
713 | index 17c6fd84e287..4d93c5ec9b12 100644 |
714 | --- a/include/net/addrconf.h |
715 | +++ b/include/net/addrconf.h |
716 | @@ -20,6 +20,8 @@ |
717 | #define ADDRCONF_TIMER_FUZZ (HZ / 4) |
718 | #define ADDRCONF_TIMER_FUZZ_MAX (HZ) |
719 | |
720 | +#define ADDRCONF_NOTIFY_PRIORITY 0 |
721 | + |
722 | #include <linux/in.h> |
723 | #include <linux/in6.h> |
724 | |
725 | diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h |
726 | index 9dc2c182a263..f5e625f53367 100644 |
727 | --- a/include/net/ip6_route.h |
728 | +++ b/include/net/ip6_route.h |
729 | @@ -84,6 +84,7 @@ struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, |
730 | struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, |
731 | int ifindex, struct flowi6 *fl6, int flags); |
732 | |
733 | +void ip6_route_init_special_entries(void); |
734 | int ip6_route_init(void); |
735 | void ip6_route_cleanup(void); |
736 | |
737 | diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h |
738 | index 0caee631a836..b94006f6fbdd 100644 |
739 | --- a/include/net/secure_seq.h |
740 | +++ b/include/net/secure_seq.h |
741 | @@ -6,10 +6,12 @@ |
742 | u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); |
743 | u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, |
744 | __be16 dport); |
745 | -u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, |
746 | - __be16 sport, __be16 dport, u32 *tsoff); |
747 | -u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, |
748 | - __be16 sport, __be16 dport, u32 *tsoff); |
749 | +u32 secure_tcp_seq(__be32 saddr, __be32 daddr, |
750 | + __be16 sport, __be16 dport); |
751 | +u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr); |
752 | +u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr, |
753 | + __be16 sport, __be16 dport); |
754 | +u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr); |
755 | u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, |
756 | __be16 sport, __be16 dport); |
757 | u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, |
758 | diff --git a/include/net/tcp.h b/include/net/tcp.h |
759 | index 6ec4ea652f3f..6423b4698880 100644 |
760 | --- a/include/net/tcp.h |
761 | +++ b/include/net/tcp.h |
762 | @@ -471,7 +471,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb); |
763 | /* From syncookies.c */ |
764 | struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, |
765 | struct request_sock *req, |
766 | - struct dst_entry *dst); |
767 | + struct dst_entry *dst, u32 tsoff); |
768 | int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, |
769 | u32 cookie); |
770 | struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb); |
771 | @@ -1816,7 +1816,8 @@ struct tcp_request_sock_ops { |
772 | struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl, |
773 | const struct request_sock *req, |
774 | bool *strict); |
775 | - __u32 (*init_seq)(const struct sk_buff *skb, u32 *tsoff); |
776 | + u32 (*init_seq)(const struct sk_buff *skb); |
777 | + u32 (*init_ts_off)(const struct sk_buff *skb); |
778 | int (*send_synack)(const struct sock *sk, struct dst_entry *dst, |
779 | struct flowi *fl, struct request_sock *req, |
780 | struct tcp_fastopen_cookie *foc, |
781 | diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h |
782 | index 95ce6ac3a971..b0a2bfc8d647 100644 |
783 | --- a/include/xen/arm/page-coherent.h |
784 | +++ b/include/xen/arm/page-coherent.h |
785 | @@ -2,8 +2,16 @@ |
786 | #define _ASM_ARM_XEN_PAGE_COHERENT_H |
787 | |
788 | #include <asm/page.h> |
789 | +#include <asm/dma-mapping.h> |
790 | #include <linux/dma-mapping.h> |
791 | |
792 | +static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev) |
793 | +{ |
794 | + if (dev && dev->archdata.dev_dma_ops) |
795 | + return dev->archdata.dev_dma_ops; |
796 | + return get_arch_dma_ops(NULL); |
797 | +} |
798 | + |
799 | void __xen_dma_map_page(struct device *hwdev, struct page *page, |
800 | dma_addr_t dev_addr, unsigned long offset, size_t size, |
801 | enum dma_data_direction dir, unsigned long attrs); |
802 | diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c |
803 | index a834068a400e..6fd78d4c4164 100644 |
804 | --- a/kernel/bpf/verifier.c |
805 | +++ b/kernel/bpf/verifier.c |
806 | @@ -296,7 +296,8 @@ static const char *const bpf_jmp_string[16] = { |
807 | [BPF_EXIT >> 4] = "exit", |
808 | }; |
809 | |
810 | -static void print_bpf_insn(struct bpf_insn *insn) |
811 | +static void print_bpf_insn(const struct bpf_verifier_env *env, |
812 | + const struct bpf_insn *insn) |
813 | { |
814 | u8 class = BPF_CLASS(insn->code); |
815 | |
816 | @@ -360,9 +361,19 @@ static void print_bpf_insn(struct bpf_insn *insn) |
817 | insn->code, |
818 | bpf_ldst_string[BPF_SIZE(insn->code) >> 3], |
819 | insn->src_reg, insn->imm); |
820 | - } else if (BPF_MODE(insn->code) == BPF_IMM) { |
821 | - verbose("(%02x) r%d = 0x%x\n", |
822 | - insn->code, insn->dst_reg, insn->imm); |
823 | + } else if (BPF_MODE(insn->code) == BPF_IMM && |
824 | + BPF_SIZE(insn->code) == BPF_DW) { |
825 | + /* At this point, we already made sure that the second |
826 | + * part of the ldimm64 insn is accessible. |
827 | + */ |
828 | + u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; |
829 | + bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD; |
830 | + |
831 | + if (map_ptr && !env->allow_ptr_leaks) |
832 | + imm = 0; |
833 | + |
834 | + verbose("(%02x) r%d = 0x%llx\n", insn->code, |
835 | + insn->dst_reg, (unsigned long long)imm); |
836 | } else { |
837 | verbose("BUG_ld_%02x\n", insn->code); |
838 | return; |
839 | @@ -1911,6 +1922,17 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) |
840 | return 0; |
841 | } else if (opcode == BPF_ADD && |
842 | BPF_CLASS(insn->code) == BPF_ALU64 && |
843 | + dst_reg->type == PTR_TO_STACK && |
844 | + ((BPF_SRC(insn->code) == BPF_X && |
845 | + regs[insn->src_reg].type == CONST_IMM) || |
846 | + BPF_SRC(insn->code) == BPF_K)) { |
847 | + if (BPF_SRC(insn->code) == BPF_X) |
848 | + dst_reg->imm += regs[insn->src_reg].imm; |
849 | + else |
850 | + dst_reg->imm += insn->imm; |
851 | + return 0; |
852 | + } else if (opcode == BPF_ADD && |
853 | + BPF_CLASS(insn->code) == BPF_ALU64 && |
854 | (dst_reg->type == PTR_TO_PACKET || |
855 | (BPF_SRC(insn->code) == BPF_X && |
856 | regs[insn->src_reg].type == PTR_TO_PACKET))) { |
857 | @@ -2824,7 +2846,7 @@ static int do_check(struct bpf_verifier_env *env) |
858 | |
859 | if (log_level) { |
860 | verbose("%d: ", insn_idx); |
861 | - print_bpf_insn(insn); |
862 | + print_bpf_insn(env, insn); |
863 | } |
864 | |
865 | err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx); |
866 | diff --git a/lib/refcount.c b/lib/refcount.c |
867 | index aa09ad3c30b0..26dffb7e4c04 100644 |
868 | --- a/lib/refcount.c |
869 | +++ b/lib/refcount.c |
870 | @@ -62,13 +62,13 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r) |
871 | |
872 | return true; |
873 | } |
874 | -EXPORT_SYMBOL_GPL(refcount_add_not_zero); |
875 | +EXPORT_SYMBOL(refcount_add_not_zero); |
876 | |
877 | void refcount_add(unsigned int i, refcount_t *r) |
878 | { |
879 | WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n"); |
880 | } |
881 | -EXPORT_SYMBOL_GPL(refcount_add); |
882 | +EXPORT_SYMBOL(refcount_add); |
883 | |
884 | /* |
885 | * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN. |
886 | @@ -101,7 +101,7 @@ bool refcount_inc_not_zero(refcount_t *r) |
887 | |
888 | return true; |
889 | } |
890 | -EXPORT_SYMBOL_GPL(refcount_inc_not_zero); |
891 | +EXPORT_SYMBOL(refcount_inc_not_zero); |
892 | |
893 | /* |
894 | * Similar to atomic_inc(), will saturate at UINT_MAX and WARN. |
895 | @@ -113,7 +113,7 @@ void refcount_inc(refcount_t *r) |
896 | { |
897 | WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n"); |
898 | } |
899 | -EXPORT_SYMBOL_GPL(refcount_inc); |
900 | +EXPORT_SYMBOL(refcount_inc); |
901 | |
902 | bool refcount_sub_and_test(unsigned int i, refcount_t *r) |
903 | { |
904 | @@ -138,7 +138,7 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r) |
905 | |
906 | return !new; |
907 | } |
908 | -EXPORT_SYMBOL_GPL(refcount_sub_and_test); |
909 | +EXPORT_SYMBOL(refcount_sub_and_test); |
910 | |
911 | /* |
912 | * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to |
913 | @@ -152,7 +152,7 @@ bool refcount_dec_and_test(refcount_t *r) |
914 | { |
915 | return refcount_sub_and_test(1, r); |
916 | } |
917 | -EXPORT_SYMBOL_GPL(refcount_dec_and_test); |
918 | +EXPORT_SYMBOL(refcount_dec_and_test); |
919 | |
920 | /* |
921 | * Similar to atomic_dec(), it will WARN on underflow and fail to decrement |
922 | @@ -166,7 +166,7 @@ void refcount_dec(refcount_t *r) |
923 | { |
924 | WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n"); |
925 | } |
926 | -EXPORT_SYMBOL_GPL(refcount_dec); |
927 | +EXPORT_SYMBOL(refcount_dec); |
928 | |
929 | /* |
930 | * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the |
931 | @@ -183,7 +183,7 @@ bool refcount_dec_if_one(refcount_t *r) |
932 | { |
933 | return atomic_cmpxchg_release(&r->refs, 1, 0) == 1; |
934 | } |
935 | -EXPORT_SYMBOL_GPL(refcount_dec_if_one); |
936 | +EXPORT_SYMBOL(refcount_dec_if_one); |
937 | |
938 | /* |
939 | * No atomic_t counterpart, it decrements unless the value is 1, in which case |
940 | @@ -217,7 +217,7 @@ bool refcount_dec_not_one(refcount_t *r) |
941 | |
942 | return true; |
943 | } |
944 | -EXPORT_SYMBOL_GPL(refcount_dec_not_one); |
945 | +EXPORT_SYMBOL(refcount_dec_not_one); |
946 | |
947 | /* |
948 | * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail |
949 | @@ -240,7 +240,7 @@ bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) |
950 | |
951 | return true; |
952 | } |
953 | -EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock); |
954 | +EXPORT_SYMBOL(refcount_dec_and_mutex_lock); |
955 | |
956 | /* |
957 | * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to |
958 | @@ -263,5 +263,5 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) |
959 | |
960 | return true; |
961 | } |
962 | -EXPORT_SYMBOL_GPL(refcount_dec_and_lock); |
963 | +EXPORT_SYMBOL(refcount_dec_and_lock); |
964 | |
965 | diff --git a/lib/test_bpf.c b/lib/test_bpf.c |
966 | index 0362da0b66c3..2e385026915c 100644 |
967 | --- a/lib/test_bpf.c |
968 | +++ b/lib/test_bpf.c |
969 | @@ -4656,6 +4656,51 @@ static struct bpf_test tests[] = { |
970 | { }, |
971 | { { 0, 1 } }, |
972 | }, |
973 | + { |
974 | + /* Mainly testing JIT + imm64 here. */ |
975 | + "JMP_JGE_X: ldimm64 test 1", |
976 | + .u.insns_int = { |
977 | + BPF_ALU32_IMM(BPF_MOV, R0, 0), |
978 | + BPF_LD_IMM64(R1, 3), |
979 | + BPF_LD_IMM64(R2, 2), |
980 | + BPF_JMP_REG(BPF_JGE, R1, R2, 2), |
981 | + BPF_LD_IMM64(R0, 0xffffffffffffffffUL), |
982 | + BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL), |
983 | + BPF_EXIT_INSN(), |
984 | + }, |
985 | + INTERNAL, |
986 | + { }, |
987 | + { { 0, 0xeeeeeeeeU } }, |
988 | + }, |
989 | + { |
990 | + "JMP_JGE_X: ldimm64 test 2", |
991 | + .u.insns_int = { |
992 | + BPF_ALU32_IMM(BPF_MOV, R0, 0), |
993 | + BPF_LD_IMM64(R1, 3), |
994 | + BPF_LD_IMM64(R2, 2), |
995 | + BPF_JMP_REG(BPF_JGE, R1, R2, 0), |
996 | + BPF_LD_IMM64(R0, 0xffffffffffffffffUL), |
997 | + BPF_EXIT_INSN(), |
998 | + }, |
999 | + INTERNAL, |
1000 | + { }, |
1001 | + { { 0, 0xffffffffU } }, |
1002 | + }, |
1003 | + { |
1004 | + "JMP_JGE_X: ldimm64 test 3", |
1005 | + .u.insns_int = { |
1006 | + BPF_ALU32_IMM(BPF_MOV, R0, 1), |
1007 | + BPF_LD_IMM64(R1, 3), |
1008 | + BPF_LD_IMM64(R2, 2), |
1009 | + BPF_JMP_REG(BPF_JGE, R1, R2, 4), |
1010 | + BPF_LD_IMM64(R0, 0xffffffffffffffffUL), |
1011 | + BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL), |
1012 | + BPF_EXIT_INSN(), |
1013 | + }, |
1014 | + INTERNAL, |
1015 | + { }, |
1016 | + { { 0, 1 } }, |
1017 | + }, |
1018 | /* BPF_JMP | BPF_JNE | BPF_X */ |
1019 | { |
1020 | "JMP_JNE_X: if (3 != 2) return 1", |
1021 | diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c |
1022 | index c4e84c558240..69daf393cbe1 100644 |
1023 | --- a/net/core/rtnetlink.c |
1024 | +++ b/net/core/rtnetlink.c |
1025 | @@ -1056,7 +1056,7 @@ static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev) |
1026 | return err; |
1027 | } |
1028 | |
1029 | - if (nla_put(skb, IFLA_PHYS_PORT_NAME, strlen(name), name)) |
1030 | + if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name)) |
1031 | return -EMSGSIZE; |
1032 | |
1033 | return 0; |
1034 | diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c |
1035 | index d28da7d363f1..ae35cce3a40d 100644 |
1036 | --- a/net/core/secure_seq.c |
1037 | +++ b/net/core/secure_seq.c |
1038 | @@ -24,9 +24,13 @@ static siphash_key_t ts_secret __read_mostly; |
1039 | |
1040 | static __always_inline void net_secret_init(void) |
1041 | { |
1042 | - net_get_random_once(&ts_secret, sizeof(ts_secret)); |
1043 | net_get_random_once(&net_secret, sizeof(net_secret)); |
1044 | } |
1045 | + |
1046 | +static __always_inline void ts_secret_init(void) |
1047 | +{ |
1048 | + net_get_random_once(&ts_secret, sizeof(ts_secret)); |
1049 | +} |
1050 | #endif |
1051 | |
1052 | #ifdef CONFIG_INET |
1053 | @@ -47,7 +51,7 @@ static u32 seq_scale(u32 seq) |
1054 | #endif |
1055 | |
1056 | #if IS_ENABLED(CONFIG_IPV6) |
1057 | -static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr) |
1058 | +u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr) |
1059 | { |
1060 | const struct { |
1061 | struct in6_addr saddr; |
1062 | @@ -60,12 +64,14 @@ static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr) |
1063 | if (sysctl_tcp_timestamps != 1) |
1064 | return 0; |
1065 | |
1066 | + ts_secret_init(); |
1067 | return siphash(&combined, offsetofend(typeof(combined), daddr), |
1068 | &ts_secret); |
1069 | } |
1070 | +EXPORT_SYMBOL(secure_tcpv6_ts_off); |
1071 | |
1072 | -u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, |
1073 | - __be16 sport, __be16 dport, u32 *tsoff) |
1074 | +u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr, |
1075 | + __be16 sport, __be16 dport) |
1076 | { |
1077 | const struct { |
1078 | struct in6_addr saddr; |
1079 | @@ -78,14 +84,14 @@ u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, |
1080 | .sport = sport, |
1081 | .dport = dport |
1082 | }; |
1083 | - u64 hash; |
1084 | + u32 hash; |
1085 | + |
1086 | net_secret_init(); |
1087 | hash = siphash(&combined, offsetofend(typeof(combined), dport), |
1088 | &net_secret); |
1089 | - *tsoff = secure_tcpv6_ts_off(saddr, daddr); |
1090 | return seq_scale(hash); |
1091 | } |
1092 | -EXPORT_SYMBOL(secure_tcpv6_sequence_number); |
1093 | +EXPORT_SYMBOL(secure_tcpv6_seq); |
1094 | |
1095 | u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, |
1096 | __be16 dport) |
1097 | @@ -107,30 +113,30 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral); |
1098 | #endif |
1099 | |
1100 | #ifdef CONFIG_INET |
1101 | -static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr) |
1102 | +u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr) |
1103 | { |
1104 | if (sysctl_tcp_timestamps != 1) |
1105 | return 0; |
1106 | |
1107 | + ts_secret_init(); |
1108 | return siphash_2u32((__force u32)saddr, (__force u32)daddr, |
1109 | &ts_secret); |
1110 | } |
1111 | |
1112 | -/* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d), |
1113 | +/* secure_tcp_seq_and_tsoff(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d), |
1114 | * but fortunately, `sport' cannot be 0 in any circumstances. If this changes, |
1115 | * it would be easy enough to have the former function use siphash_4u32, passing |
1116 | * the arguments as separate u32. |
1117 | */ |
1118 | - |
1119 | -u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, |
1120 | - __be16 sport, __be16 dport, u32 *tsoff) |
1121 | +u32 secure_tcp_seq(__be32 saddr, __be32 daddr, |
1122 | + __be16 sport, __be16 dport) |
1123 | { |
1124 | - u64 hash; |
1125 | + u32 hash; |
1126 | + |
1127 | net_secret_init(); |
1128 | hash = siphash_3u32((__force u32)saddr, (__force u32)daddr, |
1129 | (__force u32)sport << 16 | (__force u32)dport, |
1130 | &net_secret); |
1131 | - *tsoff = secure_tcp_ts_off(saddr, daddr); |
1132 | return seq_scale(hash); |
1133 | } |
1134 | |
1135 | diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c |
1136 | index 9d943974de2b..bdffad875691 100644 |
1137 | --- a/net/ipv4/raw.c |
1138 | +++ b/net/ipv4/raw.c |
1139 | @@ -358,6 +358,9 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, |
1140 | rt->dst.dev->mtu); |
1141 | return -EMSGSIZE; |
1142 | } |
1143 | + if (length < sizeof(struct iphdr)) |
1144 | + return -EINVAL; |
1145 | + |
1146 | if (flags&MSG_PROBE) |
1147 | goto out; |
1148 | |
1149 | diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c |
1150 | index 496b97e17aaf..0257d965f111 100644 |
1151 | --- a/net/ipv4/syncookies.c |
1152 | +++ b/net/ipv4/syncookies.c |
1153 | @@ -16,6 +16,7 @@ |
1154 | #include <linux/siphash.h> |
1155 | #include <linux/kernel.h> |
1156 | #include <linux/export.h> |
1157 | +#include <net/secure_seq.h> |
1158 | #include <net/tcp.h> |
1159 | #include <net/route.h> |
1160 | |
1161 | @@ -203,7 +204,7 @@ EXPORT_SYMBOL_GPL(__cookie_v4_check); |
1162 | |
1163 | struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, |
1164 | struct request_sock *req, |
1165 | - struct dst_entry *dst) |
1166 | + struct dst_entry *dst, u32 tsoff) |
1167 | { |
1168 | struct inet_connection_sock *icsk = inet_csk(sk); |
1169 | struct sock *child; |
1170 | @@ -213,6 +214,7 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, |
1171 | NULL, &own_req); |
1172 | if (child) { |
1173 | atomic_set(&req->rsk_refcnt, 1); |
1174 | + tcp_sk(child)->tsoffset = tsoff; |
1175 | sock_rps_save_rxhash(child, skb); |
1176 | inet_csk_reqsk_queue_add(sk, req, child); |
1177 | } else { |
1178 | @@ -292,6 +294,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) |
1179 | struct rtable *rt; |
1180 | __u8 rcv_wscale; |
1181 | struct flowi4 fl4; |
1182 | + u32 tsoff = 0; |
1183 | |
1184 | if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst) |
1185 | goto out; |
1186 | @@ -311,6 +314,11 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) |
1187 | memset(&tcp_opt, 0, sizeof(tcp_opt)); |
1188 | tcp_parse_options(skb, &tcp_opt, 0, NULL); |
1189 | |
1190 | + if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) { |
1191 | + tsoff = secure_tcp_ts_off(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr); |
1192 | + tcp_opt.rcv_tsecr -= tsoff; |
1193 | + } |
1194 | + |
1195 | if (!cookie_timestamp_decode(&tcp_opt)) |
1196 | goto out; |
1197 | |
1198 | @@ -381,7 +389,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) |
1199 | ireq->rcv_wscale = rcv_wscale; |
1200 | ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst); |
1201 | |
1202 | - ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst); |
1203 | + ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst, tsoff); |
1204 | /* ip_queue_xmit() depends on our flow being setup |
1205 | * Normal sockets get it right from inet_csk_route_child_sock() |
1206 | */ |
1207 | diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c |
1208 | index 40ba4249a586..2dc7fcf60bf3 100644 |
1209 | --- a/net/ipv4/tcp.c |
1210 | +++ b/net/ipv4/tcp.c |
1211 | @@ -533,7 +533,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) |
1212 | |
1213 | if (tp->urg_data & TCP_URG_VALID) |
1214 | mask |= POLLPRI; |
1215 | - } else if (sk->sk_state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) { |
1216 | + } else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) { |
1217 | /* Active TCP fastopen socket with defer_connect |
1218 | * Return POLLOUT so application can call write() |
1219 | * in order for kernel to generate SYN+data |
1220 | diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c |
1221 | index 659d1baefb2b..3c6c8787b42e 100644 |
1222 | --- a/net/ipv4/tcp_input.c |
1223 | +++ b/net/ipv4/tcp_input.c |
1224 | @@ -85,7 +85,6 @@ int sysctl_tcp_dsack __read_mostly = 1; |
1225 | int sysctl_tcp_app_win __read_mostly = 31; |
1226 | int sysctl_tcp_adv_win_scale __read_mostly = 1; |
1227 | EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); |
1228 | -EXPORT_SYMBOL(sysctl_tcp_timestamps); |
1229 | |
1230 | /* rfc5961 challenge ack rate limiting */ |
1231 | int sysctl_tcp_challenge_ack_limit = 1000; |
1232 | @@ -6332,8 +6331,8 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, |
1233 | if (security_inet_conn_request(sk, skb, req)) |
1234 | goto drop_and_free; |
1235 | |
1236 | - if (isn && tmp_opt.tstamp_ok) |
1237 | - af_ops->init_seq(skb, &tcp_rsk(req)->ts_off); |
1238 | + if (tmp_opt.tstamp_ok) |
1239 | + tcp_rsk(req)->ts_off = af_ops->init_ts_off(skb); |
1240 | |
1241 | if (!want_cookie && !isn) { |
1242 | /* VJ's idea. We save last timestamp seen |
1243 | @@ -6375,7 +6374,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, |
1244 | goto drop_and_release; |
1245 | } |
1246 | |
1247 | - isn = af_ops->init_seq(skb, &tcp_rsk(req)->ts_off); |
1248 | + isn = af_ops->init_seq(skb); |
1249 | } |
1250 | if (!dst) { |
1251 | dst = af_ops->route_req(sk, &fl, req, NULL); |
1252 | @@ -6387,7 +6386,6 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, |
1253 | |
1254 | if (want_cookie) { |
1255 | isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); |
1256 | - tcp_rsk(req)->ts_off = 0; |
1257 | req->cookie_ts = tmp_opt.tstamp_ok; |
1258 | if (!tmp_opt.tstamp_ok) |
1259 | inet_rsk(req)->ecn_ok = 0; |
1260 | diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c |
1261 | index 575e19dcc017..1a5fa95c981f 100644 |
1262 | --- a/net/ipv4/tcp_ipv4.c |
1263 | +++ b/net/ipv4/tcp_ipv4.c |
1264 | @@ -94,12 +94,18 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, |
1265 | struct inet_hashinfo tcp_hashinfo; |
1266 | EXPORT_SYMBOL(tcp_hashinfo); |
1267 | |
1268 | -static u32 tcp_v4_init_sequence(const struct sk_buff *skb, u32 *tsoff) |
1269 | +static u32 tcp_v4_init_seq(const struct sk_buff *skb) |
1270 | { |
1271 | - return secure_tcp_sequence_number(ip_hdr(skb)->daddr, |
1272 | - ip_hdr(skb)->saddr, |
1273 | - tcp_hdr(skb)->dest, |
1274 | - tcp_hdr(skb)->source, tsoff); |
1275 | + return secure_tcp_seq(ip_hdr(skb)->daddr, |
1276 | + ip_hdr(skb)->saddr, |
1277 | + tcp_hdr(skb)->dest, |
1278 | + tcp_hdr(skb)->source); |
1279 | +} |
1280 | + |
1281 | +static u32 tcp_v4_init_ts_off(const struct sk_buff *skb) |
1282 | +{ |
1283 | + return secure_tcp_ts_off(ip_hdr(skb)->daddr, |
1284 | + ip_hdr(skb)->saddr); |
1285 | } |
1286 | |
1287 | int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) |
1288 | @@ -145,7 +151,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
1289 | struct flowi4 *fl4; |
1290 | struct rtable *rt; |
1291 | int err; |
1292 | - u32 seq; |
1293 | struct ip_options_rcu *inet_opt; |
1294 | struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row; |
1295 | |
1296 | @@ -236,13 +241,13 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
1297 | rt = NULL; |
1298 | |
1299 | if (likely(!tp->repair)) { |
1300 | - seq = secure_tcp_sequence_number(inet->inet_saddr, |
1301 | - inet->inet_daddr, |
1302 | - inet->inet_sport, |
1303 | - usin->sin_port, |
1304 | - &tp->tsoffset); |
1305 | if (!tp->write_seq) |
1306 | - tp->write_seq = seq; |
1307 | + tp->write_seq = secure_tcp_seq(inet->inet_saddr, |
1308 | + inet->inet_daddr, |
1309 | + inet->inet_sport, |
1310 | + usin->sin_port); |
1311 | + tp->tsoffset = secure_tcp_ts_off(inet->inet_saddr, |
1312 | + inet->inet_daddr); |
1313 | } |
1314 | |
1315 | inet->inet_id = tp->write_seq ^ jiffies; |
1316 | @@ -1253,7 +1258,8 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { |
1317 | .cookie_init_seq = cookie_v4_init_sequence, |
1318 | #endif |
1319 | .route_req = tcp_v4_route_req, |
1320 | - .init_seq = tcp_v4_init_sequence, |
1321 | + .init_seq = tcp_v4_init_seq, |
1322 | + .init_ts_off = tcp_v4_init_ts_off, |
1323 | .send_synack = tcp_v4_send_synack, |
1324 | }; |
1325 | |
1326 | diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c |
1327 | index 046fd3910873..d6fb6c067af4 100644 |
1328 | --- a/net/ipv4/tcp_lp.c |
1329 | +++ b/net/ipv4/tcp_lp.c |
1330 | @@ -264,13 +264,15 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample) |
1331 | { |
1332 | struct tcp_sock *tp = tcp_sk(sk); |
1333 | struct lp *lp = inet_csk_ca(sk); |
1334 | + u32 delta; |
1335 | |
1336 | if (sample->rtt_us > 0) |
1337 | tcp_lp_rtt_sample(sk, sample->rtt_us); |
1338 | |
1339 | /* calc inference */ |
1340 | - if (tcp_time_stamp > tp->rx_opt.rcv_tsecr) |
1341 | - lp->inference = 3 * (tcp_time_stamp - tp->rx_opt.rcv_tsecr); |
1342 | + delta = tcp_time_stamp - tp->rx_opt.rcv_tsecr; |
1343 | + if ((s32)delta > 0) |
1344 | + lp->inference = 3 * delta; |
1345 | |
1346 | /* test if within inference */ |
1347 | if (lp->last_drop && (tcp_time_stamp - lp->last_drop < lp->inference)) |
1348 | diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c |
1349 | index 65c0f3d13eca..c1259ccc422f 100644 |
1350 | --- a/net/ipv4/tcp_minisocks.c |
1351 | +++ b/net/ipv4/tcp_minisocks.c |
1352 | @@ -536,6 +536,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, |
1353 | newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; |
1354 | newtp->rx_opt.mss_clamp = req->mss; |
1355 | tcp_ecn_openreq_child(newtp, req); |
1356 | + newtp->fastopen_req = NULL; |
1357 | newtp->fastopen_rsk = NULL; |
1358 | newtp->syn_data_acked = 0; |
1359 | newtp->rack.mstamp.v64 = 0; |
1360 | diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c |
1361 | index 0ea96c4d334d..311f45641673 100644 |
1362 | --- a/net/ipv6/addrconf.c |
1363 | +++ b/net/ipv6/addrconf.c |
1364 | @@ -3296,7 +3296,8 @@ static int fixup_permanent_addr(struct inet6_dev *idev, |
1365 | idev->dev, 0, 0); |
1366 | } |
1367 | |
1368 | - addrconf_dad_start(ifp); |
1369 | + if (ifp->state == INET6_IFADDR_STATE_PREDAD) |
1370 | + addrconf_dad_start(ifp); |
1371 | |
1372 | return 0; |
1373 | } |
1374 | @@ -3515,6 +3516,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, |
1375 | */ |
1376 | static struct notifier_block ipv6_dev_notf = { |
1377 | .notifier_call = addrconf_notify, |
1378 | + .priority = ADDRCONF_NOTIFY_PRIORITY, |
1379 | }; |
1380 | |
1381 | static void addrconf_type_change(struct net_device *dev, unsigned long event) |
1382 | @@ -3651,7 +3653,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) |
1383 | if (keep) { |
1384 | /* set state to skip the notifier below */ |
1385 | state = INET6_IFADDR_STATE_DEAD; |
1386 | - ifa->state = 0; |
1387 | + ifa->state = INET6_IFADDR_STATE_PREDAD; |
1388 | if (!(ifa->flags & IFA_F_NODAD)) |
1389 | ifa->flags |= IFA_F_TENTATIVE; |
1390 | |
1391 | @@ -6408,6 +6410,8 @@ int __init addrconf_init(void) |
1392 | goto errlo; |
1393 | } |
1394 | |
1395 | + ip6_route_init_special_entries(); |
1396 | + |
1397 | for (i = 0; i < IN6_ADDR_HSIZE; i++) |
1398 | INIT_HLIST_HEAD(&inet6_addr_lst[i]); |
1399 | |
1400 | diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c |
1401 | index 0da6a12b5472..1f992d9e261d 100644 |
1402 | --- a/net/ipv6/raw.c |
1403 | +++ b/net/ipv6/raw.c |
1404 | @@ -632,6 +632,8 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length, |
1405 | ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu); |
1406 | return -EMSGSIZE; |
1407 | } |
1408 | + if (length < sizeof(struct ipv6hdr)) |
1409 | + return -EINVAL; |
1410 | if (flags&MSG_PROBE) |
1411 | goto out; |
1412 | |
1413 | diff --git a/net/ipv6/route.c b/net/ipv6/route.c |
1414 | index fb174b590fd3..d316d00e11ab 100644 |
1415 | --- a/net/ipv6/route.c |
1416 | +++ b/net/ipv6/route.c |
1417 | @@ -3704,7 +3704,10 @@ static int ip6_route_dev_notify(struct notifier_block *this, |
1418 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
1419 | struct net *net = dev_net(dev); |
1420 | |
1421 | - if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) { |
1422 | + if (!(dev->flags & IFF_LOOPBACK)) |
1423 | + return NOTIFY_OK; |
1424 | + |
1425 | + if (event == NETDEV_REGISTER) { |
1426 | net->ipv6.ip6_null_entry->dst.dev = dev; |
1427 | net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev); |
1428 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES |
1429 | @@ -3713,6 +3716,12 @@ static int ip6_route_dev_notify(struct notifier_block *this, |
1430 | net->ipv6.ip6_blk_hole_entry->dst.dev = dev; |
1431 | net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); |
1432 | #endif |
1433 | + } else if (event == NETDEV_UNREGISTER) { |
1434 | + in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev); |
1435 | +#ifdef CONFIG_IPV6_MULTIPLE_TABLES |
1436 | + in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev); |
1437 | + in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev); |
1438 | +#endif |
1439 | } |
1440 | |
1441 | return NOTIFY_OK; |
1442 | @@ -4019,9 +4028,24 @@ static struct pernet_operations ip6_route_net_late_ops = { |
1443 | |
1444 | static struct notifier_block ip6_route_dev_notifier = { |
1445 | .notifier_call = ip6_route_dev_notify, |
1446 | - .priority = 0, |
1447 | + .priority = ADDRCONF_NOTIFY_PRIORITY - 10, |
1448 | }; |
1449 | |
1450 | +void __init ip6_route_init_special_entries(void) |
1451 | +{ |
1452 | + /* Registering of the loopback is done before this portion of code, |
1453 | + * the loopback reference in rt6_info will not be taken, do it |
1454 | + * manually for init_net */ |
1455 | + init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev; |
1456 | + init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); |
1457 | + #ifdef CONFIG_IPV6_MULTIPLE_TABLES |
1458 | + init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev; |
1459 | + init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); |
1460 | + init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev; |
1461 | + init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); |
1462 | + #endif |
1463 | +} |
1464 | + |
1465 | int __init ip6_route_init(void) |
1466 | { |
1467 | int ret; |
1468 | @@ -4048,17 +4072,6 @@ int __init ip6_route_init(void) |
1469 | |
1470 | ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep; |
1471 | |
1472 | - /* Registering of the loopback is done before this portion of code, |
1473 | - * the loopback reference in rt6_info will not be taken, do it |
1474 | - * manually for init_net */ |
1475 | - init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev; |
1476 | - init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); |
1477 | - #ifdef CONFIG_IPV6_MULTIPLE_TABLES |
1478 | - init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev; |
1479 | - init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); |
1480 | - init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev; |
1481 | - init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); |
1482 | - #endif |
1483 | ret = fib6_init(); |
1484 | if (ret) |
1485 | goto out_register_subsys; |
1486 | diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c |
1487 | index 895ff650db43..5abc3692b901 100644 |
1488 | --- a/net/ipv6/syncookies.c |
1489 | +++ b/net/ipv6/syncookies.c |
1490 | @@ -18,6 +18,7 @@ |
1491 | #include <linux/random.h> |
1492 | #include <linux/siphash.h> |
1493 | #include <linux/kernel.h> |
1494 | +#include <net/secure_seq.h> |
1495 | #include <net/ipv6.h> |
1496 | #include <net/tcp.h> |
1497 | |
1498 | @@ -143,6 +144,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) |
1499 | int mss; |
1500 | struct dst_entry *dst; |
1501 | __u8 rcv_wscale; |
1502 | + u32 tsoff = 0; |
1503 | |
1504 | if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst) |
1505 | goto out; |
1506 | @@ -162,6 +164,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) |
1507 | memset(&tcp_opt, 0, sizeof(tcp_opt)); |
1508 | tcp_parse_options(skb, &tcp_opt, 0, NULL); |
1509 | |
1510 | + if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) { |
1511 | + tsoff = secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32, |
1512 | + ipv6_hdr(skb)->saddr.s6_addr32); |
1513 | + tcp_opt.rcv_tsecr -= tsoff; |
1514 | + } |
1515 | + |
1516 | if (!cookie_timestamp_decode(&tcp_opt)) |
1517 | goto out; |
1518 | |
1519 | @@ -242,7 +250,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) |
1520 | ireq->rcv_wscale = rcv_wscale; |
1521 | ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst); |
1522 | |
1523 | - ret = tcp_get_cookie_sock(sk, skb, req, dst); |
1524 | + ret = tcp_get_cookie_sock(sk, skb, req, dst, tsoff); |
1525 | out: |
1526 | return ret; |
1527 | out_free: |
1528 | diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c |
1529 | index 49fa2e8c3fa9..4c4afdca41ff 100644 |
1530 | --- a/net/ipv6/tcp_ipv6.c |
1531 | +++ b/net/ipv6/tcp_ipv6.c |
1532 | @@ -101,12 +101,18 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) |
1533 | } |
1534 | } |
1535 | |
1536 | -static u32 tcp_v6_init_sequence(const struct sk_buff *skb, u32 *tsoff) |
1537 | +static u32 tcp_v6_init_seq(const struct sk_buff *skb) |
1538 | { |
1539 | - return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, |
1540 | - ipv6_hdr(skb)->saddr.s6_addr32, |
1541 | - tcp_hdr(skb)->dest, |
1542 | - tcp_hdr(skb)->source, tsoff); |
1543 | + return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32, |
1544 | + ipv6_hdr(skb)->saddr.s6_addr32, |
1545 | + tcp_hdr(skb)->dest, |
1546 | + tcp_hdr(skb)->source); |
1547 | +} |
1548 | + |
1549 | +static u32 tcp_v6_init_ts_off(const struct sk_buff *skb) |
1550 | +{ |
1551 | + return secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32, |
1552 | + ipv6_hdr(skb)->saddr.s6_addr32); |
1553 | } |
1554 | |
1555 | static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, |
1556 | @@ -122,7 +128,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, |
1557 | struct flowi6 fl6; |
1558 | struct dst_entry *dst; |
1559 | int addr_type; |
1560 | - u32 seq; |
1561 | int err; |
1562 | struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row; |
1563 | |
1564 | @@ -287,13 +292,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, |
1565 | sk_set_txhash(sk); |
1566 | |
1567 | if (likely(!tp->repair)) { |
1568 | - seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, |
1569 | - sk->sk_v6_daddr.s6_addr32, |
1570 | - inet->inet_sport, |
1571 | - inet->inet_dport, |
1572 | - &tp->tsoffset); |
1573 | if (!tp->write_seq) |
1574 | - tp->write_seq = seq; |
1575 | + tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32, |
1576 | + sk->sk_v6_daddr.s6_addr32, |
1577 | + inet->inet_sport, |
1578 | + inet->inet_dport); |
1579 | + tp->tsoffset = secure_tcpv6_ts_off(np->saddr.s6_addr32, |
1580 | + sk->sk_v6_daddr.s6_addr32); |
1581 | } |
1582 | |
1583 | if (tcp_fastopen_defer_connect(sk, &err)) |
1584 | @@ -757,7 +762,8 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { |
1585 | .cookie_init_seq = cookie_v6_init_sequence, |
1586 | #endif |
1587 | .route_req = tcp_v6_route_req, |
1588 | - .init_seq = tcp_v6_init_sequence, |
1589 | + .init_seq = tcp_v6_init_seq, |
1590 | + .init_ts_off = tcp_v6_init_ts_off, |
1591 | .send_synack = tcp_v6_send_synack, |
1592 | }; |
1593 | |
1594 | diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c |
1595 | index c848e90b6421..8b433bf3fdd7 100644 |
1596 | --- a/tools/testing/selftests/bpf/test_verifier.c |
1597 | +++ b/tools/testing/selftests/bpf/test_verifier.c |
1598 | @@ -1809,16 +1809,22 @@ static struct bpf_test tests[] = { |
1599 | .result = ACCEPT, |
1600 | }, |
1601 | { |
1602 | - "unpriv: obfuscate stack pointer", |
1603 | + "stack pointer arithmetic", |
1604 | .insns = { |
1605 | - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), |
1606 | - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), |
1607 | - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), |
1608 | + BPF_MOV64_IMM(BPF_REG_1, 4), |
1609 | + BPF_JMP_IMM(BPF_JA, 0, 0, 0), |
1610 | + BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), |
1611 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10), |
1612 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10), |
1613 | + BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), |
1614 | + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1), |
1615 | + BPF_ST_MEM(0, BPF_REG_2, 4, 0), |
1616 | + BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), |
1617 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), |
1618 | + BPF_ST_MEM(0, BPF_REG_2, 4, 0), |
1619 | BPF_MOV64_IMM(BPF_REG_0, 0), |
1620 | BPF_EXIT_INSN(), |
1621 | }, |
1622 | - .errstr_unpriv = "R2 pointer arithmetic", |
1623 | - .result_unpriv = REJECT, |
1624 | .result = ACCEPT, |
1625 | }, |
1626 | { |