Contents of /trunk/kernel-magellan/patches-4.14/0106-4.14.7-all-fixes.patch
Parent Directory | Revision Log
Revision 3067 -
(show annotations)
(download)
Wed Jan 17 13:26:41 2018 UTC (6 years, 8 months ago) by niro
File size: 79208 byte(s)
-linux-4.14.7
1 | diff --git a/Makefile b/Makefile |
2 | index eabbd7748a24..39d7af0165a8 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 4 |
8 | PATCHLEVEL = 14 |
9 | -SUBLEVEL = 6 |
10 | +SUBLEVEL = 7 |
11 | EXTRAVERSION = |
12 | NAME = Petit Gorille |
13 | |
14 | @@ -373,9 +373,6 @@ LDFLAGS_MODULE = |
15 | CFLAGS_KERNEL = |
16 | AFLAGS_KERNEL = |
17 | LDFLAGS_vmlinux = |
18 | -CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,) |
19 | -CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,) |
20 | - |
21 | |
22 | # Use USERINCLUDE when you must reference the UAPI directories only. |
23 | USERINCLUDE := \ |
24 | @@ -394,21 +391,19 @@ LINUXINCLUDE := \ |
25 | -I$(objtree)/include \ |
26 | $(USERINCLUDE) |
27 | |
28 | -KBUILD_CPPFLAGS := -D__KERNEL__ |
29 | - |
30 | +KBUILD_AFLAGS := -D__ASSEMBLY__ |
31 | KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ |
32 | -fno-strict-aliasing -fno-common -fshort-wchar \ |
33 | -Werror-implicit-function-declaration \ |
34 | -Wno-format-security \ |
35 | - -std=gnu89 $(call cc-option,-fno-PIE) |
36 | - |
37 | - |
38 | + -std=gnu89 |
39 | +KBUILD_CPPFLAGS := -D__KERNEL__ |
40 | KBUILD_AFLAGS_KERNEL := |
41 | KBUILD_CFLAGS_KERNEL := |
42 | -KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE) |
43 | KBUILD_AFLAGS_MODULE := -DMODULE |
44 | KBUILD_CFLAGS_MODULE := -DMODULE |
45 | KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds |
46 | +GCC_PLUGINS_CFLAGS := |
47 | |
48 | # Read KERNELRELEASE from include/config/kernel.release (if it exists) |
49 | KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null) |
50 | @@ -421,7 +416,7 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE |
51 | export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS |
52 | |
53 | export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS |
54 | -export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KCOV CFLAGS_KASAN CFLAGS_UBSAN |
55 | +export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_KASAN CFLAGS_UBSAN |
56 | export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE |
57 | export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE |
58 | export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL |
59 | @@ -622,6 +617,12 @@ endif |
60 | # Defaults to vmlinux, but the arch makefile usually adds further targets |
61 | all: vmlinux |
62 | |
63 | +KBUILD_CFLAGS += $(call cc-option,-fno-PIE) |
64 | +KBUILD_AFLAGS += $(call cc-option,-fno-PIE) |
65 | +CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,) |
66 | +CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,) |
67 | +export CFLAGS_GCOV CFLAGS_KCOV |
68 | + |
69 | # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default |
70 | # values of the respective KBUILD_* variables |
71 | ARCH_CPPFLAGS := |
72 | diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h |
73 | index e39d487bf724..a3c7f271ad4c 100644 |
74 | --- a/arch/arm64/include/asm/compat.h |
75 | +++ b/arch/arm64/include/asm/compat.h |
76 | @@ -215,7 +215,6 @@ typedef struct compat_siginfo { |
77 | } compat_siginfo_t; |
78 | |
79 | #define COMPAT_OFF_T_MAX 0x7fffffff |
80 | -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL |
81 | |
82 | /* |
83 | * A pointer passed in from user mode. This should not |
84 | diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h |
85 | index 8e2b5b556488..49691331ada4 100644 |
86 | --- a/arch/mips/include/asm/compat.h |
87 | +++ b/arch/mips/include/asm/compat.h |
88 | @@ -200,7 +200,6 @@ typedef struct compat_siginfo { |
89 | } compat_siginfo_t; |
90 | |
91 | #define COMPAT_OFF_T_MAX 0x7fffffff |
92 | -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL |
93 | |
94 | /* |
95 | * A pointer passed in from user mode. This should not |
96 | diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h |
97 | index 07f48827afda..acf8aa07cbe0 100644 |
98 | --- a/arch/parisc/include/asm/compat.h |
99 | +++ b/arch/parisc/include/asm/compat.h |
100 | @@ -195,7 +195,6 @@ typedef struct compat_siginfo { |
101 | } compat_siginfo_t; |
102 | |
103 | #define COMPAT_OFF_T_MAX 0x7fffffff |
104 | -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL |
105 | |
106 | struct compat_ipc64_perm { |
107 | compat_key_t key; |
108 | diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h |
109 | index a035b1e5dfa7..8a2aecfe9b02 100644 |
110 | --- a/arch/powerpc/include/asm/compat.h |
111 | +++ b/arch/powerpc/include/asm/compat.h |
112 | @@ -185,7 +185,6 @@ typedef struct compat_siginfo { |
113 | } compat_siginfo_t; |
114 | |
115 | #define COMPAT_OFF_T_MAX 0x7fffffff |
116 | -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL |
117 | |
118 | /* |
119 | * A pointer passed in from user mode. This should not |
120 | diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h |
121 | index 1b60eb3676d5..5e6a63641a5f 100644 |
122 | --- a/arch/s390/include/asm/compat.h |
123 | +++ b/arch/s390/include/asm/compat.h |
124 | @@ -263,7 +263,6 @@ typedef struct compat_siginfo { |
125 | #define si_overrun _sifields._timer._overrun |
126 | |
127 | #define COMPAT_OFF_T_MAX 0x7fffffff |
128 | -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL |
129 | |
130 | /* |
131 | * A pointer passed in from user mode. This should not |
132 | diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h |
133 | index 977c3f280ba1..fa38c78de0f0 100644 |
134 | --- a/arch/sparc/include/asm/compat.h |
135 | +++ b/arch/sparc/include/asm/compat.h |
136 | @@ -209,7 +209,6 @@ typedef struct compat_siginfo { |
137 | } compat_siginfo_t; |
138 | |
139 | #define COMPAT_OFF_T_MAX 0x7fffffff |
140 | -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL |
141 | |
142 | /* |
143 | * A pointer passed in from user mode. This should not |
144 | diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h |
145 | index c14e36f008c8..62a7b83025dd 100644 |
146 | --- a/arch/tile/include/asm/compat.h |
147 | +++ b/arch/tile/include/asm/compat.h |
148 | @@ -173,7 +173,6 @@ typedef struct compat_siginfo { |
149 | } compat_siginfo_t; |
150 | |
151 | #define COMPAT_OFF_T_MAX 0x7fffffff |
152 | -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL |
153 | |
154 | struct compat_ipc64_perm { |
155 | compat_key_t key; |
156 | diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h |
157 | index 9eef9cc64c68..70bc1df580b2 100644 |
158 | --- a/arch/x86/include/asm/compat.h |
159 | +++ b/arch/x86/include/asm/compat.h |
160 | @@ -209,7 +209,6 @@ typedef struct compat_siginfo { |
161 | } compat_siginfo_t; |
162 | |
163 | #define COMPAT_OFF_T_MAX 0x7fffffff |
164 | -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL |
165 | |
166 | struct compat_ipc64_perm { |
167 | compat_key_t key; |
168 | diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c |
169 | index 3d433af856a5..7be35b600299 100644 |
170 | --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c |
171 | +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c |
172 | @@ -1297,9 +1297,7 @@ static void rmdir_all_sub(void) |
173 | kfree(rdtgrp); |
174 | } |
175 | /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ |
176 | - get_online_cpus(); |
177 | update_closid_rmid(cpu_online_mask, &rdtgroup_default); |
178 | - put_online_cpus(); |
179 | |
180 | kernfs_remove(kn_info); |
181 | kernfs_remove(kn_mongrp); |
182 | @@ -1310,6 +1308,7 @@ static void rdt_kill_sb(struct super_block *sb) |
183 | { |
184 | struct rdt_resource *r; |
185 | |
186 | + cpus_read_lock(); |
187 | mutex_lock(&rdtgroup_mutex); |
188 | |
189 | /*Put everything back to default values. */ |
190 | @@ -1317,11 +1316,12 @@ static void rdt_kill_sb(struct super_block *sb) |
191 | reset_all_ctrls(r); |
192 | cdp_disable(); |
193 | rmdir_all_sub(); |
194 | - static_branch_disable(&rdt_alloc_enable_key); |
195 | - static_branch_disable(&rdt_mon_enable_key); |
196 | - static_branch_disable(&rdt_enable_key); |
197 | + static_branch_disable_cpuslocked(&rdt_alloc_enable_key); |
198 | + static_branch_disable_cpuslocked(&rdt_mon_enable_key); |
199 | + static_branch_disable_cpuslocked(&rdt_enable_key); |
200 | kernfs_kill_sb(sb); |
201 | mutex_unlock(&rdtgroup_mutex); |
202 | + cpus_read_unlock(); |
203 | } |
204 | |
205 | static struct file_system_type rdt_fs_type = { |
206 | diff --git a/block/blk-core.c b/block/blk-core.c |
207 | index 516ce3174683..7b30bf10b1d4 100644 |
208 | --- a/block/blk-core.c |
209 | +++ b/block/blk-core.c |
210 | @@ -339,6 +339,7 @@ void blk_sync_queue(struct request_queue *q) |
211 | struct blk_mq_hw_ctx *hctx; |
212 | int i; |
213 | |
214 | + cancel_delayed_work_sync(&q->requeue_work); |
215 | queue_for_each_hw_ctx(q, hctx, i) |
216 | cancel_delayed_work_sync(&hctx->run_work); |
217 | } else { |
218 | diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c |
219 | index bc3984ffe867..c04aa11f0e21 100644 |
220 | --- a/drivers/char/ipmi/ipmi_si_intf.c |
221 | +++ b/drivers/char/ipmi/ipmi_si_intf.c |
222 | @@ -242,6 +242,9 @@ struct smi_info { |
223 | /* The timer for this si. */ |
224 | struct timer_list si_timer; |
225 | |
226 | + /* This flag is set, if the timer can be set */ |
227 | + bool timer_can_start; |
228 | + |
229 | /* This flag is set, if the timer is running (timer_pending() isn't enough) */ |
230 | bool timer_running; |
231 | |
232 | @@ -417,6 +420,8 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) |
233 | |
234 | static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) |
235 | { |
236 | + if (!smi_info->timer_can_start) |
237 | + return; |
238 | smi_info->last_timeout_jiffies = jiffies; |
239 | mod_timer(&smi_info->si_timer, new_val); |
240 | smi_info->timer_running = true; |
241 | @@ -436,21 +441,18 @@ static void start_new_msg(struct smi_info *smi_info, unsigned char *msg, |
242 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); |
243 | } |
244 | |
245 | -static void start_check_enables(struct smi_info *smi_info, bool start_timer) |
246 | +static void start_check_enables(struct smi_info *smi_info) |
247 | { |
248 | unsigned char msg[2]; |
249 | |
250 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); |
251 | msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; |
252 | |
253 | - if (start_timer) |
254 | - start_new_msg(smi_info, msg, 2); |
255 | - else |
256 | - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); |
257 | + start_new_msg(smi_info, msg, 2); |
258 | smi_info->si_state = SI_CHECKING_ENABLES; |
259 | } |
260 | |
261 | -static void start_clear_flags(struct smi_info *smi_info, bool start_timer) |
262 | +static void start_clear_flags(struct smi_info *smi_info) |
263 | { |
264 | unsigned char msg[3]; |
265 | |
266 | @@ -459,10 +461,7 @@ static void start_clear_flags(struct smi_info *smi_info, bool start_timer) |
267 | msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; |
268 | msg[2] = WDT_PRE_TIMEOUT_INT; |
269 | |
270 | - if (start_timer) |
271 | - start_new_msg(smi_info, msg, 3); |
272 | - else |
273 | - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); |
274 | + start_new_msg(smi_info, msg, 3); |
275 | smi_info->si_state = SI_CLEARING_FLAGS; |
276 | } |
277 | |
278 | @@ -497,11 +496,11 @@ static void start_getting_events(struct smi_info *smi_info) |
279 | * Note that we cannot just use disable_irq(), since the interrupt may |
280 | * be shared. |
281 | */ |
282 | -static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer) |
283 | +static inline bool disable_si_irq(struct smi_info *smi_info) |
284 | { |
285 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { |
286 | smi_info->interrupt_disabled = true; |
287 | - start_check_enables(smi_info, start_timer); |
288 | + start_check_enables(smi_info); |
289 | return true; |
290 | } |
291 | return false; |
292 | @@ -511,7 +510,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info) |
293 | { |
294 | if ((smi_info->irq) && (smi_info->interrupt_disabled)) { |
295 | smi_info->interrupt_disabled = false; |
296 | - start_check_enables(smi_info, true); |
297 | + start_check_enables(smi_info); |
298 | return true; |
299 | } |
300 | return false; |
301 | @@ -529,7 +528,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info) |
302 | |
303 | msg = ipmi_alloc_smi_msg(); |
304 | if (!msg) { |
305 | - if (!disable_si_irq(smi_info, true)) |
306 | + if (!disable_si_irq(smi_info)) |
307 | smi_info->si_state = SI_NORMAL; |
308 | } else if (enable_si_irq(smi_info)) { |
309 | ipmi_free_smi_msg(msg); |
310 | @@ -545,7 +544,7 @@ static void handle_flags(struct smi_info *smi_info) |
311 | /* Watchdog pre-timeout */ |
312 | smi_inc_stat(smi_info, watchdog_pretimeouts); |
313 | |
314 | - start_clear_flags(smi_info, true); |
315 | + start_clear_flags(smi_info); |
316 | smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; |
317 | if (smi_info->intf) |
318 | ipmi_smi_watchdog_pretimeout(smi_info->intf); |
319 | @@ -928,7 +927,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, |
320 | * disable and messages disabled. |
321 | */ |
322 | if (smi_info->supports_event_msg_buff || smi_info->irq) { |
323 | - start_check_enables(smi_info, true); |
324 | + start_check_enables(smi_info); |
325 | } else { |
326 | smi_info->curr_msg = alloc_msg_handle_irq(smi_info); |
327 | if (!smi_info->curr_msg) |
328 | @@ -1235,6 +1234,7 @@ static int smi_start_processing(void *send_info, |
329 | |
330 | /* Set up the timer that drives the interface. */ |
331 | setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi); |
332 | + new_smi->timer_can_start = true; |
333 | smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES); |
334 | |
335 | /* Try to claim any interrupts. */ |
336 | @@ -3416,10 +3416,12 @@ static void check_for_broken_irqs(struct smi_info *smi_info) |
337 | check_set_rcv_irq(smi_info); |
338 | } |
339 | |
340 | -static inline void wait_for_timer_and_thread(struct smi_info *smi_info) |
341 | +static inline void stop_timer_and_thread(struct smi_info *smi_info) |
342 | { |
343 | if (smi_info->thread != NULL) |
344 | kthread_stop(smi_info->thread); |
345 | + |
346 | + smi_info->timer_can_start = false; |
347 | if (smi_info->timer_running) |
348 | del_timer_sync(&smi_info->si_timer); |
349 | } |
350 | @@ -3605,7 +3607,7 @@ static int try_smi_init(struct smi_info *new_smi) |
351 | * Start clearing the flags before we enable interrupts or the |
352 | * timer to avoid racing with the timer. |
353 | */ |
354 | - start_clear_flags(new_smi, false); |
355 | + start_clear_flags(new_smi); |
356 | |
357 | /* |
358 | * IRQ is defined to be set when non-zero. req_events will |
359 | @@ -3674,7 +3676,7 @@ static int try_smi_init(struct smi_info *new_smi) |
360 | return 0; |
361 | |
362 | out_err_stop_timer: |
363 | - wait_for_timer_and_thread(new_smi); |
364 | + stop_timer_and_thread(new_smi); |
365 | |
366 | out_err: |
367 | new_smi->interrupt_disabled = true; |
368 | @@ -3866,7 +3868,7 @@ static void cleanup_one_si(struct smi_info *to_clean) |
369 | */ |
370 | if (to_clean->irq_cleanup) |
371 | to_clean->irq_cleanup(to_clean); |
372 | - wait_for_timer_and_thread(to_clean); |
373 | + stop_timer_and_thread(to_clean); |
374 | |
375 | /* |
376 | * Timeouts are stopped, now make sure the interrupts are off |
377 | @@ -3878,7 +3880,7 @@ static void cleanup_one_si(struct smi_info *to_clean) |
378 | schedule_timeout_uninterruptible(1); |
379 | } |
380 | if (to_clean->handlers) |
381 | - disable_si_irq(to_clean, false); |
382 | + disable_si_irq(to_clean); |
383 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { |
384 | poll(to_clean); |
385 | schedule_timeout_uninterruptible(1); |
386 | diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c |
387 | index ed6531f075c6..e06605b21841 100644 |
388 | --- a/drivers/cpuidle/cpuidle-powernv.c |
389 | +++ b/drivers/cpuidle/cpuidle-powernv.c |
390 | @@ -384,9 +384,9 @@ static int powernv_add_idle_states(void) |
391 | * Firmware passes residency and latency values in ns. |
392 | * cpuidle expects it in us. |
393 | */ |
394 | - exit_latency = latency_ns[i] / 1000; |
395 | + exit_latency = DIV_ROUND_UP(latency_ns[i], 1000); |
396 | if (!rc) |
397 | - target_residency = residency_ns[i] / 1000; |
398 | + target_residency = DIV_ROUND_UP(residency_ns[i], 1000); |
399 | else |
400 | target_residency = 0; |
401 | |
402 | diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c |
403 | index 14d1e7d9a1d6..0e6bc631a1ca 100644 |
404 | --- a/drivers/ide/ide-atapi.c |
405 | +++ b/drivers/ide/ide-atapi.c |
406 | @@ -282,7 +282,7 @@ int ide_cd_expiry(ide_drive_t *drive) |
407 | struct request *rq = drive->hwif->rq; |
408 | unsigned long wait = 0; |
409 | |
410 | - debug_log("%s: rq->cmd[0]: 0x%x\n", __func__, rq->cmd[0]); |
411 | + debug_log("%s: scsi_req(rq)->cmd[0]: 0x%x\n", __func__, scsi_req(rq)->cmd[0]); |
412 | |
413 | /* |
414 | * Some commands are *slow* and normally take a long time to complete. |
415 | @@ -463,7 +463,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) |
416 | return ide_do_reset(drive); |
417 | } |
418 | |
419 | - debug_log("[cmd %x]: check condition\n", rq->cmd[0]); |
420 | + debug_log("[cmd %x]: check condition\n", scsi_req(rq)->cmd[0]); |
421 | |
422 | /* Retry operation */ |
423 | ide_retry_pc(drive); |
424 | @@ -531,7 +531,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) |
425 | ide_pad_transfer(drive, write, bcount); |
426 | |
427 | debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n", |
428 | - rq->cmd[0], done, bcount, scsi_req(rq)->resid_len); |
429 | + scsi_req(rq)->cmd[0], done, bcount, scsi_req(rq)->resid_len); |
430 | |
431 | /* And set the interrupt handler again */ |
432 | ide_set_handler(drive, ide_pc_intr, timeout); |
433 | diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h |
434 | index 010c709ba3bb..58c531db4f4a 100644 |
435 | --- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h |
436 | +++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h |
437 | @@ -675,8 +675,8 @@ struct fw_ri_fr_nsmr_tpte_wr { |
438 | __u16 wrid; |
439 | __u8 r1[3]; |
440 | __u8 len16; |
441 | - __u32 r2; |
442 | - __u32 stag; |
443 | + __be32 r2; |
444 | + __be32 stag; |
445 | struct fw_ri_tpte tpte; |
446 | __u64 pbl[2]; |
447 | }; |
448 | diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c |
449 | index f425905c97fa..0cabf31fb163 100644 |
450 | --- a/drivers/md/bitmap.c |
451 | +++ b/drivers/md/bitmap.c |
452 | @@ -2158,6 +2158,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, |
453 | for (k = 0; k < page; k++) { |
454 | kfree(new_bp[k].map); |
455 | } |
456 | + kfree(new_bp); |
457 | |
458 | /* restore some fields from old_counts */ |
459 | bitmap->counts.bp = old_counts.bp; |
460 | @@ -2208,6 +2209,14 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, |
461 | block += old_blocks; |
462 | } |
463 | |
464 | + if (bitmap->counts.bp != old_counts.bp) { |
465 | + unsigned long k; |
466 | + for (k = 0; k < old_counts.pages; k++) |
467 | + if (!old_counts.bp[k].hijacked) |
468 | + kfree(old_counts.bp[k].map); |
469 | + kfree(old_counts.bp); |
470 | + } |
471 | + |
472 | if (!init) { |
473 | int i; |
474 | while (block < (chunks << chunkshift)) { |
475 | diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c |
476 | index 2245d06d2045..a25eebd98996 100644 |
477 | --- a/drivers/md/dm-raid.c |
478 | +++ b/drivers/md/dm-raid.c |
479 | @@ -2143,13 +2143,6 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev) |
480 | struct dm_raid_superblock *refsb; |
481 | uint64_t events_sb, events_refsb; |
482 | |
483 | - rdev->sb_start = 0; |
484 | - rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev); |
485 | - if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) { |
486 | - DMERR("superblock size of a logical block is no longer valid"); |
487 | - return -EINVAL; |
488 | - } |
489 | - |
490 | r = read_disk_sb(rdev, rdev->sb_size, false); |
491 | if (r) |
492 | return r; |
493 | @@ -2494,6 +2487,17 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) |
494 | if (test_bit(Journal, &rdev->flags)) |
495 | continue; |
496 | |
497 | + if (!rdev->meta_bdev) |
498 | + continue; |
499 | + |
500 | + /* Set superblock offset/size for metadata device. */ |
501 | + rdev->sb_start = 0; |
502 | + rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev); |
503 | + if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) { |
504 | + DMERR("superblock size of a logical block is no longer valid"); |
505 | + return -EINVAL; |
506 | + } |
507 | + |
508 | /* |
509 | * Skipping super_load due to CTR_FLAG_SYNC will cause |
510 | * the array to undergo initialization again as |
511 | @@ -2506,9 +2510,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) |
512 | if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) |
513 | continue; |
514 | |
515 | - if (!rdev->meta_bdev) |
516 | - continue; |
517 | - |
518 | r = super_load(rdev, freshest); |
519 | |
520 | switch (r) { |
521 | diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c |
522 | index 9139d01ba7ed..33d844fe2e70 100644 |
523 | --- a/drivers/media/dvb-core/dvb_frontend.c |
524 | +++ b/drivers/media/dvb-core/dvb_frontend.c |
525 | @@ -145,15 +145,13 @@ static void __dvb_frontend_free(struct dvb_frontend *fe) |
526 | { |
527 | struct dvb_frontend_private *fepriv = fe->frontend_priv; |
528 | |
529 | - if (!fepriv) |
530 | - return; |
531 | - |
532 | - dvb_free_device(fepriv->dvbdev); |
533 | + if (fepriv) |
534 | + dvb_free_device(fepriv->dvbdev); |
535 | |
536 | dvb_frontend_invoke_release(fe, fe->ops.release); |
537 | |
538 | - kfree(fepriv); |
539 | - fe->frontend_priv = NULL; |
540 | + if (fepriv) |
541 | + kfree(fepriv); |
542 | } |
543 | |
544 | static void dvb_frontend_free(struct kref *ref) |
545 | diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c |
546 | index d4496e9afcdf..a3d12dbde95b 100644 |
547 | --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c |
548 | +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c |
549 | @@ -1355,7 +1355,8 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, |
550 | |
551 | /* Offload checksum calculation to HW */ |
552 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
553 | - hdr->csum_l3 = 1; /* Enable IP csum calculation */ |
554 | + if (ip.v4->version == 4) |
555 | + hdr->csum_l3 = 1; /* Enable IP csum calculation */ |
556 | hdr->l3_offset = skb_network_offset(skb); |
557 | hdr->l4_offset = skb_transport_offset(skb); |
558 | |
559 | diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c |
560 | index a3c949ea7d1a..9541465e43e9 100644 |
561 | --- a/drivers/net/ethernet/realtek/r8169.c |
562 | +++ b/drivers/net/ethernet/realtek/r8169.c |
563 | @@ -2025,21 +2025,6 @@ static int rtl8169_set_speed(struct net_device *dev, |
564 | return ret; |
565 | } |
566 | |
567 | -static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
568 | -{ |
569 | - struct rtl8169_private *tp = netdev_priv(dev); |
570 | - int ret; |
571 | - |
572 | - del_timer_sync(&tp->timer); |
573 | - |
574 | - rtl_lock_work(tp); |
575 | - ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd), |
576 | - cmd->duplex, cmd->advertising); |
577 | - rtl_unlock_work(tp); |
578 | - |
579 | - return ret; |
580 | -} |
581 | - |
582 | static netdev_features_t rtl8169_fix_features(struct net_device *dev, |
583 | netdev_features_t features) |
584 | { |
585 | @@ -2166,6 +2151,27 @@ static int rtl8169_get_link_ksettings(struct net_device *dev, |
586 | return rc; |
587 | } |
588 | |
589 | +static int rtl8169_set_link_ksettings(struct net_device *dev, |
590 | + const struct ethtool_link_ksettings *cmd) |
591 | +{ |
592 | + struct rtl8169_private *tp = netdev_priv(dev); |
593 | + int rc; |
594 | + u32 advertising; |
595 | + |
596 | + if (!ethtool_convert_link_mode_to_legacy_u32(&advertising, |
597 | + cmd->link_modes.advertising)) |
598 | + return -EINVAL; |
599 | + |
600 | + del_timer_sync(&tp->timer); |
601 | + |
602 | + rtl_lock_work(tp); |
603 | + rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed, |
604 | + cmd->base.duplex, advertising); |
605 | + rtl_unlock_work(tp); |
606 | + |
607 | + return rc; |
608 | +} |
609 | + |
610 | static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, |
611 | void *p) |
612 | { |
613 | @@ -2367,7 +2373,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { |
614 | .get_drvinfo = rtl8169_get_drvinfo, |
615 | .get_regs_len = rtl8169_get_regs_len, |
616 | .get_link = ethtool_op_get_link, |
617 | - .set_settings = rtl8169_set_settings, |
618 | .get_msglevel = rtl8169_get_msglevel, |
619 | .set_msglevel = rtl8169_set_msglevel, |
620 | .get_regs = rtl8169_get_regs, |
621 | @@ -2379,6 +2384,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { |
622 | .get_ts_info = ethtool_op_get_ts_info, |
623 | .nway_reset = rtl8169_nway_reset, |
624 | .get_link_ksettings = rtl8169_get_link_ksettings, |
625 | + .set_link_ksettings = rtl8169_set_link_ksettings, |
626 | }; |
627 | |
628 | static void rtl8169_get_mac_version(struct rtl8169_private *tp, |
629 | diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
630 | index 16bd50929084..28c4d6fa096c 100644 |
631 | --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
632 | +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
633 | @@ -2564,6 +2564,7 @@ static int stmmac_open(struct net_device *dev) |
634 | |
635 | priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); |
636 | priv->rx_copybreak = STMMAC_RX_COPYBREAK; |
637 | + priv->mss = 0; |
638 | |
639 | ret = alloc_dma_desc_resources(priv); |
640 | if (ret < 0) { |
641 | diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c |
642 | index 1f3295e274d0..8feb84fd4ca7 100644 |
643 | --- a/drivers/net/ipvlan/ipvlan_core.c |
644 | +++ b/drivers/net/ipvlan/ipvlan_core.c |
645 | @@ -409,7 +409,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) |
646 | struct dst_entry *dst; |
647 | int err, ret = NET_XMIT_DROP; |
648 | struct flowi6 fl6 = { |
649 | - .flowi6_iif = dev->ifindex, |
650 | + .flowi6_oif = dev->ifindex, |
651 | .daddr = ip6h->daddr, |
652 | .saddr = ip6h->saddr, |
653 | .flowi6_flags = FLOWI_FLAG_ANYSRC, |
654 | diff --git a/drivers/net/tap.c b/drivers/net/tap.c |
655 | index 6c0c84c33e1f..bfd4ded0a53f 100644 |
656 | --- a/drivers/net/tap.c |
657 | +++ b/drivers/net/tap.c |
658 | @@ -829,8 +829,11 @@ static ssize_t tap_do_read(struct tap_queue *q, |
659 | DEFINE_WAIT(wait); |
660 | ssize_t ret = 0; |
661 | |
662 | - if (!iov_iter_count(to)) |
663 | + if (!iov_iter_count(to)) { |
664 | + if (skb) |
665 | + kfree_skb(skb); |
666 | return 0; |
667 | + } |
668 | |
669 | if (skb) |
670 | goto put; |
671 | @@ -1077,7 +1080,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd, |
672 | case TUNSETOFFLOAD: |
673 | /* let the user check for future flags */ |
674 | if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | |
675 | - TUN_F_TSO_ECN)) |
676 | + TUN_F_TSO_ECN | TUN_F_UFO)) |
677 | return -EINVAL; |
678 | |
679 | rtnl_lock(); |
680 | @@ -1154,11 +1157,14 @@ static int tap_recvmsg(struct socket *sock, struct msghdr *m, |
681 | size_t total_len, int flags) |
682 | { |
683 | struct tap_queue *q = container_of(sock, struct tap_queue, sock); |
684 | + struct sk_buff *skb = m->msg_control; |
685 | int ret; |
686 | - if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) |
687 | + if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) { |
688 | + if (skb) |
689 | + kfree_skb(skb); |
690 | return -EINVAL; |
691 | - ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, |
692 | - m->msg_control); |
693 | + } |
694 | + ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb); |
695 | if (ret > total_len) { |
696 | m->msg_flags |= MSG_TRUNC; |
697 | ret = flags & MSG_TRUNC ? ret : total_len; |
698 | diff --git a/drivers/net/tun.c b/drivers/net/tun.c |
699 | index 42bb820a56c9..c91b110f2169 100644 |
700 | --- a/drivers/net/tun.c |
701 | +++ b/drivers/net/tun.c |
702 | @@ -1326,6 +1326,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, |
703 | err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); |
704 | if (err) |
705 | goto err_redirect; |
706 | + rcu_read_unlock(); |
707 | return NULL; |
708 | case XDP_TX: |
709 | xdp_xmit = true; |
710 | @@ -1358,7 +1359,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, |
711 | if (xdp_xmit) { |
712 | skb->dev = tun->dev; |
713 | generic_xdp_tx(skb, xdp_prog); |
714 | - rcu_read_lock(); |
715 | + rcu_read_unlock(); |
716 | return NULL; |
717 | } |
718 | |
719 | @@ -1734,8 +1735,11 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, |
720 | |
721 | tun_debug(KERN_INFO, tun, "tun_do_read\n"); |
722 | |
723 | - if (!iov_iter_count(to)) |
724 | + if (!iov_iter_count(to)) { |
725 | + if (skb) |
726 | + kfree_skb(skb); |
727 | return 0; |
728 | + } |
729 | |
730 | if (!skb) { |
731 | /* Read frames from ring */ |
732 | @@ -1851,22 +1855,24 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, |
733 | { |
734 | struct tun_file *tfile = container_of(sock, struct tun_file, socket); |
735 | struct tun_struct *tun = __tun_get(tfile); |
736 | + struct sk_buff *skb = m->msg_control; |
737 | int ret; |
738 | |
739 | - if (!tun) |
740 | - return -EBADFD; |
741 | + if (!tun) { |
742 | + ret = -EBADFD; |
743 | + goto out_free_skb; |
744 | + } |
745 | |
746 | if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { |
747 | ret = -EINVAL; |
748 | - goto out; |
749 | + goto out_put_tun; |
750 | } |
751 | if (flags & MSG_ERRQUEUE) { |
752 | ret = sock_recv_errqueue(sock->sk, m, total_len, |
753 | SOL_PACKET, TUN_TX_TIMESTAMP); |
754 | goto out; |
755 | } |
756 | - ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, |
757 | - m->msg_control); |
758 | + ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb); |
759 | if (ret > (ssize_t)total_len) { |
760 | m->msg_flags |= MSG_TRUNC; |
761 | ret = flags & MSG_TRUNC ? ret : total_len; |
762 | @@ -1874,6 +1880,13 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, |
763 | out: |
764 | tun_put(tun); |
765 | return ret; |
766 | + |
767 | +out_put_tun: |
768 | + tun_put(tun); |
769 | +out_free_skb: |
770 | + if (skb) |
771 | + kfree_skb(skb); |
772 | + return ret; |
773 | } |
774 | |
775 | static int tun_peek_len(struct socket *sock) |
776 | @@ -2144,6 +2157,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg) |
777 | features |= NETIF_F_TSO6; |
778 | arg &= ~(TUN_F_TSO4|TUN_F_TSO6); |
779 | } |
780 | + |
781 | + arg &= ~TUN_F_UFO; |
782 | } |
783 | |
784 | /* This gives the user a way to test for new features in future by |
785 | diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c |
786 | index 8d4a6f7cba61..81394a4b2803 100644 |
787 | --- a/drivers/net/usb/qmi_wwan.c |
788 | +++ b/drivers/net/usb/qmi_wwan.c |
789 | @@ -261,9 +261,11 @@ static void qmi_wwan_netdev_setup(struct net_device *net) |
790 | net->hard_header_len = 0; |
791 | net->addr_len = 0; |
792 | net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; |
793 | + set_bit(EVENT_NO_IP_ALIGN, &dev->flags); |
794 | netdev_dbg(net, "mode: raw IP\n"); |
795 | } else if (!net->header_ops) { /* don't bother if already set */ |
796 | ether_setup(net); |
797 | + clear_bit(EVENT_NO_IP_ALIGN, &dev->flags); |
798 | netdev_dbg(net, "mode: Ethernet\n"); |
799 | } |
800 | |
801 | @@ -1239,6 +1241,7 @@ static const struct usb_device_id products[] = { |
802 | {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ |
803 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ |
804 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ |
805 | + {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ |
806 | |
807 | /* 4. Gobi 1000 devices */ |
808 | {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ |
809 | diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c |
810 | index 6510e5cc1817..42baad125a7d 100644 |
811 | --- a/drivers/net/usb/usbnet.c |
812 | +++ b/drivers/net/usb/usbnet.c |
813 | @@ -484,7 +484,10 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) |
814 | return -ENOLINK; |
815 | } |
816 | |
817 | - skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); |
818 | + if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags)) |
819 | + skb = __netdev_alloc_skb(dev->net, size, flags); |
820 | + else |
821 | + skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); |
822 | if (!skb) { |
823 | netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); |
824 | usbnet_defer_kevent (dev, EVENT_RX_MEMORY); |
825 | diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c |
826 | index 76d2bb793afe..3333d417b248 100644 |
827 | --- a/drivers/nvme/target/rdma.c |
828 | +++ b/drivers/nvme/target/rdma.c |
829 | @@ -1512,15 +1512,17 @@ static struct nvmet_fabrics_ops nvmet_rdma_ops = { |
830 | |
831 | static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data) |
832 | { |
833 | - struct nvmet_rdma_queue *queue; |
834 | + struct nvmet_rdma_queue *queue, *tmp; |
835 | |
836 | /* Device is being removed, delete all queues using this device */ |
837 | mutex_lock(&nvmet_rdma_queue_mutex); |
838 | - list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) { |
839 | + list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, |
840 | + queue_list) { |
841 | if (queue->dev->device != ib_device) |
842 | continue; |
843 | |
844 | pr_info("Removing queue %d\n", queue->idx); |
845 | + list_del_init(&queue->queue_list); |
846 | __nvmet_rdma_queue_disconnect(queue); |
847 | } |
848 | mutex_unlock(&nvmet_rdma_queue_mutex); |
849 | diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h |
850 | index 47a13c5723c6..5340efc673a9 100644 |
851 | --- a/drivers/s390/net/qeth_core.h |
852 | +++ b/drivers/s390/net/qeth_core.h |
853 | @@ -985,6 +985,9 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *, |
854 | int qeth_set_features(struct net_device *, netdev_features_t); |
855 | int qeth_recover_features(struct net_device *); |
856 | netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); |
857 | +netdev_features_t qeth_features_check(struct sk_buff *skb, |
858 | + struct net_device *dev, |
859 | + netdev_features_t features); |
860 | int qeth_vm_request_mac(struct qeth_card *card); |
861 | int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len); |
862 | |
863 | diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c |
864 | index bae7440abc01..330e5d3dadf3 100644 |
865 | --- a/drivers/s390/net/qeth_core_main.c |
866 | +++ b/drivers/s390/net/qeth_core_main.c |
867 | @@ -19,6 +19,11 @@ |
868 | #include <linux/mii.h> |
869 | #include <linux/kthread.h> |
870 | #include <linux/slab.h> |
871 | +#include <linux/if_vlan.h> |
872 | +#include <linux/netdevice.h> |
873 | +#include <linux/netdev_features.h> |
874 | +#include <linux/skbuff.h> |
875 | + |
876 | #include <net/iucv/af_iucv.h> |
877 | #include <net/dsfield.h> |
878 | |
879 | @@ -6505,6 +6510,32 @@ netdev_features_t qeth_fix_features(struct net_device *dev, |
880 | } |
881 | EXPORT_SYMBOL_GPL(qeth_fix_features); |
882 | |
883 | +netdev_features_t qeth_features_check(struct sk_buff *skb, |
884 | + struct net_device *dev, |
885 | + netdev_features_t features) |
886 | +{ |
887 | + /* GSO segmentation builds skbs with |
888 | + * a (small) linear part for the headers, and |
889 | + * page frags for the data. |
890 | + * Compared to a linear skb, the header-only part consumes an |
891 | + * additional buffer element. This reduces buffer utilization, and |
892 | + * hurts throughput. So compress small segments into one element. |
893 | + */ |
894 | + if (netif_needs_gso(skb, features)) { |
895 | + /* match skb_segment(): */ |
896 | + unsigned int doffset = skb->data - skb_mac_header(skb); |
897 | + unsigned int hsize = skb_shinfo(skb)->gso_size; |
898 | + unsigned int hroom = skb_headroom(skb); |
899 | + |
900 | + /* linearize only if resulting skb allocations are order-0: */ |
901 | + if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0)) |
902 | + features &= ~NETIF_F_SG; |
903 | + } |
904 | + |
905 | + return vlan_features_check(skb, features); |
906 | +} |
907 | +EXPORT_SYMBOL_GPL(qeth_features_check); |
908 | + |
909 | static int __init qeth_core_init(void) |
910 | { |
911 | int rc; |
912 | diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c |
913 | index 760b023eae95..5a973ebcb13c 100644 |
914 | --- a/drivers/s390/net/qeth_l2_main.c |
915 | +++ b/drivers/s390/net/qeth_l2_main.c |
916 | @@ -963,6 +963,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = { |
917 | .ndo_stop = qeth_l2_stop, |
918 | .ndo_get_stats = qeth_get_stats, |
919 | .ndo_start_xmit = qeth_l2_hard_start_xmit, |
920 | + .ndo_features_check = qeth_features_check, |
921 | .ndo_validate_addr = eth_validate_addr, |
922 | .ndo_set_rx_mode = qeth_l2_set_rx_mode, |
923 | .ndo_do_ioctl = qeth_do_ioctl, |
924 | @@ -1009,6 +1010,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) |
925 | if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { |
926 | card->dev->hw_features = NETIF_F_SG; |
927 | card->dev->vlan_features = NETIF_F_SG; |
928 | + card->dev->features |= NETIF_F_SG; |
929 | /* OSA 3S and earlier has no RX/TX support */ |
930 | if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) { |
931 | card->dev->hw_features |= NETIF_F_IP_CSUM; |
932 | @@ -1027,8 +1029,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) |
933 | |
934 | card->info.broadcast_capable = 1; |
935 | qeth_l2_request_initial_mac(card); |
936 | - card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * |
937 | - PAGE_SIZE; |
938 | SET_NETDEV_DEV(card->dev, &card->gdev->dev); |
939 | netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); |
940 | netif_carrier_off(card->dev); |
941 | diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c |
942 | index ab661a431f7c..27185ab38136 100644 |
943 | --- a/drivers/s390/net/qeth_l3_main.c |
944 | +++ b/drivers/s390/net/qeth_l3_main.c |
945 | @@ -1376,6 +1376,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev) |
946 | |
947 | tmp->u.a4.addr = be32_to_cpu(im4->multiaddr); |
948 | memcpy(tmp->mac, buf, sizeof(tmp->mac)); |
949 | + tmp->is_multicast = 1; |
950 | |
951 | ipm = qeth_l3_ip_from_hash(card, tmp); |
952 | if (ipm) { |
953 | @@ -1553,7 +1554,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card, |
954 | |
955 | addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); |
956 | if (!addr) |
957 | - return; |
958 | + goto out; |
959 | |
960 | spin_lock_bh(&card->ip_lock); |
961 | |
962 | @@ -1567,6 +1568,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card, |
963 | spin_unlock_bh(&card->ip_lock); |
964 | |
965 | kfree(addr); |
966 | +out: |
967 | in_dev_put(in_dev); |
968 | } |
969 | |
970 | @@ -1591,7 +1593,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card, |
971 | |
972 | addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); |
973 | if (!addr) |
974 | - return; |
975 | + goto out; |
976 | |
977 | spin_lock_bh(&card->ip_lock); |
978 | |
979 | @@ -1606,6 +1608,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card, |
980 | spin_unlock_bh(&card->ip_lock); |
981 | |
982 | kfree(addr); |
983 | +out: |
984 | in6_dev_put(in6_dev); |
985 | #endif /* CONFIG_QETH_IPV6 */ |
986 | } |
987 | @@ -2920,6 +2923,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { |
988 | .ndo_stop = qeth_l3_stop, |
989 | .ndo_get_stats = qeth_get_stats, |
990 | .ndo_start_xmit = qeth_l3_hard_start_xmit, |
991 | + .ndo_features_check = qeth_features_check, |
992 | .ndo_validate_addr = eth_validate_addr, |
993 | .ndo_set_rx_mode = qeth_l3_set_multicast_list, |
994 | .ndo_do_ioctl = qeth_do_ioctl, |
995 | @@ -2960,6 +2964,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) |
996 | card->dev->vlan_features = NETIF_F_SG | |
997 | NETIF_F_RXCSUM | NETIF_F_IP_CSUM | |
998 | NETIF_F_TSO; |
999 | + card->dev->features |= NETIF_F_SG; |
1000 | } |
1001 | } |
1002 | } else if (card->info.type == QETH_CARD_TYPE_IQD) { |
1003 | @@ -2987,8 +2992,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) |
1004 | NETIF_F_HW_VLAN_CTAG_RX | |
1005 | NETIF_F_HW_VLAN_CTAG_FILTER; |
1006 | netif_keep_dst(card->dev); |
1007 | - card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * |
1008 | - PAGE_SIZE; |
1009 | + netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * |
1010 | + PAGE_SIZE); |
1011 | |
1012 | SET_NETDEV_DEV(card->dev, &card->gdev->dev); |
1013 | netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); |
1014 | diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c |
1015 | index 0202e5132fa7..876cdbec1307 100644 |
1016 | --- a/drivers/usb/gadget/function/f_fs.c |
1017 | +++ b/drivers/usb/gadget/function/f_fs.c |
1018 | @@ -1016,7 +1016,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) |
1019 | else |
1020 | ret = ep->status; |
1021 | goto error_mutex; |
1022 | - } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) { |
1023 | + } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) { |
1024 | ret = -ENOMEM; |
1025 | } else { |
1026 | req->buf = data; |
1027 | diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c |
1028 | index 58585ec8699e..bd15309ac5f1 100644 |
1029 | --- a/drivers/vhost/net.c |
1030 | +++ b/drivers/vhost/net.c |
1031 | @@ -782,16 +782,6 @@ static void handle_rx(struct vhost_net *net) |
1032 | /* On error, stop handling until the next kick. */ |
1033 | if (unlikely(headcount < 0)) |
1034 | goto out; |
1035 | - if (nvq->rx_array) |
1036 | - msg.msg_control = vhost_net_buf_consume(&nvq->rxq); |
1037 | - /* On overrun, truncate and discard */ |
1038 | - if (unlikely(headcount > UIO_MAXIOV)) { |
1039 | - iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1); |
1040 | - err = sock->ops->recvmsg(sock, &msg, |
1041 | - 1, MSG_DONTWAIT | MSG_TRUNC); |
1042 | - pr_debug("Discarded rx packet: len %zd\n", sock_len); |
1043 | - continue; |
1044 | - } |
1045 | /* OK, now we need to know about added descriptors. */ |
1046 | if (!headcount) { |
1047 | if (unlikely(vhost_enable_notify(&net->dev, vq))) { |
1048 | @@ -804,6 +794,16 @@ static void handle_rx(struct vhost_net *net) |
1049 | * they refilled. */ |
1050 | goto out; |
1051 | } |
1052 | + if (nvq->rx_array) |
1053 | + msg.msg_control = vhost_net_buf_consume(&nvq->rxq); |
1054 | + /* On overrun, truncate and discard */ |
1055 | + if (unlikely(headcount > UIO_MAXIOV)) { |
1056 | + iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1); |
1057 | + err = sock->ops->recvmsg(sock, &msg, |
1058 | + 1, MSG_DONTWAIT | MSG_TRUNC); |
1059 | + pr_debug("Discarded rx packet: len %zd\n", sock_len); |
1060 | + continue; |
1061 | + } |
1062 | /* We don't need to be notified again. */ |
1063 | iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len); |
1064 | fixup = msg.msg_iter; |
1065 | diff --git a/fs/fcntl.c b/fs/fcntl.c |
1066 | index 6fd311367efc..0345a46b8856 100644 |
1067 | --- a/fs/fcntl.c |
1068 | +++ b/fs/fcntl.c |
1069 | @@ -563,6 +563,9 @@ static int put_compat_flock64(const struct flock *kfl, struct compat_flock64 __u |
1070 | { |
1071 | struct compat_flock64 fl; |
1072 | |
1073 | + BUILD_BUG_ON(sizeof(kfl->l_start) > sizeof(ufl->l_start)); |
1074 | + BUILD_BUG_ON(sizeof(kfl->l_len) > sizeof(ufl->l_len)); |
1075 | + |
1076 | memset(&fl, 0, sizeof(struct compat_flock64)); |
1077 | copy_flock_fields(&fl, kfl); |
1078 | if (copy_to_user(ufl, &fl, sizeof(struct compat_flock64))) |
1079 | @@ -641,12 +644,8 @@ COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd, |
1080 | if (err) |
1081 | break; |
1082 | err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock); |
1083 | - if (err) |
1084 | - break; |
1085 | - err = fixup_compat_flock(&flock); |
1086 | - if (err) |
1087 | - return err; |
1088 | - err = put_compat_flock64(&flock, compat_ptr(arg)); |
1089 | + if (!err) |
1090 | + err = put_compat_flock64(&flock, compat_ptr(arg)); |
1091 | break; |
1092 | case F_SETLK: |
1093 | case F_SETLKW: |
1094 | diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h |
1095 | index dc8b4896b77b..b1b0ca7ccb2b 100644 |
1096 | --- a/include/linux/netdev_features.h |
1097 | +++ b/include/linux/netdev_features.h |
1098 | @@ -54,8 +54,9 @@ enum { |
1099 | NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ |
1100 | NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */ |
1101 | NETIF_F_GSO_ESP_BIT, /* ... ESP with TSO */ |
1102 | + NETIF_F_GSO_UDP_BIT, /* ... UFO, deprecated except tuntap */ |
1103 | /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ |
1104 | - NETIF_F_GSO_ESP_BIT, |
1105 | + NETIF_F_GSO_UDP_BIT, |
1106 | |
1107 | NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ |
1108 | NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */ |
1109 | @@ -132,6 +133,7 @@ enum { |
1110 | #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) |
1111 | #define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP) |
1112 | #define NETIF_F_GSO_ESP __NETIF_F(GSO_ESP) |
1113 | +#define NETIF_F_GSO_UDP __NETIF_F(GSO_UDP) |
1114 | #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) |
1115 | #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) |
1116 | #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) |
1117 | diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h |
1118 | index 2eaac7d75af4..46bf7cc7d5d5 100644 |
1119 | --- a/include/linux/netdevice.h |
1120 | +++ b/include/linux/netdevice.h |
1121 | @@ -4101,6 +4101,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) |
1122 | BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); |
1123 | BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); |
1124 | BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); |
1125 | + BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); |
1126 | |
1127 | return (features & feature) == feature; |
1128 | } |
1129 | diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h |
1130 | index a328e8181e49..e4b257ff881b 100644 |
1131 | --- a/include/linux/rculist_nulls.h |
1132 | +++ b/include/linux/rculist_nulls.h |
1133 | @@ -100,44 +100,6 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, |
1134 | first->pprev = &n->next; |
1135 | } |
1136 | |
1137 | -/** |
1138 | - * hlist_nulls_add_tail_rcu |
1139 | - * @n: the element to add to the hash list. |
1140 | - * @h: the list to add to. |
1141 | - * |
1142 | - * Description: |
1143 | - * Adds the specified element to the end of the specified hlist_nulls, |
1144 | - * while permitting racing traversals. NOTE: tail insertion requires |
1145 | - * list traversal. |
1146 | - * |
1147 | - * The caller must take whatever precautions are necessary |
1148 | - * (such as holding appropriate locks) to avoid racing |
1149 | - * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() |
1150 | - * or hlist_nulls_del_rcu(), running on this same list. |
1151 | - * However, it is perfectly legal to run concurrently with |
1152 | - * the _rcu list-traversal primitives, such as |
1153 | - * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency |
1154 | - * problems on Alpha CPUs. Regardless of the type of CPU, the |
1155 | - * list-traversal primitive must be guarded by rcu_read_lock(). |
1156 | - */ |
1157 | -static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n, |
1158 | - struct hlist_nulls_head *h) |
1159 | -{ |
1160 | - struct hlist_nulls_node *i, *last = NULL; |
1161 | - |
1162 | - for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i); |
1163 | - i = hlist_nulls_next_rcu(i)) |
1164 | - last = i; |
1165 | - |
1166 | - if (last) { |
1167 | - n->next = last->next; |
1168 | - n->pprev = &last->next; |
1169 | - rcu_assign_pointer(hlist_nulls_next_rcu(last), n); |
1170 | - } else { |
1171 | - hlist_nulls_add_head_rcu(n, h); |
1172 | - } |
1173 | -} |
1174 | - |
1175 | /** |
1176 | * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type |
1177 | * @tpos: the type * to use as a loop cursor. |
1178 | diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h |
1179 | index d448a4804aea..051e0939ec19 100644 |
1180 | --- a/include/linux/skbuff.h |
1181 | +++ b/include/linux/skbuff.h |
1182 | @@ -569,6 +569,8 @@ enum { |
1183 | SKB_GSO_SCTP = 1 << 14, |
1184 | |
1185 | SKB_GSO_ESP = 1 << 15, |
1186 | + |
1187 | + SKB_GSO_UDP = 1 << 16, |
1188 | }; |
1189 | |
1190 | #if BITS_PER_LONG > 32 |
1191 | diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h |
1192 | index 97116379db5f..e87a805cbfef 100644 |
1193 | --- a/include/linux/usb/usbnet.h |
1194 | +++ b/include/linux/usb/usbnet.h |
1195 | @@ -81,6 +81,7 @@ struct usbnet { |
1196 | # define EVENT_RX_KILL 10 |
1197 | # define EVENT_LINK_CHANGE 11 |
1198 | # define EVENT_SET_RX_MODE 12 |
1199 | +# define EVENT_NO_IP_ALIGN 13 |
1200 | }; |
1201 | |
1202 | static inline struct usb_driver *driver_of(struct usb_interface *intf) |
1203 | diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h |
1204 | index 210034c896e3..f144216febc6 100644 |
1205 | --- a/include/linux/virtio_net.h |
1206 | +++ b/include/linux/virtio_net.h |
1207 | @@ -9,7 +9,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, |
1208 | const struct virtio_net_hdr *hdr, |
1209 | bool little_endian) |
1210 | { |
1211 | - unsigned short gso_type = 0; |
1212 | + unsigned int gso_type = 0; |
1213 | |
1214 | if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { |
1215 | switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { |
1216 | @@ -19,6 +19,9 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, |
1217 | case VIRTIO_NET_HDR_GSO_TCPV6: |
1218 | gso_type = SKB_GSO_TCPV6; |
1219 | break; |
1220 | + case VIRTIO_NET_HDR_GSO_UDP: |
1221 | + gso_type = SKB_GSO_UDP; |
1222 | + break; |
1223 | default: |
1224 | return -EINVAL; |
1225 | } |
1226 | diff --git a/include/net/ipv6.h b/include/net/ipv6.h |
1227 | index 6eac5cf8f1e6..35e9dd2d18ba 100644 |
1228 | --- a/include/net/ipv6.h |
1229 | +++ b/include/net/ipv6.h |
1230 | @@ -727,7 +727,7 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add |
1231 | __be32 ipv6_select_ident(struct net *net, |
1232 | const struct in6_addr *daddr, |
1233 | const struct in6_addr *saddr); |
1234 | -void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb); |
1235 | +__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb); |
1236 | |
1237 | int ip6_dst_hoplimit(struct dst_entry *dst); |
1238 | |
1239 | diff --git a/include/net/sock.h b/include/net/sock.h |
1240 | index a6b9a8d1a6df..006580155a87 100644 |
1241 | --- a/include/net/sock.h |
1242 | +++ b/include/net/sock.h |
1243 | @@ -683,11 +683,7 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) |
1244 | |
1245 | static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) |
1246 | { |
1247 | - if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && |
1248 | - sk->sk_family == AF_INET6) |
1249 | - hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list); |
1250 | - else |
1251 | - hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); |
1252 | + hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); |
1253 | } |
1254 | |
1255 | static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) |
1256 | diff --git a/include/net/tcp.h b/include/net/tcp.h |
1257 | index e6d0002a1b0b..6ced69940f5c 100644 |
1258 | --- a/include/net/tcp.h |
1259 | +++ b/include/net/tcp.h |
1260 | @@ -563,7 +563,7 @@ void tcp_push_one(struct sock *, unsigned int mss_now); |
1261 | void tcp_send_ack(struct sock *sk); |
1262 | void tcp_send_delayed_ack(struct sock *sk); |
1263 | void tcp_send_loss_probe(struct sock *sk); |
1264 | -bool tcp_schedule_loss_probe(struct sock *sk); |
1265 | +bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto); |
1266 | void tcp_skb_collapse_tstamp(struct sk_buff *skb, |
1267 | const struct sk_buff *next_skb); |
1268 | |
1269 | @@ -874,12 +874,11 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb) |
1270 | } |
1271 | #endif |
1272 | |
1273 | -/* TCP_SKB_CB reference means this can not be used from early demux */ |
1274 | static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb) |
1275 | { |
1276 | #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) |
1277 | if (!net->ipv4.sysctl_tcp_l3mdev_accept && |
1278 | - skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) |
1279 | + skb && ipv4_l3mdev_skb(IPCB(skb)->flags)) |
1280 | return true; |
1281 | #endif |
1282 | return false; |
1283 | diff --git a/kernel/audit.c b/kernel/audit.c |
1284 | index be1c28fd4d57..5b34d3114af4 100644 |
1285 | --- a/kernel/audit.c |
1286 | +++ b/kernel/audit.c |
1287 | @@ -85,13 +85,13 @@ static int audit_initialized; |
1288 | #define AUDIT_OFF 0 |
1289 | #define AUDIT_ON 1 |
1290 | #define AUDIT_LOCKED 2 |
1291 | -u32 audit_enabled; |
1292 | -u32 audit_ever_enabled; |
1293 | +u32 audit_enabled = AUDIT_OFF; |
1294 | +u32 audit_ever_enabled = !!AUDIT_OFF; |
1295 | |
1296 | EXPORT_SYMBOL_GPL(audit_enabled); |
1297 | |
1298 | /* Default state when kernel boots without any parameters. */ |
1299 | -static u32 audit_default; |
1300 | +static u32 audit_default = AUDIT_OFF; |
1301 | |
1302 | /* If auditing cannot proceed, audit_failure selects what happens. */ |
1303 | static u32 audit_failure = AUDIT_FAIL_PRINTK; |
1304 | @@ -1197,25 +1197,28 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) |
1305 | pid_t auditd_pid; |
1306 | struct pid *req_pid = task_tgid(current); |
1307 | |
1308 | - /* sanity check - PID values must match */ |
1309 | - if (new_pid != pid_vnr(req_pid)) |
1310 | + /* Sanity check - PID values must match. Setting |
1311 | + * pid to 0 is how auditd ends auditing. */ |
1312 | + if (new_pid && (new_pid != pid_vnr(req_pid))) |
1313 | return -EINVAL; |
1314 | |
1315 | /* test the auditd connection */ |
1316 | audit_replace(req_pid); |
1317 | |
1318 | auditd_pid = auditd_pid_vnr(); |
1319 | - /* only the current auditd can unregister itself */ |
1320 | - if ((!new_pid) && (new_pid != auditd_pid)) { |
1321 | - audit_log_config_change("audit_pid", new_pid, |
1322 | - auditd_pid, 0); |
1323 | - return -EACCES; |
1324 | - } |
1325 | - /* replacing a healthy auditd is not allowed */ |
1326 | - if (auditd_pid && new_pid) { |
1327 | - audit_log_config_change("audit_pid", new_pid, |
1328 | - auditd_pid, 0); |
1329 | - return -EEXIST; |
1330 | + if (auditd_pid) { |
1331 | + /* replacing a healthy auditd is not allowed */ |
1332 | + if (new_pid) { |
1333 | + audit_log_config_change("audit_pid", |
1334 | + new_pid, auditd_pid, 0); |
1335 | + return -EEXIST; |
1336 | + } |
1337 | + /* only current auditd can unregister itself */ |
1338 | + if (pid_vnr(req_pid) != auditd_pid) { |
1339 | + audit_log_config_change("audit_pid", |
1340 | + new_pid, auditd_pid, 0); |
1341 | + return -EACCES; |
1342 | + } |
1343 | } |
1344 | |
1345 | if (new_pid) { |
1346 | @@ -1549,8 +1552,6 @@ static int __init audit_init(void) |
1347 | register_pernet_subsys(&audit_net_ops); |
1348 | |
1349 | audit_initialized = AUDIT_INITIALIZED; |
1350 | - audit_enabled = audit_default; |
1351 | - audit_ever_enabled |= !!audit_default; |
1352 | |
1353 | kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd"); |
1354 | if (IS_ERR(kauditd_task)) { |
1355 | @@ -1572,6 +1573,8 @@ static int __init audit_enable(char *str) |
1356 | audit_default = !!simple_strtol(str, NULL, 0); |
1357 | if (!audit_default) |
1358 | audit_initialized = AUDIT_DISABLED; |
1359 | + audit_enabled = audit_default; |
1360 | + audit_ever_enabled = !!audit_enabled; |
1361 | |
1362 | pr_info("%s\n", audit_default ? |
1363 | "enabled (after initialization)" : "disabled (until reboot)"); |
1364 | diff --git a/net/core/dev.c b/net/core/dev.c |
1365 | index 11596a302a26..27357fc1730b 100644 |
1366 | --- a/net/core/dev.c |
1367 | +++ b/net/core/dev.c |
1368 | @@ -2735,7 +2735,8 @@ EXPORT_SYMBOL(skb_mac_gso_segment); |
1369 | static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) |
1370 | { |
1371 | if (tx_path) |
1372 | - return skb->ip_summed != CHECKSUM_PARTIAL; |
1373 | + return skb->ip_summed != CHECKSUM_PARTIAL && |
1374 | + skb->ip_summed != CHECKSUM_UNNECESSARY; |
1375 | |
1376 | return skb->ip_summed == CHECKSUM_NONE; |
1377 | } |
1378 | diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c |
1379 | index abd07a443219..178bb9833311 100644 |
1380 | --- a/net/dccp/minisocks.c |
1381 | +++ b/net/dccp/minisocks.c |
1382 | @@ -57,10 +57,16 @@ void dccp_time_wait(struct sock *sk, int state, int timeo) |
1383 | if (state == DCCP_TIME_WAIT) |
1384 | timeo = DCCP_TIMEWAIT_LEN; |
1385 | |
1386 | + /* tw_timer is pinned, so we need to make sure BH are disabled |
1387 | + * in following section, otherwise timer handler could run before |
1388 | + * we complete the initialization. |
1389 | + */ |
1390 | + local_bh_disable(); |
1391 | inet_twsk_schedule(tw, timeo); |
1392 | /* Linkage updates. */ |
1393 | __inet_twsk_hashdance(tw, sk, &dccp_hashinfo); |
1394 | inet_twsk_put(tw); |
1395 | + local_bh_enable(); |
1396 | } else { |
1397 | /* Sorry, if we're out of memory, just CLOSE this |
1398 | * socket up. We've got bigger problems than |
1399 | diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c |
1400 | index e31108e5ef79..b9d9a2b8792c 100644 |
1401 | --- a/net/ipv4/af_inet.c |
1402 | +++ b/net/ipv4/af_inet.c |
1403 | @@ -1221,9 +1221,10 @@ EXPORT_SYMBOL(inet_sk_rebuild_header); |
1404 | struct sk_buff *inet_gso_segment(struct sk_buff *skb, |
1405 | netdev_features_t features) |
1406 | { |
1407 | - bool fixedid = false, gso_partial, encap; |
1408 | + bool udpfrag = false, fixedid = false, gso_partial, encap; |
1409 | struct sk_buff *segs = ERR_PTR(-EINVAL); |
1410 | const struct net_offload *ops; |
1411 | + unsigned int offset = 0; |
1412 | struct iphdr *iph; |
1413 | int proto, tot_len; |
1414 | int nhoff; |
1415 | @@ -1258,6 +1259,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, |
1416 | segs = ERR_PTR(-EPROTONOSUPPORT); |
1417 | |
1418 | if (!skb->encapsulation || encap) { |
1419 | + udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP); |
1420 | fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID); |
1421 | |
1422 | /* fixed ID is invalid if DF bit is not set */ |
1423 | @@ -1277,7 +1279,13 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, |
1424 | skb = segs; |
1425 | do { |
1426 | iph = (struct iphdr *)(skb_mac_header(skb) + nhoff); |
1427 | - if (skb_is_gso(skb)) { |
1428 | + if (udpfrag) { |
1429 | + iph->frag_off = htons(offset >> 3); |
1430 | + if (skb->next) |
1431 | + iph->frag_off |= htons(IP_MF); |
1432 | + offset += skb->len - nhoff - ihl; |
1433 | + tot_len = skb->len - nhoff; |
1434 | + } else if (skb_is_gso(skb)) { |
1435 | if (!fixedid) { |
1436 | iph->id = htons(id); |
1437 | id += skb_shinfo(skb)->gso_segs; |
1438 | diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c |
1439 | index b6bb3cdfad09..c5447b9f8517 100644 |
1440 | --- a/net/ipv4/tcp_input.c |
1441 | +++ b/net/ipv4/tcp_input.c |
1442 | @@ -592,6 +592,7 @@ void tcp_rcv_space_adjust(struct sock *sk) |
1443 | int time; |
1444 | int copied; |
1445 | |
1446 | + tcp_mstamp_refresh(tp); |
1447 | time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time); |
1448 | if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0) |
1449 | return; |
1450 | @@ -3020,7 +3021,7 @@ void tcp_rearm_rto(struct sock *sk) |
1451 | /* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */ |
1452 | static void tcp_set_xmit_timer(struct sock *sk) |
1453 | { |
1454 | - if (!tcp_schedule_loss_probe(sk)) |
1455 | + if (!tcp_schedule_loss_probe(sk, true)) |
1456 | tcp_rearm_rto(sk); |
1457 | } |
1458 | |
1459 | diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c |
1460 | index 5b027c69cbc5..5a5ed4f14678 100644 |
1461 | --- a/net/ipv4/tcp_ipv4.c |
1462 | +++ b/net/ipv4/tcp_ipv4.c |
1463 | @@ -1587,6 +1587,34 @@ int tcp_filter(struct sock *sk, struct sk_buff *skb) |
1464 | } |
1465 | EXPORT_SYMBOL(tcp_filter); |
1466 | |
1467 | +static void tcp_v4_restore_cb(struct sk_buff *skb) |
1468 | +{ |
1469 | + memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4, |
1470 | + sizeof(struct inet_skb_parm)); |
1471 | +} |
1472 | + |
1473 | +static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph, |
1474 | + const struct tcphdr *th) |
1475 | +{ |
1476 | + /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB() |
1477 | + * barrier() makes sure compiler wont play fool^Waliasing games. |
1478 | + */ |
1479 | + memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb), |
1480 | + sizeof(struct inet_skb_parm)); |
1481 | + barrier(); |
1482 | + |
1483 | + TCP_SKB_CB(skb)->seq = ntohl(th->seq); |
1484 | + TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + |
1485 | + skb->len - th->doff * 4); |
1486 | + TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); |
1487 | + TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); |
1488 | + TCP_SKB_CB(skb)->tcp_tw_isn = 0; |
1489 | + TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph); |
1490 | + TCP_SKB_CB(skb)->sacked = 0; |
1491 | + TCP_SKB_CB(skb)->has_rxtstamp = |
1492 | + skb->tstamp || skb_hwtstamps(skb)->hwtstamp; |
1493 | +} |
1494 | + |
1495 | /* |
1496 | * From tcp_input.c |
1497 | */ |
1498 | @@ -1627,24 +1655,6 @@ int tcp_v4_rcv(struct sk_buff *skb) |
1499 | |
1500 | th = (const struct tcphdr *)skb->data; |
1501 | iph = ip_hdr(skb); |
1502 | - /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB() |
1503 | - * barrier() makes sure compiler wont play fool^Waliasing games. |
1504 | - */ |
1505 | - memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb), |
1506 | - sizeof(struct inet_skb_parm)); |
1507 | - barrier(); |
1508 | - |
1509 | - TCP_SKB_CB(skb)->seq = ntohl(th->seq); |
1510 | - TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + |
1511 | - skb->len - th->doff * 4); |
1512 | - TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); |
1513 | - TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); |
1514 | - TCP_SKB_CB(skb)->tcp_tw_isn = 0; |
1515 | - TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph); |
1516 | - TCP_SKB_CB(skb)->sacked = 0; |
1517 | - TCP_SKB_CB(skb)->has_rxtstamp = |
1518 | - skb->tstamp || skb_hwtstamps(skb)->hwtstamp; |
1519 | - |
1520 | lookup: |
1521 | sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source, |
1522 | th->dest, sdif, &refcounted); |
1523 | @@ -1675,14 +1685,19 @@ int tcp_v4_rcv(struct sk_buff *skb) |
1524 | sock_hold(sk); |
1525 | refcounted = true; |
1526 | nsk = NULL; |
1527 | - if (!tcp_filter(sk, skb)) |
1528 | + if (!tcp_filter(sk, skb)) { |
1529 | + th = (const struct tcphdr *)skb->data; |
1530 | + iph = ip_hdr(skb); |
1531 | + tcp_v4_fill_cb(skb, iph, th); |
1532 | nsk = tcp_check_req(sk, skb, req, false); |
1533 | + } |
1534 | if (!nsk) { |
1535 | reqsk_put(req); |
1536 | goto discard_and_relse; |
1537 | } |
1538 | if (nsk == sk) { |
1539 | reqsk_put(req); |
1540 | + tcp_v4_restore_cb(skb); |
1541 | } else if (tcp_child_process(sk, nsk, skb)) { |
1542 | tcp_v4_send_reset(nsk, skb); |
1543 | goto discard_and_relse; |
1544 | @@ -1708,6 +1723,7 @@ int tcp_v4_rcv(struct sk_buff *skb) |
1545 | goto discard_and_relse; |
1546 | th = (const struct tcphdr *)skb->data; |
1547 | iph = ip_hdr(skb); |
1548 | + tcp_v4_fill_cb(skb, iph, th); |
1549 | |
1550 | skb->dev = NULL; |
1551 | |
1552 | @@ -1738,6 +1754,8 @@ int tcp_v4_rcv(struct sk_buff *skb) |
1553 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) |
1554 | goto discard_it; |
1555 | |
1556 | + tcp_v4_fill_cb(skb, iph, th); |
1557 | + |
1558 | if (tcp_checksum_complete(skb)) { |
1559 | csum_error: |
1560 | __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS); |
1561 | @@ -1764,6 +1782,8 @@ int tcp_v4_rcv(struct sk_buff *skb) |
1562 | goto discard_it; |
1563 | } |
1564 | |
1565 | + tcp_v4_fill_cb(skb, iph, th); |
1566 | + |
1567 | if (tcp_checksum_complete(skb)) { |
1568 | inet_twsk_put(inet_twsk(sk)); |
1569 | goto csum_error; |
1570 | @@ -1780,6 +1800,7 @@ int tcp_v4_rcv(struct sk_buff *skb) |
1571 | if (sk2) { |
1572 | inet_twsk_deschedule_put(inet_twsk(sk)); |
1573 | sk = sk2; |
1574 | + tcp_v4_restore_cb(skb); |
1575 | refcounted = false; |
1576 | goto process; |
1577 | } |
1578 | diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c |
1579 | index 188a6f31356d..420fecbb98fe 100644 |
1580 | --- a/net/ipv4/tcp_minisocks.c |
1581 | +++ b/net/ipv4/tcp_minisocks.c |
1582 | @@ -312,10 +312,16 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) |
1583 | if (state == TCP_TIME_WAIT) |
1584 | timeo = TCP_TIMEWAIT_LEN; |
1585 | |
1586 | + /* tw_timer is pinned, so we need to make sure BH are disabled |
1587 | + * in following section, otherwise timer handler could run before |
1588 | + * we complete the initialization. |
1589 | + */ |
1590 | + local_bh_disable(); |
1591 | inet_twsk_schedule(tw, timeo); |
1592 | /* Linkage updates. */ |
1593 | __inet_twsk_hashdance(tw, sk, &tcp_hashinfo); |
1594 | inet_twsk_put(tw); |
1595 | + local_bh_enable(); |
1596 | } else { |
1597 | /* Sorry, if we're out of memory, just CLOSE this |
1598 | * socket up. We've got bigger problems than |
1599 | diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c |
1600 | index 478909f4694d..cd3d60bb7cc8 100644 |
1601 | --- a/net/ipv4/tcp_output.c |
1602 | +++ b/net/ipv4/tcp_output.c |
1603 | @@ -2337,7 +2337,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, |
1604 | |
1605 | /* Send one loss probe per tail loss episode. */ |
1606 | if (push_one != 2) |
1607 | - tcp_schedule_loss_probe(sk); |
1608 | + tcp_schedule_loss_probe(sk, false); |
1609 | is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); |
1610 | tcp_cwnd_validate(sk, is_cwnd_limited); |
1611 | return false; |
1612 | @@ -2345,7 +2345,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, |
1613 | return !tp->packets_out && tcp_send_head(sk); |
1614 | } |
1615 | |
1616 | -bool tcp_schedule_loss_probe(struct sock *sk) |
1617 | +bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto) |
1618 | { |
1619 | struct inet_connection_sock *icsk = inet_csk(sk); |
1620 | struct tcp_sock *tp = tcp_sk(sk); |
1621 | @@ -2384,7 +2384,9 @@ bool tcp_schedule_loss_probe(struct sock *sk) |
1622 | } |
1623 | |
1624 | /* If the RTO formula yields an earlier time, then use that time. */ |
1625 | - rto_delta_us = tcp_rto_delta_us(sk); /* How far in future is RTO? */ |
1626 | + rto_delta_us = advancing_rto ? |
1627 | + jiffies_to_usecs(inet_csk(sk)->icsk_rto) : |
1628 | + tcp_rto_delta_us(sk); /* How far in future is RTO? */ |
1629 | if (rto_delta_us > 0) |
1630 | timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us)); |
1631 | |
1632 | diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c |
1633 | index e360d55be555..01801b77bd0d 100644 |
1634 | --- a/net/ipv4/udp_offload.c |
1635 | +++ b/net/ipv4/udp_offload.c |
1636 | @@ -187,16 +187,57 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, |
1637 | } |
1638 | EXPORT_SYMBOL(skb_udp_tunnel_segment); |
1639 | |
1640 | -static struct sk_buff *udp4_tunnel_segment(struct sk_buff *skb, |
1641 | - netdev_features_t features) |
1642 | +static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, |
1643 | + netdev_features_t features) |
1644 | { |
1645 | struct sk_buff *segs = ERR_PTR(-EINVAL); |
1646 | + unsigned int mss; |
1647 | + __wsum csum; |
1648 | + struct udphdr *uh; |
1649 | + struct iphdr *iph; |
1650 | |
1651 | if (skb->encapsulation && |
1652 | (skb_shinfo(skb)->gso_type & |
1653 | - (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) |
1654 | + (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) { |
1655 | segs = skb_udp_tunnel_segment(skb, features, false); |
1656 | + goto out; |
1657 | + } |
1658 | + |
1659 | + if (!pskb_may_pull(skb, sizeof(struct udphdr))) |
1660 | + goto out; |
1661 | + |
1662 | + mss = skb_shinfo(skb)->gso_size; |
1663 | + if (unlikely(skb->len <= mss)) |
1664 | + goto out; |
1665 | + |
1666 | + /* Do software UFO. Complete and fill in the UDP checksum as |
1667 | + * HW cannot do checksum of UDP packets sent as multiple |
1668 | + * IP fragments. |
1669 | + */ |
1670 | |
1671 | + uh = udp_hdr(skb); |
1672 | + iph = ip_hdr(skb); |
1673 | + |
1674 | + uh->check = 0; |
1675 | + csum = skb_checksum(skb, 0, skb->len, 0); |
1676 | + uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum); |
1677 | + if (uh->check == 0) |
1678 | + uh->check = CSUM_MANGLED_0; |
1679 | + |
1680 | + skb->ip_summed = CHECKSUM_UNNECESSARY; |
1681 | + |
1682 | + /* If there is no outer header we can fake a checksum offload |
1683 | + * due to the fact that we have already done the checksum in |
1684 | + * software prior to segmenting the frame. |
1685 | + */ |
1686 | + if (!skb->encap_hdr_csum) |
1687 | + features |= NETIF_F_HW_CSUM; |
1688 | + |
1689 | + /* Fragment the skb. IP headers of the fragments are updated in |
1690 | + * inet_gso_segment() |
1691 | + */ |
1692 | + segs = skb_segment(skb, features); |
1693 | +out: |
1694 | return segs; |
1695 | } |
1696 | |
1697 | @@ -330,7 +371,7 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff) |
1698 | |
1699 | static const struct net_offload udpv4_offload = { |
1700 | .callbacks = { |
1701 | - .gso_segment = udp4_tunnel_segment, |
1702 | + .gso_segment = udp4_ufo_fragment, |
1703 | .gro_receive = udp4_gro_receive, |
1704 | .gro_complete = udp4_gro_complete, |
1705 | }, |
1706 | diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c |
1707 | index a338bbc33cf3..4fe7c90962dd 100644 |
1708 | --- a/net/ipv6/output_core.c |
1709 | +++ b/net/ipv6/output_core.c |
1710 | @@ -39,7 +39,7 @@ static u32 __ipv6_select_ident(struct net *net, u32 hashrnd, |
1711 | * |
1712 | * The network header must be set before calling this. |
1713 | */ |
1714 | -void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb) |
1715 | +__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb) |
1716 | { |
1717 | static u32 ip6_proxy_idents_hashrnd __read_mostly; |
1718 | struct in6_addr buf[2]; |
1719 | @@ -51,14 +51,14 @@ void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb) |
1720 | offsetof(struct ipv6hdr, saddr), |
1721 | sizeof(buf), buf); |
1722 | if (!addrs) |
1723 | - return; |
1724 | + return 0; |
1725 | |
1726 | net_get_random_once(&ip6_proxy_idents_hashrnd, |
1727 | sizeof(ip6_proxy_idents_hashrnd)); |
1728 | |
1729 | id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd, |
1730 | &addrs[1], &addrs[0]); |
1731 | - skb_shinfo(skb)->ip6_frag_id = htonl(id); |
1732 | + return htonl(id); |
1733 | } |
1734 | EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident); |
1735 | |
1736 | diff --git a/net/ipv6/route.c b/net/ipv6/route.c |
1737 | index a96d5b385d8f..598efa8cfe25 100644 |
1738 | --- a/net/ipv6/route.c |
1739 | +++ b/net/ipv6/route.c |
1740 | @@ -960,7 +960,7 @@ static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt) |
1741 | { |
1742 | struct net_device *dev = rt->dst.dev; |
1743 | |
1744 | - if (rt->rt6i_flags & RTF_LOCAL) { |
1745 | + if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) { |
1746 | /* for copies of local routes, dst->dev needs to be the |
1747 | * device if it is a master device, the master device if |
1748 | * device is enslaved, and the loopback as the default |
1749 | diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c |
1750 | index ac912bb21747..e79854cc5790 100644 |
1751 | --- a/net/ipv6/sit.c |
1752 | +++ b/net/ipv6/sit.c |
1753 | @@ -1087,6 +1087,7 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p, |
1754 | ipip6_tunnel_link(sitn, t); |
1755 | t->parms.iph.ttl = p->iph.ttl; |
1756 | t->parms.iph.tos = p->iph.tos; |
1757 | + t->parms.iph.frag_off = p->iph.frag_off; |
1758 | if (t->parms.link != p->link || t->fwmark != fwmark) { |
1759 | t->parms.link = p->link; |
1760 | t->fwmark = fwmark; |
1761 | diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c |
1762 | index 64d94afa427f..32ded300633d 100644 |
1763 | --- a/net/ipv6/tcp_ipv6.c |
1764 | +++ b/net/ipv6/tcp_ipv6.c |
1765 | @@ -1448,7 +1448,6 @@ static int tcp_v6_rcv(struct sk_buff *skb) |
1766 | struct sock *nsk; |
1767 | |
1768 | sk = req->rsk_listener; |
1769 | - tcp_v6_fill_cb(skb, hdr, th); |
1770 | if (tcp_v6_inbound_md5_hash(sk, skb)) { |
1771 | sk_drops_add(sk, skb); |
1772 | reqsk_put(req); |
1773 | @@ -1461,8 +1460,12 @@ static int tcp_v6_rcv(struct sk_buff *skb) |
1774 | sock_hold(sk); |
1775 | refcounted = true; |
1776 | nsk = NULL; |
1777 | - if (!tcp_filter(sk, skb)) |
1778 | + if (!tcp_filter(sk, skb)) { |
1779 | + th = (const struct tcphdr *)skb->data; |
1780 | + hdr = ipv6_hdr(skb); |
1781 | + tcp_v6_fill_cb(skb, hdr, th); |
1782 | nsk = tcp_check_req(sk, skb, req, false); |
1783 | + } |
1784 | if (!nsk) { |
1785 | reqsk_put(req); |
1786 | goto discard_and_relse; |
1787 | @@ -1486,8 +1489,6 @@ static int tcp_v6_rcv(struct sk_buff *skb) |
1788 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) |
1789 | goto discard_and_relse; |
1790 | |
1791 | - tcp_v6_fill_cb(skb, hdr, th); |
1792 | - |
1793 | if (tcp_v6_inbound_md5_hash(sk, skb)) |
1794 | goto discard_and_relse; |
1795 | |
1796 | @@ -1495,6 +1496,7 @@ static int tcp_v6_rcv(struct sk_buff *skb) |
1797 | goto discard_and_relse; |
1798 | th = (const struct tcphdr *)skb->data; |
1799 | hdr = ipv6_hdr(skb); |
1800 | + tcp_v6_fill_cb(skb, hdr, th); |
1801 | |
1802 | skb->dev = NULL; |
1803 | |
1804 | @@ -1583,7 +1585,6 @@ static int tcp_v6_rcv(struct sk_buff *skb) |
1805 | tcp_v6_timewait_ack(sk, skb); |
1806 | break; |
1807 | case TCP_TW_RST: |
1808 | - tcp_v6_restore_cb(skb); |
1809 | tcp_v6_send_reset(sk, skb); |
1810 | inet_twsk_deschedule_put(inet_twsk(sk)); |
1811 | goto discard_it; |
1812 | diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c |
1813 | index 455fd4e39333..a0f89ad76f9d 100644 |
1814 | --- a/net/ipv6/udp_offload.c |
1815 | +++ b/net/ipv6/udp_offload.c |
1816 | @@ -17,15 +17,94 @@ |
1817 | #include <net/ip6_checksum.h> |
1818 | #include "ip6_offload.h" |
1819 | |
1820 | -static struct sk_buff *udp6_tunnel_segment(struct sk_buff *skb, |
1821 | - netdev_features_t features) |
1822 | +static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, |
1823 | + netdev_features_t features) |
1824 | { |
1825 | struct sk_buff *segs = ERR_PTR(-EINVAL); |
1826 | + unsigned int mss; |
1827 | + unsigned int unfrag_ip6hlen, unfrag_len; |
1828 | + struct frag_hdr *fptr; |
1829 | + u8 *packet_start, *prevhdr; |
1830 | + u8 nexthdr; |
1831 | + u8 frag_hdr_sz = sizeof(struct frag_hdr); |
1832 | + __wsum csum; |
1833 | + int tnl_hlen; |
1834 | + int err; |
1835 | + |
1836 | + mss = skb_shinfo(skb)->gso_size; |
1837 | + if (unlikely(skb->len <= mss)) |
1838 | + goto out; |
1839 | |
1840 | if (skb->encapsulation && skb_shinfo(skb)->gso_type & |
1841 | (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM)) |
1842 | segs = skb_udp_tunnel_segment(skb, features, true); |
1843 | + else { |
1844 | + const struct ipv6hdr *ipv6h; |
1845 | + struct udphdr *uh; |
1846 | + |
1847 | + if (!pskb_may_pull(skb, sizeof(struct udphdr))) |
1848 | + goto out; |
1849 | + |
1850 | + /* Do software UFO. Complete and fill in the UDP checksum as HW cannot |
1851 | + * do checksum of UDP packets sent as multiple IP fragments. |
1852 | + */ |
1853 | + |
1854 | + uh = udp_hdr(skb); |
1855 | + ipv6h = ipv6_hdr(skb); |
1856 | + |
1857 | + uh->check = 0; |
1858 | + csum = skb_checksum(skb, 0, skb->len, 0); |
1859 | + uh->check = udp_v6_check(skb->len, &ipv6h->saddr, |
1860 | + &ipv6h->daddr, csum); |
1861 | + if (uh->check == 0) |
1862 | + uh->check = CSUM_MANGLED_0; |
1863 | + |
1864 | + skb->ip_summed = CHECKSUM_UNNECESSARY; |
1865 | + |
1866 | + /* If there is no outer header we can fake a checksum offload |
1867 | + * due to the fact that we have already done the checksum in |
1868 | + * software prior to segmenting the frame. |
1869 | + */ |
1870 | + if (!skb->encap_hdr_csum) |
1871 | + features |= NETIF_F_HW_CSUM; |
1872 | + |
1873 | + /* Check if there is enough headroom to insert fragment header. */ |
1874 | + tnl_hlen = skb_tnl_header_len(skb); |
1875 | + if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) { |
1876 | + if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz)) |
1877 | + goto out; |
1878 | + } |
1879 | + |
1880 | + /* Find the unfragmentable header and shift it left by frag_hdr_sz |
1881 | + * bytes to insert fragment header. |
1882 | + */ |
1883 | + err = ip6_find_1stfragopt(skb, &prevhdr); |
1884 | + if (err < 0) |
1885 | + return ERR_PTR(err); |
1886 | + unfrag_ip6hlen = err; |
1887 | + nexthdr = *prevhdr; |
1888 | + *prevhdr = NEXTHDR_FRAGMENT; |
1889 | + unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) + |
1890 | + unfrag_ip6hlen + tnl_hlen; |
1891 | + packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset; |
1892 | + memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len); |
1893 | + |
1894 | + SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz; |
1895 | + skb->mac_header -= frag_hdr_sz; |
1896 | + skb->network_header -= frag_hdr_sz; |
1897 | + |
1898 | + fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); |
1899 | + fptr->nexthdr = nexthdr; |
1900 | + fptr->reserved = 0; |
1901 | + fptr->identification = ipv6_proxy_select_ident(dev_net(skb->dev), skb); |
1902 | + |
1903 | + /* Fragment the skb. ipv6 header and the remaining fields of the |
1904 | + * fragment header are updated in ipv6_gso_segment() |
1905 | + */ |
1906 | + segs = skb_segment(skb, features); |
1907 | + } |
1908 | |
1909 | +out: |
1910 | return segs; |
1911 | } |
1912 | |
1913 | @@ -75,7 +154,7 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff) |
1914 | |
1915 | static const struct net_offload udpv6_offload = { |
1916 | .callbacks = { |
1917 | - .gso_segment = udp6_tunnel_segment, |
1918 | + .gso_segment = udp6_ufo_fragment, |
1919 | .gro_receive = udp6_gro_receive, |
1920 | .gro_complete = udp6_gro_complete, |
1921 | }, |
1922 | diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c |
1923 | index af4e76ac88ff..c5fa634e63ca 100644 |
1924 | --- a/net/kcm/kcmsock.c |
1925 | +++ b/net/kcm/kcmsock.c |
1926 | @@ -1625,60 +1625,35 @@ static struct proto kcm_proto = { |
1927 | }; |
1928 | |
1929 | /* Clone a kcm socket. */ |
1930 | -static int kcm_clone(struct socket *osock, struct kcm_clone *info, |
1931 | - struct socket **newsockp) |
1932 | +static struct file *kcm_clone(struct socket *osock) |
1933 | { |
1934 | struct socket *newsock; |
1935 | struct sock *newsk; |
1936 | - struct file *newfile; |
1937 | - int err, newfd; |
1938 | + struct file *file; |
1939 | |
1940 | - err = -ENFILE; |
1941 | newsock = sock_alloc(); |
1942 | if (!newsock) |
1943 | - goto out; |
1944 | + return ERR_PTR(-ENFILE); |
1945 | |
1946 | newsock->type = osock->type; |
1947 | newsock->ops = osock->ops; |
1948 | |
1949 | __module_get(newsock->ops->owner); |
1950 | |
1951 | - newfd = get_unused_fd_flags(0); |
1952 | - if (unlikely(newfd < 0)) { |
1953 | - err = newfd; |
1954 | - goto out_fd_fail; |
1955 | - } |
1956 | - |
1957 | - newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name); |
1958 | - if (unlikely(IS_ERR(newfile))) { |
1959 | - err = PTR_ERR(newfile); |
1960 | - goto out_sock_alloc_fail; |
1961 | - } |
1962 | - |
1963 | newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL, |
1964 | &kcm_proto, true); |
1965 | if (!newsk) { |
1966 | - err = -ENOMEM; |
1967 | - goto out_sk_alloc_fail; |
1968 | + sock_release(newsock); |
1969 | + return ERR_PTR(-ENOMEM); |
1970 | } |
1971 | - |
1972 | sock_init_data(newsock, newsk); |
1973 | init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux); |
1974 | |
1975 | - fd_install(newfd, newfile); |
1976 | - *newsockp = newsock; |
1977 | - info->fd = newfd; |
1978 | - |
1979 | - return 0; |
1980 | + file = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name); |
1981 | + if (IS_ERR(file)) |
1982 | + sock_release(newsock); |
1983 | |
1984 | -out_sk_alloc_fail: |
1985 | - fput(newfile); |
1986 | -out_sock_alloc_fail: |
1987 | - put_unused_fd(newfd); |
1988 | -out_fd_fail: |
1989 | - sock_release(newsock); |
1990 | -out: |
1991 | - return err; |
1992 | + return file; |
1993 | } |
1994 | |
1995 | static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) |
1996 | @@ -1708,17 +1683,25 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) |
1997 | } |
1998 | case SIOCKCMCLONE: { |
1999 | struct kcm_clone info; |
2000 | - struct socket *newsock = NULL; |
2001 | - |
2002 | - err = kcm_clone(sock, &info, &newsock); |
2003 | - if (!err) { |
2004 | - if (copy_to_user((void __user *)arg, &info, |
2005 | - sizeof(info))) { |
2006 | - err = -EFAULT; |
2007 | - sys_close(info.fd); |
2008 | - } |
2009 | - } |
2010 | + struct file *file; |
2011 | + |
2012 | + info.fd = get_unused_fd_flags(0); |
2013 | + if (unlikely(info.fd < 0)) |
2014 | + return info.fd; |
2015 | |
2016 | + file = kcm_clone(sock); |
2017 | + if (IS_ERR(file)) { |
2018 | + put_unused_fd(info.fd); |
2019 | + return PTR_ERR(file); |
2020 | + } |
2021 | + if (copy_to_user((void __user *)arg, &info, |
2022 | + sizeof(info))) { |
2023 | + put_unused_fd(info.fd); |
2024 | + fput(file); |
2025 | + return -EFAULT; |
2026 | + } |
2027 | + fd_install(info.fd, file); |
2028 | + err = 0; |
2029 | break; |
2030 | } |
2031 | default: |
2032 | diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c |
2033 | index c3aec6227c91..363dd904733d 100644 |
2034 | --- a/net/openvswitch/datapath.c |
2035 | +++ b/net/openvswitch/datapath.c |
2036 | @@ -335,6 +335,8 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, |
2037 | const struct dp_upcall_info *upcall_info, |
2038 | uint32_t cutlen) |
2039 | { |
2040 | + unsigned int gso_type = skb_shinfo(skb)->gso_type; |
2041 | + struct sw_flow_key later_key; |
2042 | struct sk_buff *segs, *nskb; |
2043 | int err; |
2044 | |
2045 | @@ -345,9 +347,21 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, |
2046 | if (segs == NULL) |
2047 | return -EINVAL; |
2048 | |
2049 | + if (gso_type & SKB_GSO_UDP) { |
2050 | + /* The initial flow key extracted by ovs_flow_key_extract() |
2051 | + * in this case is for a first fragment, so we need to |
2052 | + * properly mark later fragments. |
2053 | + */ |
2054 | + later_key = *key; |
2055 | + later_key.ip.frag = OVS_FRAG_TYPE_LATER; |
2056 | + } |
2057 | + |
2058 | /* Queue all of the segments. */ |
2059 | skb = segs; |
2060 | do { |
2061 | + if (gso_type & SKB_GSO_UDP && skb != segs) |
2062 | + key = &later_key; |
2063 | + |
2064 | err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen); |
2065 | if (err) |
2066 | break; |
2067 | diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c |
2068 | index 8c94cef25a72..cfb652a4e007 100644 |
2069 | --- a/net/openvswitch/flow.c |
2070 | +++ b/net/openvswitch/flow.c |
2071 | @@ -584,7 +584,8 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) |
2072 | key->ip.frag = OVS_FRAG_TYPE_LATER; |
2073 | return 0; |
2074 | } |
2075 | - if (nh->frag_off & htons(IP_MF)) |
2076 | + if (nh->frag_off & htons(IP_MF) || |
2077 | + skb_shinfo(skb)->gso_type & SKB_GSO_UDP) |
2078 | key->ip.frag = OVS_FRAG_TYPE_FIRST; |
2079 | else |
2080 | key->ip.frag = OVS_FRAG_TYPE_NONE; |
2081 | @@ -700,6 +701,9 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) |
2082 | |
2083 | if (key->ip.frag == OVS_FRAG_TYPE_LATER) |
2084 | return 0; |
2085 | + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) |
2086 | + key->ip.frag = OVS_FRAG_TYPE_FIRST; |
2087 | + |
2088 | /* Transport layer. */ |
2089 | if (key->ip.proto == NEXTHDR_TCP) { |
2090 | if (tcphdr_ok(skb)) { |
2091 | diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c |
2092 | index 2986941164b1..f4a0587b7d5e 100644 |
2093 | --- a/net/packet/af_packet.c |
2094 | +++ b/net/packet/af_packet.c |
2095 | @@ -1697,7 +1697,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) |
2096 | atomic_long_set(&rollover->num, 0); |
2097 | atomic_long_set(&rollover->num_huge, 0); |
2098 | atomic_long_set(&rollover->num_failed, 0); |
2099 | - po->rollover = rollover; |
2100 | } |
2101 | |
2102 | if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) { |
2103 | @@ -1755,6 +1754,8 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) |
2104 | if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) { |
2105 | __dev_remove_pack(&po->prot_hook); |
2106 | po->fanout = match; |
2107 | + po->rollover = rollover; |
2108 | + rollover = NULL; |
2109 | refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); |
2110 | __fanout_link(sk, po); |
2111 | err = 0; |
2112 | @@ -1768,10 +1769,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) |
2113 | } |
2114 | |
2115 | out: |
2116 | - if (err && rollover) { |
2117 | - kfree_rcu(rollover, rcu); |
2118 | - po->rollover = NULL; |
2119 | - } |
2120 | + kfree(rollover); |
2121 | mutex_unlock(&fanout_mutex); |
2122 | return err; |
2123 | } |
2124 | @@ -1795,11 +1793,6 @@ static struct packet_fanout *fanout_release(struct sock *sk) |
2125 | list_del(&f->list); |
2126 | else |
2127 | f = NULL; |
2128 | - |
2129 | - if (po->rollover) { |
2130 | - kfree_rcu(po->rollover, rcu); |
2131 | - po->rollover = NULL; |
2132 | - } |
2133 | } |
2134 | mutex_unlock(&fanout_mutex); |
2135 | |
2136 | @@ -3039,6 +3032,7 @@ static int packet_release(struct socket *sock) |
2137 | synchronize_net(); |
2138 | |
2139 | if (f) { |
2140 | + kfree(po->rollover); |
2141 | fanout_release_data(f); |
2142 | kfree(f); |
2143 | } |
2144 | @@ -3107,6 +3101,10 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex, |
2145 | if (need_rehook) { |
2146 | if (po->running) { |
2147 | rcu_read_unlock(); |
2148 | + /* prevents packet_notifier() from calling |
2149 | + * register_prot_hook() |
2150 | + */ |
2151 | + po->num = 0; |
2152 | __unregister_prot_hook(sk, true); |
2153 | rcu_read_lock(); |
2154 | dev_curr = po->prot_hook.dev; |
2155 | @@ -3115,6 +3113,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex, |
2156 | dev->ifindex); |
2157 | } |
2158 | |
2159 | + BUG_ON(po->running); |
2160 | po->num = proto; |
2161 | po->prot_hook.type = proto; |
2162 | |
2163 | @@ -3853,7 +3852,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, |
2164 | void *data = &val; |
2165 | union tpacket_stats_u st; |
2166 | struct tpacket_rollover_stats rstats; |
2167 | - struct packet_rollover *rollover; |
2168 | |
2169 | if (level != SOL_PACKET) |
2170 | return -ENOPROTOOPT; |
2171 | @@ -3932,18 +3930,13 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, |
2172 | 0); |
2173 | break; |
2174 | case PACKET_ROLLOVER_STATS: |
2175 | - rcu_read_lock(); |
2176 | - rollover = rcu_dereference(po->rollover); |
2177 | - if (rollover) { |
2178 | - rstats.tp_all = atomic_long_read(&rollover->num); |
2179 | - rstats.tp_huge = atomic_long_read(&rollover->num_huge); |
2180 | - rstats.tp_failed = atomic_long_read(&rollover->num_failed); |
2181 | - data = &rstats; |
2182 | - lv = sizeof(rstats); |
2183 | - } |
2184 | - rcu_read_unlock(); |
2185 | - if (!rollover) |
2186 | + if (!po->rollover) |
2187 | return -EINVAL; |
2188 | + rstats.tp_all = atomic_long_read(&po->rollover->num); |
2189 | + rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); |
2190 | + rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); |
2191 | + data = &rstats; |
2192 | + lv = sizeof(rstats); |
2193 | break; |
2194 | case PACKET_TX_HAS_OFF: |
2195 | val = po->tp_tx_has_off; |
2196 | diff --git a/net/packet/internal.h b/net/packet/internal.h |
2197 | index 562fbc155006..a1d2b2319ae9 100644 |
2198 | --- a/net/packet/internal.h |
2199 | +++ b/net/packet/internal.h |
2200 | @@ -95,7 +95,6 @@ struct packet_fanout { |
2201 | |
2202 | struct packet_rollover { |
2203 | int sock; |
2204 | - struct rcu_head rcu; |
2205 | atomic_long_t num; |
2206 | atomic_long_t num_huge; |
2207 | atomic_long_t num_failed; |
2208 | diff --git a/net/rds/rdma.c b/net/rds/rdma.c |
2209 | index 8886f15abe90..bc2f1e0977d6 100644 |
2210 | --- a/net/rds/rdma.c |
2211 | +++ b/net/rds/rdma.c |
2212 | @@ -183,7 +183,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, |
2213 | long i; |
2214 | int ret; |
2215 | |
2216 | - if (rs->rs_bound_addr == 0) { |
2217 | + if (rs->rs_bound_addr == 0 || !rs->rs_transport) { |
2218 | ret = -ENOTCONN; /* XXX not a great errno */ |
2219 | goto out; |
2220 | } |
2221 | diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c |
2222 | index 1c40caadcff9..d836f998117b 100644 |
2223 | --- a/net/sched/act_csum.c |
2224 | +++ b/net/sched/act_csum.c |
2225 | @@ -229,6 +229,9 @@ static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl, |
2226 | const struct iphdr *iph; |
2227 | u16 ul; |
2228 | |
2229 | + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) |
2230 | + return 1; |
2231 | + |
2232 | /* |
2233 | * Support both UDP and UDPLITE checksum algorithms, Don't use |
2234 | * udph->len to get the real length without any protocol check, |
2235 | @@ -282,6 +285,9 @@ static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl, |
2236 | const struct ipv6hdr *ip6h; |
2237 | u16 ul; |
2238 | |
2239 | + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) |
2240 | + return 1; |
2241 | + |
2242 | /* |
2243 | * Support both UDP and UDPLITE checksum algorithms, Don't use |
2244 | * udph->len to get the real length without any protocol check, |
2245 | diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c |
2246 | index 990eb4d91d54..3a499530f321 100644 |
2247 | --- a/net/sched/cls_bpf.c |
2248 | +++ b/net/sched/cls_bpf.c |
2249 | @@ -246,11 +246,8 @@ static int cls_bpf_init(struct tcf_proto *tp) |
2250 | return 0; |
2251 | } |
2252 | |
2253 | -static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog) |
2254 | +static void cls_bpf_free_parms(struct cls_bpf_prog *prog) |
2255 | { |
2256 | - tcf_exts_destroy(&prog->exts); |
2257 | - tcf_exts_put_net(&prog->exts); |
2258 | - |
2259 | if (cls_bpf_is_ebpf(prog)) |
2260 | bpf_prog_put(prog->filter); |
2261 | else |
2262 | @@ -258,6 +255,14 @@ static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog) |
2263 | |
2264 | kfree(prog->bpf_name); |
2265 | kfree(prog->bpf_ops); |
2266 | +} |
2267 | + |
2268 | +static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog) |
2269 | +{ |
2270 | + tcf_exts_destroy(&prog->exts); |
2271 | + tcf_exts_put_net(&prog->exts); |
2272 | + |
2273 | + cls_bpf_free_parms(prog); |
2274 | kfree(prog); |
2275 | } |
2276 | |
2277 | @@ -509,10 +514,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, |
2278 | goto errout; |
2279 | |
2280 | ret = cls_bpf_offload(tp, prog, oldprog); |
2281 | - if (ret) { |
2282 | - __cls_bpf_delete_prog(prog); |
2283 | - return ret; |
2284 | - } |
2285 | + if (ret) |
2286 | + goto errout_parms; |
2287 | |
2288 | if (!tc_in_hw(prog->gen_flags)) |
2289 | prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW; |
2290 | @@ -529,6 +532,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, |
2291 | *arg = prog; |
2292 | return 0; |
2293 | |
2294 | +errout_parms: |
2295 | + cls_bpf_free_parms(prog); |
2296 | errout: |
2297 | tcf_exts_destroy(&prog->exts); |
2298 | kfree(prog); |
2299 | diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c |
2300 | index dcef97fa8047..aeffa320429d 100644 |
2301 | --- a/net/sched/sch_cbq.c |
2302 | +++ b/net/sched/sch_cbq.c |
2303 | @@ -1157,9 +1157,13 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) |
2304 | if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL) |
2305 | return -EINVAL; |
2306 | |
2307 | + err = tcf_block_get(&q->link.block, &q->link.filter_list); |
2308 | + if (err) |
2309 | + goto put_rtab; |
2310 | + |
2311 | err = qdisc_class_hash_init(&q->clhash); |
2312 | if (err < 0) |
2313 | - goto put_rtab; |
2314 | + goto put_block; |
2315 | |
2316 | q->link.sibling = &q->link; |
2317 | q->link.common.classid = sch->handle; |
2318 | @@ -1193,6 +1197,9 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) |
2319 | cbq_addprio(q, &q->link); |
2320 | return 0; |
2321 | |
2322 | +put_block: |
2323 | + tcf_block_put(q->link.block); |
2324 | + |
2325 | put_rtab: |
2326 | qdisc_put_rtab(q->link.R_tab); |
2327 | return err; |
2328 | diff --git a/net/sctp/socket.c b/net/sctp/socket.c |
2329 | index 14c28fbfe6b8..d6163f7aefb1 100644 |
2330 | --- a/net/sctp/socket.c |
2331 | +++ b/net/sctp/socket.c |
2332 | @@ -187,13 +187,13 @@ static void sctp_for_each_tx_datachunk(struct sctp_association *asoc, |
2333 | list_for_each_entry(chunk, &t->transmitted, transmitted_list) |
2334 | cb(chunk); |
2335 | |
2336 | - list_for_each_entry(chunk, &q->retransmit, list) |
2337 | + list_for_each_entry(chunk, &q->retransmit, transmitted_list) |
2338 | cb(chunk); |
2339 | |
2340 | - list_for_each_entry(chunk, &q->sacked, list) |
2341 | + list_for_each_entry(chunk, &q->sacked, transmitted_list) |
2342 | cb(chunk); |
2343 | |
2344 | - list_for_each_entry(chunk, &q->abandoned, list) |
2345 | + list_for_each_entry(chunk, &q->abandoned, transmitted_list) |
2346 | cb(chunk); |
2347 | |
2348 | list_for_each_entry(chunk, &q->out_chunk_list, list) |
2349 | diff --git a/net/tipc/server.c b/net/tipc/server.c |
2350 | index 3cd6402e812c..f4c1b18c5fb0 100644 |
2351 | --- a/net/tipc/server.c |
2352 | +++ b/net/tipc/server.c |
2353 | @@ -313,6 +313,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con) |
2354 | newcon->usr_data = s->tipc_conn_new(newcon->conid); |
2355 | if (!newcon->usr_data) { |
2356 | sock_release(newsock); |
2357 | + conn_put(newcon); |
2358 | return -ENOMEM; |
2359 | } |
2360 | |
2361 | diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c |
2362 | index ecca64fc6a6f..3deabcab4882 100644 |
2363 | --- a/net/tipc/udp_media.c |
2364 | +++ b/net/tipc/udp_media.c |
2365 | @@ -371,10 +371,6 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb) |
2366 | goto rcu_out; |
2367 | } |
2368 | |
2369 | - tipc_rcv(sock_net(sk), skb, b); |
2370 | - rcu_read_unlock(); |
2371 | - return 0; |
2372 | - |
2373 | rcu_out: |
2374 | rcu_read_unlock(); |
2375 | out: |
2376 | diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c |
2377 | index 3108e07526af..59ce2fb49821 100644 |
2378 | --- a/virt/kvm/arm/vgic/vgic-its.c |
2379 | +++ b/virt/kvm/arm/vgic/vgic-its.c |
2380 | @@ -393,6 +393,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) |
2381 | int ret = 0; |
2382 | u32 *intids; |
2383 | int nr_irqs, i; |
2384 | + u8 pendmask; |
2385 | |
2386 | nr_irqs = vgic_copy_lpi_list(vcpu, &intids); |
2387 | if (nr_irqs < 0) |
2388 | @@ -400,7 +401,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) |
2389 | |
2390 | for (i = 0; i < nr_irqs; i++) { |
2391 | int byte_offset, bit_nr; |
2392 | - u8 pendmask; |
2393 | |
2394 | byte_offset = intids[i] / BITS_PER_BYTE; |
2395 | bit_nr = intids[i] % BITS_PER_BYTE; |