Magellan Linux

Contents of /trunk/kernel-alx-legacy/patches-4.9/0169-4.9.70-all-fixes.patch

Parent Directory | Revision Log


Revision 3608 - (show annotations) (download)
Fri Aug 14 07:34:29 2020 UTC (3 years, 8 months ago) by niro
File size: 35753 byte(s)
-added kernel-alx-legacy pkg
1 diff --git a/Makefile b/Makefile
2 index 8f2819bf8135..7ad3271a1a1d 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 69
9 +SUBLEVEL = 70
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 @@ -370,9 +370,6 @@ LDFLAGS_MODULE =
14 CFLAGS_KERNEL =
15 AFLAGS_KERNEL =
16 LDFLAGS_vmlinux =
17 -CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
18 -CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
19 -
20
21 # Use USERINCLUDE when you must reference the UAPI directories only.
22 USERINCLUDE := \
23 @@ -393,21 +390,19 @@ LINUXINCLUDE := \
24
25 LINUXINCLUDE += $(filter-out $(LINUXINCLUDE),$(USERINCLUDE))
26
27 -KBUILD_CPPFLAGS := -D__KERNEL__
28 -
29 +KBUILD_AFLAGS := -D__ASSEMBLY__
30 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
31 -fno-strict-aliasing -fno-common \
32 -Werror-implicit-function-declaration \
33 -Wno-format-security \
34 - -std=gnu89 $(call cc-option,-fno-PIE)
35 -
36 -
37 + -std=gnu89
38 +KBUILD_CPPFLAGS := -D__KERNEL__
39 KBUILD_AFLAGS_KERNEL :=
40 KBUILD_CFLAGS_KERNEL :=
41 -KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
42 KBUILD_AFLAGS_MODULE := -DMODULE
43 KBUILD_CFLAGS_MODULE := -DMODULE
44 KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
45 +GCC_PLUGINS_CFLAGS :=
46
47 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
48 KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
49 @@ -420,7 +415,7 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
50 export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
51
52 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
53 -export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KCOV CFLAGS_KASAN CFLAGS_UBSAN
54 +export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_KASAN CFLAGS_UBSAN
55 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
56 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
57 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
58 @@ -620,6 +615,12 @@ endif
59 # Defaults to vmlinux, but the arch makefile usually adds further targets
60 all: vmlinux
61
62 +KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
63 +KBUILD_AFLAGS += $(call cc-option,-fno-PIE)
64 +CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
65 +CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
66 +export CFLAGS_GCOV CFLAGS_KCOV
67 +
68 # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
69 # values of the respective KBUILD_* variables
70 ARCH_CPPFLAGS :=
71 diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
72 index a67bb09585f4..430d038eb2a4 100644
73 --- a/arch/powerpc/include/asm/checksum.h
74 +++ b/arch/powerpc/include/asm/checksum.h
75 @@ -53,17 +53,25 @@ static inline __sum16 csum_fold(__wsum sum)
76 return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
77 }
78
79 +static inline u32 from64to32(u64 x)
80 +{
81 + /* add up 32-bit and 32-bit for 32+c bit */
82 + x = (x & 0xffffffff) + (x >> 32);
83 + /* add up carry.. */
84 + x = (x & 0xffffffff) + (x >> 32);
85 + return (u32)x;
86 +}
87 +
88 static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
89 __u8 proto, __wsum sum)
90 {
91 #ifdef __powerpc64__
92 - unsigned long s = (__force u32)sum;
93 + u64 s = (__force u32)sum;
94
95 s += (__force u32)saddr;
96 s += (__force u32)daddr;
97 s += proto + len;
98 - s += (s >> 32);
99 - return (__force __wsum) s;
100 + return (__force __wsum) from64to32(s);
101 #else
102 __asm__("\n\
103 addc %0,%0,%1 \n\
104 @@ -123,8 +131,7 @@ static inline __wsum ip_fast_csum_nofold(const void *iph, unsigned int ihl)
105
106 for (i = 0; i < ihl - 1; i++, ptr++)
107 s += *ptr;
108 - s += (s >> 32);
109 - return (__force __wsum)s;
110 + return (__force __wsum)from64to32(s);
111 #else
112 __wsum sum, tmp;
113
114 diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
115 index dde6b52359c5..ff2fbdafe689 100644
116 --- a/arch/s390/include/asm/switch_to.h
117 +++ b/arch/s390/include/asm/switch_to.h
118 @@ -29,17 +29,16 @@ static inline void restore_access_regs(unsigned int *acrs)
119 }
120
121 #define switch_to(prev,next,last) do { \
122 - if (prev->mm) { \
123 - save_fpu_regs(); \
124 - save_access_regs(&prev->thread.acrs[0]); \
125 - save_ri_cb(prev->thread.ri_cb); \
126 - } \
127 + /* save_fpu_regs() sets the CIF_FPU flag, which enforces \
128 + * a restore of the floating point / vector registers as \
129 + * soon as the next task returns to user space \
130 + */ \
131 + save_fpu_regs(); \
132 + save_access_regs(&prev->thread.acrs[0]); \
133 + save_ri_cb(prev->thread.ri_cb); \
134 update_cr_regs(next); \
135 - if (next->mm) { \
136 - set_cpu_flag(CIF_FPU); \
137 - restore_access_regs(&next->thread.acrs[0]); \
138 - restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
139 - } \
140 + restore_access_regs(&next->thread.acrs[0]); \
141 + restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
142 prev = __switch_to(prev,next); \
143 } while (0)
144
145 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
146 index a112c0146012..e0a53156b782 100644
147 --- a/drivers/char/ipmi/ipmi_si_intf.c
148 +++ b/drivers/char/ipmi/ipmi_si_intf.c
149 @@ -241,6 +241,9 @@ struct smi_info {
150 /* The timer for this si. */
151 struct timer_list si_timer;
152
153 + /* This flag is set, if the timer can be set */
154 + bool timer_can_start;
155 +
156 /* This flag is set, if the timer is running (timer_pending() isn't enough) */
157 bool timer_running;
158
159 @@ -416,6 +419,8 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
160
161 static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
162 {
163 + if (!smi_info->timer_can_start)
164 + return;
165 smi_info->last_timeout_jiffies = jiffies;
166 mod_timer(&smi_info->si_timer, new_val);
167 smi_info->timer_running = true;
168 @@ -435,21 +440,18 @@ static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
169 smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
170 }
171
172 -static void start_check_enables(struct smi_info *smi_info, bool start_timer)
173 +static void start_check_enables(struct smi_info *smi_info)
174 {
175 unsigned char msg[2];
176
177 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
178 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
179
180 - if (start_timer)
181 - start_new_msg(smi_info, msg, 2);
182 - else
183 - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
184 + start_new_msg(smi_info, msg, 2);
185 smi_info->si_state = SI_CHECKING_ENABLES;
186 }
187
188 -static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
189 +static void start_clear_flags(struct smi_info *smi_info)
190 {
191 unsigned char msg[3];
192
193 @@ -458,10 +460,7 @@ static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
194 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
195 msg[2] = WDT_PRE_TIMEOUT_INT;
196
197 - if (start_timer)
198 - start_new_msg(smi_info, msg, 3);
199 - else
200 - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
201 + start_new_msg(smi_info, msg, 3);
202 smi_info->si_state = SI_CLEARING_FLAGS;
203 }
204
205 @@ -496,11 +495,11 @@ static void start_getting_events(struct smi_info *smi_info)
206 * Note that we cannot just use disable_irq(), since the interrupt may
207 * be shared.
208 */
209 -static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
210 +static inline bool disable_si_irq(struct smi_info *smi_info)
211 {
212 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
213 smi_info->interrupt_disabled = true;
214 - start_check_enables(smi_info, start_timer);
215 + start_check_enables(smi_info);
216 return true;
217 }
218 return false;
219 @@ -510,7 +509,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
220 {
221 if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
222 smi_info->interrupt_disabled = false;
223 - start_check_enables(smi_info, true);
224 + start_check_enables(smi_info);
225 return true;
226 }
227 return false;
228 @@ -528,7 +527,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
229
230 msg = ipmi_alloc_smi_msg();
231 if (!msg) {
232 - if (!disable_si_irq(smi_info, true))
233 + if (!disable_si_irq(smi_info))
234 smi_info->si_state = SI_NORMAL;
235 } else if (enable_si_irq(smi_info)) {
236 ipmi_free_smi_msg(msg);
237 @@ -544,7 +543,7 @@ static void handle_flags(struct smi_info *smi_info)
238 /* Watchdog pre-timeout */
239 smi_inc_stat(smi_info, watchdog_pretimeouts);
240
241 - start_clear_flags(smi_info, true);
242 + start_clear_flags(smi_info);
243 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
244 if (smi_info->intf)
245 ipmi_smi_watchdog_pretimeout(smi_info->intf);
246 @@ -927,7 +926,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
247 * disable and messages disabled.
248 */
249 if (smi_info->supports_event_msg_buff || smi_info->irq) {
250 - start_check_enables(smi_info, true);
251 + start_check_enables(smi_info);
252 } else {
253 smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
254 if (!smi_info->curr_msg)
255 @@ -1234,6 +1233,7 @@ static int smi_start_processing(void *send_info,
256
257 /* Set up the timer that drives the interface. */
258 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
259 + new_smi->timer_can_start = true;
260 smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
261
262 /* Try to claim any interrupts. */
263 @@ -3448,10 +3448,12 @@ static void check_for_broken_irqs(struct smi_info *smi_info)
264 check_set_rcv_irq(smi_info);
265 }
266
267 -static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
268 +static inline void stop_timer_and_thread(struct smi_info *smi_info)
269 {
270 if (smi_info->thread != NULL)
271 kthread_stop(smi_info->thread);
272 +
273 + smi_info->timer_can_start = false;
274 if (smi_info->timer_running)
275 del_timer_sync(&smi_info->si_timer);
276 }
277 @@ -3593,7 +3595,7 @@ static int try_smi_init(struct smi_info *new_smi)
278 * Start clearing the flags before we enable interrupts or the
279 * timer to avoid racing with the timer.
280 */
281 - start_clear_flags(new_smi, false);
282 + start_clear_flags(new_smi);
283
284 /*
285 * IRQ is defined to be set when non-zero. req_events will
286 @@ -3671,7 +3673,7 @@ static int try_smi_init(struct smi_info *new_smi)
287 return 0;
288
289 out_err_stop_timer:
290 - wait_for_timer_and_thread(new_smi);
291 + stop_timer_and_thread(new_smi);
292
293 out_err:
294 new_smi->interrupt_disabled = true;
295 @@ -3865,7 +3867,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
296 */
297 if (to_clean->irq_cleanup)
298 to_clean->irq_cleanup(to_clean);
299 - wait_for_timer_and_thread(to_clean);
300 + stop_timer_and_thread(to_clean);
301
302 /*
303 * Timeouts are stopped, now make sure the interrupts are off
304 @@ -3876,7 +3878,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
305 poll(to_clean);
306 schedule_timeout_uninterruptible(1);
307 }
308 - disable_si_irq(to_clean, false);
309 + disable_si_irq(to_clean);
310 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
311 poll(to_clean);
312 schedule_timeout_uninterruptible(1);
313 diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
314 index 010c709ba3bb..58c531db4f4a 100644
315 --- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
316 +++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
317 @@ -675,8 +675,8 @@ struct fw_ri_fr_nsmr_tpte_wr {
318 __u16 wrid;
319 __u8 r1[3];
320 __u8 len16;
321 - __u32 r2;
322 - __u32 stag;
323 + __be32 r2;
324 + __be32 stag;
325 struct fw_ri_tpte tpte;
326 __u64 pbl[2];
327 };
328 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
329 index fb02c3979bf4..f7ff408567ad 100644
330 --- a/drivers/md/bitmap.c
331 +++ b/drivers/md/bitmap.c
332 @@ -2084,6 +2084,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
333 for (k = 0; k < page; k++) {
334 kfree(new_bp[k].map);
335 }
336 + kfree(new_bp);
337
338 /* restore some fields from old_counts */
339 bitmap->counts.bp = old_counts.bp;
340 @@ -2134,6 +2135,14 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
341 block += old_blocks;
342 }
343
344 + if (bitmap->counts.bp != old_counts.bp) {
345 + unsigned long k;
346 + for (k = 0; k < old_counts.pages; k++)
347 + if (!old_counts.bp[k].hijacked)
348 + kfree(old_counts.bp[k].map);
349 + kfree(old_counts.bp);
350 + }
351 +
352 if (!init) {
353 int i;
354 while (block < (chunks << chunkshift)) {
355 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
356 index ef6bff820cf6..adf61a7b1b01 100644
357 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
358 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
359 @@ -1795,6 +1795,7 @@ static int stmmac_open(struct net_device *dev)
360
361 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
362 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
363 + priv->mss = 0;
364
365 ret = alloc_dma_desc_resources(priv);
366 if (ret < 0) {
367 diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
368 index b4e990743e1d..980e38524418 100644
369 --- a/drivers/net/ipvlan/ipvlan_core.c
370 +++ b/drivers/net/ipvlan/ipvlan_core.c
371 @@ -404,7 +404,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
372 struct dst_entry *dst;
373 int err, ret = NET_XMIT_DROP;
374 struct flowi6 fl6 = {
375 - .flowi6_iif = dev->ifindex,
376 + .flowi6_oif = dev->ifindex,
377 .daddr = ip6h->daddr,
378 .saddr = ip6h->saddr,
379 .flowi6_flags = FLOWI_FLAG_ANYSRC,
380 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
381 index 9cf11c83993a..62725655d8e4 100644
382 --- a/drivers/net/usb/qmi_wwan.c
383 +++ b/drivers/net/usb/qmi_wwan.c
384 @@ -74,9 +74,11 @@ static void qmi_wwan_netdev_setup(struct net_device *net)
385 net->hard_header_len = 0;
386 net->addr_len = 0;
387 net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
388 + set_bit(EVENT_NO_IP_ALIGN, &dev->flags);
389 netdev_dbg(net, "mode: raw IP\n");
390 } else if (!net->header_ops) { /* don't bother if already set */
391 ether_setup(net);
392 + clear_bit(EVENT_NO_IP_ALIGN, &dev->flags);
393 netdev_dbg(net, "mode: Ethernet\n");
394 }
395
396 @@ -936,6 +938,7 @@ static const struct usb_device_id products[] = {
397 {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
398 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
399 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
400 + {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
401
402 /* 4. Gobi 1000 devices */
403 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
404 diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
405 index d5071e364d40..4ab82b998a0f 100644
406 --- a/drivers/net/usb/usbnet.c
407 +++ b/drivers/net/usb/usbnet.c
408 @@ -485,7 +485,10 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
409 return -ENOLINK;
410 }
411
412 - skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
413 + if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags))
414 + skb = __netdev_alloc_skb(dev->net, size, flags);
415 + else
416 + skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
417 if (!skb) {
418 netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
419 usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
420 diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
421 index d55e6438bb5e..e2bd2ad01b15 100644
422 --- a/drivers/s390/net/qeth_core.h
423 +++ b/drivers/s390/net/qeth_core.h
424 @@ -1004,6 +1004,9 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
425 int qeth_set_features(struct net_device *, netdev_features_t);
426 int qeth_recover_features(struct net_device *);
427 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
428 +netdev_features_t qeth_features_check(struct sk_buff *skb,
429 + struct net_device *dev,
430 + netdev_features_t features);
431
432 /* exports for OSN */
433 int qeth_osn_assist(struct net_device *, void *, int);
434 diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
435 index 21ef8023430f..b5fa6bb56b29 100644
436 --- a/drivers/s390/net/qeth_core_main.c
437 +++ b/drivers/s390/net/qeth_core_main.c
438 @@ -19,6 +19,11 @@
439 #include <linux/mii.h>
440 #include <linux/kthread.h>
441 #include <linux/slab.h>
442 +#include <linux/if_vlan.h>
443 +#include <linux/netdevice.h>
444 +#include <linux/netdev_features.h>
445 +#include <linux/skbuff.h>
446 +
447 #include <net/iucv/af_iucv.h>
448 #include <net/dsfield.h>
449
450 @@ -6240,6 +6245,32 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
451 }
452 EXPORT_SYMBOL_GPL(qeth_fix_features);
453
454 +netdev_features_t qeth_features_check(struct sk_buff *skb,
455 + struct net_device *dev,
456 + netdev_features_t features)
457 +{
458 + /* GSO segmentation builds skbs with
459 + * a (small) linear part for the headers, and
460 + * page frags for the data.
461 + * Compared to a linear skb, the header-only part consumes an
462 + * additional buffer element. This reduces buffer utilization, and
463 + * hurts throughput. So compress small segments into one element.
464 + */
465 + if (netif_needs_gso(skb, features)) {
466 + /* match skb_segment(): */
467 + unsigned int doffset = skb->data - skb_mac_header(skb);
468 + unsigned int hsize = skb_shinfo(skb)->gso_size;
469 + unsigned int hroom = skb_headroom(skb);
470 +
471 + /* linearize only if resulting skb allocations are order-0: */
472 + if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
473 + features &= ~NETIF_F_SG;
474 + }
475 +
476 + return vlan_features_check(skb, features);
477 +}
478 +EXPORT_SYMBOL_GPL(qeth_features_check);
479 +
480 static int __init qeth_core_init(void)
481 {
482 int rc;
483 diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
484 index 8530477caab8..ac33f6c999b1 100644
485 --- a/drivers/s390/net/qeth_l2_main.c
486 +++ b/drivers/s390/net/qeth_l2_main.c
487 @@ -1084,6 +1084,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
488 .ndo_stop = qeth_l2_stop,
489 .ndo_get_stats = qeth_get_stats,
490 .ndo_start_xmit = qeth_l2_hard_start_xmit,
491 + .ndo_features_check = qeth_features_check,
492 .ndo_validate_addr = eth_validate_addr,
493 .ndo_set_rx_mode = qeth_l2_set_rx_mode,
494 .ndo_do_ioctl = qeth_l2_do_ioctl,
495 @@ -1128,6 +1129,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
496 if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
497 card->dev->hw_features = NETIF_F_SG;
498 card->dev->vlan_features = NETIF_F_SG;
499 + card->dev->features |= NETIF_F_SG;
500 /* OSA 3S and earlier has no RX/TX support */
501 if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
502 card->dev->hw_features |= NETIF_F_IP_CSUM;
503 @@ -1140,8 +1142,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
504 }
505 card->info.broadcast_capable = 1;
506 qeth_l2_request_initial_mac(card);
507 - card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
508 - PAGE_SIZE;
509 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
510 netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
511 netif_carrier_off(card->dev);
512 diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
513 index 03a2619166ca..5735fc3be6c7 100644
514 --- a/drivers/s390/net/qeth_l3_main.c
515 +++ b/drivers/s390/net/qeth_l3_main.c
516 @@ -1416,6 +1416,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
517
518 tmp->u.a4.addr = im4->multiaddr;
519 memcpy(tmp->mac, buf, sizeof(tmp->mac));
520 + tmp->is_multicast = 1;
521
522 ipm = qeth_l3_ip_from_hash(card, tmp);
523 if (ipm) {
524 @@ -1593,7 +1594,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
525
526 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
527 if (!addr)
528 - return;
529 + goto out;
530
531 spin_lock_bh(&card->ip_lock);
532
533 @@ -1607,6 +1608,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
534 spin_unlock_bh(&card->ip_lock);
535
536 kfree(addr);
537 +out:
538 in_dev_put(in_dev);
539 }
540
541 @@ -1631,7 +1633,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
542
543 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
544 if (!addr)
545 - return;
546 + goto out;
547
548 spin_lock_bh(&card->ip_lock);
549
550 @@ -1646,6 +1648,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
551 spin_unlock_bh(&card->ip_lock);
552
553 kfree(addr);
554 +out:
555 in6_dev_put(in6_dev);
556 #endif /* CONFIG_QETH_IPV6 */
557 }
558 @@ -3064,6 +3067,7 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
559 .ndo_stop = qeth_l3_stop,
560 .ndo_get_stats = qeth_get_stats,
561 .ndo_start_xmit = qeth_l3_hard_start_xmit,
562 + .ndo_features_check = qeth_features_check,
563 .ndo_validate_addr = eth_validate_addr,
564 .ndo_set_rx_mode = qeth_l3_set_multicast_list,
565 .ndo_do_ioctl = qeth_l3_do_ioctl,
566 @@ -3120,6 +3124,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
567 card->dev->vlan_features = NETIF_F_SG |
568 NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
569 NETIF_F_TSO;
570 + card->dev->features |= NETIF_F_SG;
571 }
572 }
573 } else if (card->info.type == QETH_CARD_TYPE_IQD) {
574 @@ -3145,8 +3150,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
575 NETIF_F_HW_VLAN_CTAG_RX |
576 NETIF_F_HW_VLAN_CTAG_FILTER;
577 netif_keep_dst(card->dev);
578 - card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
579 - PAGE_SIZE;
580 + netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
581 + PAGE_SIZE);
582
583 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
584 netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
585 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
586 index 346a630cebd5..7b107e43b1c4 100644
587 --- a/drivers/usb/gadget/function/f_fs.c
588 +++ b/drivers/usb/gadget/function/f_fs.c
589 @@ -1015,7 +1015,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
590 else
591 ret = ep->status;
592 goto error_mutex;
593 - } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) {
594 + } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
595 ret = -ENOMEM;
596 } else {
597 req->buf = data;
598 diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
599 index 4ae95f7e8597..6224a0ab0b1e 100644
600 --- a/include/linux/rculist_nulls.h
601 +++ b/include/linux/rculist_nulls.h
602 @@ -99,44 +99,6 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
603 first->pprev = &n->next;
604 }
605
606 -/**
607 - * hlist_nulls_add_tail_rcu
608 - * @n: the element to add to the hash list.
609 - * @h: the list to add to.
610 - *
611 - * Description:
612 - * Adds the specified element to the end of the specified hlist_nulls,
613 - * while permitting racing traversals. NOTE: tail insertion requires
614 - * list traversal.
615 - *
616 - * The caller must take whatever precautions are necessary
617 - * (such as holding appropriate locks) to avoid racing
618 - * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
619 - * or hlist_nulls_del_rcu(), running on this same list.
620 - * However, it is perfectly legal to run concurrently with
621 - * the _rcu list-traversal primitives, such as
622 - * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
623 - * problems on Alpha CPUs. Regardless of the type of CPU, the
624 - * list-traversal primitive must be guarded by rcu_read_lock().
625 - */
626 -static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
627 - struct hlist_nulls_head *h)
628 -{
629 - struct hlist_nulls_node *i, *last = NULL;
630 -
631 - for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
632 - i = hlist_nulls_next_rcu(i))
633 - last = i;
634 -
635 - if (last) {
636 - n->next = last->next;
637 - n->pprev = &last->next;
638 - rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
639 - } else {
640 - hlist_nulls_add_head_rcu(n, h);
641 - }
642 -}
643 -
644 /**
645 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
646 * @tpos: the type * to use as a loop cursor.
647 diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
648 index 6e0ce8c7b8cb..fde7550754df 100644
649 --- a/include/linux/usb/usbnet.h
650 +++ b/include/linux/usb/usbnet.h
651 @@ -79,6 +79,7 @@ struct usbnet {
652 # define EVENT_RX_KILL 10
653 # define EVENT_LINK_CHANGE 11
654 # define EVENT_SET_RX_MODE 12
655 +# define EVENT_NO_IP_ALIGN 13
656 };
657
658 static inline struct usb_driver *driver_of(struct usb_interface *intf)
659 diff --git a/include/net/sock.h b/include/net/sock.h
660 index 92b269709b9a..6d42ed883bf9 100644
661 --- a/include/net/sock.h
662 +++ b/include/net/sock.h
663 @@ -648,11 +648,7 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
664
665 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
666 {
667 - if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
668 - sk->sk_family == AF_INET6)
669 - hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
670 - else
671 - hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
672 + hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
673 }
674
675 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
676 diff --git a/kernel/audit.c b/kernel/audit.c
677 index f1ca11613379..da4e7c0e36f7 100644
678 --- a/kernel/audit.c
679 +++ b/kernel/audit.c
680 @@ -79,13 +79,13 @@ static int audit_initialized;
681 #define AUDIT_OFF 0
682 #define AUDIT_ON 1
683 #define AUDIT_LOCKED 2
684 -u32 audit_enabled;
685 -u32 audit_ever_enabled;
686 +u32 audit_enabled = AUDIT_OFF;
687 +u32 audit_ever_enabled = !!AUDIT_OFF;
688
689 EXPORT_SYMBOL_GPL(audit_enabled);
690
691 /* Default state when kernel boots without any parameters. */
692 -static u32 audit_default;
693 +static u32 audit_default = AUDIT_OFF;
694
695 /* If auditing cannot proceed, audit_failure selects what happens. */
696 static u32 audit_failure = AUDIT_FAIL_PRINTK;
697 @@ -1199,8 +1199,6 @@ static int __init audit_init(void)
698 skb_queue_head_init(&audit_skb_queue);
699 skb_queue_head_init(&audit_skb_hold_queue);
700 audit_initialized = AUDIT_INITIALIZED;
701 - audit_enabled = audit_default;
702 - audit_ever_enabled |= !!audit_default;
703
704 audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "initialized");
705
706 @@ -1217,6 +1215,8 @@ static int __init audit_enable(char *str)
707 audit_default = !!simple_strtol(str, NULL, 0);
708 if (!audit_default)
709 audit_initialized = AUDIT_DISABLED;
710 + audit_enabled = audit_default;
711 + audit_ever_enabled = !!audit_enabled;
712
713 pr_info("%s\n", audit_default ?
714 "enabled (after initialization)" : "disabled (until reboot)");
715 diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
716 index 39e7e2bca8db..62522b8d2f97 100644
717 --- a/net/dccp/minisocks.c
718 +++ b/net/dccp/minisocks.c
719 @@ -57,10 +57,16 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
720 if (state == DCCP_TIME_WAIT)
721 timeo = DCCP_TIMEWAIT_LEN;
722
723 + /* tw_timer is pinned, so we need to make sure BH are disabled
724 + * in following section, otherwise timer handler could run before
725 + * we complete the initialization.
726 + */
727 + local_bh_disable();
728 inet_twsk_schedule(tw, timeo);
729 /* Linkage updates. */
730 __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
731 inet_twsk_put(tw);
732 + local_bh_enable();
733 } else {
734 /* Sorry, if we're out of memory, just CLOSE this
735 * socket up. We've got bigger problems than
736 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
737 index 64e1ba49c3e2..830a5645d8c1 100644
738 --- a/net/ipv4/tcp_minisocks.c
739 +++ b/net/ipv4/tcp_minisocks.c
740 @@ -328,10 +328,16 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
741 timeo = TCP_TIMEWAIT_LEN;
742 }
743
744 + /* tw_timer is pinned, so we need to make sure BH are disabled
745 + * in following section, otherwise timer handler could run before
746 + * we complete the initialization.
747 + */
748 + local_bh_disable();
749 inet_twsk_schedule(tw, timeo);
750 /* Linkage updates. */
751 __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
752 inet_twsk_put(tw);
753 + local_bh_enable();
754 } else {
755 /* Sorry, if we're out of memory, just CLOSE this
756 * socket up. We've got bigger problems than
757 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
758 index 40d740572354..db6d437002a6 100644
759 --- a/net/ipv6/sit.c
760 +++ b/net/ipv6/sit.c
761 @@ -1085,6 +1085,7 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p)
762 ipip6_tunnel_link(sitn, t);
763 t->parms.iph.ttl = p->iph.ttl;
764 t->parms.iph.tos = p->iph.tos;
765 + t->parms.iph.frag_off = p->iph.frag_off;
766 if (t->parms.link != p->link) {
767 t->parms.link = p->link;
768 ipip6_tunnel_bind_dev(t->dev);
769 diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
770 index 7eb0e8fe3ca8..22785dc03051 100644
771 --- a/net/kcm/kcmsock.c
772 +++ b/net/kcm/kcmsock.c
773 @@ -1624,60 +1624,35 @@ static struct proto kcm_proto = {
774 };
775
776 /* Clone a kcm socket. */
777 -static int kcm_clone(struct socket *osock, struct kcm_clone *info,
778 - struct socket **newsockp)
779 +static struct file *kcm_clone(struct socket *osock)
780 {
781 struct socket *newsock;
782 struct sock *newsk;
783 - struct file *newfile;
784 - int err, newfd;
785 + struct file *file;
786
787 - err = -ENFILE;
788 newsock = sock_alloc();
789 if (!newsock)
790 - goto out;
791 + return ERR_PTR(-ENFILE);
792
793 newsock->type = osock->type;
794 newsock->ops = osock->ops;
795
796 __module_get(newsock->ops->owner);
797
798 - newfd = get_unused_fd_flags(0);
799 - if (unlikely(newfd < 0)) {
800 - err = newfd;
801 - goto out_fd_fail;
802 - }
803 -
804 - newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
805 - if (unlikely(IS_ERR(newfile))) {
806 - err = PTR_ERR(newfile);
807 - goto out_sock_alloc_fail;
808 - }
809 -
810 newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
811 &kcm_proto, true);
812 if (!newsk) {
813 - err = -ENOMEM;
814 - goto out_sk_alloc_fail;
815 + sock_release(newsock);
816 + return ERR_PTR(-ENOMEM);
817 }
818 -
819 sock_init_data(newsock, newsk);
820 init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
821
822 - fd_install(newfd, newfile);
823 - *newsockp = newsock;
824 - info->fd = newfd;
825 -
826 - return 0;
827 + file = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
828 + if (IS_ERR(file))
829 + sock_release(newsock);
830
831 -out_sk_alloc_fail:
832 - fput(newfile);
833 -out_sock_alloc_fail:
834 - put_unused_fd(newfd);
835 -out_fd_fail:
836 - sock_release(newsock);
837 -out:
838 - return err;
839 + return file;
840 }
841
842 static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
843 @@ -1707,21 +1682,25 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
844 }
845 case SIOCKCMCLONE: {
846 struct kcm_clone info;
847 - struct socket *newsock = NULL;
848 + struct file *file;
849
850 - if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
851 - return -EFAULT;
852 + info.fd = get_unused_fd_flags(0);
853 + if (unlikely(info.fd < 0))
854 + return info.fd;
855
856 - err = kcm_clone(sock, &info, &newsock);
857 -
858 - if (!err) {
859 - if (copy_to_user((void __user *)arg, &info,
860 - sizeof(info))) {
861 - err = -EFAULT;
862 - sys_close(info.fd);
863 - }
864 + file = kcm_clone(sock);
865 + if (IS_ERR(file)) {
866 + put_unused_fd(info.fd);
867 + return PTR_ERR(file);
868 }
869 -
870 + if (copy_to_user((void __user *)arg, &info,
871 + sizeof(info))) {
872 + put_unused_fd(info.fd);
873 + fput(file);
874 + return -EFAULT;
875 + }
876 + fd_install(info.fd, file);
877 + err = 0;
878 break;
879 }
880 default:
881 diff --git a/net/netfilter/core.c b/net/netfilter/core.c
882 index 004af030ef1a..d869ea50623e 100644
883 --- a/net/netfilter/core.c
884 +++ b/net/netfilter/core.c
885 @@ -364,6 +364,11 @@ int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
886 ret = nf_queue(skb, state, &entry, verdict);
887 if (ret == 1 && entry)
888 goto next_hook;
889 + } else {
890 + /* Implicit handling for NF_STOLEN, as well as any other
891 + * non conventional verdicts.
892 + */
893 + ret = 0;
894 }
895 return ret;
896 }
897 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
898 index e7f6657269e0..267db0d603bc 100644
899 --- a/net/packet/af_packet.c
900 +++ b/net/packet/af_packet.c
901 @@ -1661,7 +1661,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
902 atomic_long_set(&rollover->num, 0);
903 atomic_long_set(&rollover->num_huge, 0);
904 atomic_long_set(&rollover->num_failed, 0);
905 - po->rollover = rollover;
906 }
907
908 match = NULL;
909 @@ -1706,6 +1705,8 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
910 if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
911 __dev_remove_pack(&po->prot_hook);
912 po->fanout = match;
913 + po->rollover = rollover;
914 + rollover = NULL;
915 atomic_inc(&match->sk_ref);
916 __fanout_link(sk, po);
917 err = 0;
918 @@ -1719,10 +1720,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
919 }
920
921 out:
922 - if (err && rollover) {
923 - kfree_rcu(rollover, rcu);
924 - po->rollover = NULL;
925 - }
926 + kfree(rollover);
927 mutex_unlock(&fanout_mutex);
928 return err;
929 }
930 @@ -1746,11 +1744,6 @@ static struct packet_fanout *fanout_release(struct sock *sk)
931 list_del(&f->list);
932 else
933 f = NULL;
934 -
935 - if (po->rollover) {
936 - kfree_rcu(po->rollover, rcu);
937 - po->rollover = NULL;
938 - }
939 }
940 mutex_unlock(&fanout_mutex);
941
942 @@ -3039,6 +3032,7 @@ static int packet_release(struct socket *sock)
943 synchronize_net();
944
945 if (f) {
946 + kfree(po->rollover);
947 fanout_release_data(f);
948 kfree(f);
949 }
950 @@ -3107,6 +3101,10 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
951 if (need_rehook) {
952 if (po->running) {
953 rcu_read_unlock();
954 + /* prevents packet_notifier() from calling
955 + * register_prot_hook()
956 + */
957 + po->num = 0;
958 __unregister_prot_hook(sk, true);
959 rcu_read_lock();
960 dev_curr = po->prot_hook.dev;
961 @@ -3115,6 +3113,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
962 dev->ifindex);
963 }
964
965 + BUG_ON(po->running);
966 po->num = proto;
967 po->prot_hook.type = proto;
968
969 @@ -3853,7 +3852,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
970 void *data = &val;
971 union tpacket_stats_u st;
972 struct tpacket_rollover_stats rstats;
973 - struct packet_rollover *rollover;
974
975 if (level != SOL_PACKET)
976 return -ENOPROTOOPT;
977 @@ -3932,18 +3930,13 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
978 0);
979 break;
980 case PACKET_ROLLOVER_STATS:
981 - rcu_read_lock();
982 - rollover = rcu_dereference(po->rollover);
983 - if (rollover) {
984 - rstats.tp_all = atomic_long_read(&rollover->num);
985 - rstats.tp_huge = atomic_long_read(&rollover->num_huge);
986 - rstats.tp_failed = atomic_long_read(&rollover->num_failed);
987 - data = &rstats;
988 - lv = sizeof(rstats);
989 - }
990 - rcu_read_unlock();
991 - if (!rollover)
992 + if (!po->rollover)
993 return -EINVAL;
994 + rstats.tp_all = atomic_long_read(&po->rollover->num);
995 + rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
996 + rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
997 + data = &rstats;
998 + lv = sizeof(rstats);
999 break;
1000 case PACKET_TX_HAS_OFF:
1001 val = po->tp_tx_has_off;
1002 diff --git a/net/packet/internal.h b/net/packet/internal.h
1003 index 9ee46314b7d7..d55bfc34d6b3 100644
1004 --- a/net/packet/internal.h
1005 +++ b/net/packet/internal.h
1006 @@ -92,7 +92,6 @@ struct packet_fanout {
1007
1008 struct packet_rollover {
1009 int sock;
1010 - struct rcu_head rcu;
1011 atomic_long_t num;
1012 atomic_long_t num_huge;
1013 atomic_long_t num_failed;
1014 diff --git a/net/rds/rdma.c b/net/rds/rdma.c
1015 index 60e90f761838..de8496e60735 100644
1016 --- a/net/rds/rdma.c
1017 +++ b/net/rds/rdma.c
1018 @@ -183,7 +183,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
1019 long i;
1020 int ret;
1021
1022 - if (rs->rs_bound_addr == 0) {
1023 + if (rs->rs_bound_addr == 0 || !rs->rs_transport) {
1024 ret = -ENOTCONN; /* XXX not a great errno */
1025 goto out;
1026 }
1027 diff --git a/net/tipc/server.c b/net/tipc/server.c
1028 index 3cd6402e812c..f4c1b18c5fb0 100644
1029 --- a/net/tipc/server.c
1030 +++ b/net/tipc/server.c
1031 @@ -313,6 +313,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con)
1032 newcon->usr_data = s->tipc_conn_new(newcon->conid);
1033 if (!newcon->usr_data) {
1034 sock_release(newsock);
1035 + conn_put(newcon);
1036 return -ENOMEM;
1037 }
1038
1039 diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
1040 index b58dc95f3d35..107375d80c70 100644
1041 --- a/net/tipc/udp_media.c
1042 +++ b/net/tipc/udp_media.c
1043 @@ -371,10 +371,6 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
1044 goto rcu_out;
1045 }
1046
1047 - tipc_rcv(sock_net(sk), skb, b);
1048 - rcu_read_unlock();
1049 - return 0;
1050 -
1051 rcu_out:
1052 rcu_read_unlock();
1053 out:
1054 diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
1055 index ebcaf4641d2b..31f562507915 100644
1056 --- a/virt/kvm/arm/vgic/vgic-its.c
1057 +++ b/virt/kvm/arm/vgic/vgic-its.c
1058 @@ -322,6 +322,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
1059 int ret = 0;
1060 u32 *intids;
1061 int nr_irqs, i;
1062 + u8 pendmask;
1063
1064 nr_irqs = vgic_copy_lpi_list(vcpu->kvm, &intids);
1065 if (nr_irqs < 0)
1066 @@ -329,7 +330,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
1067
1068 for (i = 0; i < nr_irqs; i++) {
1069 int byte_offset, bit_nr;
1070 - u8 pendmask;
1071
1072 byte_offset = intids[i] / BITS_PER_BYTE;
1073 bit_nr = intids[i] % BITS_PER_BYTE;