Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0142-4.9.43-all-fixes.patch



Revision 2967
Mon Aug 14 07:24:50 2017 UTC by niro
File size: 18521 byte(s)
-linux-4.9.43
diff --git a/Makefile b/Makefile
index 34d4d9f8a4b2..77953bf3f40a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 42
+SUBLEVEL = 43
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index f3baa896ce84..7037201c5e3a 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -820,7 +820,6 @@ dtb-$(CONFIG_MACH_SUN8I) += \
 	sun8i-a83t-allwinner-h8homlet-v2.dtb \
 	sun8i-a83t-cubietruck-plus.dtb \
 	sun8i-h3-bananapi-m2-plus.dtb \
-	sun8i-h3-nanopi-m1.dtb \
 	sun8i-h3-nanopi-neo.dtb \
 	sun8i-h3-orangepi-2.dtb \
 	sun8i-h3-orangepi-lite.dtb \
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 332ce3b5a34f..710511cadd50 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1664,12 +1664,16 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
 {
+	if (!kvm->arch.pgd)
+		return 0;
 	trace_kvm_age_hva(start, end);
 	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
 }
 
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 {
+	if (!kvm->arch.pgd)
+		return 0;
 	trace_kvm_test_age_hva(hva);
 	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
 }
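The two hunks above make the ARM KVM page-aging callbacks tolerate a VM whose stage-2 tables are already torn down. A minimal userspace sketch of the same guard-clause pattern, with illustrative types (not the KVM API):

#include <stdio.h>

struct vm {
	unsigned long *pgd;	/* NULL once the address space is torn down */
};

static int age_range(struct vm *vm, unsigned long start, unsigned long end)
{
	if (!vm->pgd)		/* nothing mapped: report "not young", no walk */
		return 0;
	printf("walking %lx-%lx\n", start, end);
	return 1;
}

int main(void)
{
	unsigned long root = 0;
	struct vm vm = { &root };

	printf("%d\n", age_range(&vm, 0x1000, 0x2000));	/* walks the range */
	vm.pgd = NULL;					/* teardown happened */
	printf("%d\n", age_range(&vm, 0x1000, 0x2000));	/* safe no-op */
	return 0;
}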
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index bee281f3163d..e8dee623d545 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1252,7 +1252,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
 		insn_count = bpf_jit_insn(jit, fp, i);
 		if (insn_count < 0)
 			return -1;
-		jit->addrs[i + 1] = jit->prg; /* Next instruction address */
+		/* Next instruction address */
+		jit->addrs[i + insn_count] = jit->prg;
 	}
 	bpf_jit_epilogue(jit);
 
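The s390 fix matters for BPF instructions that occupy two slots (such as 64-bit immediate loads): the offset of the next instruction must be recorded at i + insn_count, or addresses following a double-slot instruction end up in the wrong array entry. A small sketch with a hypothetical instruction table (not the real JIT):

#include <stdio.h>

int main(void)
{
	/* Three source instructions; the middle one is a double-slot insn
	 * (think BPF_LD_IMM64), so it occupies slots 1 and 2. */
	int slots[] = { 1, 2, 0, 1 };	/* slots consumed, indexed by slot */
	int nr_slots = 4;
	unsigned int addrs[5] = { 0 };	/* source slot -> JITed offset */
	unsigned int prg = 0;		/* bytes emitted so far */

	for (int i = 0; i < nr_slots; ) {
		int insn_count = slots[i];

		prg += 8;			/* pretend each insn emits 8 bytes */
		addrs[i + insn_count] = prg;	/* was addrs[i + 1]: wrong when 2 */
		i += insn_count;
	}
	/* With the old addrs[i + 1], addrs[3] would stay 0 and a jump to the
	 * third instruction would resolve to a bogus offset. */
	for (int i = 0; i <= nr_slots; i++)
		printf("addrs[%d] = %u\n", i, addrs[i]);
	return 0;
}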
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 349dd23e2876..0cdeb2b483a0 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -25,9 +25,11 @@ void destroy_context(struct mm_struct *mm);
 void __tsb_context_switch(unsigned long pgd_pa,
 			  struct tsb_config *tsb_base,
 			  struct tsb_config *tsb_huge,
-			  unsigned long tsb_descr_pa);
+			  unsigned long tsb_descr_pa,
+			  unsigned long secondary_ctx);
 
-static inline void tsb_context_switch(struct mm_struct *mm)
+static inline void tsb_context_switch_ctx(struct mm_struct *mm,
+					  unsigned long ctx)
 {
 	__tsb_context_switch(__pa(mm->pgd),
 			     &mm->context.tsb_block[0],
@@ -38,9 +40,12 @@ static inline void tsb_context_switch(struct mm_struct *mm)
 #else
 	NULL
 #endif
-	, __pa(&mm->context.tsb_descr[0]));
+	, __pa(&mm->context.tsb_descr[0]),
+	  ctx);
 }
 
+#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
+
 void tsb_grow(struct mm_struct *mm,
 	      unsigned long tsb_index,
 	      unsigned long mm_rss);
@@ -110,8 +115,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 * cpu0 to update it's TSB because at that point the cpu_vm_mask
 	 * only had cpu1 set in it.
 	 */
-	load_secondary_context(mm);
-	tsb_context_switch(mm);
+	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
 
 	/* Any time a processor runs a context on an address space
 	 * for the first time, we must flush that context out of the
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 395ec1800530..7d961f6e3907 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -375,6 +375,7 @@ tsb_flush:
 	 * %o1:	TSB base config pointer
 	 * %o2:	TSB huge config pointer, or NULL if none
 	 * %o3:	Hypervisor TSB descriptor physical address
+	 * %o4:	Secondary context to load, if non-zero
 	 *
 	 * We have to run this whole thing with interrupts
 	 * disabled so that the current cpu doesn't change
@@ -387,6 +388,17 @@ __tsb_context_switch:
 	rdpr	%pstate, %g1
 	wrpr	%g1, PSTATE_IE, %pstate
 
+	brz,pn	%o4, 1f
+	 mov	SECONDARY_CONTEXT, %o5
+
+661:	stxa	%o4, [%o5] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%o4, [%o5] ASI_MMU
+	.previous
+	flush	%g6
+
+1:
 	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
 
 	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c
index 17bd2e167e07..df707a8ad311 100644
--- a/arch/sparc/power/hibernate.c
+++ b/arch/sparc/power/hibernate.c
@@ -35,6 +35,5 @@ void restore_processor_state(void)
 {
 	struct mm_struct *mm = current->active_mm;
 
-	load_secondary_context(mm);
-	tsb_context_switch(mm);
+	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
 }
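The three sparc hunks fold the secondary-context register write into __tsb_context_switch, so both MMU updates happen inside the same interrupts-disabled window instead of as two separately interruptible calls. A userspace sketch of the pattern, with a pthread mutex standing in for the IRQ-off section (names illustrative, not the sparc code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t irq_off = PTHREAD_MUTEX_INITIALIZER;
static unsigned long secondary_ctx;	/* stand-in for SECONDARY_CONTEXT */
static unsigned long tsb_base;		/* stand-in for the per-cpu TSB state */

/* After the fix: one entry point updates both, atomically w.r.t. "IRQs". */
static void tsb_context_switch_ctx(unsigned long base, unsigned long ctx)
{
	pthread_mutex_lock(&irq_off);	/* wrpr ... PSTATE_IE */
	if (ctx)			/* brz,pn %o4, 1f */
		secondary_ctx = ctx;	/* folded-in load_secondary_context() */
	tsb_base = base;		/* the original TSB switch */
	pthread_mutex_unlock(&irq_off);
}

int main(void)
{
	tsb_context_switch_ctx(0x1000, 0x2a);
	printf("ctx=%lx tsb=%lx\n", secondary_ctx, tsb_base);
	return 0;
}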
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 5d484581becd..bcbb80ff86a7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -724,16 +724,21 @@ static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
  * header, the HW adds it. To address that, we are subtracting the pseudo
  * header checksum from the checksum value provided by the HW.
  */
-static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
-				struct iphdr *iph)
+static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
+			       struct iphdr *iph)
 {
 	__u16 length_for_csum = 0;
 	__wsum csum_pseudo_header = 0;
+	__u8 ipproto = iph->protocol;
+
+	if (unlikely(ipproto == IPPROTO_SCTP))
+		return -1;
 
 	length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
 	csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-						length_for_csum, iph->protocol, 0);
+						length_for_csum, ipproto, 0);
 	skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
+	return 0;
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -744,17 +749,20 @@ static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
 static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
 			       struct ipv6hdr *ipv6h)
 {
+	__u8 nexthdr = ipv6h->nexthdr;
 	__wsum csum_pseudo_hdr = 0;
 
-	if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
-		     ipv6h->nexthdr == IPPROTO_HOPOPTS))
+	if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
+		     nexthdr == IPPROTO_HOPOPTS ||
+		     nexthdr == IPPROTO_SCTP))
 		return -1;
-	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
+	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr));
 
 	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
 				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
 	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
-	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));
+	csum_pseudo_hdr = csum_add(csum_pseudo_hdr,
+				   (__force __wsum)htons(nexthdr));
 
 	skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
 	skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
@@ -777,11 +785,10 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
 	}
 
 	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
-		get_fixed_ipv4_csum(hw_checksum, skb, hdr);
+		return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
 #if IS_ENABLED(CONFIG_IPV6)
-	else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
-		if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
-			return -1;
+	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
+		return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
 #endif
 	return 0;
 }
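The mlx4 change works because the hardware checksum covers the payload plus a pseudo header in one's-complement arithmetic, so the driver can subtract a locally computed pseudo-header sum to recover the payload checksum; SCTP is now rejected because it uses CRC32c, which this trick cannot fix up. A self-contained sketch of the arithmetic (hypothetical helpers, not the driver API):

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator into a 16-bit one's-complement sum. */
static uint16_t csum_fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Unfolded sum of an IPv4 pseudo header (addresses in host order here). */
static uint32_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint8_t proto, uint16_t len)
{
	return (saddr >> 16) + (saddr & 0xffff) +
	       (daddr >> 16) + (daddr & 0xffff) + proto + len;
}

int main(void)
{
	uint32_t payload = 0x1a2b3;	/* pretend payload checksum accumulator */
	uint32_t pseudo = pseudo_hdr_sum(0xc0a80001, 0xc0a80002, 17, 64);
	uint32_t hw = payload + pseudo;	/* what the NIC reports */

	/* csum_sub(): subtract in one's complement by adding the complement. */
	uint32_t fixed = hw + (uint16_t)~csum_fold32(pseudo);

	printf("payload %04x, recovered %04x\n",
	       csum_fold32(payload), csum_fold32(fixed));	/* both a2b4 */
	return 0;
}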
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 5489c0ec1d9a..96fa0e61d3af 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -119,6 +119,7 @@ struct ppp {
 	int		n_channels;	/* how many channels are attached 54 */
 	spinlock_t	rlock;		/* lock for receive side 58 */
 	spinlock_t	wlock;		/* lock for transmit side 5c */
+	int		*xmit_recursion __percpu; /* xmit recursion detect */
 	int		mru;		/* max receive unit 60 */
 	unsigned int	flags;		/* control bits 64 */
 	unsigned int	xstate;		/* transmit state bits 68 */
@@ -1024,6 +1025,7 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
 	struct ppp *ppp = netdev_priv(dev);
 	int indx;
 	int err;
+	int cpu;
 
 	ppp->dev = dev;
 	ppp->ppp_net = src_net;
@@ -1038,6 +1040,15 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
 	INIT_LIST_HEAD(&ppp->channels);
 	spin_lock_init(&ppp->rlock);
 	spin_lock_init(&ppp->wlock);
+
+	ppp->xmit_recursion = alloc_percpu(int);
+	if (!ppp->xmit_recursion) {
+		err = -ENOMEM;
+		goto err1;
+	}
+	for_each_possible_cpu(cpu)
+		(*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0;
+
 #ifdef CONFIG_PPP_MULTILINK
 	ppp->minseq = -1;
 	skb_queue_head_init(&ppp->mrq);
@@ -1049,11 +1060,15 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
 
 	err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
 	if (err < 0)
-		return err;
+		goto err2;
 
 	conf->file->private_data = &ppp->file;
 
 	return 0;
+err2:
+	free_percpu(ppp->xmit_recursion);
+err1:
+	return err;
 }
 
 static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
@@ -1399,18 +1414,16 @@ static void __ppp_xmit_process(struct ppp *ppp)
 	ppp_xmit_unlock(ppp);
 }
 
-static DEFINE_PER_CPU(int, ppp_xmit_recursion);
-
 static void ppp_xmit_process(struct ppp *ppp)
 {
 	local_bh_disable();
 
-	if (unlikely(__this_cpu_read(ppp_xmit_recursion)))
+	if (unlikely(*this_cpu_ptr(ppp->xmit_recursion)))
 		goto err;
 
-	__this_cpu_inc(ppp_xmit_recursion);
+	(*this_cpu_ptr(ppp->xmit_recursion))++;
 	__ppp_xmit_process(ppp);
-	__this_cpu_dec(ppp_xmit_recursion);
+	(*this_cpu_ptr(ppp->xmit_recursion))--;
 
 	local_bh_enable();
 
@@ -1901,23 +1914,23 @@ static void __ppp_channel_push(struct channel *pch)
 		spin_unlock_bh(&pch->downl);
 	/* see if there is anything from the attached unit to be sent */
 	if (skb_queue_empty(&pch->file.xq)) {
-		read_lock_bh(&pch->upl);
 		ppp = pch->ppp;
 		if (ppp)
 			__ppp_xmit_process(ppp);
-		read_unlock_bh(&pch->upl);
 	}
 }
 
 static void ppp_channel_push(struct channel *pch)
 {
-	local_bh_disable();
-
-	__this_cpu_inc(ppp_xmit_recursion);
-	__ppp_channel_push(pch);
-	__this_cpu_dec(ppp_xmit_recursion);
-
-	local_bh_enable();
+	read_lock_bh(&pch->upl);
+	if (pch->ppp) {
+		(*this_cpu_ptr(pch->ppp->xmit_recursion))++;
+		__ppp_channel_push(pch);
+		(*this_cpu_ptr(pch->ppp->xmit_recursion))--;
+	} else {
+		__ppp_channel_push(pch);
+	}
+	read_unlock_bh(&pch->upl);
}
 
 /*
@@ -3056,6 +3069,7 @@ static void ppp_destroy_interface(struct ppp *ppp)
 #endif /* CONFIG_PPP_FILTER */
 
 	kfree_skb(ppp->xmit_pending);
+	free_percpu(ppp->xmit_recursion);
 
 	free_netdev(ppp->dev);
 }
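The PPP rework replaces a global per-CPU recursion flag with a counter stored in each ppp device: the global flag wrongly fired when one PPP unit legitimately transmitted through another on the same CPU, while a per-device counter only trips on genuine self-recursion. A compact sketch of the per-device guard (illustrative structs, not the kernel API):

#include <stdio.h>

struct pppdev {
	const char *name;
	int xmit_recursion;	/* per-device, was a global per-CPU flag */
	struct pppdev *lower;	/* device stacked below, may be NULL */
};

static void xmit(struct pppdev *dev)
{
	if (dev->xmit_recursion) {	/* genuine re-entry on this device */
		printf("%s: recursion detected, dropping\n", dev->name);
		return;
	}
	dev->xmit_recursion++;
	printf("%s: transmitting\n", dev->name);
	if (dev->lower)
		xmit(dev->lower);	/* stacking onto another device is fine */
	dev->xmit_recursion--;
}

int main(void)
{
	struct pppdev lowdev = { "ppp0", 0, NULL };
	struct pppdev updev = { "ppp1", 0, &lowdev };

	xmit(&updev);			/* ppp1 over ppp0: allowed */
	lowdev.lower = &lowdev;		/* pathological self-loop */
	xmit(&lowdev);			/* caught by the per-device counter */
	return 0;
}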
diff --git a/net/core/dev.c b/net/core/dev.c
index 0af019dfe846..1d0a7369d5a2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2703,7 +2703,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
 {
 	if (tx_path)
 		return skb->ip_summed != CHECKSUM_PARTIAL &&
-		       skb->ip_summed != CHECKSUM_NONE;
+		       skb->ip_summed != CHECKSUM_UNNECESSARY;
 
 	return skb->ip_summed == CHECKSUM_NONE;
 }
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f60fe82c2c1e..b5116ec31757 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1693,6 +1693,13 @@ static __net_init int inet_init_net(struct net *net)
 	net->ipv4.sysctl_ip_dynaddr = 0;
 	net->ipv4.sysctl_ip_early_demux = 1;
 
+	/* Some igmp sysctl, whose values are always used */
+	net->ipv4.sysctl_igmp_max_memberships = 20;
+	net->ipv4.sysctl_igmp_max_msf = 10;
+	/* IGMP reports for link-local multicast groups are enabled by default */
+	net->ipv4.sysctl_igmp_llm_reports = 1;
+	net->ipv4.sysctl_igmp_qrv = 2;
+
 	return 0;
 }
 
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 19930da56b0a..08575e3bd135 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2974,12 +2974,6 @@ static int __net_init igmp_net_init(struct net *net)
 		goto out_sock;
 	}
 
-	/* Sysctl initialization */
-	net->ipv4.sysctl_igmp_max_memberships = 20;
-	net->ipv4.sysctl_igmp_max_msf = 10;
-	/* IGMP reports for link-local multicast groups are enabled by default */
-	net->ipv4.sysctl_igmp_llm_reports = 1;
-	net->ipv4.sysctl_igmp_qrv = 2;
 	return 0;
 
 out_sock:
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 06215ba88b93..2c3c1a223df4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -936,11 +936,12 @@ static int __ip_append_data(struct sock *sk,
 		csummode = CHECKSUM_PARTIAL;
 
 	cork->length += length;
-	if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
-	     (skb && skb_is_gso(skb))) &&
+	if ((skb && skb_is_gso(skb)) ||
+	    (((length > mtu) &&
+	    (skb_queue_len(queue) <= 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
-	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
+	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) {
 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
 					 hh_len, fragheaderlen, transhdrlen,
 					 maxfraglen, flags);
@@ -1256,6 +1257,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
 		return -EINVAL;
 
 	if ((size + skb->len > mtu) &&
+	    (skb_queue_len(&sk->sk_write_queue) == 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO)) {
 		if (skb->ip_summed != CHECKSUM_PARTIAL)
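The reworked __ip_append_data test only enters the UFO path when the pending skb is already GSO, or when an oversized write arrives while at most one skb is queued, so UFO segments no longer get appended onto an existing non-GSO queue. A boolean sketch of the simplified condition (argument names illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool use_ufo(bool skb_is_gso, size_t length, size_t mtu,
		    size_t queue_len, bool udp_ufo_capable)
{
	if (skb_is_gso)			/* queue is already in GSO mode */
		return true;
	return length > mtu && queue_len <= 1 && udp_ufo_capable;
}

int main(void)
{
	printf("%d\n", use_ufo(false, 3000, 1500, 1, true));	/* 1: large first write */
	printf("%d\n", use_ufo(false, 3000, 1500, 3, true));	/* 0: non-GSO queue built */
	printf("%d\n", use_ufo(true, 100, 1500, 3, true));	/* 1: skb already GSO */
	return 0;
}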
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 01336aa5f973..32c540145c17 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2560,8 +2560,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
 		return;
 
 	/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
-	if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
-	    (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
+	if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
+	    (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
 		tp->snd_cwnd = tp->snd_ssthresh;
 		tp->snd_cwnd_stamp = tcp_time_stamp;
 	}
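tcp_end_cwnd_reduction may only copy ssthresh into cwnd once ssthresh holds a real value; it starts life at an "infinite" sentinel, and the old condition could hit that sentinel in CWR state and blow the window up. A sketch of the reordered guard (fields and values illustrative):

#include <stdio.h>

#define TCP_INFINITE_SSTHRESH 0x7fffffff

struct tcpish {
	unsigned int snd_ssthresh, snd_cwnd;
	int in_cwr, undo_marker;
};

static void end_cwnd_reduction(struct tcpish *tp)
{
	/* The validity test now dominates the whole condition. */
	if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
	    (tp->in_cwr || tp->undo_marker))
		tp->snd_cwnd = tp->snd_ssthresh;
}

int main(void)
{
	struct tcpish tp = { TCP_INFINITE_SSTHRESH, 10, 1, 0 };

	end_cwnd_reduction(&tp);		/* sentinel: cwnd stays 10 */
	printf("cwnd=%u\n", tp.snd_cwnd);
	tp.snd_ssthresh = 5;
	end_cwnd_reduction(&tp);		/* valid ssthresh: cwnd -> 5 */
	printf("cwnd=%u\n", tp.snd_cwnd);
	return 0;
}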
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dc4258fd15dc..5d836b037442 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3344,6 +3344,9 @@ int tcp_connect(struct sock *sk)
 	struct sk_buff *buff;
 	int err;
 
+	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
+		return -EHOSTUNREACH; /* Routing failure or similar. */
+
 	tcp_connect_init(sk);
 
 	if (unlikely(tp->repair)) {
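tcp_connect now asks the address-family ops to rebuild the header first, so a routing failure is reported before tcp_connect_init commits timers and counters that assume a usable route. A sketch of this check-before-commit shape (names illustrative):

#include <stdbool.h>
#include <stdio.h>

struct sockish {
	bool has_route;
	bool timers_armed;
};

static int rebuild_header(struct sockish *sk)
{
	return sk->has_route ? 0 : -1;	/* can a route be (re)built? */
}

static int do_connect(struct sockish *sk)
{
	if (rebuild_header(sk))
		return -113;		/* -EHOSTUNREACH, before any state */
	sk->timers_armed = true;	/* the tcp_connect_init() stage */
	return 0;
}

int main(void)
{
	struct sockish sk = { false, false };

	printf("err=%d timers=%d\n", do_connect(&sk), sk.timers_armed);
	sk.has_route = true;
	printf("err=%d timers=%d\n", do_connect(&sk), sk.timers_armed);
	return 0;
}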
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index b1e65b3b4361..74db43b47917 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -654,7 +654,8 @@ static void tcp_keepalive_timer (unsigned long data)
 		goto death;
 	}
 
-	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
+	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
+	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
 		goto out;
 
 	elapsed = keepalive_time_when(tp);
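The keepalive fix uses the (1 << state) & (TCPF_...) idiom to test membership in a set of states with a single comparison, extending the bail-out from TCP_CLOSE alone to TCP_SYN_SENT as well. A sketch using the same state numbering as include/net/tcp_states.h:

#include <stdio.h>

enum { TCP_ESTABLISHED = 1, TCP_SYN_SENT = 2, TCP_CLOSE = 7 };
#define TCPF_SYN_SENT	(1 << TCP_SYN_SENT)
#define TCPF_CLOSE	(1 << TCP_CLOSE)

static int keepalive_should_skip(int state)
{
	/* One AND against an OR of flags tests several states at once. */
	return !!((1 << state) & (TCPF_CLOSE | TCPF_SYN_SENT));
}

int main(void)
{
	printf("%d %d %d\n",
	       keepalive_should_skip(TCP_CLOSE),	/* 1 */
	       keepalive_should_skip(TCP_SYN_SENT),	/* 1 */
	       keepalive_should_skip(TCP_ESTABLISHED));	/* 0 */
	return 0;
}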
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5bab6c3f7a2f..4363b1e89bdf 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -813,7 +813,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
 	if (is_udplite)				/*     UDP-Lite      */
 		csum = udplite_csum(skb);
 
-	else if (sk->sk_no_check_tx) {		/* UDP csum disabled */
+	else if (sk->sk_no_check_tx && !skb_is_gso(skb)) { /* UDP csum off */
 
 		skb->ip_summed = CHECKSUM_NONE;
 		goto send;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index b2be1d9757ef..6de016f80f17 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -232,7 +232,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 		if (uh->check == 0)
 			uh->check = CSUM_MANGLED_0;
 
-		skb->ip_summed = CHECKSUM_NONE;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 		/* If there is no outer header we can fake a checksum offload
 		 * due to the fact that we have already done the checksum in
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index a5cdf2a23609..e0236e902ea7 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1372,11 +1372,12 @@ static int __ip6_append_data(struct sock *sk,
 	 */
 
 	cork->length += length;
-	if ((((length + (skb ? skb->len : headersize)) > mtu) ||
-	     (skb && skb_is_gso(skb))) &&
+	if ((skb && skb_is_gso(skb)) ||
+	    (((length + fragheaderlen) > mtu) &&
+	    (skb_queue_len(queue) <= 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
-	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
+	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) {
 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
 					  hh_len, fragheaderlen, exthdrlen,
 					  transhdrlen, mtu, flags, fl6);
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index a2267f80febb..e7d378c032cb 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -72,7 +72,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 		if (uh->check == 0)
 			uh->check = CSUM_MANGLED_0;
 
-		skb->ip_summed = CHECKSUM_NONE;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 		/* If there is no outer header we can fake a checksum offload
 		 * due to the fact that we have already done the checksum in
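The CHECKSUM_* hunks above (net/core/dev.c, udp.c, and both udp_offload.c files) are one coordinated fix: UFO-built segments already carry a correct UDP checksum, so they are marked CHECKSUM_UNNECESSARY, and skb_needs_check on the transmit path is taught not to redo checksums for them. A sketch of the resulting decision (enum modeled on skbuff.h, simplified):

#include <stdbool.h>
#include <stdio.h>

enum csum_state { CHECKSUM_NONE, CHECKSUM_UNNECESSARY, CHECKSUM_PARTIAL };

static bool skb_needs_check(enum csum_state ip_summed, bool tx_path)
{
	if (tx_path)	/* after the fix: UNNECESSARY means "already done" */
		return ip_summed != CHECKSUM_PARTIAL &&
		       ip_summed != CHECKSUM_UNNECESSARY;
	return ip_summed == CHECKSUM_NONE;
}

int main(void)
{
	/* A UFO segment with its checksum precomputed is left alone on tx. */
	printf("%d\n", skb_needs_check(CHECKSUM_UNNECESSARY, true));	/* 0 */
	printf("%d\n", skb_needs_check(CHECKSUM_NONE, true));		/* 1 */
	return 0;
}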
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 365c83fcee02..ae7bfd26cd91 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3698,14 +3698,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
 		if (optlen != sizeof(val))
 			return -EINVAL;
-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-			return -EBUSY;
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
 		if (val > INT_MAX)
 			return -EINVAL;
-		po->tp_reserve = val;
-		return 0;
+		lock_sock(sk);
+		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+			ret = -EBUSY;
+		} else {
+			po->tp_reserve = val;
+			ret = 0;
+		}
+		release_sock(sk);
+		return ret;
 	}
 	case PACKET_LOSS:
 	{
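The PACKET_RESERVE change closes a time-of-check/time-of-use race: the "ring already mapped?" test and the tp_reserve write now sit under lock_sock, so a concurrent ring setup cannot slip in between them. A userspace sketch with a pthread mutex standing in for the socket lock (types illustrative):

#include <pthread.h>
#include <stdio.h>

struct psock {
	pthread_mutex_t lock;
	void *rx_ring;		/* non-NULL once a ring is mapped */
	unsigned int tp_reserve;
};

static int set_reserve(struct psock *po, unsigned int val)
{
	int ret;

	pthread_mutex_lock(&po->lock);		/* lock_sock(sk) */
	if (po->rx_ring) {
		ret = -1;			/* -EBUSY */
	} else {
		po->tp_reserve = val;		/* check and write, same lock */
		ret = 0;
	}
	pthread_mutex_unlock(&po->lock);	/* release_sock(sk) */
	return ret;
}

int main(void)
{
	struct psock po = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

	printf("%d\n", set_reserve(&po, 128));	/* 0: no ring yet */
	po.rx_ring = &po;			/* ring now mapped */
	printf("%d\n", set_reserve(&po, 256));	/* -1: rejected */
	return 0;
}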
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 378c1c976058..a1aec0a6c789 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -49,8 +49,8 @@ static int ipt_init_target(struct xt_entry_target *t, char *table,
 		return PTR_ERR(target);
 
 	t->u.kernel.target = target;
+	memset(&par, 0, sizeof(par));
 	par.table     = table;
-	par.entryinfo = NULL;
 	par.target    = target;
 	par.targinfo  = t->data;
 	par.hook_mask = hook;
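The act_ipt fix replaces clearing a single field with a memset over the whole stack-allocated parameter block, so members the old code never mentioned (and any added to the struct later) start out zeroed rather than as stack garbage. A sketch of the pattern (struct fields illustrative):

#include <string.h>
#include <stdio.h>

struct tgchk_param {
	const char *table;
	const void *entryinfo;	/* the only field the old code cleared */
	const void *target;
	unsigned int hook_mask;
	unsigned short family;	/* was left uninitialized before the fix */
};

int main(void)
{
	struct tgchk_param par;

	memset(&par, 0, sizeof(par));	/* replaces par.entryinfo = NULL; */
	par.table = "mangle";
	par.hook_mask = 1 << 3;
	printf("family=%hu entryinfo=%p\n", par.family, (void *)par.entryinfo);
	return 0;
}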