Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.0/0100-4.0.1-all-fixes.patch



Revision 2563
Tue Nov 25 22:54:01 2014 UTC by niro
File size: 14124 byte(s)
-linux-4.0.1
diff --git a/Makefile b/Makefile
index fbd43bfe4445..f499cd2f5738 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 0
-SUBLEVEL = 0
+SUBLEVEL = 1
 EXTRAVERSION =
 NAME = Hurr durr I'ma sheep

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 4085c4b31047..355d5fea5be9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -531,20 +531,8 @@ struct bnx2x_fastpath {
 struct napi_struct napi;

 #ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned int state;
-#define BNX2X_FP_STATE_IDLE 0
-#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
-#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
-#define BNX2X_FP_STATE_DISABLED (1 << 2)
-#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
-#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
-#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
-#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
-#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
-#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
- /* protect state */
- spinlock_t lock;
-#endif /* CONFIG_NET_RX_BUSY_POLL */
+ unsigned long busy_poll_state;
+#endif

 union host_hc_status_block status_blk;
 /* chip independent shortcuts into sb structure */
@@ -619,104 +607,83 @@ struct bnx2x_fastpath {
 #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))

 #ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
+
+enum bnx2x_fp_state {
+ BNX2X_STATE_FP_NAPI = BIT(0), /* NAPI handler owns the queue */
+
+ BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */
+ BNX2X_STATE_FP_NAPI_REQ = BIT(1),
+
+ BNX2X_STATE_FP_POLL_BIT = 2,
+ BNX2X_STATE_FP_POLL = BIT(2), /* busy_poll owns the queue */
+
+ BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */
+};
+
+static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
 {
- spin_lock_init(&fp->lock);
- fp->state = BNX2X_FP_STATE_IDLE;
+ WRITE_ONCE(fp->busy_poll_state, 0);
 }

 /* called from the device poll routine to get ownership of a FP */
 static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 {
- bool rc = true;
-
- spin_lock_bh(&fp->lock);
- if (fp->state & BNX2X_FP_LOCKED) {
- WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
- fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
- rc = false;
- } else {
- /* we don't care if someone yielded */
- fp->state = BNX2X_FP_STATE_NAPI;
+ unsigned long prev, old = READ_ONCE(fp->busy_poll_state);
+
+ while (1) {
+ switch (old) {
+ case BNX2X_STATE_FP_POLL:
+ /* make sure bnx2x_fp_lock_poll() wont starve us */
+ set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT,
+ &fp->busy_poll_state);
+ /* fallthrough */
+ case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ:
+ return false;
+ default:
+ break;
+ }
+ prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI);
+ if (unlikely(prev != old)) {
+ old = prev;
+ continue;
+ }
+ return true;
 }
- spin_unlock_bh(&fp->lock);
- return rc;
 }

-/* returns true is someone tried to get the FP while napi had it */
-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
 {
- bool rc = false;
-
- spin_lock_bh(&fp->lock);
- WARN_ON(fp->state &
- (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
-
- if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
- rc = true;
-
- /* state ==> idle, unless currently disabled */
- fp->state &= BNX2X_FP_STATE_DISABLED;
- spin_unlock_bh(&fp->lock);
- return rc;
+ smp_wmb();
+ fp->busy_poll_state = 0;
 }

 /* called from bnx2x_low_latency_poll() */
 static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
 {
- bool rc = true;
-
- spin_lock_bh(&fp->lock);
- if ((fp->state & BNX2X_FP_LOCKED)) {
- fp->state |= BNX2X_FP_STATE_POLL_YIELD;
- rc = false;
- } else {
- /* preserve yield marks */
- fp->state |= BNX2X_FP_STATE_POLL;
- }
- spin_unlock_bh(&fp->lock);
- return rc;
+ return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0;
 }

-/* returns true if someone tried to get the FP while it was locked */
-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 {
- bool rc = false;
-
- spin_lock_bh(&fp->lock);
- WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
-
- if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
- rc = true;
-
- /* state ==> idle, unless currently disabled */
- fp->state &= BNX2X_FP_STATE_DISABLED;
- spin_unlock_bh(&fp->lock);
- return rc;
+ smp_mb__before_atomic();
+ clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state);
 }

-/* true if a socket is polling, even if it did not get the lock */
+/* true if a socket is polling */
 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
- WARN_ON(!(fp->state & BNX2X_FP_OWNED));
- return fp->state & BNX2X_FP_USER_PEND;
+ return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL;
 }

 /* false if fp is currently owned */
 static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
 {
- int rc = true;
-
- spin_lock_bh(&fp->lock);
- if (fp->state & BNX2X_FP_OWNED)
- rc = false;
- fp->state |= BNX2X_FP_STATE_DISABLED;
- spin_unlock_bh(&fp->lock);
+ set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state);
+ return !bnx2x_fp_ll_polling(fp);

- return rc;
 }
 #else
-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
 {
 }

@@ -725,9 +692,8 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 return true;
 }

-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
 {
- return false;
 }

 static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
@@ -735,9 +701,8 @@ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
 return false;
 }

-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 {
- return false;
 }

 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
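
The hunks above replace the fastpath's spinlock-plus-yield-flags scheme with a single atomic state word: NAPI claims the queue with cmpxchg(), busy_poll may only claim a fully idle queue, and a request bit keeps the NAPI side from being starved by a spinning poller. A minimal user-space sketch of the same ownership protocol, using C11 atomics in place of the kernel's cmpxchg()/set_bit() (all names are illustrative, and the disable/teardown path is omitted):

    #include <stdatomic.h>
    #include <stdbool.h>

    enum fp_state {                     /* mirrors enum bnx2x_fp_state */
        FP_NAPI     = 1 << 0,           /* NAPI handler owns the queue */
        FP_NAPI_REQ = 1 << 1,           /* NAPI is waiting for the queue */
        FP_POLL     = 1 << 2,           /* busy_poll owns the queue */
    };

    struct fastpath {
        _Atomic unsigned long state;
    };

    /* NAPI side: retry a compare-and-swap; if busy_poll holds the queue,
     * publish a request bit so the poller yields promptly, and back off. */
    static bool fp_lock_napi(struct fastpath *fp)
    {
        unsigned long old = atomic_load(&fp->state);

        for (;;) {
            if (old & FP_POLL) {
                atomic_fetch_or(&fp->state, FP_NAPI_REQ);
                return false;           /* retry on the next NAPI pass */
            }
            if (atomic_compare_exchange_weak(&fp->state, &old, FP_NAPI))
                return true;            /* queue was free; NAPI owns it */
            /* the failed CAS reloaded 'old'; loop and re-evaluate */
        }
    }

    /* busy_poll side: succeed only on a completely idle queue, like
     * cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) in the patch */
    static bool fp_lock_poll(struct fastpath *fp)
    {
        unsigned long idle = 0;

        return atomic_compare_exchange_strong(&fp->state, &idle, FP_POLL);
    }

Because ownership is now a single word, release is a plain store plus a barrier, which is why bnx2x_fp_unlock_napi() and bnx2x_fp_unlock_poll() no longer need to return a contention hint.
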
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 0a9faa134a9a..2f63467bce46 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1849,7 +1849,7 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
 int i;

 for_each_rx_queue_cnic(bp, i) {
- bnx2x_fp_init_lock(&bp->fp[i]);
+ bnx2x_fp_busy_poll_init(&bp->fp[i]);
 napi_enable(&bnx2x_fp(bp, i, napi));
 }
 }
@@ -1859,7 +1859,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
 int i;

 for_each_eth_queue(bp, i) {
- bnx2x_fp_init_lock(&bp->fp[i]);
+ bnx2x_fp_busy_poll_init(&bp->fp[i]);
 napi_enable(&bnx2x_fp(bp, i, napi));
 }
 }
@@ -3191,9 +3191,10 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 }
 }

+ bnx2x_fp_unlock_napi(fp);
+
 /* Fall out from the NAPI loop if needed */
- if (!bnx2x_fp_unlock_napi(fp) &&
- !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {

 /* No need to update SB for FCoE L2 ring as long as
 * it's connected to the default SB and the SB
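
Since unlock no longer reports whether a poller was waiting, bnx2x_poll() now drops ownership unconditionally and decides whether to stop polling from the ring state alone. The reshaped tail of the poll loop, roughly (a simplified sketch, with the status-block update and interrupt re-arm path elided):

    bnx2x_fp_unlock_napi(fp);       /* always release ownership first */

    /* fall out of the NAPI loop only if the rings are truly idle */
    if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
        napi_complete(napi);
        /* ... update status block and re-enable interrupts ... */
    }
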
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index f8528a4cf54f..fceb637efd6b 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1713,12 +1713,6 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
 }
 }

- skb = iptunnel_handle_offloads(skb, udp_sum, type);
- if (IS_ERR(skb)) {
- err = -EINVAL;
- goto err;
- }
-
 skb_scrub_packet(skb, xnet);

 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
@@ -1738,6 +1732,12 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
 goto err;
 }

+ skb = iptunnel_handle_offloads(skb, udp_sum, type);
+ if (IS_ERR(skb)) {
+ err = -EINVAL;
+ goto err;
+ }
+
 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
 vxh->vx_flags = htonl(VXLAN_HF_VNI);
 vxh->vx_vni = md->vni;
@@ -1798,10 +1798,6 @@ int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
 }
 }

- skb = iptunnel_handle_offloads(skb, udp_sum, type);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
 + VXLAN_HLEN + sizeof(struct iphdr)
 + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
@@ -1817,6 +1813,10 @@ int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
 if (WARN_ON(!skb))
 return -ENOMEM;

+ skb = iptunnel_handle_offloads(skb, udp_sum, type);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
 vxh->vx_flags = htonl(VXLAN_HF_VNI);
 vxh->vx_vni = md->vni;
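
Both vxlan hunks move iptunnel_handle_offloads() from before the headroom and VLAN handling to after it. The offload setup records the skb's inner header layout for GSO and checksumming, so it has to run on the buffer's final shape, after any reallocation or VLAN tag insertion. The corrected ordering in outline (prepare_headroom_and_vlan() is a hypothetical stand-in for the code between the two hunks):

    /* 1. make room and finalize L2: may reallocate skb, may push a VLAN tag */
    skb = prepare_headroom_and_vlan(skb, min_headroom);
    if (IS_ERR(skb))
        return PTR_ERR(skb);

    /* 2. only now record inner offsets for GSO/checksum offload */
    skb = iptunnel_handle_offloads(skb, udp_sum, type);
    if (IS_ERR(skb))
        return PTR_ERR(skb);

    /* 3. push the VXLAN header onto the fully prepared buffer */
    vxh = (struct vxlanhdr *)__skb_push(skb, sizeof(*vxh));
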
diff --git a/fs/exec.c b/fs/exec.c
index c7f9b733406d..00400cf522dc 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1265,6 +1265,53 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
 spin_unlock(&p->fs->lock);
 }

+static void bprm_fill_uid(struct linux_binprm *bprm)
+{
+ struct inode *inode;
+ unsigned int mode;
+ kuid_t uid;
+ kgid_t gid;
+
+ /* clear any previous set[ug]id data from a previous binary */
+ bprm->cred->euid = current_euid();
+ bprm->cred->egid = current_egid();
+
+ if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
+ return;
+
+ if (task_no_new_privs(current))
+ return;
+
+ inode = file_inode(bprm->file);
+ mode = READ_ONCE(inode->i_mode);
+ if (!(mode & (S_ISUID|S_ISGID)))
+ return;
+
+ /* Be careful if suid/sgid is set */
+ mutex_lock(&inode->i_mutex);
+
+ /* reload atomically mode/uid/gid now that lock held */
+ mode = inode->i_mode;
+ uid = inode->i_uid;
+ gid = inode->i_gid;
+ mutex_unlock(&inode->i_mutex);
+
+ /* We ignore suid/sgid if there are no mappings for them in the ns */
+ if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
+ !kgid_has_mapping(bprm->cred->user_ns, gid))
+ return;
+
+ if (mode & S_ISUID) {
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
+ bprm->cred->euid = uid;
+ }
+
+ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
+ bprm->cred->egid = gid;
+ }
+}
+
 /*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
@@ -1273,36 +1320,9 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
 */
 int prepare_binprm(struct linux_binprm *bprm)
 {
- struct inode *inode = file_inode(bprm->file);
- umode_t mode = inode->i_mode;
 int retval;

-
- /* clear any previous set[ug]id data from a previous binary */
- bprm->cred->euid = current_euid();
- bprm->cred->egid = current_egid();
-
- if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
- !task_no_new_privs(current) &&
- kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
- kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
- /* Set-uid? */
- if (mode & S_ISUID) {
- bprm->per_clear |= PER_CLEAR_ON_SETID;
- bprm->cred->euid = inode->i_uid;
- }
-
- /* Set-gid? */
- /*
- * If setgid is set but no group execute bit then this
- * is a candidate for mandatory locking, not a setgid
- * executable.
- */
- if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
- bprm->per_clear |= PER_CLEAR_ON_SETID;
- bprm->cred->egid = inode->i_gid;
- }
- }
+ bprm_fill_uid(bprm);

 /* fill in binprm security blob */
 retval = security_bprm_set_creds(bprm);
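
Factoring the set[ug]id logic into bprm_fill_uid() is not just cleanup; it closes a race (CVE-2015-3339) in which a concurrent chown() could change the inode's owner between the old code's unlocked reads of i_mode and i_uid, letting an execve() in flight combine a mismatched mode and owner. The helper peeks at the mode without a lock, then re-reads mode, uid and gid together under i_mutex. The same snapshot-under-lock pattern in plain C (pthread stands in for i_mutex; names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>

    #define MODE_SUID 04000             /* S_ISUID */
    #define MODE_SGID 02000             /* S_ISGID */

    struct inode_like {
        pthread_mutex_t lock;
        unsigned int mode, uid, gid;
    };

    struct snap {
        unsigned int mode, uid, gid;
    };

    static bool snapshot_setid(struct inode_like *ino, struct snap *s)
    {
        /* unlocked fast path: most binaries are not set[ug]id at all */
        if (!(ino->mode & (MODE_SUID | MODE_SGID)))
            return false;

        /* slow path: re-read every field under the lock, so the
         * privilege decision sees one consistent (mode, uid, gid) */
        pthread_mutex_lock(&ino->lock);
        s->mode = ino->mode;
        s->uid  = ino->uid;
        s->gid  = ino->gid;
        pthread_mutex_unlock(&ino->lock);
        return true;
    }
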
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index a28e09c7825d..36508e69e92a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1380,7 +1380,8 @@ peek_stack:
 /* tell verifier to check for equivalent states
 * after every call and jump
 */
- env->explored_states[t + 1] = STATE_LIST_MARK;
+ if (t + 1 < insn_cnt)
+ env->explored_states[t + 1] = STATE_LIST_MARK;
 } else {
 /* conditional jump with two edges */
 ret = push_insn(t, t + 1, FALLTHROUGH, env);
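
The verifier fix is a plain bounds check. explored_states[] has one slot per instruction, so when the last instruction of a program is a call or unconditional jump, t + 1 equals insn_cnt and the old code wrote one element past the end of the array. The guard in isolation, as a sketch:

    struct state_list;                                  /* opaque, as in the verifier */
    #define STATE_LIST_MARK ((struct state_list *)-1L)  /* the kernel's sentinel */

    /* states[] has insn_cnt slots; index insn_cnt - 1 is the last valid one,
     * and the final instruction simply has no successor slot to mark */
    static void mark_successor(struct state_list **states, int insn_cnt, int t)
    {
        if (t + 1 < insn_cnt)
            states[t + 1] = STATE_LIST_MARK;
    }
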
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8e4ac97c8477..98d45fe72f51 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4169,19 +4169,21 @@ EXPORT_SYMBOL(skb_try_coalesce);
 */
 void skb_scrub_packet(struct sk_buff *skb, bool xnet)
 {
- if (xnet)
- skb_orphan(skb);
 skb->tstamp.tv64 = 0;
 skb->pkt_type = PACKET_HOST;
 skb->skb_iif = 0;
 skb->ignore_df = 0;
 skb_dst_drop(skb);
- skb->mark = 0;
 skb_sender_cpu_clear(skb);
- skb_init_secmark(skb);
 secpath_reset(skb);
 nf_reset(skb);
 nf_reset_trace(skb);
+
+ if (!xnet)
+ return;
+
+ skb_orphan(skb);
+ skb->mark = 0;
 }
 EXPORT_SYMBOL_GPL(skb_scrub_packet);

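The skb_scrub_packet() rework changes policy as well as shape: per-hop state (timestamp, route, netfilter data) is still cleared on every call, but skb->mark and the socket ownership dropped by skb_orphan() now survive forwarding inside one network namespace and are reset only when the packet crosses namespaces (xnet). The resulting skeleton, with hypothetical helpers standing in for the individual resets:

    #include <stdbool.h>

    struct pkt {
        unsigned int mark;
        /* ... */
    };

    static void reset_per_hop_state(struct pkt *p) { /* tstamp, dst, nf... */ }
    static void detach_owner(struct pkt *p)        { /* skb_orphan() analogue */ }

    void scrub(struct pkt *p, bool xnet)
    {
        reset_per_hop_state(p);     /* unconditional, as before */

        if (!xnet)
            return;                 /* same netns: keep mark and owner */

        detach_owner(p);
        p->mark = 0;                /* the mark is meaningless across netns */
    }
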
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index 5a4828ba05ad..a566a2e4715b 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -113,10 +113,6 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
 int min_headroom;
 int err;

- skb = udp_tunnel_handle_offloads(skb, csum);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
 + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
 + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
@@ -131,6 +127,10 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
 if (unlikely(!skb))
 return -ENOMEM;

+ skb = udp_tunnel_handle_offloads(skb, csum);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
 gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
 geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);

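The geneve hunks apply the same reordering as the vxlan ones above: udp_tunnel_handle_offloads() moves after the headroom check and VLAN handling, so the offload state is computed on the skb that will actually carry the tunnel header. In outline (prepare_headroom_and_vlan() is the same hypothetical stand-in as before):

    skb = prepare_headroom_and_vlan(skb, min_headroom);
    if (IS_ERR(skb))
        return PTR_ERR(skb);

    skb = udp_tunnel_handle_offloads(skb, csum);    /* now safe */
    if (IS_ERR(skb))
        return PTR_ERR(skb);

    gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
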
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1db253e36045..d520492ba698 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2929,6 +2929,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 }
 #endif

+ /* Do not fool tcpdump (if any), clean our debris */
+ skb->tstamp.tv64 = 0;
 return skb;
 }
 EXPORT_SYMBOL(tcp_make_synack);
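
The tcp_output.c hunk is small but user-visible: the SYN-ACK skb reaches this point carrying a leftover internal timestamp from when it was built, and if that value survives, packet capture on the sending host reports it instead of the real transmit time. Zeroing it lets the capture path stamp the packet itself. As a general finishing-step pattern (a hypothetical sketch, not kernel API):

    #include <stdint.h>

    struct pkt {
        int64_t tstamp_ns;
        /* ... */
    };

    /* clear scratch metadata before handing a locally built control
     * packet to the stack, so taps and tcpdump stamp it themselves */
    static struct pkt *finish_control_pkt(struct pkt *p)
    {
        p->tstamp_ns = 0;
        return p;
    }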