Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.1/0114-4.1.15-all-fixes.patch

Revision 2748
Mon Jan 11 12:00:45 2016 UTC by niro
File size: 83939 bytes
-linux-4.1 patches up to 4.1.15
diff --git a/Makefile b/Makefile
index 091280d66452..cf35f6bcffd8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 1
-SUBLEVEL = 14
+SUBLEVEL = 15
 EXTRAVERSION =
 NAME = Series 4800

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 1ec6441fe2a5..09138ceba046 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3417,6 +3417,7 @@ static void rbd_queue_workfn(struct work_struct *work)
 goto err_rq;
 }
 img_request->rq = rq;
+ snapc = NULL; /* img_request consumes a ref */

 if (op_type == OBJ_OP_DISCARD)
 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index f51d376d10ba..c2f5117fd8cb 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -3675,6 +3675,11 @@ static int pci_probe(struct pci_dev *dev,

 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
 ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
+ /* JMicron JMB38x often shows 0 at first read, just ignore it */
+ if (!ohci->it_context_support) {
+ ohci_notice(ohci, "overriding IsoXmitIntMask\n");
+ ohci->it_context_support = 0xf;
+ }
 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
 ohci->it_context_mask = ohci->it_context_support;
 ohci->n_it = hweight32(ohci->it_context_mask);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 9c71295f2fef..85e640440bd9 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -675,7 +675,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
 { PHY_ID_BCM5461, 0xfffffff0 },
 { PHY_ID_BCM54616S, 0xfffffff0 },
 { PHY_ID_BCM5464, 0xfffffff0 },
- { PHY_ID_BCM5482, 0xfffffff0 },
+ { PHY_ID_BCM5481, 0xfffffff0 },
 { PHY_ID_BCM5482, 0xfffffff0 },
 { PHY_ID_BCM50610, 0xfffffff0 },
 { PHY_ID_BCM50610M, 0xfffffff0 },
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 4e0470d396a3..71190dc1eacf 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -774,6 +774,7 @@ static const struct usb_device_id products[] = {
 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
+ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
 {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index b072e17479aa..2b0d84d32db4 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -756,8 +756,16 @@ next_slot:
 }

 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- if (key.objectid > ino ||
- key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
+
+ if (key.objectid > ino)
+ break;
+ if (WARN_ON_ONCE(key.objectid < ino) ||
+ key.type < BTRFS_EXTENT_DATA_KEY) {
+ ASSERT(del_nr == 0);
+ path->slots[0]++;
+ goto next_slot;
+ }
+ if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
 break;

 fi = btrfs_item_ptr(leaf, path->slots[0],
@@ -776,8 +784,8 @@ next_slot:
 btrfs_file_extent_inline_len(leaf,
 path->slots[0], fi);
 } else {
- WARN_ON(1);
- extent_end = search_start;
+ /* can't happen */
+ BUG();
 }

 /*
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e3b39f0c4666..5136c73b3dce 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1294,8 +1294,14 @@ next_slot:
 num_bytes = 0;
 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

- if (found_key.objectid > ino ||
- found_key.type > BTRFS_EXTENT_DATA_KEY ||
+ if (found_key.objectid > ino)
+ break;
+ if (WARN_ON_ONCE(found_key.objectid < ino) ||
+ found_key.type < BTRFS_EXTENT_DATA_KEY) {
+ path->slots[0]++;
+ goto next_slot;
+ }
+ if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
 found_key.offset > end)
 break;

@@ -4184,6 +4190,47 @@ static int truncate_space_check(struct btrfs_trans_handle *trans,

 }

+static int truncate_inline_extent(struct inode *inode,
+ struct btrfs_path *path,
+ struct btrfs_key *found_key,
+ const u64 item_end,
+ const u64 new_size)
+{
+ struct extent_buffer *leaf = path->nodes[0];
+ int slot = path->slots[0];
+ struct btrfs_file_extent_item *fi;
+ u32 size = (u32)(new_size - found_key->offset);
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+
+ fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+
+ if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
+ loff_t offset = new_size;
+ loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE);
+
+ /*
+ * Zero out the remaining of the last page of our inline extent,
+ * instead of directly truncating our inline extent here - that
+ * would be much more complex (decompressing all the data, then
+ * compressing the truncated data, which might be bigger than
+ * the size of the inline extent, resize the extent, etc).
+ * We release the path because to get the page we might need to
+ * read the extent item from disk (data not in the page cache).
+ */
+ btrfs_release_path(path);
+ return btrfs_truncate_page(inode, offset, page_end - offset, 0);
+ }
+
+ btrfs_set_file_extent_ram_bytes(leaf, fi, size);
+ size = btrfs_file_extent_calc_inline_size(size);
+ btrfs_truncate_item(root, path, size, 1);
+
+ if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
+ inode_sub_bytes(inode, item_end + 1 - new_size);
+
+ return 0;
+}
+
 /*
 * this can truncate away extent items, csum items and directory items.
 * It starts at a high offset and removes keys until it can't find
@@ -4378,27 +4425,40 @@ search_again:
 * special encodings
 */
 if (!del_item &&
- btrfs_file_extent_compression(leaf, fi) == 0 &&
 btrfs_file_extent_encryption(leaf, fi) == 0 &&
 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
- u32 size = new_size - found_key.offset;
-
- if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
- inode_sub_bytes(inode, item_end + 1 -
- new_size);

 /*
- * update the ram bytes to properly reflect
- * the new size of our item
+ * Need to release path in order to truncate a
+ * compressed extent. So delete any accumulated
+ * extent items so far.
 */
- btrfs_set_file_extent_ram_bytes(leaf, fi, size);
- size =
- btrfs_file_extent_calc_inline_size(size);
- btrfs_truncate_item(root, path, size, 1);
+ if (btrfs_file_extent_compression(leaf, fi) !=
+ BTRFS_COMPRESS_NONE && pending_del_nr) {
+ err = btrfs_del_items(trans, root, path,
+ pending_del_slot,
+ pending_del_nr);
+ if (err) {
+ btrfs_abort_transaction(trans,
+ root,
+ err);
+ goto error;
+ }
+ pending_del_nr = 0;
+ }
+
+ err = truncate_inline_extent(inode, path,
+ &found_key,
+ item_end,
+ new_size);
+ if (err) {
+ btrfs_abort_transaction(trans,
+ root, err);
+ goto error;
+ }
 } else if (test_bit(BTRFS_ROOT_REF_COWS,
 &root->state)) {
- inode_sub_bytes(inode, item_end + 1 -
- found_key.offset);
+ inode_sub_bytes(inode, item_end + 1 - new_size);
 }
 }
 delete:
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 8b2c82ce36b3..87c720865ebf 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3166,6 +3166,150 @@ static void clone_update_extent_map(struct inode *inode,
 &BTRFS_I(inode)->runtime_flags);
 }

+/*
+ * Make sure we do not end up inserting an inline extent into a file that has
+ * already other (non-inline) extents. If a file has an inline extent it can
+ * not have any other extents and the (single) inline extent must start at the
+ * file offset 0. Failing to respect these rules will lead to file corruption,
+ * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc
+ *
+ * We can have extents that have been already written to disk or we can have
+ * dirty ranges still in delalloc, in which case the extent maps and items are
+ * created only when we run delalloc, and the delalloc ranges might fall outside
+ * the range we are currently locking in the inode's io tree. So we check the
+ * inode's i_size because of that (i_size updates are done while holding the
+ * i_mutex, which we are holding here).
+ * We also check to see if the inode has a size not greater than "datal" but has
+ * extents beyond it, due to an fallocate with FALLOC_FL_KEEP_SIZE (and we are
+ * protected against such concurrent fallocate calls by the i_mutex).
+ *
+ * If the file has no extents but a size greater than datal, do not allow the
+ * copy because we would need turn the inline extent into a non-inline one (even
+ * with NO_HOLES enabled). If we find our destination inode only has one inline
+ * extent, just overwrite it with the source inline extent if its size is less
+ * than the source extent's size, or we could copy the source inline extent's
+ * data into the destination inode's inline extent if the later is greater then
+ * the former.
+ */
+static int clone_copy_inline_extent(struct inode *src,
+ struct inode *dst,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ struct btrfs_key *new_key,
+ const u64 drop_start,
+ const u64 datal,
+ const u64 skip,
+ const u64 size,
+ char *inline_data)
+{
+ struct btrfs_root *root = BTRFS_I(dst)->root;
+ const u64 aligned_end = ALIGN(new_key->offset + datal,
+ root->sectorsize);
+ int ret;
+ struct btrfs_key key;
+
+ if (new_key->offset > 0)
+ return -EOPNOTSUPP;
+
+ key.objectid = btrfs_ino(dst);
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = 0;
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0) {
+ return ret;
+ } else if (ret > 0) {
+ if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ goto copy_inline_extent;
+ }
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ if (key.objectid == btrfs_ino(dst) &&
+ key.type == BTRFS_EXTENT_DATA_KEY) {
+ ASSERT(key.offset > 0);
+ return -EOPNOTSUPP;
+ }
+ } else if (i_size_read(dst) <= datal) {
+ struct btrfs_file_extent_item *ei;
+ u64 ext_len;
+
+ /*
+ * If the file size is <= datal, make sure there are no other
+ * extents following (can happen do to an fallocate call with
+ * the flag FALLOC_FL_KEEP_SIZE).
+ */
+ ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_file_extent_item);
+ /*
+ * If it's an inline extent, it can not have other extents
+ * following it.
+ */
+ if (btrfs_file_extent_type(path->nodes[0], ei) ==
+ BTRFS_FILE_EXTENT_INLINE)
+ goto copy_inline_extent;
+
+ ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
+ if (ext_len > aligned_end)
+ return -EOPNOTSUPP;
+
+ ret = btrfs_next_item(root, path);
+ if (ret < 0) {
+ return ret;
+ } else if (ret == 0) {
+ btrfs_item_key_to_cpu(path->nodes[0], &key,
+ path->slots[0]);
+ if (key.objectid == btrfs_ino(dst) &&
+ key.type == BTRFS_EXTENT_DATA_KEY)
+ return -EOPNOTSUPP;
+ }
+ }
+
+copy_inline_extent:
+ /*
+ * We have no extent items, or we have an extent at offset 0 which may
+ * or may not be inlined. All these cases are dealt the same way.
+ */
+ if (i_size_read(dst) > datal) {
+ /*
+ * If the destination inode has an inline extent...
+ * This would require copying the data from the source inline
+ * extent into the beginning of the destination's inline extent.
+ * But this is really complex, both extents can be compressed
+ * or just one of them, which would require decompressing and
+ * re-compressing data (which could increase the new compressed
+ * size, not allowing the compressed data to fit anymore in an
+ * inline extent).
+ * So just don't support this case for now (it should be rare,
+ * we are not really saving space when cloning inline extents).
+ */
+ return -EOPNOTSUPP;
+ }
+
+ btrfs_release_path(path);
+ ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
+ if (ret)
+ return ret;
+ ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
+ if (ret)
+ return ret;
+
+ if (skip) {
+ const u32 start = btrfs_file_extent_calc_inline_size(0);
+
+ memmove(inline_data + start, inline_data + start + skip, datal);
+ }
+
+ write_extent_buffer(path->nodes[0], inline_data,
+ btrfs_item_ptr_offset(path->nodes[0],
+ path->slots[0]),
+ size);
+ inode_add_bytes(dst, datal);
+
+ return 0;
+}
+
 /**
 * btrfs_clone() - clone a range from inode file to another
 *
@@ -3432,21 +3576,6 @@ process_slot:
 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
 u64 skip = 0;
 u64 trim = 0;
- u64 aligned_end = 0;
-
- /*
- * Don't copy an inline extent into an offset
- * greater than zero. Having an inline extent
- * at such an offset results in chaos as btrfs
- * isn't prepared for such cases. Just skip
- * this case for the same reasons as commented
- * at btrfs_ioctl_clone().
- */
- if (last_dest_end > 0) {
- ret = -EOPNOTSUPP;
- btrfs_end_transaction(trans, root);
- goto out;
- }

 if (off > key.offset) {
 skip = off - key.offset;
@@ -3464,42 +3593,22 @@ process_slot:
 size -= skip + trim;
 datal -= skip + trim;

- aligned_end = ALIGN(new_key.offset + datal,
- root->sectorsize);
- ret = btrfs_drop_extents(trans, root, inode,
- drop_start,
- aligned_end,
- 1);
+ ret = clone_copy_inline_extent(src, inode,
+ trans, path,
+ &new_key,
+ drop_start,
+ datal,
+ skip, size, buf);
 if (ret) {
 if (ret != -EOPNOTSUPP)
 btrfs_abort_transaction(trans,
- root, ret);
- btrfs_end_transaction(trans, root);
- goto out;
- }
-
- ret = btrfs_insert_empty_item(trans, root, path,
- &new_key, size);
- if (ret) {
- btrfs_abort_transaction(trans, root,
- ret);
+ root,
+ ret);
 btrfs_end_transaction(trans, root);
 goto out;
 }
-
- if (skip) {
- u32 start =
- btrfs_file_extent_calc_inline_size(0);
- memmove(buf+start, buf+start+skip,
- datal);
- }
-
 leaf = path->nodes[0];
 slot = path->slots[0];
- write_extent_buffer(leaf, buf,
- btrfs_item_ptr_offset(leaf, slot),
- size);
- inode_add_bytes(inode, datal);
 }

 /* If we have an implicit hole (NO_HOLES feature). */
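
The long comment above clone_copy_inline_extent() reduces to three refusal rules: never place an inline extent at a non-zero offset, never mix it with regular extents, and never clone one into a file whose i_size would force converting it to a non-inline extent. A toy userspace model of those checks (illustrative only, not btrfs code; can_clone_inline() and its parameters are invented for this sketch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the refusal rules: an inline extent may only live
 * alone at file offset 0, and cloning one must never require
 * converting it into a regular (non-inline) extent. */
static int can_clone_inline(uint64_t dest_offset, uint64_t dst_isize,
                            uint64_t datal, bool dst_has_real_extents)
{
    if (dest_offset > 0)
        return -1;  /* inline extents must start at offset 0 */
    if (dst_has_real_extents)
        return -1;  /* would mix inline and regular extents */
    if (dst_isize > datal)
        return -1;  /* would need an inline-to-regular conversion */
    return 0;
}

int main(void)
{
    printf("%d\n", can_clone_inline(0, 100, 4096, false));  /* 0: ok */
    printf("%d\n", can_clone_inline(4096, 0, 4096, false)); /* -1 */
    printf("%d\n", can_clone_inline(0, 8192, 4096, false)); /* -1 */
    return 0;
}

Each -1 above corresponds to a case where the kernel function returns -EOPNOTSUPP instead of corrupting the destination file.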
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 6f518c90e1c1..1fcd7b6e7564 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -313,8 +313,10 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
 /* check to make sure this item is what we want */
 if (found_key.objectid != key.objectid)
 break;
- if (found_key.type != BTRFS_XATTR_ITEM_KEY)
+ if (found_key.type > BTRFS_XATTR_ITEM_KEY)
 break;
+ if (found_key.type < BTRFS_XATTR_ITEM_KEY)
+ goto next;

 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
 if (verify_dir_item(root, leaf, di))
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 84f37f34f9aa..1e99b29650a9 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1905,7 +1905,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,

 len = sizeof(*head) +
 pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
- sizeof(struct timespec);
+ sizeof(struct ceph_timespec);

 /* calculate (max) length for cap releases */
 len += sizeof(struct ceph_mds_request_release) *
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 12756040ca20..8bec8f1e4b31 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -276,8 +276,12 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
 dput(dentry);
 dentry = ERR_PTR(-EEXIST);
 }
- if (IS_ERR(dentry))
+
+ if (IS_ERR(dentry)) {
 mutex_unlock(&d_inode(parent)->i_mutex);
+ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+ }
+
 return dentry;
 }

diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index d41843181818..e770c1ee4613 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -88,13 +88,13 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
 return 0;
 }

+ err = handle->h_err;
 if (!handle->h_transaction) {
- err = jbd2_journal_stop(handle);
- return handle->h_err ? handle->h_err : err;
+ rc = jbd2_journal_stop(handle);
+ return err ? err : rc;
 }

 sb = handle->h_transaction->t_journal->j_private;
- err = handle->h_err;
 rc = jbd2_journal_stop(handle);

 if (!err)
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 5765f88b3904..8082565c59a9 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -426,6 +426,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 struct buffer_head *bh, *head;
 int ret = 0;
 int nr_submitted = 0;
+ int nr_to_submit = 0;

 blocksize = 1 << inode->i_blkbits;

@@ -478,11 +479,13 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
 }
 set_buffer_async_write(bh);
+ nr_to_submit++;
 } while ((bh = bh->b_this_page) != head);

 bh = head = page_buffers(page);

- if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+ if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
+ nr_to_submit) {
 data_page = ext4_encrypt(inode, page);
 if (IS_ERR(data_page)) {
 ret = PTR_ERR(data_page);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index ff89971e3ee0..8a3b9f14d198 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -396,9 +396,13 @@ static void ext4_handle_error(struct super_block *sb)
 smp_wmb();
 sb->s_flags |= MS_RDONLY;
 }
- if (test_opt(sb, ERRORS_PANIC))
+ if (test_opt(sb, ERRORS_PANIC)) {
+ if (EXT4_SB(sb)->s_journal &&
+ !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
+ return;
 panic("EXT4-fs (device %s): panic forced after error\n",
 sb->s_id);
+ }
 }

 #define ext4_error_ratelimit(sb) \
@@ -587,8 +591,12 @@ void __ext4_abort(struct super_block *sb, const char *function,
 jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
 save_error_info(sb, function, line);
 }
- if (test_opt(sb, ERRORS_PANIC))
+ if (test_opt(sb, ERRORS_PANIC)) {
+ if (EXT4_SB(sb)->s_journal &&
+ !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
+ return;
 panic("EXT4-fs panic from previous error\n");
+ }
 }

 void __ext4_msg(struct super_block *sb,
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 7003c0925760..0469f32918a5 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2086,8 +2086,12 @@ static void __journal_abort_soft (journal_t *journal, int errno)

 __jbd2_journal_abort_hard(journal);

- if (errno)
+ if (errno) {
 jbd2_journal_update_sb_errno(journal);
+ write_lock(&journal->j_state_lock);
+ journal->j_flags |= JBD2_REC_ERR;
+ write_unlock(&journal->j_state_lock);
+ }
 }

 /**
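
Taken together with the ext4 hunks above, the new JBD2_REC_ERR flag changes the ordering on errors=panic mounts: jbd2 records the errno in the journal superblock first, and ext4 only panics once that record exists, so the error information survives the reboot. A compressed userspace rendering of that ordering (illustrative; the flag and function names merely stand in for the kernel ones):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool rec_err;  /* stands in for JBD2_REC_ERR */

static void journal_abort(int err)
{
    if (err) {
        printf("journal: errno %d recorded in superblock\n", err);
        rec_err = true;  /* only now may ext4 panic */
    }
}

static void ext4_handle_error(bool errors_panic)
{
    if (errors_panic) {
        if (!rec_err)
            return;  /* error not recorded yet: defer the panic */
        printf("panic: forced after error\n");
        exit(1);
    }
}

int main(void)
{
    ext4_handle_error(true);  /* too early, no panic */
    journal_abort(-5);        /* -EIO hits the journal superblock */
    ext4_handle_error(true);  /* now the panic is allowed */
    return 0;
}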
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 976ba792fbc6..7f22b6c6fb50 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1813,7 +1813,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
 nfsi->attr_gencount = fattr->gencount;
 }
- invalid &= ~NFS_INO_INVALID_ATTR;
+
+ /* Don't declare attrcache up to date if there were no attrs! */
+ if (fattr->valid != 0)
+ invalid &= ~NFS_INO_INVALID_ATTR;
+
 /* Don't invalidate the data if we were to blame */
 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
 || S_ISLNK(inode->i_mode)))
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index e42be52a8c18..5dea913baf46 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -33,7 +33,7 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
 return ret;
 idr_preload(GFP_KERNEL);
 spin_lock(&nn->nfs_client_lock);
- ret = idr_alloc(&nn->cb_ident_idr, clp, 0, 0, GFP_NOWAIT);
+ ret = idr_alloc(&nn->cb_ident_idr, clp, 1, 0, GFP_NOWAIT);
 if (ret >= 0)
 clp->cl_cb_ident = ret;
 spin_unlock(&nn->nfs_client_lock);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 397798368b1a..bb6c324f1f3d 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -765,16 +765,68 @@ void nfs4_unhash_stid(struct nfs4_stid *s)
 s->sc_type = 0;
 }

-static void
+/**
+ * nfs4_get_existing_delegation - Discover if this delegation already exists
+ * @clp: a pointer to the nfs4_client we're granting a delegation to
+ * @fp: a pointer to the nfs4_file we're granting a delegation on
+ *
+ * Return:
+ * On success: NULL if an existing delegation was not found.
+ *
+ * On error: -EAGAIN if one was previously granted to this nfs4_client
+ * for this nfs4_file.
+ *
+ */
+
+static int
+nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
+{
+ struct nfs4_delegation *searchdp = NULL;
+ struct nfs4_client *searchclp = NULL;
+
+ lockdep_assert_held(&state_lock);
+ lockdep_assert_held(&fp->fi_lock);
+
+ list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
+ searchclp = searchdp->dl_stid.sc_client;
+ if (clp == searchclp) {
+ return -EAGAIN;
+ }
+ }
+ return 0;
+}
+
+/**
+ * hash_delegation_locked - Add a delegation to the appropriate lists
+ * @dp: a pointer to the nfs4_delegation we are adding.
+ * @fp: a pointer to the nfs4_file we're granting a delegation on
+ *
+ * Return:
+ * On success: NULL if the delegation was successfully hashed.
+ *
+ * On error: -EAGAIN if one was previously granted to this
+ * nfs4_client for this nfs4_file. Delegation is not hashed.
+ *
+ */
+
+static int
 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
 {
+ int status;
+ struct nfs4_client *clp = dp->dl_stid.sc_client;
+
 lockdep_assert_held(&state_lock);
 lockdep_assert_held(&fp->fi_lock);

+ status = nfs4_get_existing_delegation(clp, fp);
+ if (status)
+ return status;
+ ++fp->fi_delegees;
 atomic_inc(&dp->dl_stid.sc_count);
 dp->dl_stid.sc_type = NFS4_DELEG_STID;
 list_add(&dp->dl_perfile, &fp->fi_delegations);
- list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
+ list_add(&dp->dl_perclnt, &clp->cl_delegations);
+ return 0;
 }

 static bool
@@ -3351,6 +3403,7 @@ static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
 stp->st_access_bmap = 0;
 stp->st_deny_bmap = 0;
 stp->st_openstp = NULL;
+ init_rwsem(&stp->st_rwsem);
 spin_lock(&oo->oo_owner.so_client->cl_lock);
 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
 spin_lock(&fp->fi_lock);
@@ -3940,6 +3993,18 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
 return fl;
 }

+/**
+ * nfs4_setlease - Obtain a delegation by requesting lease from vfs layer
+ * @dp: a pointer to the nfs4_delegation we're adding.
+ *
+ * Return:
+ * On success: Return code will be 0 on success.
+ *
+ * On error: -EAGAIN if there was an existing delegation.
+ * nonzero if there is an error in other cases.
+ *
+ */
+
 static int nfs4_setlease(struct nfs4_delegation *dp)
 {
 struct nfs4_file *fp = dp->dl_stid.sc_file;
@@ -3971,16 +4036,19 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
 goto out_unlock;
 /* Race breaker */
 if (fp->fi_deleg_file) {
- status = 0;
- ++fp->fi_delegees;
- hash_delegation_locked(dp, fp);
+ status = hash_delegation_locked(dp, fp);
 goto out_unlock;
 }
 fp->fi_deleg_file = filp;
- fp->fi_delegees = 1;
- hash_delegation_locked(dp, fp);
+ fp->fi_delegees = 0;
+ status = hash_delegation_locked(dp, fp);
 spin_unlock(&fp->fi_lock);
 spin_unlock(&state_lock);
+ if (status) {
+ /* Should never happen, this is a new fi_deleg_file */
+ WARN_ON_ONCE(1);
+ goto out_fput;
+ }
 return 0;
 out_unlock:
 spin_unlock(&fp->fi_lock);
@@ -4000,6 +4068,15 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
 if (fp->fi_had_conflict)
 return ERR_PTR(-EAGAIN);

+ spin_lock(&state_lock);
+ spin_lock(&fp->fi_lock);
+ status = nfs4_get_existing_delegation(clp, fp);
+ spin_unlock(&fp->fi_lock);
+ spin_unlock(&state_lock);
+
+ if (status)
+ return ERR_PTR(status);
+
 dp = alloc_init_deleg(clp, fh, odstate);
 if (!dp)
 return ERR_PTR(-ENOMEM);
@@ -4018,9 +4095,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
 status = -EAGAIN;
 goto out_unlock;
 }
- ++fp->fi_delegees;
- hash_delegation_locked(dp, fp);
- status = 0;
+ status = hash_delegation_locked(dp, fp);
 out_unlock:
 spin_unlock(&fp->fi_lock);
 spin_unlock(&state_lock);
@@ -4181,15 +4256,20 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 */
 if (stp) {
 /* Stateid was found, this is an OPEN upgrade */
+ down_read(&stp->st_rwsem);
 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
- if (status)
+ if (status) {
+ up_read(&stp->st_rwsem);
 goto out;
+ }
 } else {
 stp = open->op_stp;
 open->op_stp = NULL;
 init_open_stateid(stp, fp, open);
+ down_read(&stp->st_rwsem);
 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
 if (status) {
+ up_read(&stp->st_rwsem);
 release_open_stateid(stp);
 goto out;
 }
@@ -4201,6 +4281,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 }
 update_stateid(&stp->st_stid.sc_stateid);
 memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
+ up_read(&stp->st_rwsem);

 if (nfsd4_has_session(&resp->cstate)) {
 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
@@ -4777,10 +4858,13 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
 * revoked delegations are kept only for free_stateid.
 */
 return nfserr_bad_stateid;
+ down_write(&stp->st_rwsem);
 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
- if (status)
- return status;
- return nfs4_check_fh(current_fh, &stp->st_stid);
+ if (status == nfs_ok)
+ status = nfs4_check_fh(current_fh, &stp->st_stid);
+ if (status != nfs_ok)
+ up_write(&stp->st_rwsem);
+ return status;
 }

 /*
@@ -4827,6 +4911,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
 return status;
 oo = openowner(stp->st_stateowner);
 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
+ up_write(&stp->st_rwsem);
 nfs4_put_stid(&stp->st_stid);
 return nfserr_bad_stateid;
 }
@@ -4857,11 +4942,14 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 goto out;
 oo = openowner(stp->st_stateowner);
 status = nfserr_bad_stateid;
- if (oo->oo_flags & NFS4_OO_CONFIRMED)
+ if (oo->oo_flags & NFS4_OO_CONFIRMED) {
+ up_write(&stp->st_rwsem);
 goto put_stateid;
+ }
 oo->oo_flags |= NFS4_OO_CONFIRMED;
 update_stateid(&stp->st_stid.sc_stateid);
 memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
+ up_write(&stp->st_rwsem);
 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));

@@ -4940,6 +5028,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
 memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
 status = nfs_ok;
 put_stateid:
+ up_write(&stp->st_rwsem);
 nfs4_put_stid(&stp->st_stid);
 out:
 nfsd4_bump_seqid(cstate, status);
@@ -4993,6 +5082,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 goto out;
 update_stateid(&stp->st_stid.sc_stateid);
 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
+ up_write(&stp->st_rwsem);

 nfsd4_close_open_stateid(stp);

@@ -5223,6 +5313,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
 stp->st_access_bmap = 0;
 stp->st_deny_bmap = open_stp->st_deny_bmap;
 stp->st_openstp = open_stp;
+ init_rwsem(&stp->st_rwsem);
 list_add(&stp->st_locks, &open_stp->st_locks);
 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
 spin_lock(&fp->fi_lock);
@@ -5391,6 +5482,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 &open_stp, nn);
 if (status)
 goto out;
+ up_write(&open_stp->st_rwsem);
 open_sop = openowner(open_stp->st_stateowner);
 status = nfserr_bad_stateid;
 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
@@ -5398,6 +5490,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 goto out;
 status = lookup_or_create_lock_state(cstate, open_stp, lock,
 &lock_stp, &new);
+ if (status == nfs_ok)
+ down_write(&lock_stp->st_rwsem);
 } else {
 status = nfs4_preprocess_seqid_op(cstate,
 lock->lk_old_lock_seqid,
@@ -5503,6 +5597,8 @@ out:
 seqid_mutating_err(ntohl(status)))
 lock_sop->lo_owner.so_seqid++;

+ up_write(&lock_stp->st_rwsem);
+
 /*
 * If this is a new, never-before-used stateid, and we are
 * returning an error, then just go ahead and release it.
@@ -5673,6 +5769,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 fput:
 fput(filp);
 put_stateid:
+ up_write(&stp->st_rwsem);
 nfs4_put_stid(&stp->st_stid);
 out:
 nfsd4_bump_seqid(cstate, status);
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index dbc4f85a5008..67685b6cfef3 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -533,15 +533,16 @@ struct nfs4_file {
 * Better suggestions welcome.
 */
 struct nfs4_ol_stateid {
- struct nfs4_stid st_stid; /* must be first field */
- struct list_head st_perfile;
- struct list_head st_perstateowner;
- struct list_head st_locks;
- struct nfs4_stateowner * st_stateowner;
- struct nfs4_clnt_odstate * st_clnt_odstate;
- unsigned char st_access_bmap;
- unsigned char st_deny_bmap;
- struct nfs4_ol_stateid * st_openstp;
+ struct nfs4_stid st_stid;
+ struct list_head st_perfile;
+ struct list_head st_perstateowner;
+ struct list_head st_locks;
+ struct nfs4_stateowner *st_stateowner;
+ struct nfs4_clnt_odstate *st_clnt_odstate;
+ unsigned char st_access_bmap;
+ unsigned char st_deny_bmap;
+ struct nfs4_ol_stateid *st_openstp;
+ struct rw_semaphore st_rwsem;
 };

 static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
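
The new st_rwsem gives each open/lock stateid the classic reader/writer discipline used in the nfs4state.c hunks above: plain OPENs take it shared, while seqid-mutating operations (OPEN_CONFIRM, OPEN_DOWNGRADE, CLOSE, LOCK, LOCKU) take it exclusive so the seqid bump and the stateid copy happen atomically. A pthreads sketch of the same pattern (illustrative, not nfsd code):

#include <pthread.h>
#include <stdio.h>

/* Stand-in for struct nfs4_ol_stateid with its new st_rwsem. */
struct ol_stateid {
    pthread_rwlock_t rwsem;
    unsigned int seqid;
};

/* OPEN and OPEN upgrades take the lock shared, so several opens of
 * the same stateid can proceed concurrently. */
static void process_open(struct ol_stateid *stp)
{
    pthread_rwlock_rdlock(&stp->rwsem);
    /* ... set access/deny bits, copy the stateid to the reply ... */
    pthread_rwlock_unlock(&stp->rwsem);
}

/* Seqid-mutating ops take it exclusive, so the seqid bump and the
 * reply copy are a single atomic step from a client's viewpoint. */
static void seqid_mutating_op(struct ol_stateid *stp)
{
    pthread_rwlock_wrlock(&stp->rwsem);
    stp->seqid++;
    pthread_rwlock_unlock(&stp->rwsem);
}

int main(void)
{
    struct ol_stateid stp = { .seqid = 1 };

    pthread_rwlock_init(&stp.rwsem, NULL);
    process_open(&stp);
    seqid_mutating_op(&stp);
    printf("seqid now %u\n", stp.seqid);
    pthread_rwlock_destroy(&stp.rwsem);
    return 0;
}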
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 176fe6afd94e..4d5e0a573f4f 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -365,6 +365,8 @@ static int ocfs2_mknod(struct inode *dir,
 mlog_errno(status);
 goto leave;
 }
+ /* update inode->i_mode after mask with "umask". */
+ inode->i_mode = mode;

 handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
 S_ISDIR(mode),
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 82806c60aa42..e4b464983322 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -224,7 +224,7 @@ struct ipv6_pinfo {
 struct ipv6_ac_socklist *ipv6_ac_list;
 struct ipv6_fl_socklist __rcu *ipv6_fl_list;

- struct ipv6_txoptions *opt;
+ struct ipv6_txoptions __rcu *opt;
 struct sk_buff *pktoptions;
 struct sk_buff *rxpmtu;
 struct inet6_cork cork;
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index eb1cebed3f36..c90c9b70e568 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1007,6 +1007,7 @@ struct journal_s
 #define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
 * data write error in ordered
 * mode */
+#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */

 /*
 * Function declarations for the journaling transaction and buffer
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index dfe4ddfbb43c..e830c3dff61a 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -63,6 +63,7 @@ struct unix_sock {
 #define UNIX_GC_CANDIDATE 0
 #define UNIX_GC_MAYBE_CYCLE 1
 struct socket_wq peer_wq;
+ wait_queue_t peer_wake;
 };

 static inline struct unix_sock *unix_sk(struct sock *sk)
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index b8529aa1dae7..b0f7445c0fdc 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -83,11 +83,12 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
 err = ip6_local_out_sk(sk, skb);

 if (net_xmit_eval(err) == 0) {
- struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+ struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
 u64_stats_update_begin(&tstats->syncp);
 tstats->tx_bytes += pkt_len;
 tstats->tx_packets++;
 u64_stats_update_end(&tstats->syncp);
+ put_cpu_ptr(tstats);
 } else {
 stats->tx_errors++;
 stats->tx_aborted_errors++;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index d8214cb88bbc..9c2897e56ee1 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -207,12 +207,13 @@ static inline void iptunnel_xmit_stats(int err,
 struct pcpu_sw_netstats __percpu *stats)
 {
 if (err > 0) {
- struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats);
+ struct pcpu_sw_netstats *tstats = get_cpu_ptr(stats);

 u64_stats_update_begin(&tstats->syncp);
 tstats->tx_bytes += err;
 tstats->tx_packets++;
 u64_stats_update_end(&tstats->syncp);
+ put_cpu_ptr(tstats);
 } else if (err < 0) {
 err_stats->tx_errors++;
 err_stats->tx_aborted_errors++;
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index eec8ad3c9843..df555ecd4002 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -205,6 +205,7 @@ extern rwlock_t ip6_ra_lock;
 */

 struct ipv6_txoptions {
+ atomic_t refcnt;
 /* Length of this structure */
 int tot_len;

@@ -217,7 +218,7 @@ struct ipv6_txoptions {
 struct ipv6_opt_hdr *dst0opt;
 struct ipv6_rt_hdr *srcrt; /* Routing Header */
 struct ipv6_opt_hdr *dst1opt;
-
+ struct rcu_head rcu;
 /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
 };

@@ -250,6 +251,24 @@ struct ipv6_fl_socklist {
 struct rcu_head rcu;
 };

+static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
+{
+ struct ipv6_txoptions *opt;
+
+ rcu_read_lock();
+ opt = rcu_dereference(np->opt);
+ if (opt && !atomic_inc_not_zero(&opt->refcnt))
+ opt = NULL;
+ rcu_read_unlock();
+ return opt;
+}
+
+static inline void txopt_put(struct ipv6_txoptions *opt)
+{
+ if (opt && atomic_dec_and_test(&opt->refcnt))
+ kfree_rcu(opt, rcu);
+}
+
 struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
 struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
 struct ip6_flowlabel *fl,
@@ -488,6 +507,7 @@ struct ip6_create_arg {
 u32 user;
 const struct in6_addr *src;
 const struct in6_addr *dst;
+ int iif;
 u8 ecn;
 };

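txopt_get()/txopt_put() are the usual "take a reference only while the count is still non-zero" idiom over an RCU-published pointer: readers that win the race pin the options past the RCU grace period, and the last put frees them via kfree_rcu(). A userspace analogue built on C11 atomics (illustrative; the kernel helpers above rely on rcu_read_lock() and atomic_inc_not_zero() instead):

#include <stdatomic.h>
#include <stdlib.h>

struct txoptions {
    atomic_int refcnt;
    /* ... option data ... */
};

/* Take a reference only if the count is still non-zero, i.e. the
 * object is not already on its way to being freed. */
static struct txoptions *txopt_get_analog(struct txoptions *opt)
{
    int old = atomic_load(&opt->refcnt);

    while (old != 0) {
        if (atomic_compare_exchange_weak(&opt->refcnt, &old, old + 1))
            return opt;     /* reference taken */
    }
    return NULL;            /* lost the race against the last put */
}

static void txopt_put_analog(struct txoptions *opt)
{
    if (opt && atomic_fetch_sub(&opt->refcnt, 1) == 1)
        free(opt);          /* the kernel defers this via kfree_rcu() */
}

int main(void)
{
    struct txoptions *opt = calloc(1, sizeof(*opt));

    if (!opt)
        return 1;
    atomic_init(&opt->refcnt, 1);       /* creator holds a reference */
    struct txoptions *ref = txopt_get_analog(opt);
    txopt_put_analog(ref);              /* drop the extra reference */
    txopt_put_analog(opt);              /* drop the last one: frees */
    return 0;
}
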
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 6d778efcfdfd..080b657ef8fb 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -61,6 +61,9 @@ struct Qdisc {
 */
 #define TCQ_F_WARN_NONWC (1 << 16)
 #define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */
+#define TCQ_F_NOPARENT 0x40 /* root of its hierarchy :
+ * qdisc_tree_decrease_qlen() should stop.
+ */
 u32 limit;
 const struct Qdisc_ops *ops;
 struct qdisc_size_table __rcu *stab;
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 8a6616583f38..1c1b8ab34037 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -109,7 +109,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
 /* all elements already exist */
 return -EEXIST;

- memcpy(array->value + array->elem_size * index, value, array->elem_size);
+ memcpy(array->value + array->elem_size * index, value, map->value_size);
 return 0;
 }

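The one-word change matters because elem_size is the element slot size, i.e. the user-visible value_size rounded up to 8 bytes for alignment; copying elem_size bytes from the caller's buffer could read up to 7 bytes past its end. A quick illustration of the two sizes (assuming the same round-up rule):

#include <stdio.h>

#define ROUND_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
    unsigned int value_size = 13;                     /* caller's buffer */
    unsigned int elem_size = ROUND_UP(value_size, 8); /* array slot size */

    /* memcpy(dst, value, elem_size) reads 3 bytes past a 13-byte
     * buffer; memcpy(dst, value, value_size) cannot overread. */
    printf("value_size=%u elem_size=%u overread=%u\n",
           value_size, elem_size, elem_size - value_size);
    return 0;
}
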
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 2237c1b3cdd2..d6e8cfcb6f7c 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2207,7 +2207,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
 ndm->ndm_pad2 = 0;
 ndm->ndm_flags = pn->flags | NTF_PROXY;
 ndm->ndm_type = RTN_UNICAST;
- ndm->ndm_ifindex = pn->dev->ifindex;
+ ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
 ndm->ndm_state = NUD_NONE;

 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
@@ -2282,7 +2282,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 if (h > s_h)
 s_idx = 0;
 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
- if (dev_net(n->dev) != net)
+ if (pneigh_net(n) != net)
 continue;
 if (idx < s_idx)
 goto next;
diff --git a/net/core/scm.c b/net/core/scm.c
index 3b6899b7d810..8a1741b14302 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -305,6 +305,8 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
 err = put_user(cmlen, &cm->cmsg_len);
 if (!err) {
 cmlen = CMSG_SPACE(i*sizeof(int));
+ if (msg->msg_controllen < cmlen)
+ cmlen = msg->msg_controllen;
 msg->msg_control += cmlen;
 msg->msg_controllen -= cmlen;
 }
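Without the clamp, cmlen, a CMSG_SPACE()-padded length, can exceed what remains in msg_controllen, and the unsigned subtraction that follows would wrap to a huge value. A minimal demonstration of the arithmetic being guarded (userspace sketch with made-up sizes):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t msg_controllen = 20;  /* control space the caller provided */
    size_t cmlen = 24;           /* CMSG_SPACE()-padded length */

    if (msg_controllen < cmlen)  /* the clamp added by the hunk */
        cmlen = msg_controllen;

    msg_controllen -= cmlen;     /* unclamped: wraps to SIZE_MAX - 3 */
    printf("control bytes left: %zu\n", msg_controllen);
    return 0;
}
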
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 5165571f397a..a0490508d213 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -202,7 +202,9 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
 security_req_classify_flow(req, flowi6_to_flowi(&fl6));


- final_p = fl6_update_dst(&fl6, np->opt, &final);
+ rcu_read_lock();
+ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
+ rcu_read_unlock();

 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 if (IS_ERR(dst)) {
@@ -219,7 +221,10 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
 &ireq->ir_v6_loc_addr,
 &ireq->ir_v6_rmt_addr);
 fl6.daddr = ireq->ir_v6_rmt_addr;
- err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
+ rcu_read_lock();
+ err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
+ np->tclass);
+ rcu_read_unlock();
 err = net_xmit_eval(err);
 }

@@ -415,6 +420,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 {
 struct inet_request_sock *ireq = inet_rsk(req);
 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
+ struct ipv6_txoptions *opt;
 struct inet_sock *newinet;
 struct dccp6_sock *newdp6;
 struct sock *newsk;
@@ -534,13 +540,15 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 * Yes, keeping reference count would be much more clever, but we make
 * one more one thing there: reattach optmem to newsk.
 */
- if (np->opt != NULL)
- newnp->opt = ipv6_dup_options(newsk, np->opt);
-
+ opt = rcu_dereference(np->opt);
+ if (opt) {
+ opt = ipv6_dup_options(newsk, opt);
+ RCU_INIT_POINTER(newnp->opt, opt);
+ }
 inet_csk(newsk)->icsk_ext_hdr_len = 0;
- if (newnp->opt != NULL)
- inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
- newnp->opt->opt_flen);
+ if (opt)
+ inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
+ opt->opt_flen;

 dccp_sync_mss(newsk, dst_mtu(dst));

@@ -793,6 +801,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 struct ipv6_pinfo *np = inet6_sk(sk);
 struct dccp_sock *dp = dccp_sk(sk);
 struct in6_addr *saddr = NULL, *final_p, final;
+ struct ipv6_txoptions *opt;
 struct flowi6 fl6;
 struct dst_entry *dst;
 int addr_type;
@@ -892,7 +901,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 fl6.fl6_sport = inet->inet_sport;
 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

- final_p = fl6_update_dst(&fl6, np->opt, &final);
+ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+ final_p = fl6_update_dst(&fl6, opt, &final);

 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 if (IS_ERR(dst)) {
@@ -912,9 +922,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 __ip6_dst_store(sk, dst, NULL, NULL);

 icsk->icsk_ext_hdr_len = 0;
- if (np->opt != NULL)
- icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
- np->opt->opt_nflen);
+ if (opt)
+ icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;

 inet->inet_dport = usin->sin6_port;

diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index df28693f32e1..c3bfebd501ed 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -134,7 +134,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 struct mfc_cache *c, struct rtmsg *rtm);
 static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
 int cmd);
-static void mroute_clean_tables(struct mr_table *mrt);
+static void mroute_clean_tables(struct mr_table *mrt, bool all);
 static void ipmr_expire_process(unsigned long arg);

 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -351,7 +351,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
 static void ipmr_free_table(struct mr_table *mrt)
 {
 del_timer_sync(&mrt->ipmr_expire_timer);
- mroute_clean_tables(mrt);
+ mroute_clean_tables(mrt, true);
 kfree(mrt);
 }

@@ -1209,7 +1209,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
 * Close the multicast socket, and clear the vif tables etc
 */

-static void mroute_clean_tables(struct mr_table *mrt)
+static void mroute_clean_tables(struct mr_table *mrt, bool all)
 {
 int i;
 LIST_HEAD(list);
@@ -1218,8 +1218,9 @@ static void mroute_clean_tables(struct mr_table *mrt)
 /* Shut down all active vif entries */

 for (i = 0; i < mrt->maxvif; i++) {
- if (!(mrt->vif_table[i].flags & VIFF_STATIC))
- vif_delete(mrt, i, 0, &list);
+ if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
+ continue;
+ vif_delete(mrt, i, 0, &list);
 }
 unregister_netdevice_many(&list);

@@ -1227,7 +1228,7 @@ static void mroute_clean_tables(struct mr_table *mrt)

 for (i = 0; i < MFC_LINES; i++) {
 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
- if (c->mfc_flags & MFC_STATIC)
+ if (!all && (c->mfc_flags & MFC_STATIC))
 continue;
 list_del_rcu(&c->list);
 mroute_netlink_event(mrt, c, RTM_DELROUTE);
@@ -1262,7 +1263,7 @@ static void mrtsock_destruct(struct sock *sk)
 NETCONFA_IFINDEX_ALL,
 net->ipv4.devconf_all);
 RCU_INIT_POINTER(mrt->mroute_sk, NULL);
- mroute_clean_tables(mrt);
+ mroute_clean_tables(mrt, false);
 }
 }
 rtnl_unlock();
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c9ab964189a0..87463c814896 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4438,19 +4438,34 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 {
 struct sk_buff *skb;
+ int err = -ENOMEM;
+ int data_len = 0;
 bool fragstolen;

 if (size == 0)
 return 0;

- skb = alloc_skb(size, sk->sk_allocation);
+ if (size > PAGE_SIZE) {
+ int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS);
+
+ data_len = npages << PAGE_SHIFT;
+ size = data_len + (size & ~PAGE_MASK);
+ }
+ skb = alloc_skb_with_frags(size - data_len, data_len,
+ PAGE_ALLOC_COSTLY_ORDER,
+ &err, sk->sk_allocation);
 if (!skb)
 goto err;

+ skb_put(skb, size - data_len);
+ skb->data_len = data_len;
+ skb->len = size;
+
 if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
 goto err_free;

- if (memcpy_from_msg(skb_put(skb, size), msg, size))
+ err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
+ if (err)
 goto err_free;

 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
@@ -4466,7 +4481,8 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 err_free:
 kfree_skb(skb);
 err:
- return -ENOMEM;
+ return err;
+
 }

 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
@@ -5622,6 +5638,7 @@ discard:
 }

 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+ tp->copied_seq = tp->rcv_nxt;
 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;

 /* RFC1323: The window in SYN & SYN/ACK segments is
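
The reworked tcp_send_rcvq() allocation above splits a large buffer into up to MAX_SKB_FRAGS page fragments plus a sub-page linear head instead of one huge linear skb. The arithmetic of that split, worked through for size = 70000 (a sketch assuming 4 KiB pages and MAX_SKB_FRAGS = 17, not kernel code):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define MAX_SKB_FRAGS 17UL

int main(void)
{
    unsigned long size = 70000;  /* bytes handed to tcp_send_rcvq() */
    unsigned long data_len = 0;

    if (size > PAGE_SIZE) {
        unsigned long npages = size >> PAGE_SHIFT;

        if (npages > MAX_SKB_FRAGS)
            npages = MAX_SKB_FRAGS;
        data_len = npages << PAGE_SHIFT;       /* paged part */
        size = data_len + (size & ~PAGE_MASK); /* plus linear tail */
    }
    /* alloc_skb_with_frags(size - data_len, data_len, ...) */
    printf("linear=%lu paged=%lu total=%lu\n",
           size - data_len, data_len, size);
    return 0;
}

Here the skb gets a 368-byte linear head and 69632 bytes spread over 17 page frags, instead of a single 70000-byte linear allocation that is much harder to satisfy under memory pressure.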
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 441ca6f38981..88203e755af8 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -922,7 +922,8 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 }

 md5sig = rcu_dereference_protected(tp->md5sig_info,
- sock_owned_by_user(sk));
+ sock_owned_by_user(sk) ||
+ lockdep_is_held(&sk->sk_lock.slock));
 if (!md5sig) {
 md5sig = kmalloc(sizeof(*md5sig), gfp);
 if (!md5sig)
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8c65dc147d8b..c8f97858d6f6 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -176,6 +176,18 @@ static int tcp_write_timeout(struct sock *sk)
 syn_set = true;
 } else {
 if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
+ /* Some middle-boxes may black-hole Fast Open _after_
+ * the handshake. Therefore we conservatively disable
+ * Fast Open on this path on recurring timeouts with
+ * few or zero bytes acked after Fast Open.
+ */
+ if (tp->syn_data_acked &&
+ tp->bytes_acked <= tp->rx_opt.mss_clamp) {
+ tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
+ if (icsk->icsk_retransmits == sysctl_tcp_retries1)
+ NET_INC_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+ }
 /* Black hole detection */
 tcp_mtu_probing(icsk, sk);

diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index eef63b394c5a..2d044d2a2ccf 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -425,9 +425,11 @@ void inet6_destroy_sock(struct sock *sk)

 /* Free tx options */

- opt = xchg(&np->opt, NULL);
- if (opt)
- sock_kfree_s(sk, opt, opt->tot_len);
+ opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL);
+ if (opt) {
+ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+ txopt_put(opt);
+ }
 }
 EXPORT_SYMBOL_GPL(inet6_destroy_sock);

@@ -656,7 +658,10 @@ int inet6_sk_rebuild_header(struct sock *sk)
 fl6.fl6_sport = inet->inet_sport;
 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

- final_p = fl6_update_dst(&fl6, np->opt, &final);
+ rcu_read_lock();
+ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt),
+ &final);
+ rcu_read_unlock();

 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 if (IS_ERR(dst)) {
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index b10a88986a98..13ca4cf5616f 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -167,8 +167,10 @@ ipv4_connected:

 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

- opt = flowlabel ? flowlabel->opt : np->opt;
+ rcu_read_lock();
+ opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
 final_p = fl6_update_dst(&fl6, opt, &final);
+ rcu_read_unlock();

 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 err = 0;
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index a7bbbe45570b..adbd6958c398 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -727,6 +727,7 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
 *((char **)&opt2->dst1opt) += dif;
 if (opt2->srcrt)
 *((char **)&opt2->srcrt) += dif;
+ atomic_set(&opt2->refcnt, 1);
 }
 return opt2;
 }
1447     @@ -790,7 +791,7 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
1448     return ERR_PTR(-ENOBUFS);
1449    
1450     memset(opt2, 0, tot_len);
1451     -
1452     + atomic_set(&opt2->refcnt, 1);
1453     opt2->tot_len = tot_len;
1454     p = (char *)(opt2 + 1);
1455    
1456     diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
1457     index 6927f3fb5597..9beed302eb36 100644
1458     --- a/net/ipv6/inet6_connection_sock.c
1459     +++ b/net/ipv6/inet6_connection_sock.c
1460     @@ -77,7 +77,9 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
1461     memset(fl6, 0, sizeof(*fl6));
1462     fl6->flowi6_proto = IPPROTO_TCP;
1463     fl6->daddr = ireq->ir_v6_rmt_addr;
1464     - final_p = fl6_update_dst(fl6, np->opt, &final);
1465     + rcu_read_lock();
1466     + final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
1467     + rcu_read_unlock();
1468     fl6->saddr = ireq->ir_v6_loc_addr;
1469     fl6->flowi6_oif = ireq->ir_iif;
1470     fl6->flowi6_mark = ireq->ir_mark;
1471     @@ -207,7 +209,9 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
1472     fl6->fl6_dport = inet->inet_dport;
1473     security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
1474    
1475     - final_p = fl6_update_dst(fl6, np->opt, &final);
1476     + rcu_read_lock();
1477     + final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
1478     + rcu_read_unlock();
1479    
1480     dst = __inet6_csk_dst_check(sk, np->dst_cookie);
1481     if (!dst) {
1482     @@ -240,7 +244,8 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
1483     /* Restore final destination back after routing done */
1484     fl6.daddr = sk->sk_v6_daddr;
1485    
1486     - res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
1487     + res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
1488     + np->tclass);
1489     rcu_read_unlock();
1490     return res;
1491     }
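
Note: the recurring pattern in this file (and in datagram.c above, syncookies.c and tcp_ipv6.c below) is to dereference np->opt only inside a bounded read-side section. A userspace stand-in, using an acquire load where the kernel uses rcu_read_lock()/rcu_dereference(); the reduced struct is hypothetical.

    #include <stdatomic.h>

    struct txopts { int opt_flen, opt_nflen; };

    struct np_model { _Atomic(struct txopts *) opt; };

    /* Read the published pointer and use it within one bounded window;
     * holding the result past the window would be the bug this series
     * is closing. */
    static int ext_hdr_len(struct np_model *np)
    {
        struct txopts *opt = atomic_load_explicit(&np->opt,
                                                  memory_order_acquire);
        return opt ? opt->opt_flen + opt->opt_nflen : 0;
    }
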
1492     diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
1493     index 5f36266b1f5e..a7aef4b52d65 100644
1494     --- a/net/ipv6/ip6mr.c
1495     +++ b/net/ipv6/ip6mr.c
1496     @@ -118,7 +118,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
1497     int cmd);
1498     static int ip6mr_rtm_dumproute(struct sk_buff *skb,
1499     struct netlink_callback *cb);
1500     -static void mroute_clean_tables(struct mr6_table *mrt);
1501     +static void mroute_clean_tables(struct mr6_table *mrt, bool all);
1502     static void ipmr_expire_process(unsigned long arg);
1503    
1504     #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1505     @@ -335,7 +335,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
1506     static void ip6mr_free_table(struct mr6_table *mrt)
1507     {
1508     del_timer_sync(&mrt->ipmr_expire_timer);
1509     - mroute_clean_tables(mrt);
1510     + mroute_clean_tables(mrt, true);
1511     kfree(mrt);
1512     }
1513    
1514     @@ -1543,7 +1543,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
1515     * Close the multicast socket, and clear the vif tables etc
1516     */
1517    
1518     -static void mroute_clean_tables(struct mr6_table *mrt)
1519     +static void mroute_clean_tables(struct mr6_table *mrt, bool all)
1520     {
1521     int i;
1522     LIST_HEAD(list);
1523     @@ -1553,8 +1553,9 @@ static void mroute_clean_tables(struct mr6_table *mrt)
1524     * Shut down all active vif entries
1525     */
1526     for (i = 0; i < mrt->maxvif; i++) {
1527     - if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
1528     - mif6_delete(mrt, i, &list);
1529     + if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
1530     + continue;
1531     + mif6_delete(mrt, i, &list);
1532     }
1533     unregister_netdevice_many(&list);
1534    
1535     @@ -1563,7 +1564,7 @@ static void mroute_clean_tables(struct mr6_table *mrt)
1536     */
1537     for (i = 0; i < MFC6_LINES; i++) {
1538     list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
1539     - if (c->mfc_flags & MFC_STATIC)
1540     + if (!all && (c->mfc_flags & MFC_STATIC))
1541     continue;
1542     write_lock_bh(&mrt_lock);
1543     list_del(&c->list);
1544     @@ -1626,7 +1627,7 @@ int ip6mr_sk_done(struct sock *sk)
1545     net->ipv6.devconf_all);
1546     write_unlock_bh(&mrt_lock);
1547    
1548     - mroute_clean_tables(mrt);
1549     + mroute_clean_tables(mrt, false);
1550     err = 0;
1551     break;
1552     }
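
Note: mroute_clean_tables() gains an 'all' switch so static VIFs and cache entries survive a socket close but not table teardown, where keeping them would leak. A compilable sketch of the skip rule, with hypothetical names:

    #include <stdbool.h>

    #define ENTRY_STATIC 0x1   /* stand-in for VIFF_STATIC / MFC_STATIC */

    struct entry { unsigned flags; bool live; };

    /* all=false: socket close, static entries stay;
     * all=true: table free, everything must go. */
    static void clean_table(struct entry *tbl, int n, bool all)
    {
        for (int i = 0; i < n; i++) {
            if (!all && (tbl[i].flags & ENTRY_STATIC))
                continue;
            tbl[i].live = false;  /* delete */
        }
    }
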
1553     diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
1554     index 63e6956917c9..4449ad1f8114 100644
1555     --- a/net/ipv6/ipv6_sockglue.c
1556     +++ b/net/ipv6/ipv6_sockglue.c
1557     @@ -111,7 +111,8 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
1558     icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
1559     }
1560     }
1561     - opt = xchg(&inet6_sk(sk)->opt, opt);
1562     + opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
1563     + opt);
1564     sk_dst_reset(sk);
1565    
1566     return opt;
1567     @@ -231,9 +232,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
1568     sk->sk_socket->ops = &inet_dgram_ops;
1569     sk->sk_family = PF_INET;
1570     }
1571     - opt = xchg(&np->opt, NULL);
1572     - if (opt)
1573     - sock_kfree_s(sk, opt, opt->tot_len);
1574     + opt = xchg((__force struct ipv6_txoptions **)&np->opt,
1575     + NULL);
1576     + if (opt) {
1577     + atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
1578     + txopt_put(opt);
1579     + }
1580     pktopt = xchg(&np->pktoptions, NULL);
1581     kfree_skb(pktopt);
1582    
1583     @@ -403,7 +407,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
1584     if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
1585     break;
1586    
1587     - opt = ipv6_renew_options(sk, np->opt, optname,
1588     + opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
1589     + opt = ipv6_renew_options(sk, opt, optname,
1590     (struct ipv6_opt_hdr __user *)optval,
1591     optlen);
1592     if (IS_ERR(opt)) {
1593     @@ -432,8 +437,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
1594     retv = 0;
1595     opt = ipv6_update_options(sk, opt);
1596     sticky_done:
1597     - if (opt)
1598     - sock_kfree_s(sk, opt, opt->tot_len);
1599     + if (opt) {
1600     + atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
1601     + txopt_put(opt);
1602     + }
1603     break;
1604     }
1605    
1606     @@ -486,6 +493,7 @@ sticky_done:
1607     break;
1608    
1609     memset(opt, 0, sizeof(*opt));
1610     + atomic_set(&opt->refcnt, 1);
1611     opt->tot_len = sizeof(*opt) + optlen;
1612     retv = -EFAULT;
1613     if (copy_from_user(opt+1, optval, optlen))
1614     @@ -502,8 +510,10 @@ update:
1615     retv = 0;
1616     opt = ipv6_update_options(sk, opt);
1617     done:
1618     - if (opt)
1619     - sock_kfree_s(sk, opt, opt->tot_len);
1620     + if (opt) {
1621     + atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
1622     + txopt_put(opt);
1623     + }
1624     break;
1625     }
1626     case IPV6_UNICAST_HOPS:
1627     @@ -1110,10 +1120,11 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1628     case IPV6_RTHDR:
1629     case IPV6_DSTOPTS:
1630     {
1631     + struct ipv6_txoptions *opt;
1632    
1633     lock_sock(sk);
1634     - len = ipv6_getsockopt_sticky(sk, np->opt,
1635     - optname, optval, len);
1636     + opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
1637     + len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len);
1638     release_sock(sk);
1639     /* check if ipv6_getsockopt_sticky() returns err code */
1640     if (len < 0)
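
Note: sock_kfree_s() both uncharged the socket's option memory and freed the buffer; with shared options those steps split. The charge is released immediately while the memory lives until the last txopt_put(). A sketch of that split, with 'omem' and 'tot_len' standing in for sk_omem_alloc and opt->tot_len:

    #include <stdatomic.h>

    struct txopts;                      /* refcounted, as sketched above */
    void txopts_put(struct txopts *o);  /* drops one reference, NULL-safe */

    static void drop_opt_charge(atomic_int *omem, int tot_len,
                                struct txopts *opt)
    {
        if (!opt)
            return;
        atomic_fetch_sub(omem, tot_len);  /* uncharge now */
        txopts_put(opt);                  /* free later, maybe */
    }
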
1641     diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
1642     index 083b2927fc67..41e3b5ee8d0b 100644
1643     --- a/net/ipv6/mcast.c
1644     +++ b/net/ipv6/mcast.c
1645     @@ -1651,7 +1651,6 @@ out:
1646     if (!err) {
1647     ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
1648     ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1649     - IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
1650     } else {
1651     IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1652     }
1653     @@ -2014,7 +2013,6 @@ out:
1654     if (!err) {
1655     ICMP6MSGOUT_INC_STATS(net, idev, type);
1656     ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1657     - IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
1658     } else
1659     IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1660    
1661     diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
1662     index 6f187c8d8a1b..d235ed7f47ab 100644
1663     --- a/net/ipv6/netfilter/nf_conntrack_reasm.c
1664     +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
1665     @@ -190,7 +190,7 @@ static void nf_ct_frag6_expire(unsigned long data)
1666     /* Creation primitives. */
1667     static inline struct frag_queue *fq_find(struct net *net, __be32 id,
1668     u32 user, struct in6_addr *src,
1669     - struct in6_addr *dst, u8 ecn)
1670     + struct in6_addr *dst, int iif, u8 ecn)
1671     {
1672     struct inet_frag_queue *q;
1673     struct ip6_create_arg arg;
1674     @@ -200,6 +200,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
1675     arg.user = user;
1676     arg.src = src;
1677     arg.dst = dst;
1678     + arg.iif = iif;
1679     arg.ecn = ecn;
1680    
1681     local_bh_disable();
1682     @@ -603,7 +604,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
1683     fhdr = (struct frag_hdr *)skb_transport_header(clone);
1684    
1685     fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
1686     - ip6_frag_ecn(hdr));
1687     + skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
1688     if (fq == NULL) {
1689     pr_debug("Can't find and can't create new queue\n");
1690     goto ret_orig;
1691     diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
1692     index 8072bd4139b7..2c639aee12cb 100644
1693     --- a/net/ipv6/raw.c
1694     +++ b/net/ipv6/raw.c
1695     @@ -731,6 +731,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
1696    
1697     static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1698     {
1699     + struct ipv6_txoptions *opt_to_free = NULL;
1700     struct ipv6_txoptions opt_space;
1701     DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1702     struct in6_addr *daddr, *final_p, final;
1703     @@ -837,8 +838,10 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1704     if (!(opt->opt_nflen|opt->opt_flen))
1705     opt = NULL;
1706     }
1707     - if (!opt)
1708     - opt = np->opt;
1709     + if (!opt) {
1710     + opt = txopt_get(np);
1711     + opt_to_free = opt;
1712     + }
1713     if (flowlabel)
1714     opt = fl6_merge_options(&opt_space, flowlabel, opt);
1715     opt = ipv6_fixup_options(&opt_space, opt);
1716     @@ -901,6 +904,7 @@ done:
1717     dst_release(dst);
1718     out:
1719     fl6_sock_release(flowlabel);
1720     + txopt_put(opt_to_free);
1721     return err < 0 ? err : len;
1722     do_confirm:
1723     dst_confirm(dst);
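
Note: with np->opt refcounted, the sendmsg paths take their own reference via txopt_get() and record it in opt_to_free, so exactly that reference is dropped on exit no matter where 'opt' ends up pointing after fl6_merge_options()/ipv6_fixup_options(). udp.c and l2tp_ip6.c below repeat the pattern. A sketch under those assumptions, reusing the hypothetical txopts helpers from above:

    #include <stddef.h>

    struct txopts;                                /* see sketch above */
    struct txopts *txopts_get(struct txopts *o);  /* takes a reference */
    void txopts_put(struct txopts *o);            /* drops one, NULL-safe */

    /* Remember exactly the reference taken here, since 'opt' itself may
     * be repointed at merged or fixed-up blocks before returning. */
    static void sendmsg_model(struct txopts *per_call,
                              struct txopts *per_sock)
    {
        struct txopts *opt_to_free = NULL;
        struct txopts *opt = per_call;

        if (!opt) {
            opt = txopts_get(per_sock);  /* like txopt_get(np) */
            opt_to_free = opt;
        }
        /* ... merge flow labels, build and transmit using 'opt' ... */
        txopts_put(opt_to_free);         /* drop only what we took */
    }
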
1724     diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
1725     index 8ffa2c8cce77..9d1f6a28b284 100644
1726     --- a/net/ipv6/reassembly.c
1727     +++ b/net/ipv6/reassembly.c
1728     @@ -108,7 +108,10 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
1729     return fq->id == arg->id &&
1730     fq->user == arg->user &&
1731     ipv6_addr_equal(&fq->saddr, arg->src) &&
1732     - ipv6_addr_equal(&fq->daddr, arg->dst);
1733     + ipv6_addr_equal(&fq->daddr, arg->dst) &&
1734     + (arg->iif == fq->iif ||
1735     + !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
1736     + IPV6_ADDR_LINKLOCAL)));
1737     }
1738     EXPORT_SYMBOL(ip6_frag_match);
1739    
1740     @@ -180,7 +183,7 @@ static void ip6_frag_expire(unsigned long data)
1741    
1742     static struct frag_queue *
1743     fq_find(struct net *net, __be32 id, const struct in6_addr *src,
1744     - const struct in6_addr *dst, u8 ecn)
1745     + const struct in6_addr *dst, int iif, u8 ecn)
1746     {
1747     struct inet_frag_queue *q;
1748     struct ip6_create_arg arg;
1749     @@ -190,6 +193,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
1750     arg.user = IP6_DEFRAG_LOCAL_DELIVER;
1751     arg.src = src;
1752     arg.dst = dst;
1753     + arg.iif = iif;
1754     arg.ecn = ecn;
1755    
1756     hash = inet6_hash_frag(id, src, dst);
1757     @@ -551,7 +555,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
1758     }
1759    
1760     fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
1761     - ip6_frag_ecn(hdr));
1762     + skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
1763     if (fq) {
1764     int ret;
1765    
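
Note: both reassembly paths (nf_conntrack_reasm.c above and this file) now key fragment queues on the arrival interface whenever the destination is link-local or multicast, since such addresses are only meaningful per link; this stops fragments from different interfaces being mixed into one queue. A model of the widened match, with the address-equality checks elided and a reduced, hypothetical key:

    #include <stdbool.h>
    #include <stdint.h>

    struct fq_key {
        uint32_t id;
        uint32_t user;
        int      iif;            /* arrival interface index */
        bool     dst_is_scoped;  /* multicast or link-local destination */
    };

    static bool fq_match(const struct fq_key *q, const struct fq_key *arg)
    {
        if (q->id != arg->id || q->user != arg->user)
            return false;
        /* saddr/daddr equality elided */
        return arg->iif == q->iif || !arg->dst_is_scoped;
    }
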
1766     diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
1767     index 21bc2eb53c57..a4cf004f44d0 100644
1768     --- a/net/ipv6/syncookies.c
1769     +++ b/net/ipv6/syncookies.c
1770     @@ -242,7 +242,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
1771     memset(&fl6, 0, sizeof(fl6));
1772     fl6.flowi6_proto = IPPROTO_TCP;
1773     fl6.daddr = ireq->ir_v6_rmt_addr;
1774     - final_p = fl6_update_dst(&fl6, np->opt, &final);
1775     + final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
1776     fl6.saddr = ireq->ir_v6_loc_addr;
1777     fl6.flowi6_oif = sk->sk_bound_dev_if;
1778     fl6.flowi6_mark = ireq->ir_mark;
1779     diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
1780     index e541d68dba8b..cfb27f56c62f 100644
1781     --- a/net/ipv6/tcp_ipv6.c
1782     +++ b/net/ipv6/tcp_ipv6.c
1783     @@ -121,6 +121,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1784     struct ipv6_pinfo *np = inet6_sk(sk);
1785     struct tcp_sock *tp = tcp_sk(sk);
1786     struct in6_addr *saddr = NULL, *final_p, final;
1787     + struct ipv6_txoptions *opt;
1788     struct rt6_info *rt;
1789     struct flowi6 fl6;
1790     struct dst_entry *dst;
1791     @@ -237,7 +238,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1792     fl6.fl6_dport = usin->sin6_port;
1793     fl6.fl6_sport = inet->inet_sport;
1794    
1795     - final_p = fl6_update_dst(&fl6, np->opt, &final);
1796     + opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
1797     + final_p = fl6_update_dst(&fl6, opt, &final);
1798    
1799     security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
1800    
1801     @@ -266,9 +268,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1802     tcp_fetch_timewait_stamp(sk, dst);
1803    
1804     icsk->icsk_ext_hdr_len = 0;
1805     - if (np->opt)
1806     - icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
1807     - np->opt->opt_nflen);
1808     + if (opt)
1809     + icsk->icsk_ext_hdr_len = opt->opt_flen +
1810     + opt->opt_nflen;
1811    
1812     tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1813    
1814     @@ -464,7 +466,8 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
1815     fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
1816    
1817     skb_set_queue_mapping(skb, queue_mapping);
1818     - err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
1819     + err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
1820     + np->tclass);
1821     err = net_xmit_eval(err);
1822     }
1823    
1824     @@ -994,6 +997,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1825     struct inet_request_sock *ireq;
1826     struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1827     struct tcp6_sock *newtcp6sk;
1828     + struct ipv6_txoptions *opt;
1829     struct inet_sock *newinet;
1830     struct tcp_sock *newtp;
1831     struct sock *newsk;
1832     @@ -1129,13 +1133,15 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1833     but we make one more one thing there: reattach optmem
1834     to newsk.
1835     */
1836     - if (np->opt)
1837     - newnp->opt = ipv6_dup_options(newsk, np->opt);
1838     -
1839     + opt = rcu_dereference(np->opt);
1840     + if (opt) {
1841     + opt = ipv6_dup_options(newsk, opt);
1842     + RCU_INIT_POINTER(newnp->opt, opt);
1843     + }
1844     inet_csk(newsk)->icsk_ext_hdr_len = 0;
1845     - if (newnp->opt)
1846     - inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1847     - newnp->opt->opt_flen);
1848     + if (opt)
1849     + inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1850     + opt->opt_flen;
1851    
1852     tcp_ca_openreq_child(newsk, dst);
1853    
1854     diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
1855     index e51fc3eee6db..7333f3575fc5 100644
1856     --- a/net/ipv6/udp.c
1857     +++ b/net/ipv6/udp.c
1858     @@ -1107,6 +1107,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1859     DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1860     struct in6_addr *daddr, *final_p, final;
1861     struct ipv6_txoptions *opt = NULL;
1862     + struct ipv6_txoptions *opt_to_free = NULL;
1863     struct ip6_flowlabel *flowlabel = NULL;
1864     struct flowi6 fl6;
1865     struct dst_entry *dst;
1866     @@ -1260,8 +1261,10 @@ do_udp_sendmsg:
1867     opt = NULL;
1868     connected = 0;
1869     }
1870     - if (!opt)
1871     - opt = np->opt;
1872     + if (!opt) {
1873     + opt = txopt_get(np);
1874     + opt_to_free = opt;
1875     + }
1876     if (flowlabel)
1877     opt = fl6_merge_options(&opt_space, flowlabel, opt);
1878     opt = ipv6_fixup_options(&opt_space, opt);
1879     @@ -1370,6 +1373,7 @@ release_dst:
1880     out:
1881     dst_release(dst);
1882     fl6_sock_release(flowlabel);
1883     + txopt_put(opt_to_free);
1884     if (!err)
1885     return len;
1886     /*
1887     diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
1888     index d1ded3777815..0ce9da948ad7 100644
1889     --- a/net/l2tp/l2tp_ip6.c
1890     +++ b/net/l2tp/l2tp_ip6.c
1891     @@ -486,6 +486,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1892     DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
1893     struct in6_addr *daddr, *final_p, final;
1894     struct ipv6_pinfo *np = inet6_sk(sk);
1895     + struct ipv6_txoptions *opt_to_free = NULL;
1896     struct ipv6_txoptions *opt = NULL;
1897     struct ip6_flowlabel *flowlabel = NULL;
1898     struct dst_entry *dst = NULL;
1899     @@ -575,8 +576,10 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1900     opt = NULL;
1901     }
1902    
1903     - if (opt == NULL)
1904     - opt = np->opt;
1905     + if (!opt) {
1906     + opt = txopt_get(np);
1907     + opt_to_free = opt;
1908     + }
1909     if (flowlabel)
1910     opt = fl6_merge_options(&opt_space, flowlabel, opt);
1911     opt = ipv6_fixup_options(&opt_space, opt);
1912     @@ -631,6 +634,7 @@ done:
1913     dst_release(dst);
1914     out:
1915     fl6_sock_release(flowlabel);
1916     + txopt_put(opt_to_free);
1917    
1918     return err < 0 ? err : len;
1919    
1920     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1921     index 686e60187401..ebc39e66d704 100644
1922     --- a/net/packet/af_packet.c
1923     +++ b/net/packet/af_packet.c
1924     @@ -1524,6 +1524,20 @@ static void fanout_release(struct sock *sk)
1925     mutex_unlock(&fanout_mutex);
1926     }
1927    
1928     +static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1929     + struct sk_buff *skb)
1930     +{
1931     + /* Earlier code assumed this would be a VLAN pkt, double-check
1932     + * this now that we have the actual packet in hand. We can only
1933     + * do this check on Ethernet devices.
1934     + */
1935     + if (unlikely(dev->type != ARPHRD_ETHER))
1936     + return false;
1937     +
1938     + skb_reset_mac_header(skb);
1939     + return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1940     +}
1941     +
1942     static const struct proto_ops packet_ops;
1943    
1944     static const struct proto_ops packet_ops_spkt;
1945     @@ -1685,18 +1699,10 @@ retry:
1946     goto retry;
1947     }
1948    
1949     - if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
1950     - /* Earlier code assumed this would be a VLAN pkt,
1951     - * double-check this now that we have the actual
1952     - * packet in hand.
1953     - */
1954     - struct ethhdr *ehdr;
1955     - skb_reset_mac_header(skb);
1956     - ehdr = eth_hdr(skb);
1957     - if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1958     - err = -EMSGSIZE;
1959     - goto out_unlock;
1960     - }
1961     + if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1962     + !packet_extra_vlan_len_allowed(dev, skb)) {
1963     + err = -EMSGSIZE;
1964     + goto out_unlock;
1965     }
1966    
1967     skb->protocol = proto;
1968     @@ -2115,6 +2121,15 @@ static bool ll_header_truncated(const struct net_device *dev, int len)
1969     return false;
1970     }
1971    
1972     +static void tpacket_set_protocol(const struct net_device *dev,
1973     + struct sk_buff *skb)
1974     +{
1975     + if (dev->type == ARPHRD_ETHER) {
1976     + skb_reset_mac_header(skb);
1977     + skb->protocol = eth_hdr(skb)->h_proto;
1978     + }
1979     +}
1980     +
1981     static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1982     void *frame, struct net_device *dev, int size_max,
1983     __be16 proto, unsigned char *addr, int hlen)
1984     @@ -2151,8 +2166,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1985     skb_reserve(skb, hlen);
1986     skb_reset_network_header(skb);
1987    
1988     - if (!packet_use_direct_xmit(po))
1989     - skb_probe_transport_header(skb, 0);
1990     if (unlikely(po->tp_tx_has_off)) {
1991     int off_min, off_max, off;
1992     off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
1993     @@ -2198,6 +2211,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1994     dev->hard_header_len);
1995     if (unlikely(err))
1996     return err;
1997     + if (!skb->protocol)
1998     + tpacket_set_protocol(dev, skb);
1999    
2000     data += dev->hard_header_len;
2001     to_write -= dev->hard_header_len;
2002     @@ -2232,6 +2247,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2003     len = ((to_write > len_max) ? len_max : to_write);
2004     }
2005    
2006     + skb_probe_transport_header(skb, 0);
2007     +
2008     return tp_len;
2009     }
2010    
2011     @@ -2276,12 +2293,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2012     if (unlikely(!(dev->flags & IFF_UP)))
2013     goto out_put;
2014    
2015     - reserve = dev->hard_header_len + VLAN_HLEN;
2016     + if (po->sk.sk_socket->type == SOCK_RAW)
2017     + reserve = dev->hard_header_len;
2018     size_max = po->tx_ring.frame_size
2019     - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2020    
2021     - if (size_max > dev->mtu + reserve)
2022     - size_max = dev->mtu + reserve;
2023     + if (size_max > dev->mtu + reserve + VLAN_HLEN)
2024     + size_max = dev->mtu + reserve + VLAN_HLEN;
2025    
2026     do {
2027     ph = packet_current_frame(po, &po->tx_ring,
2028     @@ -2308,18 +2326,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2029     tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2030     addr, hlen);
2031     if (likely(tp_len >= 0) &&
2032     - tp_len > dev->mtu + dev->hard_header_len) {
2033     - struct ethhdr *ehdr;
2034     - /* Earlier code assumed this would be a VLAN pkt,
2035     - * double-check this now that we have the actual
2036     - * packet in hand.
2037     - */
2038     + tp_len > dev->mtu + reserve &&
2039     + !packet_extra_vlan_len_allowed(dev, skb))
2040     + tp_len = -EMSGSIZE;
2041    
2042     - skb_reset_mac_header(skb);
2043     - ehdr = eth_hdr(skb);
2044     - if (ehdr->h_proto != htons(ETH_P_8021Q))
2045     - tp_len = -EMSGSIZE;
2046     - }
2047     if (unlikely(tp_len < 0)) {
2048     if (po->tp_loss) {
2049     __packet_set_status(po, ph,
2050     @@ -2540,18 +2550,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2051    
2052     sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
2053    
2054     - if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
2055     - /* Earlier code assumed this would be a VLAN pkt,
2056     - * double-check this now that we have the actual
2057     - * packet in hand.
2058     - */
2059     - struct ethhdr *ehdr;
2060     - skb_reset_mac_header(skb);
2061     - ehdr = eth_hdr(skb);
2062     - if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2063     - err = -EMSGSIZE;
2064     - goto out_free;
2065     - }
2066     + if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
2067     + !packet_extra_vlan_len_allowed(dev, skb)) {
2068     + err = -EMSGSIZE;
2069     + goto out_free;
2070     }
2071    
2072     skb->protocol = proto;
2073     @@ -2582,8 +2584,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2074     len += vnet_hdr_len;
2075     }
2076    
2077     - if (!packet_use_direct_xmit(po))
2078     - skb_probe_transport_header(skb, reserve);
2079     + skb_probe_transport_header(skb, reserve);
2080     +
2081     if (unlikely(extra_len == 4))
2082     skb->no_fcs = 1;
2083    
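
Note: the three copies of the inline VLAN length check collapse into packet_extra_vlan_len_allowed(), and tpacket_snd() now sizes frames as mtu + reserve + VLAN_HLEN. A standalone model of the helper; the constants mirror ARPHRD_ETHER and ETH_P_8021Q, and where the real function resets the mac header before reading eth_hdr(skb), this model simply takes the wire-order protocol field as a parameter.

    #include <stdbool.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    #define ARPHRD_ETHER 1        /* mirrors <linux/if_arp.h> */
    #define ETH_P_8021Q  0x8100   /* mirrors <linux/if_ether.h> */

    /* 4 extra bytes over the MTU are tolerated only when the device is
     * Ethernet and the frame really carries an 802.1Q tag. */
    static bool extra_vlan_len_allowed(unsigned short dev_type,
                                       uint16_t h_proto_wire)
    {
        if (dev_type != ARPHRD_ETHER)
            return false;
        return h_proto_wire == htons(ETH_P_8021Q);
    }
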
2084     diff --git a/net/rds/connection.c b/net/rds/connection.c
2085     index 9d66705f9d41..da6da57e5f36 100644
2086     --- a/net/rds/connection.c
2087     +++ b/net/rds/connection.c
2088     @@ -187,12 +187,6 @@ new_conn:
2089     }
2090     }
2091    
2092     - if (trans == NULL) {
2093     - kmem_cache_free(rds_conn_slab, conn);
2094     - conn = ERR_PTR(-ENODEV);
2095     - goto out;
2096     - }
2097     -
2098     conn->c_trans = trans;
2099    
2100     ret = trans->conn_alloc(conn, gfp);
2101     diff --git a/net/rds/send.c b/net/rds/send.c
2102     index e9430f537f9c..7b30c0f3180d 100644
2103     --- a/net/rds/send.c
2104     +++ b/net/rds/send.c
2105     @@ -986,11 +986,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
2106     release_sock(sk);
2107     }
2108    
2109     - /* racing with another thread binding seems ok here */
2110     + lock_sock(sk);
2111     if (daddr == 0 || rs->rs_bound_addr == 0) {
2112     + release_sock(sk);
2113     ret = -ENOTCONN; /* XXX not a great errno */
2114     goto out;
2115     }
2116     + release_sock(sk);
2117    
2118     /* size of rm including all sgs */
2119     ret = rds_rm_size(msg, payload_len);
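
Note: the removed comment claimed racing with a concurrent bind was fine; the fix brackets the daddr/rs_bound_addr check with the socket lock so a bind cannot slip in between check and use. A minimal model, with a pthread mutex standing in for lock_sock() and illustrative field names:

    #include <pthread.h>
    #include <stdbool.h>

    struct rds_sock_model {
        pthread_mutex_t lock;
        unsigned bound_addr;
    };

    /* Returns false (-ENOTCONN in the kernel) unless both a destination
     * and a stable local binding exist at the moment of the check. */
    static bool connected_check(struct rds_sock_model *rs, unsigned daddr)
    {
        bool ok;

        pthread_mutex_lock(&rs->lock);
        ok = daddr != 0 && rs->bound_addr != 0;
        pthread_mutex_unlock(&rs->lock);
        return ok;
    }
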
2120     diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
2121     index 1e1c89e51a11..d4b6f3682c14 100644
2122     --- a/net/sched/sch_api.c
2123     +++ b/net/sched/sch_api.c
2124     @@ -253,7 +253,8 @@ int qdisc_set_default(const char *name)
2125     }
2126    
2127     /* We know handle. Find qdisc among all qdisc's attached to device
2128     - (root qdisc, all its children, children of children etc.)
2129     + * (root qdisc, all its children, children of children etc.)
2130     + * Note: caller either uses rtnl or rcu_read_lock()
2131     */
2132    
2133     static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
2134     @@ -264,7 +265,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
2135     root->handle == handle)
2136     return root;
2137    
2138     - list_for_each_entry(q, &root->list, list) {
2139     + list_for_each_entry_rcu(q, &root->list, list) {
2140     if (q->handle == handle)
2141     return q;
2142     }
2143     @@ -277,15 +278,18 @@ void qdisc_list_add(struct Qdisc *q)
2144     struct Qdisc *root = qdisc_dev(q)->qdisc;
2145    
2146     WARN_ON_ONCE(root == &noop_qdisc);
2147     - list_add_tail(&q->list, &root->list);
2148     + ASSERT_RTNL();
2149     + list_add_tail_rcu(&q->list, &root->list);
2150     }
2151     }
2152     EXPORT_SYMBOL(qdisc_list_add);
2153    
2154     void qdisc_list_del(struct Qdisc *q)
2155     {
2156     - if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
2157     - list_del(&q->list);
2158     + if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
2159     + ASSERT_RTNL();
2160     + list_del_rcu(&q->list);
2161     + }
2162     }
2163     EXPORT_SYMBOL(qdisc_list_del);
2164    
2165     @@ -750,14 +754,18 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
2166     if (n == 0)
2167     return;
2168     drops = max_t(int, n, 0);
2169     + rcu_read_lock();
2170     while ((parentid = sch->parent)) {
2171     if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
2172     - return;
2173     + break;
2174    
2175     + if (sch->flags & TCQ_F_NOPARENT)
2176     + break;
2177     + /* TODO: perform the search on a per txq basis */
2178     sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
2179     if (sch == NULL) {
2180     - WARN_ON(parentid != TC_H_ROOT);
2181     - return;
2182     + WARN_ON_ONCE(parentid != TC_H_ROOT);
2183     + break;
2184     }
2185     cops = sch->ops->cl_ops;
2186     if (cops->qlen_notify) {
2187     @@ -768,6 +776,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
2188     sch->q.qlen -= n;
2189     __qdisc_qstats_drop(sch, drops);
2190     }
2191     + rcu_read_unlock();
2192     }
2193     EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
2194    
2195     @@ -941,7 +950,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
2196     }
2197     lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
2198     if (!netif_is_multiqueue(dev))
2199     - sch->flags |= TCQ_F_ONETXQUEUE;
2200     + sch->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
2201     }
2202    
2203     sch->handle = handle;
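
Note: qdisc_tree_decrease_qlen() may now run under rcu_read_lock(), so the qdisc list uses the _rcu list primitives, and the new TCQ_F_NOPARENT flag (also set in sch_generic.c, sch_mq.c and sch_mqprio.c below) marks queue-attached qdiscs that were never added to a parent's list, letting the upward walk stop instead of warning. A rough shape of the hardened walk, with hypothetical names:

    #include <stddef.h>

    #define F_NOPARENT 0x1           /* stand-in for TCQ_F_NOPARENT */

    struct qdisc_model {
        unsigned flags;
        unsigned parent;             /* 0 models TC_H_ROOT here */
        struct qdisc_model *up;      /* what qdisc_lookup() would return */
    };

    static void decrease_qlen_walk(struct qdisc_model *sch)
    {
        while (sch && sch->parent) {
            if (sch->flags & F_NOPARENT)
                break;               /* attached to a netdev queue */
            sch = sch->up;           /* may be NULL if lookup fails */
            /* ... cops->qlen_notify() and qlen adjustment go here ... */
        }
    }
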
2204     diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
2205     index 6efca30894aa..b453270be3fd 100644
2206     --- a/net/sched/sch_generic.c
2207     +++ b/net/sched/sch_generic.c
2208     @@ -743,7 +743,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
2209     return;
2210     }
2211     if (!netif_is_multiqueue(dev))
2212     - qdisc->flags |= TCQ_F_ONETXQUEUE;
2213     + qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
2214     }
2215     dev_queue->qdisc_sleeping = qdisc;
2216     }
2217     diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
2218     index f3cbaecd283a..3e82f047caaf 100644
2219     --- a/net/sched/sch_mq.c
2220     +++ b/net/sched/sch_mq.c
2221     @@ -63,7 +63,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
2222     if (qdisc == NULL)
2223     goto err;
2224     priv->qdiscs[ntx] = qdisc;
2225     - qdisc->flags |= TCQ_F_ONETXQUEUE;
2226     + qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
2227     }
2228    
2229     sch->flags |= TCQ_F_MQROOT;
2230     @@ -156,7 +156,7 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
2231    
2232     *old = dev_graft_qdisc(dev_queue, new);
2233     if (new)
2234     - new->flags |= TCQ_F_ONETXQUEUE;
2235     + new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
2236     if (dev->flags & IFF_UP)
2237     dev_activate(dev);
2238     return 0;
2239     diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
2240     index 3811a745452c..ad70ecf57ce7 100644
2241     --- a/net/sched/sch_mqprio.c
2242     +++ b/net/sched/sch_mqprio.c
2243     @@ -132,7 +132,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
2244     goto err;
2245     }
2246     priv->qdiscs[i] = qdisc;
2247     - qdisc->flags |= TCQ_F_ONETXQUEUE;
2248     + qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
2249     }
2250    
2251     /* If the mqprio options indicate that hardware should own
2252     @@ -209,7 +209,7 @@ static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
2253     *old = dev_graft_qdisc(dev_queue, new);
2254    
2255     if (new)
2256     - new->flags |= TCQ_F_ONETXQUEUE;
2257     + new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
2258    
2259     if (dev->flags & IFF_UP)
2260     dev_activate(dev);
2261     diff --git a/net/sctp/auth.c b/net/sctp/auth.c
2262     index 4f15b7d730e1..1543e39f47c3 100644
2263     --- a/net/sctp/auth.c
2264     +++ b/net/sctp/auth.c
2265     @@ -809,8 +809,8 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
2266     if (!has_sha1)
2267     return -EINVAL;
2268    
2269     - memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0],
2270     - hmacs->shmac_num_idents * sizeof(__u16));
2271     + for (i = 0; i < hmacs->shmac_num_idents; i++)
2272     + ep->auth_hmacs_list->hmac_ids[i] = htons(hmacs->shmac_idents[i]);
2273     ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) +
2274     hmacs->shmac_num_idents * sizeof(__u16));
2275     return 0;
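
Note: the old memcpy() stored the caller's host-order HMAC identifiers straight into the wire-format parameter, which is wrong on little-endian hosts; the fix converts each 16-bit id with htons(). A userspace model of the corrected packing:

    #include <stdint.h>
    #include <arpa/inet.h>

    /* Each identifier is byte-swapped individually; a bulk memcpy()
     * would leave them little-endian on x86 and peers would reject or
     * misparse the AUTH parameter. */
    static void pack_hmac_ids(uint16_t *wire, const uint16_t *host, int n)
    {
        for (int i = 0; i < n; i++)
            wire[i] = htons(host[i]);
    }
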
2276     diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2277     index 5f6c4e61325b..66d796075050 100644
2278     --- a/net/sctp/socket.c
2279     +++ b/net/sctp/socket.c
2280     @@ -7387,6 +7387,13 @@ struct proto sctp_prot = {
2281    
2282     #if IS_ENABLED(CONFIG_IPV6)
2283    
2284     +#include <net/transp_v6.h>
2285     +static void sctp_v6_destroy_sock(struct sock *sk)
2286     +{
2287     + sctp_destroy_sock(sk);
2288     + inet6_destroy_sock(sk);
2289     +}
2290     +
2291     struct proto sctpv6_prot = {
2292     .name = "SCTPv6",
2293     .owner = THIS_MODULE,
2294     @@ -7396,7 +7403,7 @@ struct proto sctpv6_prot = {
2295     .accept = sctp_accept,
2296     .ioctl = sctp_ioctl,
2297     .init = sctp_init_sock,
2298     - .destroy = sctp_destroy_sock,
2299     + .destroy = sctp_v6_destroy_sock,
2300     .shutdown = sctp_shutdown,
2301     .setsockopt = sctp_setsockopt,
2302     .getsockopt = sctp_getsockopt,
2303     diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
2304     index 76e66695621c..1975fd8d1c10 100644
2305     --- a/net/unix/af_unix.c
2306     +++ b/net/unix/af_unix.c
2307     @@ -316,6 +316,118 @@ found:
2308     return s;
2309     }
2310    
2311     +/* Support code for asymmetrically connected dgram sockets
2312     + *
2313     + * If a datagram socket is connected to a socket not itself connected
2314     + * to the first socket (eg, /dev/log), clients may only enqueue more
2315     + * messages if the present receive queue of the server socket is not
2316     + * "too large". This means there's a second writeability condition
2317     + * poll and sendmsg need to test. The dgram recv code will do a wake
2318     + * up on the peer_wait wait queue of a socket upon reception of a
2319     + * datagram which needs to be propagated to sleeping would-be writers
2320     + * since these might not have sent anything so far. This can't be
2321     + * accomplished via poll_wait because the lifetime of the server
2322     + * socket might be less than that of its clients if these break their
2323     + * association with it or if the server socket is closed while clients
2324     + * are still connected to it and there's no way to inform "a polling
2325     + * implementation" that it should let go of a certain wait queue
2326     + *
2327     + * In order to propagate a wake up, a wait_queue_t of the client
2328     + * socket is enqueued on the peer_wait queue of the server socket
2329     + * whose wake function does a wake_up on the ordinary client socket
2330     + * wait queue. This connection is established whenever a write (or
2331     + * poll for write) hit the flow control condition and broken when the
2332     + * association to the server socket is dissolved or after a wake up
2333     + * was relayed.
2334     + */
2335     +
2336     +static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
2337     + void *key)
2338     +{
2339     + struct unix_sock *u;
2340     + wait_queue_head_t *u_sleep;
2341     +
2342     + u = container_of(q, struct unix_sock, peer_wake);
2343     +
2344     + __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
2345     + q);
2346     + u->peer_wake.private = NULL;
2347     +
2348     + /* relaying can only happen while the wq still exists */
2349     + u_sleep = sk_sleep(&u->sk);
2350     + if (u_sleep)
2351     + wake_up_interruptible_poll(u_sleep, key);
2352     +
2353     + return 0;
2354     +}
2355     +
2356     +static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
2357     +{
2358     + struct unix_sock *u, *u_other;
2359     + int rc;
2360     +
2361     + u = unix_sk(sk);
2362     + u_other = unix_sk(other);
2363     + rc = 0;
2364     + spin_lock(&u_other->peer_wait.lock);
2365     +
2366     + if (!u->peer_wake.private) {
2367     + u->peer_wake.private = other;
2368     + __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
2369     +
2370     + rc = 1;
2371     + }
2372     +
2373     + spin_unlock(&u_other->peer_wait.lock);
2374     + return rc;
2375     +}
2376     +
2377     +static void unix_dgram_peer_wake_disconnect(struct sock *sk,
2378     + struct sock *other)
2379     +{
2380     + struct unix_sock *u, *u_other;
2381     +
2382     + u = unix_sk(sk);
2383     + u_other = unix_sk(other);
2384     + spin_lock(&u_other->peer_wait.lock);
2385     +
2386     + if (u->peer_wake.private == other) {
2387     + __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
2388     + u->peer_wake.private = NULL;
2389     + }
2390     +
2391     + spin_unlock(&u_other->peer_wait.lock);
2392     +}
2393     +
2394     +static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
2395     + struct sock *other)
2396     +{
2397     + unix_dgram_peer_wake_disconnect(sk, other);
2398     + wake_up_interruptible_poll(sk_sleep(sk),
2399     + POLLOUT |
2400     + POLLWRNORM |
2401     + POLLWRBAND);
2402     +}
2403     +
2404     +/* preconditions:
2405     + * - unix_peer(sk) == other
2406     + * - association is stable
2407     + */
2408     +static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
2409     +{
2410     + int connected;
2411     +
2412     + connected = unix_dgram_peer_wake_connect(sk, other);
2413     +
2414     + if (unix_recvq_full(other))
2415     + return 1;
2416     +
2417     + if (connected)
2418     + unix_dgram_peer_wake_disconnect(sk, other);
2419     +
2420     + return 0;
2421     +}
2422     +
2423     static inline int unix_writable(struct sock *sk)
2424     {
2425     return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
2426     @@ -420,6 +532,8 @@ static void unix_release_sock(struct sock *sk, int embrion)
2427     skpair->sk_state_change(skpair);
2428     sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
2429     }
2430     +
2431     + unix_dgram_peer_wake_disconnect(sk, skpair);
2432     sock_put(skpair); /* It may now die */
2433     unix_peer(sk) = NULL;
2434     }
2435     @@ -648,6 +762,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
2436     INIT_LIST_HEAD(&u->link);
2437     mutex_init(&u->readlock); /* single task reading lock */
2438     init_waitqueue_head(&u->peer_wait);
2439     + init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
2440     unix_insert_socket(unix_sockets_unbound(sk), sk);
2441     out:
2442     if (sk == NULL)
2443     @@ -1015,6 +1130,8 @@ restart:
2444     if (unix_peer(sk)) {
2445     struct sock *old_peer = unix_peer(sk);
2446     unix_peer(sk) = other;
2447     + unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
2448     +
2449     unix_state_double_unlock(sk, other);
2450    
2451     if (other != old_peer)
2452     @@ -1453,6 +1570,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
2453     struct scm_cookie scm;
2454     int max_level;
2455     int data_len = 0;
2456     + int sk_locked;
2457    
2458     wait_for_unix_gc();
2459     err = scm_send(sock, msg, &scm, false);
2460     @@ -1532,12 +1650,14 @@ restart:
2461     goto out_free;
2462     }
2463    
2464     + sk_locked = 0;
2465     unix_state_lock(other);
2466     +restart_locked:
2467     err = -EPERM;
2468     if (!unix_may_send(sk, other))
2469     goto out_unlock;
2470    
2471     - if (sock_flag(other, SOCK_DEAD)) {
2472     + if (unlikely(sock_flag(other, SOCK_DEAD))) {
2473     /*
2474     * Check with 1003.1g - what should
2475     * datagram error
2476     @@ -1545,10 +1665,14 @@ restart:
2477     unix_state_unlock(other);
2478     sock_put(other);
2479    
2480     + if (!sk_locked)
2481     + unix_state_lock(sk);
2482     +
2483     err = 0;
2484     - unix_state_lock(sk);
2485     if (unix_peer(sk) == other) {
2486     unix_peer(sk) = NULL;
2487     + unix_dgram_peer_wake_disconnect_wakeup(sk, other);
2488     +
2489     unix_state_unlock(sk);
2490    
2491     unix_dgram_disconnected(sk, other);
2492     @@ -1574,21 +1698,38 @@ restart:
2493     goto out_unlock;
2494     }
2495    
2496     - if (unix_peer(other) != sk && unix_recvq_full(other)) {
2497     - if (!timeo) {
2498     - err = -EAGAIN;
2499     - goto out_unlock;
2500     + if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
2501     + if (timeo) {
2502     + timeo = unix_wait_for_peer(other, timeo);
2503     +
2504     + err = sock_intr_errno(timeo);
2505     + if (signal_pending(current))
2506     + goto out_free;
2507     +
2508     + goto restart;
2509     }
2510    
2511     - timeo = unix_wait_for_peer(other, timeo);
2512     + if (!sk_locked) {
2513     + unix_state_unlock(other);
2514     + unix_state_double_lock(sk, other);
2515     + }
2516    
2517     - err = sock_intr_errno(timeo);
2518     - if (signal_pending(current))
2519     - goto out_free;
2520     + if (unix_peer(sk) != other ||
2521     + unix_dgram_peer_wake_me(sk, other)) {
2522     + err = -EAGAIN;
2523     + sk_locked = 1;
2524     + goto out_unlock;
2525     + }
2526    
2527     - goto restart;
2528     + if (!sk_locked) {
2529     + sk_locked = 1;
2530     + goto restart_locked;
2531     + }
2532     }
2533    
2534     + if (unlikely(sk_locked))
2535     + unix_state_unlock(sk);
2536     +
2537     if (sock_flag(other, SOCK_RCVTSTAMP))
2538     __net_timestamp(skb);
2539     maybe_add_creds(skb, sock, other);
2540     @@ -1602,6 +1743,8 @@ restart:
2541     return len;
2542    
2543     out_unlock:
2544     + if (sk_locked)
2545     + unix_state_unlock(sk);
2546     unix_state_unlock(other);
2547     out_free:
2548     kfree_skb(skb);
2549     @@ -2245,14 +2388,16 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2550     return mask;
2551    
2552     writable = unix_writable(sk);
2553     - other = unix_peer_get(sk);
2554     - if (other) {
2555     - if (unix_peer(other) != sk) {
2556     - sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
2557     - if (unix_recvq_full(other))
2558     - writable = 0;
2559     - }
2560     - sock_put(other);
2561     + if (writable) {
2562     + unix_state_lock(sk);
2563     +
2564     + other = unix_peer(sk);
2565     + if (other && unix_peer(other) != sk &&
2566     + unix_recvq_full(other) &&
2567     + unix_dgram_peer_wake_me(sk, other))
2568     + writable = 0;
2569     +
2570     + unix_state_unlock(sk);
2571     }
2572    
2573     if (writable)
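
Note: the long comment inside the hunk explains the design; below is a compressed userspace model of the relay list, where the kernel's entry is a wait_queue_t with a custom wake function. Register once when a send hits flow control, fire-and-deregister when the receive queue drains, so a closed receiver can never keep a dangling registration. All names are illustrative.

    #include <pthread.h>
    #include <stdbool.h>

    struct relay_entry {
        struct relay_entry *next;
        bool registered;
        void (*wake)(void *writer); /* wakes the writer's own wait queue */
        void *writer;
    };

    struct receiver {
        pthread_mutex_t lock;       /* models peer_wait.lock */
        struct relay_entry *waiters;
    };

    /* Like unix_dgram_peer_wake_connect(): at most one registration
     * per writer. */
    static void relay_connect(struct receiver *r, struct relay_entry *e)
    {
        pthread_mutex_lock(&r->lock);
        if (!e->registered) {
            e->next = r->waiters;
            r->waiters = e;
            e->registered = true;
        }
        pthread_mutex_unlock(&r->lock);
    }

    /* Fired on receive-queue drain: each callback is one-shot, mirroring
     * unix_dgram_peer_wake_relay(), which removes itself before waking. */
    static void relay_fire(struct receiver *r)
    {
        pthread_mutex_lock(&r->lock);
        while (r->waiters) {
            struct relay_entry *e = r->waiters;
            r->waiters = e->next;
            e->registered = false;
            e->wake(e->writer);
        }
        pthread_mutex_unlock(&r->lock);
    }
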
2574     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
2575     index 225b78b4ef12..d02eccd51f6e 100644
2576     --- a/sound/pci/hda/patch_hdmi.c
2577     +++ b/sound/pci/hda/patch_hdmi.c
2578     @@ -48,8 +48,9 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
2579     #define is_haswell(codec) ((codec)->core.vendor_id == 0x80862807)
2580     #define is_broadwell(codec) ((codec)->core.vendor_id == 0x80862808)
2581     #define is_skylake(codec) ((codec)->core.vendor_id == 0x80862809)
2582     +#define is_broxton(codec) ((codec)->core.vendor_id == 0x8086280a)
2583     #define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
2584     - || is_skylake(codec))
2585     + || is_skylake(codec) || is_broxton(codec))
2586    
2587     #define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
2588     #define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
2589     diff --git a/tools/net/Makefile b/tools/net/Makefile
2590     index ee577ea03ba5..ddf888010652 100644
2591     --- a/tools/net/Makefile
2592     +++ b/tools/net/Makefile
2593     @@ -4,6 +4,9 @@ CC = gcc
2594     LEX = flex
2595     YACC = bison
2596    
2597     +CFLAGS += -Wall -O2
2598     +CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
2599     +
2600     %.yacc.c: %.y
2601     $(YACC) -o $@ -d $<
2602    
2603     @@ -12,15 +15,13 @@ YACC = bison
2604    
2605     all : bpf_jit_disasm bpf_dbg bpf_asm
2606    
2607     -bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm'
2608     +bpf_jit_disasm : CFLAGS += -DPACKAGE='bpf_jit_disasm'
2609     bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl
2610     bpf_jit_disasm : bpf_jit_disasm.o
2611    
2612     -bpf_dbg : CFLAGS = -Wall -O2
2613     bpf_dbg : LDLIBS = -lreadline
2614     bpf_dbg : bpf_dbg.o
2615    
2616     -bpf_asm : CFLAGS = -Wall -O2 -I.
2617     bpf_asm : LDLIBS =
2618     bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o
2619     bpf_exp.lex.o : bpf_exp.yacc.c