Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.7/0100-4.7.1-all-fixes.patch



Revision 2820
Wed Aug 31 11:00:12 2016 UTC by niro
File size: 37757 bytes
-linux-4.7.1
diff --git a/Documentation/cpu-freq/pcc-cpufreq.txt b/Documentation/cpu-freq/pcc-cpufreq.txt
index 0a94224ad296..9e3c3b33514c 100644
--- a/Documentation/cpu-freq/pcc-cpufreq.txt
+++ b/Documentation/cpu-freq/pcc-cpufreq.txt
@@ -159,8 +159,8 @@ to be strictly associated with a P-state.

2.2 cpuinfo_transition_latency:
-------------------------------
-The cpuinfo_transition_latency field is CPUFREQ_ETERNAL. The PCC specification
-does not include a field to expose this value currently.
+The cpuinfo_transition_latency field is 0. The PCC specification does
+not include a field to expose this value currently.

2.3 cpuinfo_cur_freq:
---------------------
diff --git a/Makefile b/Makefile
index 66da9a38b13b..84335c0b2eda 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 7
-SUBLEVEL = 0
+SUBLEVEL = 1
EXTRAVERSION =
NAME = Psychotic Stoned Sheep

diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index 087acb569b63..5f221acd21ae 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -279,8 +279,12 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
mm_segment_t fs;
long ret, err, i;

- if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event)))
+ if (maxevents <= 0 ||
+ maxevents > (INT_MAX/sizeof(*kbuf)) ||
+ maxevents > (INT_MAX/sizeof(*events)))
return -EINVAL;
+ if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
+ return -EFAULT;
kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
@@ -317,6 +321,8 @@ asmlinkage long sys_oabi_semtimedop(int semid,

if (nsops < 1 || nsops > SEMOPM)
return -EINVAL;
+ if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
+ return -EFAULT;
sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
if (!sops)
return -ENOMEM;
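
The sys_oabi-compat change above bounds maxevents against INT_MAX divided by both element sizes before multiplying, so the kmalloc() size cannot overflow, and it validates the user pointer up front. A minimal userspace sketch of the same overflow-safe sizing check (illustrative only; the struct and function names here are ours, not the kernel's):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

struct event { int fd; unsigned mask; };

/* Refuse counts whose byte size would overflow an int when multiplied
 * out -- the same guard the patch adds before kmalloc(). */
static struct event *alloc_events(long count)
{
    if (count <= 0 || count > (long)(INT_MAX / sizeof(struct event)))
        return NULL;
    return malloc(sizeof(struct event) * (size_t)count);
}

int main(void)
{
    struct event *ev = alloc_events(16);
    printf("count=16:       %s\n", ev ? "allocated" : "rejected");
    printf("count=LONG_MAX: %s\n",
           alloc_events(LONG_MAX) ? "allocated" : "rejected");
    free(ev);
    return 0;
}
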
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 9c0b387d6427..51d3988933f8 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -348,7 +348,7 @@ EXPORT(sysn32_call_table)
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key
PTR sys_request_key
- PTR sys_keyctl /* 6245 */
+ PTR compat_sys_keyctl /* 6245 */
PTR sys_set_thread_area
PTR sys_inotify_init
PTR sys_inotify_add_watch
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index f4f28b1580de..6efa7136748f 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -504,7 +504,7 @@ EXPORT(sys32_call_table)
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key /* 4280 */
PTR sys_request_key
- PTR sys_keyctl
+ PTR compat_sys_keyctl
PTR sys_set_thread_area
PTR sys_inotify_init
PTR sys_inotify_add_watch /* 4285 */
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 4cddd17153fb..f848572169ea 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -294,7 +294,7 @@
# 285 sys_setaltroot
286 i386 add_key sys_add_key
287 i386 request_key sys_request_key
-288 i386 keyctl sys_keyctl
+288 i386 keyctl sys_keyctl compat_sys_keyctl
289 i386 ioprio_set sys_ioprio_set
290 i386 ioprio_get sys_ioprio_get
291 i386 inotify_init sys_inotify_init
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 9d3a96c4da78..01c2d14ec05f 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -133,13 +133,11 @@ static inline unsigned int x86_cpuid_family(void)
#ifdef CONFIG_MICROCODE
extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
-extern int __init save_microcode_in_initrd(void);
void reload_early_microcode(void);
extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
#else
static inline void __init load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) { }
-static inline int __init save_microcode_in_initrd(void) { return 0; }
static inline void reload_early_microcode(void) { }
static inline bool
get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; }
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index ac360bfbbdb6..12823b6ebd6d 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -175,7 +175,7 @@ void load_ucode_ap(void)
}
}

-int __init save_microcode_in_initrd(void)
+static int __init save_microcode_in_initrd(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;

@@ -691,4 +691,5 @@ int __init microcode_init(void)
return error;

}
+fs_initcall(save_microcode_in_initrd);
late_initcall(microcode_init);
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 372aad2b3291..dffd162db0a4 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -696,13 +696,6 @@ void free_initmem(void)
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
/*
- * Remember, initrd memory may contain microcode or other useful things.
- * Before we lose initrd mem, we need to find a place to hold them
- * now that normal virtual memory is enabled.
- */
- save_microcode_in_initrd();
-
- /*
* end could be not aligned, and We can not align that,
* decompresser could be confused by aligned initrd_end
* We already reserve the end partial page before in
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 3177c2bc26f6..8eee0e9c93f0 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -24,7 +24,6 @@
#include <asm/frame.h>

ENTRY(swsusp_arch_suspend)
- FRAME_BEGIN
movq $saved_context, %rax
movq %rsp, pt_regs_sp(%rax)
movq %rbp, pt_regs_bp(%rax)
@@ -48,6 +47,7 @@ ENTRY(swsusp_arch_suspend)
movq %cr3, %rax
movq %rax, restore_cr3(%rip)

+ FRAME_BEGIN
call swsusp_save
FRAME_END
ret
@@ -104,7 +104,6 @@ ENTRY(core_restore_code)
/* code below belongs to the image kernel */
.align PAGE_SIZE
ENTRY(restore_registers)
- FRAME_BEGIN
/* go back to the original page tables */
movq %r9, %cr3

@@ -145,6 +144,5 @@ ENTRY(restore_registers)
/* tell the hibernation core that we've just restored the memory */
movq %rax, in_suspend(%rip)

- FRAME_END
ret
ENDPROC(restore_registers)
diff --git a/block/genhd.c b/block/genhd.c
index 9f42526b4d62..3eebd256b765 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -856,6 +856,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
if (iter) {
class_dev_iter_exit(iter);
kfree(iter);
+ seqf->private = NULL;
}
}

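The genhd.c hunk clears seqf->private after freeing the iterator, so a subsequent stop callback sees NULL instead of a dangling pointer. Nulling a slot right after freeing it is a standard guard against double free and use-after-free; a hedged userspace illustration (the types are invented for this sketch):

#include <stdio.h>
#include <stdlib.h>

struct seq_state { void *iter; };

/* Free the iterator once and clear the slot so a repeated stop
 * becomes a no-op -- mirrors "seqf->private = NULL;" above. */
static void stop_iter(struct seq_state *s)
{
    if (s->iter) {
        free(s->iter);
        s->iter = NULL;
    }
}

int main(void)
{
    struct seq_state s = { .iter = malloc(32) };
    stop_iter(&s);
    stop_iter(&s); /* safe: the first call cleared the pointer */
    puts("ok");
    return 0;
}
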
diff --git a/crypto/gcm.c b/crypto/gcm.c
index bec329b3de8d..d9ea5f9c0574 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -639,7 +639,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,

ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ CRYPTO_ALG_TYPE_AHASH_MASK |
+ crypto_requires_sync(algt->type,
+ algt->mask));
if (IS_ERR(ghash_alg))
return PTR_ERR(ghash_alg);

diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index ea5815c5e128..bc769c448d4a 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -72,7 +72,8 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,

void scatterwalk_done(struct scatter_walk *walk, int out, int more)
{
- if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
+ if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
+ !(walk->offset & (PAGE_SIZE - 1)))
scatterwalk_pagedone(walk, out, more);
}
EXPORT_SYMBOL_GPL(scatterwalk_done);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0158d3bff7e5..87ab9f6b4112 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -723,15 +723,18 @@ retry:
}
}

-static void credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
{
const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));

+ if (nbits < 0)
+ return -EINVAL;
+
/* Cap the value to avoid overflows */
nbits = min(nbits, nbits_max);
- nbits = max(nbits, -nbits_max);

credit_entropy_bits(r, nbits);
+ return 0;
}

/*********************************************************************
@@ -1543,8 +1546,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
- credit_entropy_bits_safe(&input_pool, ent_count);
- return 0;
+ return credit_entropy_bits_safe(&input_pool, ent_count);
case RNDADDENTROPY:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1558,8 +1560,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
size);
if (retval < 0)
return retval;
- credit_entropy_bits_safe(&input_pool, ent_count);
- return 0;
+ return credit_entropy_bits_safe(&input_pool, ent_count);
case RNDZAPENTCNT:
case RNDCLEARPOOL:
/*
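
The random.c change makes credit_entropy_bits_safe() reject a negative bit count with -EINVAL instead of clamping it, and random_ioctl() now propagates that result to the caller. The shape of the fix, reject invalid input first and cap the remainder, in a small compilable sketch (the cap value and names are ours, not the kernel's):

#include <errno.h>
#include <stdio.h>

#define NBITS_MAX 4096 /* illustrative cap, not the kernel's value */

/* Returns 0 or a negative errno so the caller can hand the result
 * straight back, as random_ioctl() now does. */
static int credit_bits_safe(int nbits)
{
    if (nbits < 0)
        return -EINVAL; /* reject rather than clamp */
    if (nbits > NBITS_MAX)
        nbits = NBITS_MAX; /* cap to avoid overflow */
    /* ... credit nbits to the pool here ... */
    return 0;
}

int main(void)
{
    printf("credit(100) -> %d\n", credit_bits_safe(100)); /* 0 */
    printf("credit(-5)  -> %d\n", credit_bits_safe(-5));  /* -EINVAL */
    return 0;
}
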
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index a7ecb9a84c15..3f0ce2ae35ee 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -555,8 +555,6 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->min = policy->cpuinfo.min_freq =
ioread32(&pcch_hdr->minimum_frequency) * 1000;

- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-
pr_debug("init: policy->max is %d, policy->min is %d\n",
policy->max, policy->min);
out:
diff --git a/drivers/infiniband/hw/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig
index a925fb0db706..f846fd51b85b 100644
--- a/drivers/infiniband/hw/hfi1/Kconfig
+++ b/drivers/infiniband/hw/hfi1/Kconfig
@@ -3,7 +3,6 @@ config INFINIBAND_HFI1
depends on X86_64 && INFINIBAND_RDMAVT
select MMU_NOTIFIER
select CRC32
- default m
---help---
This is a low-level driver for Intel OPA Gen1 adapter.
config HFI1_DEBUG_SDMA_ORDER
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a2afa3be17a4..4d7981946f79 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1422,7 +1422,16 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
return -EINVAL;
}

- if (slave_ops->ndo_set_mac_address == NULL) {
+ if (slave_dev->type == ARPHRD_INFINIBAND &&
+ BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
+ netdev_warn(bond_dev, "Type (%d) supports only active-backup mode\n",
+ slave_dev->type);
+ res = -EOPNOTSUPP;
+ goto err_undo_flags;
+ }
+
+ if (!slave_ops->ndo_set_mac_address ||
+ slave_dev->type == ARPHRD_INFINIBAND) {
netdev_warn(bond_dev, "The slave device specified does not support setting the MAC address\n");
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index b122f6013b6c..03601dfc0642 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -809,13 +809,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
* in a bitmap and increasing the chain consumer only
* for the first successive completed entries.
*/
- bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);
+ __set_bit(pos, p_spq->p_comp_bitmap);

while (test_bit(p_spq->comp_bitmap_idx,
p_spq->p_comp_bitmap)) {
- bitmap_clear(p_spq->p_comp_bitmap,
- p_spq->comp_bitmap_idx,
- SPQ_RING_SIZE);
+ __clear_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap);
p_spq->comp_bitmap_idx++;
qed_chain_return_produced(&p_spq->chain);
}
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 8bcd78f94966..a70b6c460178 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -942,7 +942,6 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
}

macsec_skb_cb(skb)->req = req;
- macsec_skb_cb(skb)->rx_sa = rx_sa;
skb->dev = dev;
aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

@@ -1169,6 +1168,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
}
}

+ macsec_skb_cb(skb)->rx_sa = rx_sa;
+
/* Disabled && !changed text => skip validation */
if (hdr->tci_an & MACSEC_TCI_C ||
secy->validate_frames != MACSEC_VALIDATE_DISABLED)
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index ce362bd51de7..45b57c294d13 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -300,6 +300,8 @@ static int mvebu_uart_startup(struct uart_port *port)
static void mvebu_uart_shutdown(struct uart_port *port)
{
writel(0, port->membase + UART_CTRL);
+
+ free_irq(port->irq, port);
}

static void mvebu_uart_set_termios(struct uart_port *port,
diff --git a/fs/dcache.c b/fs/dcache.c
index d6847d7b123d..1ed81bb80500 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -622,7 +622,6 @@ static struct dentry *dentry_kill(struct dentry *dentry)

failed:
spin_unlock(&dentry->d_lock);
- cpu_relax();
return dentry; /* try again with same dentry */
}

@@ -796,6 +795,8 @@ void dput(struct dentry *dentry)
return;

repeat:
+ might_sleep();
+
rcu_read_lock();
if (likely(fast_dput(dentry))) {
rcu_read_unlock();
@@ -829,8 +830,10 @@ repeat:

kill_it:
dentry = dentry_kill(dentry);
- if (dentry)
+ if (dentry) {
+ cond_resched();
goto repeat;
+ }
}
EXPORT_SYMBOL(dput);

diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 3020fd70c392..1ea505434a6e 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -208,6 +208,9 @@ static int ext4_init_block_bitmap(struct super_block *sb,
memset(bh->b_data, 0, sb->s_blocksize);

bit_max = ext4_num_base_meta_clusters(sb, block_group);
+ if ((bit_max >> 3) >= bh->b_size)
+ return -EFSCORRUPTED;
+
for (bit = 0; bit < bit_max; bit++)
ext4_set_bit(bit, bh->b_data);

diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 2a2eef9c14e4..d7ccb7f51dfc 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -381,9 +381,13 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
ext4_fsblk_t block = ext4_ext_pblock(ext);
int len = ext4_ext_get_actual_len(ext);
ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
- ext4_lblk_t last = lblock + len - 1;

- if (len == 0 || lblock > last)
+ /*
+ * We allow neither:
+ * - zero length
+ * - overflow/wrap-around
+ */
+ if (lblock + len <= lblock)
return 0;
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}
@@ -474,6 +478,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
error_msg = "invalid extent entries";
goto corrupted;
}
+ if (unlikely(depth > 32)) {
+ error_msg = "too large eh_depth";
+ goto corrupted;
+ }
/* Verify checksum on non-root extent tree nodes */
if (ext_depth(inode) != depth &&
!ext4_extent_block_csum_verify(inode, eh)) {
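
The rewritten ext4_valid_extent() check is worth dwelling on: with unsigned arithmetic, lblock + len <= lblock holds exactly when len is zero or the addition wraps past the type's maximum, so the single comparison replaces both of the older tests. A self-contained demonstration (uint32_t stands in for ext4_lblk_t):

#include <stdint.h>
#include <stdio.h>

/* Valid extent: nonzero length and no wrap-around. One unsigned
 * comparison covers both cases, as in ext4_valid_extent(). */
static int extent_ok(uint32_t lblock, uint32_t len)
{
    return lblock + len > lblock;
}

int main(void)
{
    printf("len=0:  %d\n", extent_ok(100, 0));        /* 0: zero length */
    printf("normal: %d\n", extent_ok(100, 50));       /* 1 */
    printf("wraps:  %d\n", extent_ok(UINT32_MAX, 2)); /* 0: overflow */
    return 0;
}
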
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index f7140ca66e3b..b747ec09c1ac 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -205,9 +205,9 @@ void ext4_evict_inode(struct inode *inode)
* Note that directories do not have this problem because they
* don't use page cache.
*/
- if (ext4_should_journal_data(inode) &&
- (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
- inode->i_ino != EXT4_JOURNAL_INO) {
+ if (inode->i_ino != EXT4_JOURNAL_INO &&
+ ext4_should_journal_data(inode) &&
+ (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

@@ -2748,13 +2748,36 @@ retry:
done = true;
}
}
- ext4_journal_stop(handle);
+ /*
+ * Caution: If the handle is synchronous,
+ * ext4_journal_stop() can wait for transaction commit
+ * to finish which may depend on writeback of pages to
+ * complete or on page lock to be released. In that
+ * case, we have to wait until after after we have
+ * submitted all the IO, released page locks we hold,
+ * and dropped io_end reference (for extent conversion
+ * to be able to complete) before stopping the handle.
+ */
+ if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
+ ext4_journal_stop(handle);
+ handle = NULL;
+ }
/* Submit prepared bio */
ext4_io_submit(&mpd.io_submit);
/* Unlock pages we didn't use */
mpage_release_unused_pages(&mpd, give_up_on_write);
- /* Drop our io_end reference we got from init */
- ext4_put_io_end(mpd.io_submit.io_end);
+ /*
+ * Drop our io_end reference we got from init. We have
+ * to be careful and use deferred io_end finishing if
+ * we are still holding the transaction as we can
+ * release the last reference to io_end which may end
+ * up doing unwritten extent conversion.
+ */
+ if (handle) {
+ ext4_put_io_end_defer(mpd.io_submit.io_end);
+ ext4_journal_stop(handle);
+ } else
+ ext4_put_io_end(mpd.io_submit.io_end);

if (ret == -ENOSPC && sbi->s_journal) {
/*
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c1ab3ec30423..7f42eda52523 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2939,7 +2939,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
"fs metadata", block, block+len);
/* File system mounted not to panic on error
- * Fix the bitmap and repeat the block allocation
+ * Fix the bitmap and return EFSCORRUPTED
* We leak some of the blocks here.
*/
ext4_lock_group(sb, ac->ac_b_ex.fe_group);
@@ -2948,7 +2948,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
if (!err)
- err = -EAGAIN;
+ err = -EFSCORRUPTED;
goto out_err;
}

@@ -4513,18 +4513,7 @@ repeat:
}
if (likely(ac->ac_status == AC_STATUS_FOUND)) {
*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
- if (*errp == -EAGAIN) {
- /*
- * drop the reference that we took
- * in ext4_mb_use_best_found
- */
- ext4_mb_release_context(ac);
- ac->ac_b_ex.fe_group = 0;
- ac->ac_b_ex.fe_start = 0;
- ac->ac_b_ex.fe_len = 0;
- ac->ac_status = AC_STATUS_CONTINUE;
- goto repeat;
- } else if (*errp) {
+ if (*errp) {
ext4_discard_allocated_blocks(ac);
goto errout;
} else {
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 3822a5aedc61..639bd756a8d8 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2278,6 +2278,16 @@ static void ext4_orphan_cleanup(struct super_block *sb,
while (es->s_last_orphan) {
struct inode *inode;

+ /*
+ * We may have encountered an error during cleanup; if
+ * so, skip the rest.
+ */
+ if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
+ jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
+ es->s_last_orphan = 0;
+ break;
+ }
+
inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
if (IS_ERR(inode)) {
es->s_last_orphan = 0;
@@ -3416,6 +3426,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
}

+ if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
+ ext4_msg(sb, KERN_ERR,
+ "Number of reserved GDT blocks insanely large: %d",
+ le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
+ goto failed_mount;
+ }
+
if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
err = bdev_dax_supported(sb, blocksize);
if (err)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 9154f8679024..6cac3dc33521 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -417,6 +417,15 @@ static int fuse_flush(struct file *file, fl_owner_t id)
fuse_sync_writes(inode);
inode_unlock(inode);

+ if (test_bit(AS_ENOSPC, &file->f_mapping->flags) &&
+ test_and_clear_bit(AS_ENOSPC, &file->f_mapping->flags))
+ err = -ENOSPC;
+ if (test_bit(AS_EIO, &file->f_mapping->flags) &&
+ test_and_clear_bit(AS_EIO, &file->f_mapping->flags))
+ err = -EIO;
+ if (err)
+ return err;
+
req = fuse_get_req_nofail_nopages(fc, file);
memset(&inarg, 0, sizeof(inarg));
inarg.fh = ff->fh;
@@ -462,6 +471,21 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
goto out;

fuse_sync_writes(inode);
+
+ /*
+ * Due to implementation of fuse writeback
+ * filemap_write_and_wait_range() does not catch errors.
+ * We have to do this directly after fuse_sync_writes()
+ */
+ if (test_bit(AS_ENOSPC, &file->f_mapping->flags) &&
+ test_and_clear_bit(AS_ENOSPC, &file->f_mapping->flags))
+ err = -ENOSPC;
+ if (test_bit(AS_EIO, &file->f_mapping->flags) &&
+ test_and_clear_bit(AS_EIO, &file->f_mapping->flags))
+ err = -EIO;
+ if (err)
+ goto out;
+
err = sync_inode_metadata(inode, 1);
if (err)
goto out;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 9961d8432ce3..9b7cb37b4ba8 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -942,7 +942,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
- FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
+ FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
FUSE_PARALLEL_DIROPS;
diff --git a/fs/inode.c b/fs/inode.c
index 4ccbc21b30ce..9ea421948742 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -345,7 +345,7 @@ EXPORT_SYMBOL(inc_nlink);
void address_space_init_once(struct address_space *mapping)
{
memset(mapping, 0, sizeof(*mapping));
- INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+ INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC | __GFP_ACCOUNT);
spin_lock_init(&mapping->tree_lock);
init_rwsem(&mapping->i_mmap_rwsem);
INIT_LIST_HEAD(&mapping->private_list);
@@ -1740,8 +1740,8 @@ static int __remove_privs(struct dentry *dentry, int kill)
*/
int file_remove_privs(struct file *file)
{
- struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = d_inode(dentry);
+ struct dentry *dentry = file_dentry(file);
+ struct inode *inode = file_inode(file);
int kill;
int error = 0;

@@ -1749,7 +1749,7 @@ int file_remove_privs(struct file *file)
if (IS_NOSEC(inode))
return 0;

- kill = file_needs_remove_privs(file);
+ kill = dentry_needs_remove_privs(dentry);
if (kill < 0)
return kill;
if (kill)
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 116a333e9c77..0f56deb24ce6 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -590,6 +590,7 @@ static long ioctl_file_dedupe_range(struct file *file, void __user *arg)
goto out;
}

+ same->dest_count = count;
ret = vfs_dedupe_file_range(file, same);
if (ret)
goto out;
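
The one-line fs/ioctl.c fix appears to re-derive same->dest_count from the count that was already validated and used to size the copy, rather than trusting whatever value sits in the copied-in structure at call time. The general pattern, overwriting security-relevant fields of an untrusted request with validated values, sketched in userspace (all names here are hypothetical):

#include <stdio.h>
#include <string.h>

struct dedupe_req { unsigned short dest_count; /* ... more fields ... */ };

/* Copy the untrusted request, then force the count field to the value
 * we validated and allocated for -- the idea behind
 * "same->dest_count = count;" above. */
static void import_request(struct dedupe_req *dst,
                           const struct dedupe_req *untrusted,
                           unsigned short validated_count)
{
    memcpy(dst, untrusted, sizeof(*dst));
    dst->dest_count = validated_count;
}

int main(void)
{
    struct dedupe_req user = { .dest_count = 60000 }; /* caller-chosen */
    struct dedupe_req req;
    import_request(&req, &user, 4);
    printf("dest_count used: %u\n", req.dest_count); /* 4, not 60000 */
    return 0;
}
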
diff --git a/ipc/msg.c b/ipc/msg.c
index 1471db9a7e61..c6521c205cb4 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -680,7 +680,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
rcu_read_lock();
ipc_lock_object(&msq->q_perm);

- ipc_rcu_putref(msq, ipc_rcu_free);
+ ipc_rcu_putref(msq, msg_rcu_free);
/* raced with RMID? */
if (!ipc_valid_object(&msq->q_perm)) {
err = -EIDRM;
diff --git a/ipc/sem.c b/ipc/sem.c
index b3757ea0694b..5d2f875e8e2e 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -449,7 +449,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
sem_lock(sma, NULL, -1);
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -1392,7 +1392,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
rcu_read_unlock();
sem_io = ipc_alloc(sizeof(ushort)*nsems);
if (sem_io == NULL) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
return -ENOMEM;
}

@@ -1426,20 +1426,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
if (nsems > SEMMSL_FAST) {
sem_io = ipc_alloc(sizeof(ushort)*nsems);
if (sem_io == NULL) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
return -ENOMEM;
}
}

if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
err = -EFAULT;
goto out_free;
}

for (i = 0; i < nsems; i++) {
if (sem_io[i] > SEMVMX) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
err = -ERANGE;
goto out_free;
}
@@ -1731,7 +1731,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 2: allocate new undo structure */
new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
if (!new) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
return ERR_PTR(-ENOMEM);
}

diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 8b7d8459bb9d..bc7852f95443 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -274,10 +274,11 @@ radix_tree_node_alloc(struct radix_tree_root *root)

/*
* Even if the caller has preloaded, try to allocate from the
- * cache first for the new node to get accounted.
+ * cache first for the new node to get accounted to the memory
+ * cgroup.
*/
ret = kmem_cache_alloc(radix_tree_node_cachep,
- gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN);
+ gfp_mask | __GFP_NOWARN);
if (ret)
goto out;

@@ -300,8 +301,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
kmemleak_update_trace(ret);
goto out;
}
- ret = kmem_cache_alloc(radix_tree_node_cachep,
- gfp_mask | __GFP_ACCOUNT);
+ ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
out:
BUG_ON(radix_tree_is_internal_node(ret));
return ret;
@@ -348,6 +348,12 @@ static int __radix_tree_preload(gfp_t gfp_mask)
struct radix_tree_node *node;
int ret = -ENOMEM;

+ /*
+ * Nodes preloaded by one cgroup can be be used by another cgroup, so
+ * they should never be accounted to any particular memory cgroup.
+ */
+ gfp_mask &= ~__GFP_ACCOUNT;
+
preempt_disable();
rtp = this_cpu_ptr(&radix_tree_preloads);
while (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5339c89dff63..ca847d96a980 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4083,14 +4083,32 @@ static struct cftype mem_cgroup_legacy_files[] = {

static DEFINE_IDR(mem_cgroup_idr);

-static void mem_cgroup_id_get(struct mem_cgroup *memcg)
+static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
{
- atomic_inc(&memcg->id.ref);
+ atomic_add(n, &memcg->id.ref);
}

-static void mem_cgroup_id_put(struct mem_cgroup *memcg)
+static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
{
- if (atomic_dec_and_test(&memcg->id.ref)) {
+ while (!atomic_inc_not_zero(&memcg->id.ref)) {
+ /*
+ * The root cgroup cannot be destroyed, so it's refcount must
+ * always be >= 1.
+ */
+ if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
+ VM_BUG_ON(1);
+ break;
+ }
+ memcg = parent_mem_cgroup(memcg);
+ if (!memcg)
+ memcg = root_mem_cgroup;
+ }
+ return memcg;
+}
+
+static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
+{
+ if (atomic_sub_and_test(n, &memcg->id.ref)) {
idr_remove(&mem_cgroup_idr, memcg->id.id);
memcg->id.id = 0;

@@ -4099,6 +4117,16 @@ static void mem_cgroup_id_put(struct mem_cgroup *memcg)
}
}

+static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
+{
+ mem_cgroup_id_get_many(memcg, 1);
+}
+
+static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
+{
+ mem_cgroup_id_put_many(memcg, 1);
+}
+
/**
* mem_cgroup_from_id - look up a memcg from a memcg id
* @id: the memcg id to look up
@@ -4736,6 +4764,8 @@ static void __mem_cgroup_clear_mc(void)
if (!mem_cgroup_is_root(mc.from))
page_counter_uncharge(&mc.from->memsw, mc.moved_swap);

+ mem_cgroup_id_put_many(mc.from, mc.moved_swap);
+
/*
* we charged both to->memory and to->memsw, so we
* should uncharge to->memory.
@@ -4743,9 +4773,9 @@ static void __mem_cgroup_clear_mc(void)
if (!mem_cgroup_is_root(mc.to))
page_counter_uncharge(&mc.to->memory, mc.moved_swap);

- css_put_many(&mc.from->css, mc.moved_swap);
+ mem_cgroup_id_get_many(mc.to, mc.moved_swap);
+ css_put_many(&mc.to->css, mc.moved_swap);

- /* we've already done css_get(mc.to) */
mc.moved_swap = 0;
}
memcg_oom_recover(from);
@@ -5805,7 +5835,7 @@ subsys_initcall(mem_cgroup_init);
*/
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
- struct mem_cgroup *memcg;
+ struct mem_cgroup *memcg, *swap_memcg;
unsigned short oldid;

VM_BUG_ON_PAGE(PageLRU(page), page);
@@ -5820,16 +5850,27 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
if (!memcg)
return;

- mem_cgroup_id_get(memcg);
- oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
+ /*
+ * In case the memcg owning these pages has been offlined and doesn't
+ * have an ID allocated to it anymore, charge the closest online
+ * ancestor for the swap instead and transfer the memory+swap charge.
+ */
+ swap_memcg = mem_cgroup_id_get_online(memcg);
+ oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg));
VM_BUG_ON_PAGE(oldid, page);
- mem_cgroup_swap_statistics(memcg, true);
+ mem_cgroup_swap_statistics(swap_memcg, true);

page->mem_cgroup = NULL;

if (!mem_cgroup_is_root(memcg))
page_counter_uncharge(&memcg->memory, 1);

+ if (memcg != swap_memcg) {
+ if (!mem_cgroup_is_root(swap_memcg))
+ page_counter_charge(&swap_memcg->memsw, 1);
+ page_counter_uncharge(&memcg->memsw, 1);
+ }
+
/*
* Interrupts should be disabled here because the caller holds the
* mapping->tree_lock lock which is taken with interrupts-off. It is
@@ -5868,11 +5909,14 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
if (!memcg)
return 0;

+ memcg = mem_cgroup_id_get_online(memcg);
+
if (!mem_cgroup_is_root(memcg) &&
- !page_counter_try_charge(&memcg->swap, 1, &counter))
+ !page_counter_try_charge(&memcg->swap, 1, &counter)) {
+ mem_cgroup_id_put(memcg);
return -ENOMEM;
+ }

- mem_cgroup_id_get(memcg);
oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
VM_BUG_ON_PAGE(oldid, page);
mem_cgroup_swap_statistics(memcg, true);
diff --git a/mm/mempool.c b/mm/mempool.c
index 8f65464da5de..47a659dedd44 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -306,7 +306,7 @@ EXPORT_SYMBOL(mempool_resize);
* returns NULL. Note that due to preallocation, this function
* *never* fails when called from process contexts. (it might
* fail if called from an IRQ context.)
- * Note: neither __GFP_NOMEMALLOC nor __GFP_ZERO are supported.
+ * Note: using __GFP_ZERO is not supported.
*/
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
@@ -315,27 +315,16 @@ void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
wait_queue_t wait;
gfp_t gfp_temp;

- /* If oom killed, memory reserves are essential to prevent livelock */
- VM_WARN_ON_ONCE(gfp_mask & __GFP_NOMEMALLOC);
- /* No element size to zero on allocation */
VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
-
might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

+ gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */
gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */
gfp_mask |= __GFP_NOWARN; /* failures are OK */

gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:
- if (likely(pool->curr_nr)) {
- /*
- * Don't allocate from emergency reserves if there are
- * elements available. This check is racy, but it will
- * be rechecked each loop.
- */
- gfp_temp |= __GFP_NOMEMALLOC;
- }

element = pool->alloc(gfp_temp, pool->pool_data);
if (likely(element != NULL))
@@ -359,12 +348,11 @@ repeat_alloc:
* We use gfp mask w/o direct reclaim or IO for the first round. If
* alloc failed with that and @pool was empty, retry immediately.
*/
- if ((gfp_temp & ~__GFP_NOMEMALLOC) != gfp_mask) {
+ if (gfp_temp != gfp_mask) {
spin_unlock_irqrestore(&pool->lock, flags);
gfp_temp = gfp_mask;
goto repeat_alloc;
}
- gfp_temp = gfp_mask;

/* We must not sleep if !__GFP_DIRECT_RECLAIM */
if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 43d2cd862bc2..28d5ec269e48 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -288,6 +288,14 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
case 0x01: /* IEEE MAC (Pause) */
goto drop;

+ case 0x0E: /* 802.1AB LLDP */
+ fwd_mask |= p->br->group_fwd_mask;
+ if (fwd_mask & (1u << dest[5]))
+ goto forward;
+ *pskb = skb;
+ __br_handle_local_finish(skb);
+ return RX_HANDLER_PASS;
+
default:
/* Allow selective forwarding for most other protocols */
fwd_mask |= p->br->group_fwd_mask;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e00e972c4e6a..700b72ca5912 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -236,7 +236,8 @@ void tcp_select_initial_window(int __space, __u32 mss,
/* Set window scaling on max possible window
* See RFC1323 for an explanation of the limit to 14
*/
- space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
+ space = max_t(u32, space, sysctl_tcp_rmem[2]);
+ space = max_t(u32, space, sysctl_rmem_max);
space = min_t(u32, space, *window_clamp);
while (space > 65535 && (*rcv_wscale) < 14) {
space >>= 1;
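
The tcp_select_initial_window() fix is easy to misread: the old single max_t() took the larger of the two sysctls and threw the caller-supplied space away entirely; chaining two max_t() calls keeps all three values in play. The difference in a compilable sketch (the numbers are made up for illustration):

#include <stdio.h>

static unsigned int max_u(unsigned int a, unsigned int b)
{
    return a > b ? a : b;
}

int main(void)
{
    unsigned int space = 262144;    /* caller-supplied buffer size */
    unsigned int rmem2 = 87380;     /* stand-in for sysctl_tcp_rmem[2] */
    unsigned int rmem_max = 212992; /* stand-in for sysctl_rmem_max */

    unsigned int old = max_u(rmem2, rmem_max);            /* drops space */
    unsigned int fixed = max_u(max_u(space, rmem2), rmem_max);

    printf("old:   %u\n", old);   /* 212992: window capped too low */
    printf("fixed: %u\n", fixed); /* 262144 */
    return 0;
}
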
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4aed8fc23d32..e61f7cd65d08 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1581,9 +1581,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
udp_lib_checksum_complete(skb))
goto csum_error;

- if (sk_filter(sk, skb))
- goto drop;
- if (unlikely(skb->len < sizeof(struct udphdr)))
+ if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
goto drop;

udp_csum_pull_header(skb);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 47f837a58e0a..047c75a798b1 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3562,6 +3562,10 @@ restart:
if (state != INET6_IFADDR_STATE_DEAD) {
__ipv6_ifa_notify(RTM_DELADDR, ifa);
inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
+ } else {
+ if (idev->cnf.forwarding)
+ addrconf_leave_anycast(ifa);
+ addrconf_leave_solict(ifa->idev, &ifa->addr);
}

write_lock_bh(&idev->lock);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index acc09705618b..42a2edf7c9ef 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -618,9 +618,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
udp_lib_checksum_complete(skb))
goto csum_error;

- if (sk_filter(sk, skb))
- goto drop;
- if (unlikely(skb->len < sizeof(struct udphdr)))
+ if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
goto drop;

udp_csum_pull_header(skb);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 923abd6b3064..8d2f7c9b491d 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1024,8 +1024,11 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
}

/* Check if we have opened a local TSAP */
- if (!self->tsap)
- irda_open_tsap(self, LSAP_ANY, addr->sir_name);
+ if (!self->tsap) {
+ err = irda_open_tsap(self, LSAP_ANY, addr->sir_name);
+ if (err)
+ goto out;
+ }

/* Move to connecting socket, start sending Connect Requests */
sock->state = SS_CONNECTING;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 47cf4604d19c..f093322560e6 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -328,6 +328,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
*/

sk = rcvr->sk;
+ local_bh_disable();
bh_lock_sock(sk);

if (sock_owned_by_user(sk)) {
@@ -339,6 +340,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
sctp_inq_push(inqueue, chunk);

bh_unlock_sock(sk);
+ local_bh_enable();

/* If the chunk was backloged again, don't drop refs */
if (backloged)
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 9d87bba0ff1d..b335ffcef0b9 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -89,12 +89,10 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
* Eventually, we should clean up inqueue to not rely
* on the BH related data structures.
*/
- local_bh_disable();
list_add_tail(&chunk->list, &q->in_chunk_list);
if (chunk->asoc)
chunk->asoc->stats.ipackets++;
q->immediate.func(&q->immediate);
- local_bh_enable();
}

/* Peek at the next chunk on the inqeue. */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 67154b848aa9..7f5689a93de9 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4301,6 +4301,7 @@ int sctp_transport_walk_start(struct rhashtable_iter *iter)

err = rhashtable_walk_start(iter);
if (err && err != -EAGAIN) {
+ rhashtable_walk_stop(iter);
rhashtable_walk_exit(iter);
return err;
}
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index ad4fa49ad1db..9068369f8a1b 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -331,6 +331,7 @@ static int aa_fs_seq_hash_show(struct seq_file *seq, void *v)
seq_printf(seq, "%.2x", profile->hash[i]);
seq_puts(seq, "\n");
}
+ aa_put_profile(profile);

return 0;
}