Magellan Linux

Annotation of /trunk/kernel-alx-legacy/patches-4.9/0218-4.9.119-all-fixes.patch

Parent Directory | Revision Log


Revision 3608
Fri Aug 14 07:34:29 2020 UTC by niro
File size: 15926 bytes
-added kernel-alx-legacy pkg
diff --git a/Makefile b/Makefile
index 0940f11fa071..0723bbe1d4a7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 118
+SUBLEVEL = 119
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 47fc1f1acff7..4d297d554e52 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -376,6 +376,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
                goto err_desc;
        }
 
+       reinit_completion(&dma->cmd_complete);
        txdesc->callback = i2c_imx_dma_callback;
        txdesc->callback_param = i2c_imx;
        if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -619,7 +620,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
         * The first byte must be transmitted by the CPU.
         */
        imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
-       reinit_completion(&i2c_imx->dma->cmd_complete);
        time_left = wait_for_completion_timeout(
                        &i2c_imx->dma->cmd_complete,
                        msecs_to_jiffies(DMA_TIMEOUT));
@@ -678,7 +678,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
        if (result)
                return result;
 
-       reinit_completion(&i2c_imx->dma->cmd_complete);
        time_left = wait_for_completion_timeout(
                        &i2c_imx->dma->cmd_complete,
                        msecs_to_jiffies(DMA_TIMEOUT));
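The i2c-imx change above is an ordering fix: once the DMA descriptor has been submitted, its completion callback may fire at any moment, so re-arming the completion afterwards (as the old read/write paths did) could erase a completion that had already been signalled and make the subsequent wait time out. A minimal sketch of the safe ordering, with start_async_op() as a hypothetical stand-in for the submit step:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    struct async_ctx {
            struct completion done;         /* init_completion() at setup time */
    };

    static void op_callback(void *arg)
    {
            struct async_ctx *ctx = arg;

            complete(&ctx->done);           /* may run as soon as the op is queued */
    }

    static int run_op(struct async_ctx *ctx)
    {
            /*
             * Re-arm BEFORE submitting. If this ran after start_async_op(),
             * a fast callback could call complete() first and the reinit
             * would discard that event, so the wait below would falsely
             * time out.
             */
            reinit_completion(&ctx->done);

            start_async_op(ctx, op_callback);       /* hypothetical submit helper */

            if (!wait_for_completion_timeout(&ctx->done, msecs_to_jiffies(1000)))
                    return -ETIMEDOUT;
            return 0;
    }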
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 613074e963bb..e8e0fa58cb71 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -397,7 +397,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 
        lockdep_assert_held(&qp->s_lock);
        ps->s_txreq = get_txreq(ps->dev, qp);
-       if (IS_ERR(ps->s_txreq))
+       if (!ps->s_txreq)
                goto bail_no_tx;
 
        ohdr = &ps->s_txreq->phdr.hdr.u.oth;
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 5e6d1bac4914..de21128a0181 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -72,7 +72,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
        int middle = 0;
 
        ps->s_txreq = get_txreq(ps->dev, qp);
-       if (IS_ERR(ps->s_txreq))
+       if (!ps->s_txreq)
                goto bail_no_tx;
 
        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 97ae24b6314c..1a7ce1d740ce 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -285,7 +285,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
        u8 sc5;
 
        ps->s_txreq = get_txreq(ps->dev, qp);
-       if (IS_ERR(ps->s_txreq))
+       if (!ps->s_txreq)
                goto bail_no_tx;
 
        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index 094ab829ec42..d8a5bad49680 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -94,7 +94,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
                                struct rvt_qp *qp)
        __must_hold(&qp->s_lock)
 {
-       struct verbs_txreq *tx = ERR_PTR(-EBUSY);
+       struct verbs_txreq *tx = NULL;
 
        write_seqlock(&dev->iowait_lock);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 5660897593ba..31ded57592ee 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -82,7 +82,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
        if (unlikely(!tx)) {
                /* call slow path to get the lock */
                tx = __get_txreq(dev, qp);
-               if (IS_ERR(tx))
+               if (!tx)
                        return tx;
        }
        tx->qp = qp;
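The hfi1 hunks above all serve one change: __get_txreq() now reports failure with NULL instead of ERR_PTR(-EBUSY), and every caller's IS_ERR() test becomes a plain NULL check. The two failure conventions must never be mixed, because IS_ERR() does not catch NULL and !ptr does not catch an ERR_PTR value. A standalone sketch of both idioms (struct foo and the alloc_* helpers are made up for illustration):

    #include <linux/err.h>
    #include <linux/slab.h>

    struct foo { int x; };

    /* Convention 1: NULL on failure - fine when there is one failure mode. */
    static struct foo *alloc_simple(void)
    {
            return kzalloc(sizeof(struct foo), GFP_KERNEL);
    }

    /* Convention 2: ERR_PTR() when the caller must know why it failed. */
    static struct foo *alloc_verbose(bool busy)
    {
            struct foo *f;

            if (busy)
                    return ERR_PTR(-EBUSY);
            f = kzalloc(sizeof(*f), GFP_KERNEL);
            return f ? f : ERR_PTR(-ENOMEM);
    }

    static int use_both(void)
    {
            struct foo *a = alloc_simple();
            struct foo *b = alloc_verbose(false);

            if (!a)                         /* matches convention 1 */
                    return -ENOMEM;
            if (IS_ERR(b)) {                /* matches convention 2 */
                    kfree(a);
                    return PTR_ERR(b);
            }
            kfree(b);
            kfree(a);
            return 0;
    }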
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index d966d47c9e80..d38d379bb5c8 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -567,7 +567,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
        union acpi_object *obj;
        struct pci_host_bridge *bridge;
 
-       if (acpi_pci_disabled || !bus->bridge)
+       if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
                return;
 
        acpi_pci_slot_enumerate(bus);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 34bbcfcae67c..5f66b6da65f2 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -329,11 +329,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 
        wait_for_completion(&tm_iocb->u.tmf.comp);
 
-       rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
-           QLA_SUCCESS : QLA_FUNCTION_FAILED;
+       rval = tm_iocb->u.tmf.data;
 
-       if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
-               ql_dbg(ql_dbg_taskm, vha, 0x8030,
+       if (rval != QLA_SUCCESS) {
+               ql_log(ql_log_warn, vha, 0x8030,
                    "TM IOCB failed (%x).\n", rval);
        }
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index baccd116f864..c813c9b75a10 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -5218,8 +5218,9 @@ qla2x00_do_dpc(void *data)
                        }
                }
 
-               if (test_and_clear_bit(ISP_ABORT_NEEDED,
-                                               &base_vha->dpc_flags)) {
+               if (test_and_clear_bit
+                   (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
+                   !test_bit(UNLOADING, &base_vha->dpc_flags)) {
 
                        ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
                            "ISP abort scheduled.\n");
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 03ac3ab4b3b4..2b96ca68dc10 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4298,6 +4298,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
        struct extent_map *em;
        u64 start = page_offset(page);
        u64 end = start + PAGE_SIZE - 1;
+       struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
 
        if (gfpflags_allow_blocking(mask) &&
            page->mapping->host->i_size > SZ_16M) {
@@ -4320,6 +4321,8 @@ int try_release_extent_mapping(struct extent_map_tree *map,
                                            extent_map_end(em) - 1,
                                            EXTENT_LOCKED | EXTENT_WRITEBACK,
                                            0, NULL)) {
+                               set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+                                       &btrfs_inode->runtime_flags);
                                remove_extent_mapping(map, em);
                                /* once for the rb tree */
                                free_extent_map(em);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 41ef83471ea5..6cbb0f7ead2f 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2231,7 +2231,7 @@ static int ext4_check_descriptors(struct super_block *sb,
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
        ext4_fsblk_t last_block;
-       ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1;
+       ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
        ext4_fsblk_t block_bitmap;
        ext4_fsblk_t inode_bitmap;
        ext4_fsblk_t inode_table;
@@ -3941,13 +3941,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                        goto failed_mount2;
                }
        }
+       sbi->s_gdb_count = db_count;
        if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
                ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
                ret = -EFSCORRUPTED;
                goto failed_mount2;
        }
 
-       sbi->s_gdb_count = db_count;
        get_random_bytes(&sbi->s_next_generation, sizeof(u32));
        spin_lock_init(&sbi->s_next_gen_lock);
 
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index c60f3d32ee91..a6797986b625 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -491,15 +491,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
        if (size > PSIZE) {
                /*
                 * To keep the rest of the code simple.  Allocate a
-                * contiguous buffer to work with
+                * contiguous buffer to work with. Make the buffer large
+                * enough to make use of the whole extent.
                 */
-               ea_buf->xattr = kmalloc(size, GFP_KERNEL);
+               ea_buf->max_size = (size + sb->s_blocksize - 1) &
+                   ~(sb->s_blocksize - 1);
+
+               ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
                if (ea_buf->xattr == NULL)
                        return -ENOMEM;
 
                ea_buf->flag = EA_MALLOC;
-               ea_buf->max_size = (size + sb->s_blocksize - 1) &
-                   ~(sb->s_blocksize - 1);
 
                if (ea_size == 0)
                        return 0;
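The jfs hunk relies on the usual power-of-two round-up: for a block size B that is a power of two, (size + B - 1) & ~(B - 1) is the smallest multiple of B that is >= size, so the buffer is sized to use the whole allocated extent. A standalone illustration (plain C, not kernel code):

    #include <stdio.h>

    /* Round size up to the next multiple of align; align must be a power of two. */
    static unsigned long align_up(unsigned long size, unsigned long align)
    {
            return (size + align - 1) & ~(align - 1);
    }

    int main(void)
    {
            /* With a 4096-byte block size: */
            printf("%lu\n", align_up(1, 4096));        /* 4096 */
            printf("%lu\n", align_up(4096, 4096));     /* 4096 (already aligned) */
            printf("%lu\n", align_up(4097, 4096));     /* 8192 */
            return 0;
    }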
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 4acc552e9279..19d0778ec382 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -162,6 +162,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
 void ring_buffer_record_off(struct ring_buffer *buffer);
 void ring_buffer_record_on(struct ring_buffer *buffer);
 int ring_buffer_record_is_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 2873baf5372a..5e6436741f96 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -59,12 +59,7 @@ extern long do_no_restart_syscall(struct restart_block *parm);
 
 #ifdef __KERNEL__
 
-#ifdef CONFIG_DEBUG_STACK_USAGE
-# define THREADINFO_GFP                (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \
-                                        __GFP_ZERO)
-#else
-# define THREADINFO_GFP                (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK)
-#endif
+#define THREADINFO_GFP         (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO)
 
 /*
  * flag set/clear/test wrappers
diff --git a/kernel/fork.c b/kernel/fork.c
index 70e10cb49be0..2c98b987808d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -184,6 +184,9 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
                        continue;
                this_cpu_write(cached_stacks[i], NULL);
 
+               /* Clear stale pointers from reused stack. */
+               memset(s->addr, 0, THREAD_SIZE);
+
                tsk->stack_vm_area = s;
                local_irq_enable();
                return s->addr;
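Taken together, the thread_info.h and fork.c hunks make thread stacks arrive zeroed on every path: __GFP_ZERO now covers fresh allocations unconditionally, while the explicit memset() covers stacks recycled from the per-cpu cache, which __GFP_ZERO cannot touch because no allocation happens there. The same split in a tiny user-space cache (the freelist names are illustrative):

    #include <stdlib.h>
    #include <string.h>

    #define BUF_SIZE 4096

    static void *freelist[8];
    static int nfree;

    /* calloc() zeroes only brand-new buffers, much as __GFP_ZERO does for
     * fresh pages; a recycled buffer still holds its previous contents,
     * so the cache-hit path must clear it explicitly. */
    static void *buf_alloc(void)
    {
            if (nfree > 0) {
                    void *p = freelist[--nfree];

                    memset(p, 0, BUF_SIZE);   /* the analogue of the fork.c memset */
                    return p;
            }
            return calloc(1, BUF_SIZE);
    }

    static void buf_free(void *p)
    {
            if (nfree < 8)
                    freelist[nfree++] = p;
            else
                    free(p);
    }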
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 5927da596d42..e121645bb8a1 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1026,6 +1026,13 @@ static int irq_setup_forced_threading(struct irqaction *new)
        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
                return 0;
 
+       /*
+        * No further action required for interrupts which are requested as
+        * threaded interrupts already
+        */
+       if (new->handler == irq_default_primary_handler)
+               return 0;
+
        new->flags |= IRQF_ONESHOT;
 
        /*
@@ -1033,7 +1040,7 @@ static int irq_setup_forced_threading(struct irqaction *new)
         * thread handler. We force thread them as well by creating a
         * secondary action.
         */
-       if (new->handler != irq_default_primary_handler && new->thread_fn) {
+       if (new->handler && new->thread_fn) {
                /* Allocate the secondary action */
                new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
                if (!new->secondary)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index dae1a45be504..b6bebe28a3e0 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -665,7 +665,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 
 static inline bool local_timer_softirq_pending(void)
 {
-       return local_softirq_pending() & TIMER_SOFTIRQ;
+       return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
 }
 
 static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
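The one-character tick-sched fix is a classic index-versus-mask bug: TIMER_SOFTIRQ is a bit number (1 in the kernel's softirq enum), not a bit mask, so "pending & TIMER_SOFTIRQ" actually tested bit 0, the HI_SOFTIRQ bit. A self-contained demonstration:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    enum { HI_SOFTIRQ = 0, TIMER_SOFTIRQ = 1 };   /* bit numbers, not masks */

    int main(void)
    {
            unsigned long pending = BIT(TIMER_SOFTIRQ);   /* only the timer bit set */

            /* Buggy test: TIMER_SOFTIRQ == 1 == BIT(HI_SOFTIRQ), so this
             * really checks the HI_SOFTIRQ bit and misses the timer. */
            printf("buggy: %lu\n", pending & TIMER_SOFTIRQ);       /* 0 */

            /* Fixed test: convert the index to a mask first. */
            printf("fixed: %lu\n", pending & BIT(TIMER_SOFTIRQ));  /* 2, nonzero */
            return 0;
    }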
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3e1d11f4fe44..dc29b600d2cb 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3136,6 +3136,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
        return !atomic_read(&buffer->record_disabled);
 }
 
+/**
+ * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
+ * @buffer: The ring buffer to see if write is set enabled
+ *
+ * Returns true if the ring buffer is set writable by ring_buffer_record_on().
+ * Note that this does NOT mean it is in a writable state.
+ *
+ * It may return true when the ring buffer has been disabled by
+ * ring_buffer_record_disable(), as that is a temporary disabling of
+ * the ring buffer.
+ */
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
+{
+       return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
+}
+
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  * @buffer: The ring buffer to stop writes to.
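The new helper distinguishes "the buffer has been switched off" from "writes are momentarily disabled": record_disabled is an atomic value in which, as the RB_BUFFER_OFF test above implies, a dedicated flag bit marks the sticky off switch set by ring_buffer_record_off(), while plain increments count nested temporary disables. A toy model of that encoding (the bit position is an assumption for illustration, not taken from this patch):

    #include <stdio.h>

    #define RB_BUFFER_OFF (1 << 20)   /* assumed flag bit, above the counter */

    static int record_disabled;

    static int is_on(void)     { return !record_disabled; }
    static int is_set_on(void) { return !(record_disabled & RB_BUFFER_OFF); }

    int main(void)
    {
            record_disabled += 1;               /* temporary, nested disable */
            printf("%d %d\n", is_on(), is_set_on());   /* 0 1: off now, but set on */

            record_disabled -= 1;
            record_disabled |= RB_BUFFER_OFF;   /* sticky off, as record_off() */
            printf("%d %d\n", is_on(), is_set_on());   /* 0 0 */
            return 0;
    }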
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 15b02645ce8b..901c7f15f6e2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1323,6 +1323,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
        arch_spin_lock(&tr->max_lock);
 
+       /* Inherit the recordable setting from trace_buffer */
+       if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
+               ring_buffer_record_on(tr->max_buffer.buffer);
+       else
+               ring_buffer_record_off(tr->max_buffer.buffer);
+
        buf = tr->trace_buffer.buffer;
        tr->trace_buffer.buffer = tr->max_buffer.buffer;
        tr->max_buffer.buffer = buf;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a9be8df108b4..9d0b73aa649f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4370,6 +4370,23 @@ static bool tcp_try_coalesce(struct sock *sk,
        return true;
 }
 
+static bool tcp_ooo_try_coalesce(struct sock *sk,
+                            struct sk_buff *to,
+                            struct sk_buff *from,
+                            bool *fragstolen)
+{
+       bool res = tcp_try_coalesce(sk, to, from, fragstolen);
+
+       /* In case tcp_drop() is called later, update to->gso_segs */
+       if (res) {
+               u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
+                              max_t(u16, 1, skb_shinfo(from)->gso_segs);
+
+               skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+       }
+       return res;
+}
+
 static void tcp_drop(struct sock *sk, struct sk_buff *skb)
 {
        sk_drops_add(sk, skb);
@@ -4493,7 +4510,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
        /* In the typical case, we are adding an skb to the end of the list.
         * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
         */
-       if (tcp_try_coalesce(sk, tp->ooo_last_skb, skb, &fragstolen)) {
+       if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
+                                skb, &fragstolen)) {
 coalesce_done:
                tcp_grow_window(sk, skb);
                kfree_skb_partial(skb, fragstolen);
@@ -4543,7 +4561,8 @@ coalesce_done:
                        tcp_drop(sk, skb1);
                        goto merge_right;
                }
-       } else if (tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
+       } else if (tcp_ooo_try_coalesce(sk, skb1,
+                                       skb, &fragstolen)) {
                goto coalesce_done;
        }
        p = &parent->rb_right;
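tcp_ooo_try_coalesce() keeps a drop-accounting total in a 16-bit field, so it accumulates with a clamp: each operand is first raised to at least 1 (a plain skb counts as one segment even when gso_segs is 0) and the sum is capped at 0xFFFF so it cannot wrap. The same saturating pattern in isolation:

    #include <stdio.h>
    #include <stdint.h>

    /* Saturating add for a u16 counter: never wraps past 0xFFFF, and
     * treats 0 as "one unit", mirroring the gso_segs update above. */
    static uint16_t sat_add_u16(uint16_t a, uint16_t b)
    {
            uint32_t sum = (uint32_t)(a ? a : 1) + (uint32_t)(b ? b : 1);

            return sum > 0xFFFF ? 0xFFFF : (uint16_t)sum;
    }

    int main(void)
    {
            printf("%u\n", sat_add_u16(0, 0));          /* 2: both count as 1 */
            printf("%u\n", sat_add_u16(40000, 40000));  /* 65535, not wrapped */
            return 0;
    }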
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 8d0aafbdbbc3..025487436438 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -986,6 +986,11 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                        return err;
        }
 
+       if (nlk->ngroups == 0)
+               groups = 0;
+       else if (nlk->ngroups < 8*sizeof(groups))
+               groups &= (1UL << nlk->ngroups) - 1;
+
        bound = nlk->bound;
        if (bound) {
                /* Ensure nlk->portid is up-to-date. */
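The netlink hunk masks a user-supplied group bitmap down to the nlk->ngroups bits the protocol actually supports, with two guards: ngroups == 0 clears everything, and the shift is only performed when ngroups is smaller than the width of groups, because shifting a value by its full bit width is undefined behaviour in C; when ngroups equals the width, every bit is valid and no mask is needed. The same pattern in isolation:

    #include <stdio.h>

    /* Keep only the low n bits of v, guarding against the undefined
     * behaviour of shifting by >= the type's width. */
    static unsigned long mask_low_bits(unsigned long v, unsigned int n)
    {
            if (n == 0)
                    return 0;                   /* no valid bits at all */
            if (n < 8 * sizeof(v))              /* e.g. 1UL << 64 would be UB */
                    return v & ((1UL << n) - 1);
            return v;                           /* n covers the whole word */
    }

    int main(void)
    {
            printf("%#lx\n", mask_low_bits(0xffffffffUL, 4));   /* 0xf */
            printf("%#lx\n", mask_low_bits(0xffUL, 0));         /* 0 */
            return 0;
    }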