Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.17/0102-4.17.3-all-fixes.patch



Revision 3142
Tue Jun 26 13:38:06 2018 UTC by niro
File size: 78454 bytes
-linux-4.17.3
1 niro 3142 diff --git a/Makefile b/Makefile
2     index f43cd522b175..31dc3a08295a 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 17
9     -SUBLEVEL = 2
10     +SUBLEVEL = 3
11     EXTRAVERSION =
12     NAME = Merciless Moray
13    
14     diff --git a/arch/um/drivers/vector_transports.c b/arch/um/drivers/vector_transports.c
15     index 9065047f844b..77e4ebc206ae 100644
16     --- a/arch/um/drivers/vector_transports.c
17     +++ b/arch/um/drivers/vector_transports.c
18     @@ -120,7 +120,8 @@ static int raw_form_header(uint8_t *header,
19     skb,
20     vheader,
21     virtio_legacy_is_little_endian(),
22     - false
23     + false,
24     + 0
25     );
26    
27     return 0;
28     diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
29     index 08acd954f00e..74a9e06b6cfd 100644
30     --- a/arch/x86/include/asm/apic.h
31     +++ b/arch/x86/include/asm/apic.h
32     @@ -436,6 +436,8 @@ static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {}
33    
34     #endif /* CONFIG_X86_LOCAL_APIC */
35    
36     +extern void apic_ack_irq(struct irq_data *data);
37     +
38     static inline void ack_APIC_irq(void)
39     {
40     /*
41     diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
42     index 22647a642e98..0af81b590a0c 100644
43     --- a/arch/x86/include/asm/trace/irq_vectors.h
44     +++ b/arch/x86/include/asm/trace/irq_vectors.h
45     @@ -236,7 +236,7 @@ TRACE_EVENT(vector_alloc,
46     TP_PROTO(unsigned int irq, unsigned int vector, bool reserved,
47     int ret),
48    
49     - TP_ARGS(irq, vector, ret, reserved),
50     + TP_ARGS(irq, vector, reserved, ret),
51    
52     TP_STRUCT__entry(
53     __field( unsigned int, irq )
54     diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
55     index 7553819c74c3..3982f79d2377 100644
56     --- a/arch/x86/kernel/apic/io_apic.c
57     +++ b/arch/x86/kernel/apic/io_apic.c
58     @@ -1851,7 +1851,7 @@ static void ioapic_ir_ack_level(struct irq_data *irq_data)
59     * intr-remapping table entry. Hence for the io-apic
60     * EOI we use the pin number.
61     */
62     - ack_APIC_irq();
63     + apic_ack_irq(irq_data);
64     eoi_ioapic_pin(data->entry.vector, data);
65     }
66    
67     diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
68     index bb6f7a2148d7..b708f597eee3 100644
69     --- a/arch/x86/kernel/apic/vector.c
70     +++ b/arch/x86/kernel/apic/vector.c
71     @@ -235,6 +235,15 @@ static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
72     if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
73     return 0;
74    
75     + /*
76     + * Careful here. @apicd might either have move_in_progress set or
77     + * be enqueued for cleanup. Assigning a new vector would either
78     + * leave a stale vector on some CPU around or in case of a pending
79     + * cleanup corrupt the hlist.
80     + */
81     + if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
82     + return -EBUSY;
83     +
84     vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
85     if (vector > 0)
86     apic_update_vector(irqd, vector, cpu);
87     @@ -800,13 +809,18 @@ static int apic_retrigger_irq(struct irq_data *irqd)
88     return 1;
89     }
90    
91     -void apic_ack_edge(struct irq_data *irqd)
92     +void apic_ack_irq(struct irq_data *irqd)
93     {
94     - irq_complete_move(irqd_cfg(irqd));
95     irq_move_irq(irqd);
96     ack_APIC_irq();
97     }
98    
99     +void apic_ack_edge(struct irq_data *irqd)
100     +{
101     + irq_complete_move(irqd_cfg(irqd));
102     + apic_ack_irq(irqd);
103     +}
104     +
105     static struct irq_chip lapic_controller = {
106     .name = "APIC",
107     .irq_ack = apic_ack_edge,
108     diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
109     index 589b948e6e01..316a8875bd90 100644
110     --- a/arch/x86/kernel/cpu/intel_rdt.c
111     +++ b/arch/x86/kernel/cpu/intel_rdt.c
112     @@ -821,6 +821,8 @@ static __init void rdt_quirks(void)
113     case INTEL_FAM6_SKYLAKE_X:
114     if (boot_cpu_data.x86_stepping <= 4)
115     set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
116     + else
117     + set_rdt_options("!l3cat");
118     }
119     }
120    
121     diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
122     index 475cb4f5f14f..c805a06e14c3 100644
123     --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
124     +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
125     @@ -48,7 +48,7 @@ static struct dentry *dfs_inj;
126    
127     static u8 n_banks;
128    
129     -#define MAX_FLAG_OPT_SIZE 3
130     +#define MAX_FLAG_OPT_SIZE 4
131     #define NBCFG 0x44
132    
133     enum injection_type {
134     diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
135     index e4cb9f4cde8a..fc13cbbb2dce 100644
136     --- a/arch/x86/platform/uv/uv_irq.c
137     +++ b/arch/x86/platform/uv/uv_irq.c
138     @@ -47,11 +47,6 @@ static void uv_program_mmr(struct irq_cfg *cfg, struct uv_irq_2_mmr_pnode *info)
139    
140     static void uv_noop(struct irq_data *data) { }
141    
142     -static void uv_ack_apic(struct irq_data *data)
143     -{
144     - ack_APIC_irq();
145     -}
146     -
147     static int
148     uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
149     bool force)
150     @@ -73,7 +68,7 @@ static struct irq_chip uv_irq_chip = {
151     .name = "UV-CORE",
152     .irq_mask = uv_noop,
153     .irq_unmask = uv_noop,
154     - .irq_eoi = uv_ack_apic,
155     + .irq_eoi = apic_ack_irq,
156     .irq_set_affinity = uv_set_irq_affinity,
157     };
158    
159     diff --git a/block/blk-mq.c b/block/blk-mq.c
160     index 9ce9cac16c3f..90ffd8151c57 100644
161     --- a/block/blk-mq.c
162     +++ b/block/blk-mq.c
163     @@ -2473,7 +2473,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
164    
165     mutex_lock(&set->tag_list_lock);
166     list_del_rcu(&q->tag_set_list);
167     - INIT_LIST_HEAD(&q->tag_set_list);
168     if (list_is_singular(&set->tag_list)) {
169     /* just transitioned to unshared */
170     set->flags &= ~BLK_MQ_F_TAG_SHARED;
171     @@ -2481,8 +2480,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
172     blk_mq_update_tag_set_depth(set, false);
173     }
174     mutex_unlock(&set->tag_list_lock);
175     -
176     synchronize_rcu();
177     + INIT_LIST_HEAD(&q->tag_set_list);
178     }
179    
180     static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
181     diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
182     index 68422afc365f..bc5f05906bd1 100644
183     --- a/drivers/acpi/acpica/psloop.c
184     +++ b/drivers/acpi/acpica/psloop.c
185     @@ -515,6 +515,22 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
186     if (ACPI_FAILURE(status)) {
187     return_ACPI_STATUS(status);
188     }
189     + if (walk_state->opcode == AML_SCOPE_OP) {
190     + /*
191     + * If the scope op fails to parse, skip the body of the
192     + * scope op because the parse failure indicates that the
193     + * device may not exist.
194     + */
195     + walk_state->parser_state.aml =
196     + walk_state->aml + 1;
197     + walk_state->parser_state.aml =
198     + acpi_ps_get_next_package_end
199     + (&walk_state->parser_state);
200     + walk_state->aml =
201     + walk_state->parser_state.aml;
202     + ACPI_ERROR((AE_INFO,
203     + "Skipping Scope block"));
204     + }
205    
206     continue;
207     }
208     @@ -557,7 +573,40 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
209     if (ACPI_FAILURE(status)) {
210     return_ACPI_STATUS(status);
211     }
212     -
213     + if ((walk_state->control_state) &&
214     + ((walk_state->control_state->control.
215     + opcode == AML_IF_OP)
216     + || (walk_state->control_state->control.
217     + opcode == AML_WHILE_OP))) {
218     + /*
219     + * If the if/while op fails to parse, we will skip parsing
220     + * the body of the op.
221     + */
222     + parser_state->aml =
223     + walk_state->control_state->control.
224     + aml_predicate_start + 1;
225     + parser_state->aml =
226     + acpi_ps_get_next_package_end
227     + (parser_state);
228     + walk_state->aml = parser_state->aml;
229     +
230     + ACPI_ERROR((AE_INFO,
231     + "Skipping While/If block"));
232     + if (*walk_state->aml == AML_ELSE_OP) {
233     + ACPI_ERROR((AE_INFO,
234     + "Skipping Else block"));
235     + walk_state->parser_state.aml =
236     + walk_state->aml + 1;
237     + walk_state->parser_state.aml =
238     + acpi_ps_get_next_package_end
239     + (parser_state);
240     + walk_state->aml =
241     + parser_state->aml;
242     + }
243     + ACPI_FREE(acpi_ut_pop_generic_state
244     + (&walk_state->control_state));
245     + }
246     + op = NULL;
247     continue;
248     }
249     }
250     diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
251     index 7d9d0151ee54..3138e7a00da8 100644
252     --- a/drivers/acpi/acpica/psobject.c
253     +++ b/drivers/acpi/acpica/psobject.c
254     @@ -12,6 +12,7 @@
255     #include "acparser.h"
256     #include "amlcode.h"
257     #include "acconvert.h"
258     +#include "acnamesp.h"
259    
260     #define _COMPONENT ACPI_PARSER
261     ACPI_MODULE_NAME("psobject")
262     @@ -549,6 +550,21 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
263    
264     do {
265     if (*op) {
266     + /*
267     + * These Opcodes need to be removed from the namespace because they
268     + * get created even if these opcodes cannot be created due to
269     + * errors.
270     + */
271     + if (((*op)->common.aml_opcode == AML_REGION_OP)
272     + || ((*op)->common.aml_opcode ==
273     + AML_DATA_REGION_OP)) {
274     + acpi_ns_delete_children((*op)->common.
275     + node);
276     + acpi_ns_remove_node((*op)->common.node);
277     + (*op)->common.node = NULL;
278     + acpi_ps_delete_parse_tree(*op);
279     + }
280     +
281     status2 =
282     acpi_ps_complete_this_op(walk_state, *op);
283     if (ACPI_FAILURE(status2)) {
284     @@ -574,6 +590,20 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
285     #endif
286     walk_state->prev_op = NULL;
287     walk_state->prev_arg_types = walk_state->arg_types;
288     +
289     + if (walk_state->parse_flags & ACPI_PARSE_MODULE_LEVEL) {
290     + /*
291     + * There was something that went wrong while executing code at the
292     + * module-level. We need to skip parsing whatever caused the
293     + * error and keep going. One runtime error during the table load
294     + * should not cause the entire table to not be loaded. This is
295     + * because there could be correct AML beyond the parts that caused
296     + * the runtime error.
297     + */
298     + ACPI_ERROR((AE_INFO,
299     + "Ignore error and continue table load"));
300     + return_ACPI_STATUS(AE_OK);
301     + }
302     return_ACPI_STATUS(status);
303     }
304    
305     diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
306     index 12d4a0f6b8d2..5a64ddaed8a3 100644
307     --- a/drivers/acpi/acpica/uterror.c
308     +++ b/drivers/acpi/acpica/uterror.c
309     @@ -182,20 +182,20 @@ acpi_ut_prefixed_namespace_error(const char *module_name,
310     switch (lookup_status) {
311     case AE_ALREADY_EXISTS:
312    
313     - acpi_os_printf(ACPI_MSG_BIOS_ERROR);
314     + acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
315     message = "Failure creating";
316     break;
317    
318     case AE_NOT_FOUND:
319    
320     - acpi_os_printf(ACPI_MSG_BIOS_ERROR);
321     - message = "Failure looking up";
322     + acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
323     + message = "Could not resolve";
324     break;
325    
326     default:
327    
328     - acpi_os_printf(ACPI_MSG_ERROR);
329     - message = "Failure looking up";
330     + acpi_os_printf("\n" ACPI_MSG_ERROR);
331     + message = "Failure resolving";
332     break;
333     }
334    
335     diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
336     index 346b163f6e89..9bfd2f7e4542 100644
337     --- a/drivers/ata/libata-core.c
338     +++ b/drivers/ata/libata-core.c
339     @@ -4557,9 +4557,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
340     { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
341     { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
342    
343     - /* Sandisk devices which are known to not handle LPM well */
344     - { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
345     -
346     /* devices that don't properly handle queued TRIM commands */
347     { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
348     ATA_HORKAGE_ZERO_AFTER_TRIM, },
349     diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
350     index de4ddd0e8550..b3ed8f9953a8 100644
351     --- a/drivers/ata/libata-zpodd.c
352     +++ b/drivers/ata/libata-zpodd.c
353     @@ -35,7 +35,7 @@ struct zpodd {
354     static int eject_tray(struct ata_device *dev)
355     {
356     struct ata_taskfile tf;
357     - static const char cdb[] = { GPCMD_START_STOP_UNIT,
358     + static const char cdb[ATAPI_CDB_LEN] = { GPCMD_START_STOP_UNIT,
359     0, 0, 0,
360     0x02, /* LoEj */
361     0, 0, 0, 0, 0, 0, 0,
362     diff --git a/drivers/base/core.c b/drivers/base/core.c
363     index b610816eb887..d680fd030316 100644
364     --- a/drivers/base/core.c
365     +++ b/drivers/base/core.c
366     @@ -1467,7 +1467,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
367    
368     dir = kzalloc(sizeof(*dir), GFP_KERNEL);
369     if (!dir)
370     - return NULL;
371     + return ERR_PTR(-ENOMEM);
372    
373     dir->class = class;
374     kobject_init(&dir->kobj, &class_dir_ktype);
375     @@ -1477,7 +1477,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
376     retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
377     if (retval < 0) {
378     kobject_put(&dir->kobj);
379     - return NULL;
380     + return ERR_PTR(retval);
381     }
382     return &dir->kobj;
383     }
384     @@ -1784,6 +1784,10 @@ int device_add(struct device *dev)
385    
386     parent = get_device(dev->parent);
387     kobj = get_device_parent(dev, parent);
388     + if (IS_ERR(kobj)) {
389     + error = PTR_ERR(kobj);
390     + goto parent_error;
391     + }
392     if (kobj)
393     dev->kobj.parent = kobj;
394    
395     @@ -1882,6 +1886,7 @@ int device_add(struct device *dev)
396     kobject_del(&dev->kobj);
397     Error:
398     cleanup_glue_dir(dev, glue_dir);
399     +parent_error:
400     put_device(parent);
401     name_error:
402     kfree(dev->p);
403     @@ -2701,6 +2706,11 @@ int device_move(struct device *dev, struct device *new_parent,
404     device_pm_lock();
405     new_parent = get_device(new_parent);
406     new_parent_kobj = get_device_parent(dev, new_parent);
407     + if (IS_ERR(new_parent_kobj)) {
408     + error = PTR_ERR(new_parent_kobj);
409     + put_device(new_parent);
410     + goto out;
411     + }
412    
413     pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
414     __func__, new_parent ? dev_name(new_parent) : "<NULL>");
415     diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
416     index afbc202ca6fd..64278f472efe 100644
417     --- a/drivers/block/nbd.c
418     +++ b/drivers/block/nbd.c
419     @@ -173,9 +173,12 @@ static const struct device_attribute pid_attr = {
420     static void nbd_dev_remove(struct nbd_device *nbd)
421     {
422     struct gendisk *disk = nbd->disk;
423     + struct request_queue *q;
424     +
425     if (disk) {
426     + q = disk->queue;
427     del_gendisk(disk);
428     - blk_cleanup_queue(disk->queue);
429     + blk_cleanup_queue(q);
430     blk_mq_free_tag_set(&nbd->tag_set);
431     disk->private_data = NULL;
432     put_disk(disk);
433     @@ -231,9 +234,18 @@ static void nbd_size_clear(struct nbd_device *nbd)
434     static void nbd_size_update(struct nbd_device *nbd)
435     {
436     struct nbd_config *config = nbd->config;
437     + struct block_device *bdev = bdget_disk(nbd->disk, 0);
438     +
439     blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
440     blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
441     set_capacity(nbd->disk, config->bytesize >> 9);
442     + if (bdev) {
443     + if (bdev->bd_disk)
444     + bd_set_size(bdev, config->bytesize);
445     + else
446     + bdev->bd_invalidated = 1;
447     + bdput(bdev);
448     + }
449     kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
450     }
451    
452     @@ -243,6 +255,8 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
453     struct nbd_config *config = nbd->config;
454     config->blksize = blocksize;
455     config->bytesize = blocksize * nr_blocks;
456     + if (nbd->task_recv != NULL)
457     + nbd_size_update(nbd);
458     }
459    
460     static void nbd_complete_rq(struct request *req)
461     @@ -1109,7 +1123,6 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
462     if (ret)
463     return ret;
464    
465     - bd_set_size(bdev, config->bytesize);
466     if (max_part)
467     bdev->bd_invalidated = 1;
468     mutex_unlock(&nbd->config_lock);
469     diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
470     index 075d18f6ba7a..54d4c0f999ec 100644
471     --- a/drivers/cpufreq/cpufreq.c
472     +++ b/drivers/cpufreq/cpufreq.c
473     @@ -696,6 +696,8 @@ static ssize_t store_##file_name \
474     struct cpufreq_policy new_policy; \
475     \
476     memcpy(&new_policy, policy, sizeof(*policy)); \
477     + new_policy.min = policy->user_policy.min; \
478     + new_policy.max = policy->user_policy.max; \
479     \
480     ret = sscanf(buf, "%u", &new_policy.object); \
481     if (ret != 1) \
482     diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
483     index ca38229b045a..43e14bb512c8 100644
484     --- a/drivers/cpufreq/cpufreq_governor.c
485     +++ b/drivers/cpufreq/cpufreq_governor.c
486     @@ -165,7 +165,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
487     * calls, so the previous load value can be used then.
488     */
489     load = j_cdbs->prev_load;
490     - } else if (unlikely(time_elapsed > 2 * sampling_rate &&
491     + } else if (unlikely((int)idle_time > 2 * sampling_rate &&
492     j_cdbs->prev_load)) {
493     /*
494     * If the CPU had gone completely idle and a task has
495     @@ -185,10 +185,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
496     * clear prev_load to guarantee that the load will be
497     * computed again next time.
498     *
499     - * Detecting this situation is easy: the governor's
500     - * utilization update handler would not have run during
501     - * CPU-idle periods. Hence, an unusually large
502     - * 'time_elapsed' (as compared to the sampling rate)
503     + * Detecting this situation is easy: an unusually large
504     + * 'idle_time' (as compared to the sampling rate)
505     * indicates this scenario.
506     */
507     load = j_cdbs->prev_load;
508     @@ -217,8 +215,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
509     j_cdbs->prev_load = load;
510     }
511    
512     - if (time_elapsed > 2 * sampling_rate) {
513     - unsigned int periods = time_elapsed / sampling_rate;
514     + if (unlikely((int)idle_time > 2 * sampling_rate)) {
515     + unsigned int periods = idle_time / sampling_rate;
516    
517     if (periods < idle_periods)
518     idle_periods = periods;
519     diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
520     index 6ba709b6f095..896caba5dfe5 100644
521     --- a/drivers/cpufreq/ti-cpufreq.c
522     +++ b/drivers/cpufreq/ti-cpufreq.c
523     @@ -226,7 +226,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
524     opp_data->cpu_dev = get_cpu_device(0);
525     if (!opp_data->cpu_dev) {
526     pr_err("%s: Failed to get device for CPU0\n", __func__);
527     - ret = ENODEV;
528     + ret = -ENODEV;
529     goto free_opp_data;
530     }
531    
532     diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
533     index 582e449be9fe..a2c53ea3b5ed 100644
534     --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
535     +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
536     @@ -205,8 +205,7 @@ static void ish_remove(struct pci_dev *pdev)
537     kfree(ishtp_dev);
538     }
539    
540     -#ifdef CONFIG_PM
541     -static struct device *ish_resume_device;
542     +static struct device __maybe_unused *ish_resume_device;
543    
544     /* 50ms to get resume response */
545     #define WAIT_FOR_RESUME_ACK_MS 50
546     @@ -220,7 +219,7 @@ static struct device *ish_resume_device;
547     * in that case a simple resume message is enough, others we need
548     * a reset sequence.
549     */
550     -static void ish_resume_handler(struct work_struct *work)
551     +static void __maybe_unused ish_resume_handler(struct work_struct *work)
552     {
553     struct pci_dev *pdev = to_pci_dev(ish_resume_device);
554     struct ishtp_device *dev = pci_get_drvdata(pdev);
555     @@ -262,7 +261,7 @@ static void ish_resume_handler(struct work_struct *work)
556     *
557     * Return: 0 to the pm core
558     */
559     -static int ish_suspend(struct device *device)
560     +static int __maybe_unused ish_suspend(struct device *device)
561     {
562     struct pci_dev *pdev = to_pci_dev(device);
563     struct ishtp_device *dev = pci_get_drvdata(pdev);
564     @@ -288,7 +287,7 @@ static int ish_suspend(struct device *device)
565     return 0;
566     }
567    
568     -static DECLARE_WORK(resume_work, ish_resume_handler);
569     +static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler);
570     /**
571     * ish_resume() - ISH resume callback
572     * @device: device pointer
573     @@ -297,7 +296,7 @@ static DECLARE_WORK(resume_work, ish_resume_handler);
574     *
575     * Return: 0 to the pm core
576     */
577     -static int ish_resume(struct device *device)
578     +static int __maybe_unused ish_resume(struct device *device)
579     {
580     struct pci_dev *pdev = to_pci_dev(device);
581     struct ishtp_device *dev = pci_get_drvdata(pdev);
582     @@ -311,21 +310,14 @@ static int ish_resume(struct device *device)
583     return 0;
584     }
585    
586     -static const struct dev_pm_ops ish_pm_ops = {
587     - .suspend = ish_suspend,
588     - .resume = ish_resume,
589     -};
590     -#define ISHTP_ISH_PM_OPS (&ish_pm_ops)
591     -#else
592     -#define ISHTP_ISH_PM_OPS NULL
593     -#endif /* CONFIG_PM */
594     +static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);
595    
596     static struct pci_driver ish_driver = {
597     .name = KBUILD_MODNAME,
598     .id_table = ish_pci_tbl,
599     .probe = ish_probe,
600     .remove = ish_remove,
601     - .driver.pm = ISHTP_ISH_PM_OPS,
602     + .driver.pm = &ish_pm_ops,
603     };
604    
605     module_pci_driver(ish_driver);
606     diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
607     index ee7a37eb159a..545986cfb978 100644
608     --- a/drivers/hid/wacom_sys.c
609     +++ b/drivers/hid/wacom_sys.c
610     @@ -395,6 +395,14 @@ static void wacom_usage_mapping(struct hid_device *hdev,
611     }
612     }
613    
614     + /* 2nd-generation Intuos Pro Large has incorrect Y maximum */
615     + if (hdev->vendor == USB_VENDOR_ID_WACOM &&
616     + hdev->product == 0x0358 &&
617     + WACOM_PEN_FIELD(field) &&
618     + wacom_equivalent_usage(usage->hid) == HID_GD_Y) {
619     + field->logical_maximum = 43200;
620     + }
621     +
622     switch (usage->hid) {
623     case HID_GD_X:
624     features->x_max = field->logical_maximum;
625     diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
626     index 8fb8c737fffe..b0b30a568db7 100644
627     --- a/drivers/iommu/amd_iommu.c
628     +++ b/drivers/iommu/amd_iommu.c
629     @@ -4379,7 +4379,7 @@ static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
630    
631     static struct irq_chip amd_ir_chip = {
632     .name = "AMD-IR",
633     - .irq_ack = ir_ack_apic_edge,
634     + .irq_ack = apic_ack_irq,
635     .irq_set_affinity = amd_ir_set_affinity,
636     .irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
637     .irq_compose_msi_msg = ir_compose_msi_msg,
638     diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
639     index 3062a154a9fb..967450bd421a 100644
640     --- a/drivers/iommu/intel_irq_remapping.c
641     +++ b/drivers/iommu/intel_irq_remapping.c
642     @@ -1223,7 +1223,7 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
643    
644     static struct irq_chip intel_ir_chip = {
645     .name = "INTEL-IR",
646     - .irq_ack = ir_ack_apic_edge,
647     + .irq_ack = apic_ack_irq,
648     .irq_set_affinity = intel_ir_set_affinity,
649     .irq_compose_msi_msg = intel_ir_compose_msi_msg,
650     .irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity,
651     diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
652     index 496deee3ae3a..7d0f3074d41d 100644
653     --- a/drivers/iommu/irq_remapping.c
654     +++ b/drivers/iommu/irq_remapping.c
655     @@ -156,11 +156,6 @@ void panic_if_irq_remap(const char *msg)
656     panic(msg);
657     }
658    
659     -void ir_ack_apic_edge(struct irq_data *data)
660     -{
661     - ack_APIC_irq();
662     -}
663     -
664     /**
665     * irq_remapping_get_ir_irq_domain - Get the irqdomain associated with the IOMMU
666     * device serving request @info
667     diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
668     index 039c7af7b190..0afef6e43be4 100644
669     --- a/drivers/iommu/irq_remapping.h
670     +++ b/drivers/iommu/irq_remapping.h
671     @@ -65,8 +65,6 @@ struct irq_remap_ops {
672     extern struct irq_remap_ops intel_irq_remap_ops;
673     extern struct irq_remap_ops amd_iommu_irq_ops;
674    
675     -extern void ir_ack_apic_edge(struct irq_data *data);
676     -
677     #else /* CONFIG_IRQ_REMAP */
678    
679     #define irq_remapping_enabled 0
680     diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
681     index b67be33bd62f..cea7b2d2e60a 100644
682     --- a/drivers/media/rc/rc-main.c
683     +++ b/drivers/media/rc/rc-main.c
684     @@ -1860,6 +1860,8 @@ int rc_register_device(struct rc_dev *dev)
685     dev->device_name ?: "Unspecified device", path ?: "N/A");
686     kfree(path);
687    
688     + dev->registered = true;
689     +
690     if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
691     rc = rc_setup_rx_device(dev);
692     if (rc)
693     @@ -1879,8 +1881,6 @@ int rc_register_device(struct rc_dev *dev)
694     goto out_lirc;
695     }
696    
697     - dev->registered = true;
698     -
699     dev_dbg(&dev->dev, "Registered rc%u (driver: %s)\n", dev->minor,
700     dev->driver_name ? dev->driver_name : "unknown");
701    
702     diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
703     index 102594ec3e97..a36b4fb949fa 100644
704     --- a/drivers/media/usb/uvc/uvc_ctrl.c
705     +++ b/drivers/media/usb/uvc/uvc_ctrl.c
706     @@ -1607,14 +1607,12 @@ static int uvc_ctrl_get_flags(struct uvc_device *dev,
707     ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id, dev->intfnum,
708     info->selector, data, 1);
709     if (!ret)
710     - info->flags = UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX
711     - | UVC_CTRL_FLAG_GET_RES | UVC_CTRL_FLAG_GET_DEF
712     - | (data[0] & UVC_CONTROL_CAP_GET ?
713     - UVC_CTRL_FLAG_GET_CUR : 0)
714     - | (data[0] & UVC_CONTROL_CAP_SET ?
715     - UVC_CTRL_FLAG_SET_CUR : 0)
716     - | (data[0] & UVC_CONTROL_CAP_AUTOUPDATE ?
717     - UVC_CTRL_FLAG_AUTO_UPDATE : 0);
718     + info->flags |= (data[0] & UVC_CONTROL_CAP_GET ?
719     + UVC_CTRL_FLAG_GET_CUR : 0)
720     + | (data[0] & UVC_CONTROL_CAP_SET ?
721     + UVC_CTRL_FLAG_SET_CUR : 0)
722     + | (data[0] & UVC_CONTROL_CAP_AUTOUPDATE ?
723     + UVC_CTRL_FLAG_AUTO_UPDATE : 0);
724    
725     kfree(data);
726     return ret;
727     @@ -1689,6 +1687,9 @@ static int uvc_ctrl_fill_xu_info(struct uvc_device *dev,
728    
729     info->size = le16_to_cpup((__le16 *)data);
730    
731     + info->flags = UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX
732     + | UVC_CTRL_FLAG_GET_RES | UVC_CTRL_FLAG_GET_DEF;
733     +
734     ret = uvc_ctrl_get_flags(dev, ctrl, info);
735     if (ret < 0) {
736     uvc_trace(UVC_TRACE_CONTROL,
737     diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
738     index 58c705f24f96..b594bae1adbd 100644
739     --- a/drivers/net/bonding/bond_options.c
740     +++ b/drivers/net/bonding/bond_options.c
741     @@ -1142,6 +1142,7 @@ static int bond_option_primary_set(struct bonding *bond,
742     slave->dev->name);
743     rcu_assign_pointer(bond->primary_slave, slave);
744     strcpy(bond->params.primary, slave->dev->name);
745     + bond->force_primary = true;
746     bond_select_active_slave(bond);
747     goto out;
748     }
749     diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
750     index a50e08bb4748..750007513f9d 100644
751     --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
752     +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
753     @@ -267,14 +267,13 @@ static int aq_pci_probe(struct pci_dev *pdev,
754     numvecs = min(numvecs, num_online_cpus());
755     /*enable interrupts */
756     #if !AQ_CFG_FORCE_LEGACY_INT
757     - numvecs = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
758     - PCI_IRQ_MSIX | PCI_IRQ_MSI |
759     - PCI_IRQ_LEGACY);
760     + err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
761     + PCI_IRQ_MSIX | PCI_IRQ_MSI |
762     + PCI_IRQ_LEGACY);
763    
764     - if (numvecs < 0) {
765     - err = numvecs;
766     + if (err < 0)
767     goto err_hwinit;
768     - }
769     + numvecs = err;
770     #endif
771     self->irqvecs = numvecs;
772    
773     diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
774     index da07ccdf84bf..eb8dccd24abf 100644
775     --- a/drivers/net/hyperv/netvsc_drv.c
776     +++ b/drivers/net/hyperv/netvsc_drv.c
777     @@ -126,8 +126,10 @@ static int netvsc_open(struct net_device *net)
778     }
779    
780     rdev = nvdev->extension;
781     - if (!rdev->link_state)
782     + if (!rdev->link_state) {
783     netif_carrier_on(net);
784     + netif_tx_wake_all_queues(net);
785     + }
786    
787     if (vf_netdev) {
788     /* Setting synthetic device up transparently sets
789     diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
790     index cd09c3af2117..6e8e42361fd5 100644
791     --- a/drivers/net/phy/dp83848.c
792     +++ b/drivers/net/phy/dp83848.c
793     @@ -74,6 +74,25 @@ static int dp83848_config_intr(struct phy_device *phydev)
794     return phy_write(phydev, DP83848_MICR, control);
795     }
796    
797     +static int dp83848_config_init(struct phy_device *phydev)
798     +{
799     + int err;
800     + int val;
801     +
802     + err = genphy_config_init(phydev);
803     + if (err < 0)
804     + return err;
805     +
806     + /* DP83620 always reports Auto Negotiation Ability on BMSR. Instead,
807     + * we check initial value of BMCR Auto negotiation enable bit
808     + */
809     + val = phy_read(phydev, MII_BMCR);
810     + if (!(val & BMCR_ANENABLE))
811     + phydev->autoneg = AUTONEG_DISABLE;
812     +
813     + return 0;
814     +}
815     +
816     static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
817     { TI_DP83848C_PHY_ID, 0xfffffff0 },
818     { NS_DP83848C_PHY_ID, 0xfffffff0 },
819     @@ -83,7 +102,7 @@ static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
820     };
821     MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
822    
823     -#define DP83848_PHY_DRIVER(_id, _name) \
824     +#define DP83848_PHY_DRIVER(_id, _name, _config_init) \
825     { \
826     .phy_id = _id, \
827     .phy_id_mask = 0xfffffff0, \
828     @@ -92,7 +111,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
829     .flags = PHY_HAS_INTERRUPT, \
830     \
831     .soft_reset = genphy_soft_reset, \
832     - .config_init = genphy_config_init, \
833     + .config_init = _config_init, \
834     .suspend = genphy_suspend, \
835     .resume = genphy_resume, \
836     \
837     @@ -102,10 +121,14 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
838     }
839    
840     static struct phy_driver dp83848_driver[] = {
841     - DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
842     - DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
843     - DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
844     - DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
845     + DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY",
846     + genphy_config_init),
847     + DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY",
848     + genphy_config_init),
849     + DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY",
850     + dp83848_config_init),
851     + DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY",
852     + genphy_config_init),
853     };
854     module_phy_driver(dp83848_driver);
855    
856     diff --git a/drivers/net/tap.c b/drivers/net/tap.c
857     index 9b6cb780affe..f0f7cd977667 100644
858     --- a/drivers/net/tap.c
859     +++ b/drivers/net/tap.c
860     @@ -774,13 +774,16 @@ static ssize_t tap_put_user(struct tap_queue *q,
861     int total;
862    
863     if (q->flags & IFF_VNET_HDR) {
864     + int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
865     struct virtio_net_hdr vnet_hdr;
866     +
867     vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
868     if (iov_iter_count(iter) < vnet_hdr_len)
869     return -EINVAL;
870    
871     if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
872     - tap_is_little_endian(q), true))
873     + tap_is_little_endian(q), true,
874     + vlan_hlen))
875     BUG();
876    
877     if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
878     diff --git a/drivers/net/tun.c b/drivers/net/tun.c
879     index 23e9eb66197f..409eb8b74740 100644
880     --- a/drivers/net/tun.c
881     +++ b/drivers/net/tun.c
882     @@ -2078,7 +2078,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
883     return -EINVAL;
884    
885     if (virtio_net_hdr_from_skb(skb, &gso,
886     - tun_is_little_endian(tun), true)) {
887     + tun_is_little_endian(tun), true,
888     + vlan_hlen)) {
889     struct skb_shared_info *sinfo = skb_shinfo(skb);
890     pr_err("unexpected GSO type: "
891     "0x%x, gso_size %d, hdr_len %d\n",
892     diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
893     index 90d07ed224d5..b0e8b9613054 100644
894     --- a/drivers/net/usb/cdc_ncm.c
895     +++ b/drivers/net/usb/cdc_ncm.c
896     @@ -1124,7 +1124,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
897     * accordingly. Otherwise, we should check here.
898     */
899     if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
900     - delayed_ndp_size = ctx->max_ndp_size;
901     + delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
902     else
903     delayed_ndp_size = 0;
904    
905     @@ -1285,7 +1285,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
906     /* If requested, put NDP at end of frame. */
907     if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
908     nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
909     - cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size);
910     + cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
911     nth16->wNdpIndex = cpu_to_le16(skb_out->len);
912     skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size);
913    
914     diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
915     index 032e1ac10a30..8c7207535179 100644
916     --- a/drivers/net/virtio_net.c
917     +++ b/drivers/net/virtio_net.c
918     @@ -1358,7 +1358,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
919     hdr = skb_vnet_hdr(skb);
920    
921     if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
922     - virtio_is_little_endian(vi->vdev), false))
923     + virtio_is_little_endian(vi->vdev), false,
924     + 0))
925     BUG();
926    
927     if (vi->mergeable_rx_bufs)
928     diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
929     index 1fec8e3a6b35..6afcfd1f0eec 100644
930     --- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c
931     +++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
932     @@ -8,6 +8,7 @@
933     * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
934     * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
935     * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
936     + * Copyright(c) 2018 Intel Corporation
937     *
938     * This program is free software; you can redistribute it and/or modify
939     * it under the terms of version 2 of the GNU General Public License as
940     @@ -30,6 +31,7 @@
941     * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
942     * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
943     * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
944     + * Copyright(c) 2018 Intel Corporation
945     * All rights reserved.
946     *
947     * Redistribution and use in source and binary forms, with or without
948     @@ -163,7 +165,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt,
949     static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
950     const struct fw_img *image)
951     {
952     - int sec_idx, idx;
953     + int sec_idx, idx, ret;
954     u32 offset = 0;
955    
956     /*
957     @@ -190,17 +192,23 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
958     */
959     if (sec_idx >= image->num_sec - 1) {
960     IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n");
961     - iwl_free_fw_paging(fwrt);
962     - return -EINVAL;
963     + ret = -EINVAL;
964     + goto err;
965     }
966    
967     /* copy the CSS block to the dram */
968     IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n",
969     sec_idx);
970    
971     + if (image->sec[sec_idx].len > fwrt->fw_paging_db[0].fw_paging_size) {
972     + IWL_ERR(fwrt, "CSS block is larger than paging size\n");
973     + ret = -EINVAL;
974     + goto err;
975     + }
976     +
977     memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block),
978     image->sec[sec_idx].data,
979     - fwrt->fw_paging_db[0].fw_paging_size);
980     + image->sec[sec_idx].len);
981     dma_sync_single_for_device(fwrt->trans->dev,
982     fwrt->fw_paging_db[0].fw_paging_phys,
983     fwrt->fw_paging_db[0].fw_paging_size,
984     @@ -221,6 +229,14 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
985     for (idx = 1; idx < fwrt->num_of_paging_blk; idx++) {
986     struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
987    
988     + if (block->fw_paging_size > image->sec[sec_idx].len - offset) {
989     + IWL_ERR(fwrt,
990     + "Paging: paging size is larger than remaining data in block %d\n",
991     + idx);
992     + ret = -EINVAL;
993     + goto err;
994     + }
995     +
996     memcpy(page_address(block->fw_paging_block),
997     image->sec[sec_idx].data + offset,
998     block->fw_paging_size);
999     @@ -231,19 +247,32 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
1000    
1001     IWL_DEBUG_FW(fwrt,
1002     "Paging: copied %d paging bytes to block %d\n",
1003     - fwrt->fw_paging_db[idx].fw_paging_size,
1004     - idx);
1005     + block->fw_paging_size, idx);
1006    
1007     - offset += fwrt->fw_paging_db[idx].fw_paging_size;
1008     + offset += block->fw_paging_size;
1009     +
1010     + if (offset > image->sec[sec_idx].len) {
1011     + IWL_ERR(fwrt,
1012     + "Paging: offset goes over section size\n");
1013     + ret = -EINVAL;
1014     + goto err;
1015     + }
1016     }
1017    
1018     /* copy the last paging block */
1019     if (fwrt->num_of_pages_in_last_blk > 0) {
1020     struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
1021    
1022     + if (image->sec[sec_idx].len - offset > block->fw_paging_size) {
1023     + IWL_ERR(fwrt,
1024     + "Paging: last block is larger than paging size\n");
1025     + ret = -EINVAL;
1026     + goto err;
1027     + }
1028     +
1029     memcpy(page_address(block->fw_paging_block),
1030     image->sec[sec_idx].data + offset,
1031     - FW_PAGING_SIZE * fwrt->num_of_pages_in_last_blk);
1032     + image->sec[sec_idx].len - offset);
1033     dma_sync_single_for_device(fwrt->trans->dev,
1034     block->fw_paging_phys,
1035     block->fw_paging_size,
1036     @@ -255,6 +284,10 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
1037     }
1038    
1039     return 0;
1040     +
1041     +err:
1042     + iwl_free_fw_paging(fwrt);
1043     + return ret;
1044     }
1045    
1046     static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt,
1047     diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
1048     index 17a0190bd88f..5dbb0f0c02ef 100644
1049     --- a/drivers/nvme/host/pci.c
1050     +++ b/drivers/nvme/host/pci.c
1051     @@ -2679,8 +2679,15 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
1052    
1053     dev_info(dev->ctrl.device, "restart after slot reset\n");
1054     pci_restore_state(pdev);
1055     - nvme_reset_ctrl(&dev->ctrl);
1056     - return PCI_ERS_RESULT_RECOVERED;
1057     + nvme_reset_ctrl_sync(&dev->ctrl);
1058     +
1059     + switch (dev->ctrl.state) {
1060     + case NVME_CTRL_LIVE:
1061     + case NVME_CTRL_ADMIN_ONLY:
1062     + return PCI_ERS_RESULT_RECOVERED;
1063     + default:
1064     + return PCI_ERS_RESULT_DISCONNECT;
1065     + }
1066     }
1067    
1068     static void nvme_error_resume(struct pci_dev *pdev)
1069     diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
1070     index f0be5f35ab28..9beefa6ed1ce 100644
1071     --- a/drivers/vhost/vhost.c
1072     +++ b/drivers/vhost/vhost.c
1073     @@ -2345,6 +2345,9 @@ struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
1074     struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
1075     if (!node)
1076     return NULL;
1077     +
1078     + /* Make sure all padding within the structure is initialized. */
1079     + memset(&node->msg, 0, sizeof node->msg);
1080     node->vq = vq;
1081     node->msg.type = type;
1082     return node;
1083     diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
1084     index 74f2e6e6202a..8851d441e5fd 100644
1085     --- a/drivers/w1/masters/mxc_w1.c
1086     +++ b/drivers/w1/masters/mxc_w1.c
1087     @@ -112,6 +112,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
1088     if (IS_ERR(mdev->clk))
1089     return PTR_ERR(mdev->clk);
1090    
1091     + err = clk_prepare_enable(mdev->clk);
1092     + if (err)
1093     + return err;
1094     +
1095     clkrate = clk_get_rate(mdev->clk);
1096     if (clkrate < 10000000)
1097     dev_warn(&pdev->dev,
1098     @@ -125,12 +129,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
1099    
1100     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1101     mdev->regs = devm_ioremap_resource(&pdev->dev, res);
1102     - if (IS_ERR(mdev->regs))
1103     - return PTR_ERR(mdev->regs);
1104     -
1105     - err = clk_prepare_enable(mdev->clk);
1106     - if (err)
1107     - return err;
1108     + if (IS_ERR(mdev->regs)) {
1109     + err = PTR_ERR(mdev->regs);
1110     + goto out_disable_clk;
1111     + }
1112    
1113     /* Software reset 1-Wire module */
1114     writeb(MXC_W1_RESET_RST, mdev->regs + MXC_W1_RESET);
1115     @@ -146,8 +148,12 @@ static int mxc_w1_probe(struct platform_device *pdev)
1116    
1117     err = w1_add_master_device(&mdev->bus_master);
1118     if (err)
1119     - clk_disable_unprepare(mdev->clk);
1120     + goto out_disable_clk;
1121    
1122     + return 0;
1123     +
1124     +out_disable_clk:
1125     + clk_disable_unprepare(mdev->clk);
1126     return err;
1127     }
1128    
1129     diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
1130     index a41b48f82a70..4de191563261 100644
1131     --- a/fs/binfmt_misc.c
1132     +++ b/fs/binfmt_misc.c
1133     @@ -387,8 +387,13 @@ static Node *create_entry(const char __user *buffer, size_t count)
1134     s = strchr(p, del);
1135     if (!s)
1136     goto einval;
1137     - *s++ = '\0';
1138     - e->offset = simple_strtoul(p, &p, 10);
1139     + *s = '\0';
1140     + if (p != s) {
1141     + int r = kstrtoint(p, 10, &e->offset);
1142     + if (r != 0 || e->offset < 0)
1143     + goto einval;
1144     + }
1145     + p = s;
1146     if (*p++)
1147     goto einval;
1148     pr_debug("register: offset: %#x\n", e->offset);
1149     @@ -428,7 +433,8 @@ static Node *create_entry(const char __user *buffer, size_t count)
1150     if (e->mask &&
1151     string_unescape_inplace(e->mask, UNESCAPE_HEX) != e->size)
1152     goto einval;
1153     - if (e->size + e->offset > BINPRM_BUF_SIZE)
1154     + if (e->size > BINPRM_BUF_SIZE ||
1155     + BINPRM_BUF_SIZE - e->size < e->offset)
1156     goto einval;
1157     pr_debug("register: magic/mask length: %i\n", e->size);
1158     if (USE_DEBUG) {
1159     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
1160     index 0b86cf10cf2a..775a0f2d0b45 100644
1161     --- a/fs/btrfs/inode.c
1162     +++ b/fs/btrfs/inode.c
1163     @@ -1018,8 +1018,10 @@ static noinline int cow_file_range(struct inode *inode,
1164     ram_size, /* ram_bytes */
1165     BTRFS_COMPRESS_NONE, /* compress_type */
1166     BTRFS_ORDERED_REGULAR /* type */);
1167     - if (IS_ERR(em))
1168     + if (IS_ERR(em)) {
1169     + ret = PTR_ERR(em);
1170     goto out_reserve;
1171     + }
1172     free_extent_map(em);
1173    
1174     ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
1175     diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
1176     index 632e26d6f7ce..28fed3e8960b 100644
1177     --- a/fs/btrfs/ioctl.c
1178     +++ b/fs/btrfs/ioctl.c
1179     @@ -2654,8 +2654,10 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
1180     }
1181    
1182     /* Check for compatibility reject unknown flags */
1183     - if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED)
1184     - return -EOPNOTSUPP;
1185     + if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) {
1186     + ret = -EOPNOTSUPP;
1187     + goto out;
1188     + }
1189    
1190     if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
1191     ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
1192     @@ -3826,11 +3828,6 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
1193     src->i_sb != inode->i_sb)
1194     return -EXDEV;
1195    
1196     - /* don't make the dst file partly checksummed */
1197     - if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
1198     - (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
1199     - return -EINVAL;
1200     -
1201     if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
1202     return -EISDIR;
1203    
1204     @@ -3840,6 +3837,13 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
1205     inode_lock(src);
1206     }
1207    
1208     + /* don't make the dst file partly checksummed */
1209     + if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
1210     + (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
1211     + ret = -EINVAL;
1212     + goto out_unlock;
1213     + }
1214     +
1215     /* determine range to clone */
1216     ret = -EINVAL;
1217     if (off + len > src->i_size || off + len < off)
1218     diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
1219     index 52b39a0924e9..ad8a69ba7f13 100644
1220     --- a/fs/btrfs/scrub.c
1221     +++ b/fs/btrfs/scrub.c
1222     @@ -2799,7 +2799,7 @@ static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
1223     have_csum = scrub_find_csum(sctx, logical, csum);
1224     if (have_csum == 0)
1225     ++sctx->stat.no_csum;
1226     - if (sctx->is_dev_replace && !have_csum) {
1227     + if (0 && sctx->is_dev_replace && !have_csum) {
1228     ret = copy_nocow_pages(sctx, logical, l,
1229     mirror_num,
1230     physical_for_dev_replace);
1231     diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
1232     index 0628092b0b1b..f82152a0cb38 100644
1233     --- a/fs/btrfs/super.c
1234     +++ b/fs/btrfs/super.c
1235     @@ -323,6 +323,7 @@ enum {
1236     Opt_ssd, Opt_nossd,
1237     Opt_ssd_spread, Opt_nossd_spread,
1238     Opt_subvol,
1239     + Opt_subvol_empty,
1240     Opt_subvolid,
1241     Opt_thread_pool,
1242     Opt_treelog, Opt_notreelog,
1243     @@ -388,6 +389,7 @@ static const match_table_t tokens = {
1244     {Opt_ssd_spread, "ssd_spread"},
1245     {Opt_nossd_spread, "nossd_spread"},
1246     {Opt_subvol, "subvol=%s"},
1247     + {Opt_subvol_empty, "subvol="},
1248     {Opt_subvolid, "subvolid=%s"},
1249     {Opt_thread_pool, "thread_pool=%u"},
1250     {Opt_treelog, "treelog"},
1251     @@ -461,6 +463,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
1252     btrfs_set_opt(info->mount_opt, DEGRADED);
1253     break;
1254     case Opt_subvol:
1255     + case Opt_subvol_empty:
1256     case Opt_subvolid:
1257     case Opt_subvolrootid:
1258     case Opt_device:
1259     diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
1260     index 4f3884835267..dd95a6fa24bf 100644
1261     --- a/fs/cifs/cifsacl.h
1262     +++ b/fs/cifs/cifsacl.h
1263     @@ -98,4 +98,18 @@ struct cifs_ace {
1264     struct cifs_sid sid; /* ie UUID of user or group who gets these perms */
1265     } __attribute__((packed));
1266    
1267     +/*
1268     + * Minimum security identifier can be one for system defined Users
1269     + * and Groups such as NULL SID and World or Built-in accounts such
1270     + * as Administrator and Guest and consists of
1271     + * Revision + Num (Sub)Auths + Authority + Domain (one Subauthority)
1272     + */
1273     +#define MIN_SID_LEN (1 + 1 + 6 + 4) /* in bytes */
1274     +
1275     +/*
1276     + * Minimum security descriptor can be one without any SACL and DACL and can
1277     + * consist of revision, type, and two sids of minimum size for owner and group
1278     + */
1279     +#define MIN_SEC_DESC_LEN (sizeof(struct cifs_ntsd) + (2 * MIN_SID_LEN))
1280     +
1281     #endif /* _CIFSACL_H */
1282     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
1283     index 9c6d95ffca97..4ee32488ff74 100644
1284     --- a/fs/cifs/smb2ops.c
1285     +++ b/fs/cifs/smb2ops.c
1286     @@ -1277,10 +1277,11 @@ smb2_is_session_expired(char *buf)
1287     {
1288     struct smb2_sync_hdr *shdr = get_sync_hdr(buf);
1289    
1290     - if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED)
1291     + if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
1292     + shdr->Status != STATUS_USER_SESSION_DELETED)
1293     return false;
1294    
1295     - cifs_dbg(FYI, "Session expired\n");
1296     + cifs_dbg(FYI, "Session expired or deleted\n");
1297     return true;
1298     }
1299    
1300     @@ -1593,8 +1594,11 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
1301     oparms.create_options = 0;
1302    
1303     utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1304     - if (!utf16_path)
1305     - return ERR_PTR(-ENOMEM);
1306     + if (!utf16_path) {
1307     + rc = -ENOMEM;
1308     + free_xid(xid);
1309     + return ERR_PTR(rc);
1310     + }
1311    
1312     oparms.tcon = tcon;
1313     oparms.desired_access = READ_CONTROL;
1314     @@ -1652,8 +1656,11 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
1315     access_flags = WRITE_DAC;
1316    
1317     utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1318     - if (!utf16_path)
1319     - return -ENOMEM;
1320     + if (!utf16_path) {
1321     + rc = -ENOMEM;
1322     + free_xid(xid);
1323     + return rc;
1324     + }
1325    
1326     oparms.tcon = tcon;
1327     oparms.desired_access = access_flags;
1328     @@ -1713,15 +1720,21 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
1329    
1330     /* if file not oplocked can't be sure whether asking to extend size */
1331     if (!CIFS_CACHE_READ(cifsi))
1332     - if (keep_size == false)
1333     - return -EOPNOTSUPP;
1334     + if (keep_size == false) {
1335     + rc = -EOPNOTSUPP;
1336     + free_xid(xid);
1337     + return rc;
1338     + }
1339    
1340     /*
1341     * Must check if file sparse since fallocate -z (zero range) assumes
1342     * non-sparse allocation
1343     */
1344     - if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE))
1345     - return -EOPNOTSUPP;
1346     + if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
1347     + rc = -EOPNOTSUPP;
1348     + free_xid(xid);
1349     + return rc;
1350     + }
1351    
1352     /*
1353     * need to make sure we are not asked to extend the file since the SMB3
1354     @@ -1730,8 +1743,11 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
1355     * which for a non sparse file would zero the newly extended range
1356     */
1357     if (keep_size == false)
1358     - if (i_size_read(inode) < offset + len)
1359     - return -EOPNOTSUPP;
1360     + if (i_size_read(inode) < offset + len) {
1361     + rc = -EOPNOTSUPP;
1362     + free_xid(xid);
1363     + return rc;
1364     + }
1365    
1366     cifs_dbg(FYI, "offset %lld len %lld", offset, len);
1367    
1368     @@ -1764,8 +1780,11 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
1369    
1370     /* Need to make file sparse, if not already, before freeing range. */
1371     /* Consider adding equivalent for compressed since it could also work */
1372     - if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse))
1373     - return -EOPNOTSUPP;
1374     + if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
1375     + rc = -EOPNOTSUPP;
1376     + free_xid(xid);
1377     + return rc;
1378     + }
1379    
1380     cifs_dbg(FYI, "offset %lld len %lld", offset, len);
1381    
1382     @@ -1796,8 +1815,10 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
1383    
1384     /* if file not oplocked can't be sure whether asking to extend size */
1385     if (!CIFS_CACHE_READ(cifsi))
1386     - if (keep_size == false)
1387     - return -EOPNOTSUPP;
1388     + if (keep_size == false) {
1389     + free_xid(xid);
1390     + return rc;
1391     + }
1392    
1393     /*
1394     * Files are non-sparse by default so falloc may be a no-op
1395     @@ -1806,14 +1827,16 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
1396     */
1397     if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
1398     if (keep_size == true)
1399     - return 0;
1400     + rc = 0;
1401     /* check if extending file */
1402     else if (i_size_read(inode) >= off + len)
1403     /* not extending file and already not sparse */
1404     - return 0;
1405     + rc = 0;
1406     /* BB: in future add else clause to extend file */
1407     else
1408     - return -EOPNOTSUPP;
1409     + rc = -EOPNOTSUPP;
1410     + free_xid(xid);
1411     + return rc;
1412     }
1413    
1414     if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
1415     @@ -1825,8 +1848,11 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
1416     * ie potentially making a few extra pages at the beginning
1417     * or end of the file non-sparse via set_sparse is harmless.
1418     */
1419     - if ((off > 8192) || (off + len + 8192 < i_size_read(inode)))
1420     - return -EOPNOTSUPP;
1421     + if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
1422     + rc = -EOPNOTSUPP;
1423     + free_xid(xid);
1424     + return rc;
1425     + }
1426    
1427     rc = smb2_set_sparse(xid, tcon, cfile, inode, false);
1428     }
1429     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
1430     index 0f48741a0130..32d7fd830aae 100644
1431     --- a/fs/cifs/smb2pdu.c
1432     +++ b/fs/cifs/smb2pdu.c
1433     @@ -1276,6 +1276,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
1434     sess_data->ses = ses;
1435     sess_data->buf0_type = CIFS_NO_BUFFER;
1436     sess_data->nls_cp = (struct nls_table *) nls_cp;
1437     + sess_data->previous_session = ses->Suid;
1438    
1439     #ifdef CONFIG_CIFS_SMB311
1440     /*
1441     @@ -2377,8 +2378,7 @@ SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
1442    
1443     return query_info(xid, tcon, persistent_fid, volatile_fid,
1444     0, SMB2_O_INFO_SECURITY, additional_info,
1445     - SMB2_MAX_BUFFER_SIZE,
1446     - sizeof(struct smb2_file_all_info), data, plen);
1447     + SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
1448     }
1449    
1450     int
1451     diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
1452     index c32802c956d5..bf7fa1507e81 100644
1453     --- a/fs/ext4/indirect.c
1454     +++ b/fs/ext4/indirect.c
1455     @@ -561,10 +561,16 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
1456     unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);
1457     int i;
1458    
1459     - /* Count number blocks in a subtree under 'partial' */
1460     - count = 1;
1461     - for (i = 0; partial + i != chain + depth - 1; i++)
1462     - count *= epb;
1463     + /*
1464     + * Count number blocks in a subtree under 'partial'. At each
1465     + * level we count number of complete empty subtrees beyond
1466     + * current offset and then descend into the subtree only
1467     + * partially beyond current offset.
1468     + */
1469     + count = 0;
1470     + for (i = partial - chain + 1; i < depth; i++)
1471     + count = count * epb + (epb - offsets[i] - 1);
1472     + count++;
1473     /* Fill in size of a hole we found */
1474     map->m_pblk = 0;
1475     map->m_len = min_t(unsigned int, map->m_len, count);
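The rewritten loop above only counts the portion of the indirect-block subtree that lies beyond the current offsets, rather than the whole subtree as before. A minimal user-space sketch of the same arithmetic, using made-up values for the pointers-per-block count and the offsets (illustrative only, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned int epb = 1024;		/* assumed: 4 KiB blocks, 1024 pointers per indirect block */
		unsigned int offsets[] = { 1000, 200 };	/* assumed offsets below the level where the lookup stopped */
		unsigned long count = 0;
		int i, levels = 2;

		/* Same recurrence as the patched ext4_ind_map_blocks(): complete empty
		 * subtrees beyond the current offset at each level, then descend into
		 * the partially empty one and finally count the current block itself. */
		for (i = 0; i < levels; i++)
			count = count * epb + (epb - offsets[i] - 1);
		count++;

		printf("hole length: %lu blocks\n", count);	/* 24376, not 1024 * 1024 as the old code computed */
		return 0;
	}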
1476     diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
1477     index 70cf4c7b268a..44b4fcdc3755 100644
1478     --- a/fs/ext4/inline.c
1479     +++ b/fs/ext4/inline.c
1480     @@ -144,6 +144,12 @@ int ext4_find_inline_data_nolock(struct inode *inode)
1481     goto out;
1482    
1483     if (!is.s.not_found) {
1484     + if (is.s.here->e_value_inum) {
1485     + EXT4_ERROR_INODE(inode, "inline data xattr refers "
1486     + "to an external xattr inode");
1487     + error = -EFSCORRUPTED;
1488     + goto out;
1489     + }
1490     EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here -
1491     (void *)ext4_raw_inode(&is.iloc));
1492     EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
1493     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1494     index 1e50c5efae67..c73cb9346aee 100644
1495     --- a/fs/ext4/inode.c
1496     +++ b/fs/ext4/inode.c
1497     @@ -4298,28 +4298,28 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
1498     EXT4_BLOCK_SIZE_BITS(sb);
1499     stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
1500    
1501     - /* If there are no blocks to remove, return now */
1502     - if (first_block >= stop_block)
1503     - goto out_stop;
1504     + /* If there are blocks to remove, do it */
1505     + if (stop_block > first_block) {
1506    
1507     - down_write(&EXT4_I(inode)->i_data_sem);
1508     - ext4_discard_preallocations(inode);
1509     + down_write(&EXT4_I(inode)->i_data_sem);
1510     + ext4_discard_preallocations(inode);
1511    
1512     - ret = ext4_es_remove_extent(inode, first_block,
1513     - stop_block - first_block);
1514     - if (ret) {
1515     - up_write(&EXT4_I(inode)->i_data_sem);
1516     - goto out_stop;
1517     - }
1518     + ret = ext4_es_remove_extent(inode, first_block,
1519     + stop_block - first_block);
1520     + if (ret) {
1521     + up_write(&EXT4_I(inode)->i_data_sem);
1522     + goto out_stop;
1523     + }
1524    
1525     - if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1526     - ret = ext4_ext_remove_space(inode, first_block,
1527     - stop_block - 1);
1528     - else
1529     - ret = ext4_ind_remove_space(handle, inode, first_block,
1530     - stop_block);
1531     + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1532     + ret = ext4_ext_remove_space(inode, first_block,
1533     + stop_block - 1);
1534     + else
1535     + ret = ext4_ind_remove_space(handle, inode, first_block,
1536     + stop_block);
1537    
1538     - up_write(&EXT4_I(inode)->i_data_sem);
1539     + up_write(&EXT4_I(inode)->i_data_sem);
1540     + }
1541     if (IS_SYNC(inode))
1542     ext4_handle_sync(handle);
1543    
1544     @@ -4701,19 +4701,21 @@ static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
1545     }
1546     }
1547    
1548     -static inline void ext4_iget_extra_inode(struct inode *inode,
1549     +static inline int ext4_iget_extra_inode(struct inode *inode,
1550     struct ext4_inode *raw_inode,
1551     struct ext4_inode_info *ei)
1552     {
1553     __le32 *magic = (void *)raw_inode +
1554     EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
1555     +
1556     if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <=
1557     EXT4_INODE_SIZE(inode->i_sb) &&
1558     *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
1559     ext4_set_inode_state(inode, EXT4_STATE_XATTR);
1560     - ext4_find_inline_data_nolock(inode);
1561     + return ext4_find_inline_data_nolock(inode);
1562     } else
1563     EXT4_I(inode)->i_inline_off = 0;
1564     + return 0;
1565     }
1566    
1567     int ext4_get_projid(struct inode *inode, kprojid_t *projid)
1568     @@ -4893,7 +4895,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1569     ei->i_extra_isize = sizeof(struct ext4_inode) -
1570     EXT4_GOOD_OLD_INODE_SIZE;
1571     } else {
1572     - ext4_iget_extra_inode(inode, raw_inode, ei);
1573     + ret = ext4_iget_extra_inode(inode, raw_inode, ei);
1574     + if (ret)
1575     + goto bad_inode;
1576     }
1577     }
1578    
1579     diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
1580     index b6bec270a8e4..d792b7689d92 100644
1581     --- a/fs/ext4/resize.c
1582     +++ b/fs/ext4/resize.c
1583     @@ -1933,7 +1933,7 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
1584     return 0;
1585    
1586     n_group = ext4_get_group_number(sb, n_blocks_count - 1);
1587     - if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
1588     + if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
1589     ext4_warning(sb, "resize would cause inodes_count overflow");
1590     return -EINVAL;
1591     }
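Since the on-disk inode counter is 32 bits and the group count after the resize is n_group + 1, a value equal to 0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb) already overflows, which is why '>' becomes '>='. A quick user-space illustration with an assumed inodes-per-group value (not from the patch):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		unsigned long ipg = 8192;			/* assumed inodes per group */
		unsigned long limit = 0xFFFFFFFFUL / ipg;	/* 524287 */
		unsigned long n_group = limit;			/* slips past the old '>' check */
		uint32_t inodes_count = (uint32_t)((n_group + 1) * ipg);

		/* 524288 * 8192 == 2^32, so the 32-bit counter wraps to 0. */
		printf("groups %lu -> inodes_count %u\n", n_group + 1, inodes_count);
		return 0;
	}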
1592     diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
1593     index 499cb4b1fbd2..fc4ced59c565 100644
1594     --- a/fs/ext4/xattr.c
1595     +++ b/fs/ext4/xattr.c
1596     @@ -1688,7 +1688,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
1597    
1598     /* No failures allowed past this point. */
1599    
1600     - if (!s->not_found && here->e_value_offs) {
1601     + if (!s->not_found && here->e_value_size && here->e_value_offs) {
1602     /* Remove the old value. */
1603     void *first_val = s->base + min_offs;
1604     size_t offs = le16_to_cpu(here->e_value_offs);
1605     diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
1606     index 79c61da8b1bc..c65a51d87cac 100644
1607     --- a/fs/orangefs/inode.c
1608     +++ b/fs/orangefs/inode.c
1609     @@ -269,6 +269,13 @@ int orangefs_getattr(const struct path *path, struct kstat *stat,
1610     else
1611     stat->result_mask = STATX_BASIC_STATS &
1612     ~STATX_SIZE;
1613     +
1614     + stat->attributes_mask = STATX_ATTR_IMMUTABLE |
1615     + STATX_ATTR_APPEND;
1616     + if (inode->i_flags & S_IMMUTABLE)
1617     + stat->attributes |= STATX_ATTR_IMMUTABLE;
1618     + if (inode->i_flags & S_APPEND)
1619     + stat->attributes |= STATX_ATTR_APPEND;
1620     }
1621     return ret;
1622     }
1623     diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
1624     index 1b5707c44c3f..e026bee02a66 100644
1625     --- a/fs/orangefs/namei.c
1626     +++ b/fs/orangefs/namei.c
1627     @@ -326,6 +326,13 @@ static int orangefs_symlink(struct inode *dir,
1628     ret = PTR_ERR(inode);
1629     goto out;
1630     }
1631     + /*
1632     + * This is necessary because orangefs_inode_getattr will not
1633     + * re-read symlink size as it is impossible for it to change.
1634     + * Invalidating the cache does not help. orangefs_new_inode
1635     + * does not set the correct size (it does not know symname).
1636     + */
1637     + inode->i_size = strlen(symname);
1638    
1639     gossip_debug(GOSSIP_NAME_DEBUG,
1640     "Assigned symlink inode new number of %pU\n",
1641     diff --git a/include/linux/irq.h b/include/linux/irq.h
1642     index 65916a305f3d..4e66378f290b 100644
1643     --- a/include/linux/irq.h
1644     +++ b/include/linux/irq.h
1645     @@ -551,7 +551,12 @@ extern int irq_affinity_online_cpu(unsigned int cpu);
1646     #endif
1647    
1648     #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
1649     -void irq_move_irq(struct irq_data *data);
1650     +void __irq_move_irq(struct irq_data *data);
1651     +static inline void irq_move_irq(struct irq_data *data)
1652     +{
1653     + if (unlikely(irqd_is_setaffinity_pending(data)))
1654     + __irq_move_irq(data);
1655     +}
1656     void irq_move_masked_irq(struct irq_data *data);
1657     void irq_force_complete_move(struct irq_desc *desc);
1658     #else
1659     diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
1660     index f144216febc6..9397628a1967 100644
1661     --- a/include/linux/virtio_net.h
1662     +++ b/include/linux/virtio_net.h
1663     @@ -58,7 +58,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
1664     static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
1665     struct virtio_net_hdr *hdr,
1666     bool little_endian,
1667     - bool has_data_valid)
1668     + bool has_data_valid,
1669     + int vlan_hlen)
1670     {
1671     memset(hdr, 0, sizeof(*hdr)); /* no info leak */
1672    
1673     @@ -83,12 +84,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
1674    
1675     if (skb->ip_summed == CHECKSUM_PARTIAL) {
1676     hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1677     - if (skb_vlan_tag_present(skb))
1678     - hdr->csum_start = __cpu_to_virtio16(little_endian,
1679     - skb_checksum_start_offset(skb) + VLAN_HLEN);
1680     - else
1681     - hdr->csum_start = __cpu_to_virtio16(little_endian,
1682     - skb_checksum_start_offset(skb));
1683     + hdr->csum_start = __cpu_to_virtio16(little_endian,
1684     + skb_checksum_start_offset(skb) + vlan_hlen);
1685     hdr->csum_offset = __cpu_to_virtio16(little_endian,
1686     skb->csum_offset);
1687     } else if (has_data_valid &&
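With the extra vlan_hlen argument, shifting the checksum start becomes the caller's decision instead of being keyed off skb_vlan_tag_present(). A hedged sketch of how a caller that re-inserts an offloaded VLAN tag in front of the data might use it; the callers updated in this patch all pass 0, so this is illustrative only:

	/* sketch, assuming a kernel receive path that copies the accelerated
	 * VLAN tag back in front of the payload before the header is consumed */
	int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;

	if (virtio_net_hdr_from_skb(skb, &hdr, virtio_legacy_is_little_endian(),
				    true, vlan_hlen))
		goto drop;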
1688     diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
1689     index c4f5caaf3778..f6a3543e5247 100644
1690     --- a/include/net/transp_v6.h
1691     +++ b/include/net/transp_v6.h
1692     @@ -45,8 +45,15 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
1693     struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
1694     struct sockcm_cookie *sockc);
1695    
1696     -void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
1697     - __u16 srcp, __u16 destp, int bucket);
1698     +void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
1699     + __u16 srcp, __u16 destp, int rqueue, int bucket);
1700     +static inline void
1701     +ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp,
1702     + __u16 destp, int bucket)
1703     +{
1704     + __ip6_dgram_sock_seq_show(seq, sp, srcp, destp, sk_rmem_alloc_get(sp),
1705     + bucket);
1706     +}
1707    
1708     #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
1709    
1710     diff --git a/include/net/udp.h b/include/net/udp.h
1711     index 0676b272f6ac..1db85dcb06f6 100644
1712     --- a/include/net/udp.h
1713     +++ b/include/net/udp.h
1714     @@ -244,6 +244,11 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
1715     return htons((((u64) hash * (max - min)) >> 32) + min);
1716     }
1717    
1718     +static inline int udp_rqueue_get(struct sock *sk)
1719     +{
1720     + return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
1721     +}
1722     +
1723     /* net/ipv4/udp.c */
1724     void udp_destruct_sock(struct sock *sk);
1725     void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
1726     diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
1727     index e3336d904f64..facfecfc543c 100644
1728     --- a/kernel/irq/manage.c
1729     +++ b/kernel/irq/manage.c
1730     @@ -204,6 +204,39 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
1731     return ret;
1732     }
1733    
1734     +#ifdef CONFIG_GENERIC_PENDING_IRQ
1735     +static inline int irq_set_affinity_pending(struct irq_data *data,
1736     + const struct cpumask *dest)
1737     +{
1738     + struct irq_desc *desc = irq_data_to_desc(data);
1739     +
1740     + irqd_set_move_pending(data);
1741     + irq_copy_pending(desc, dest);
1742     + return 0;
1743     +}
1744     +#else
1745     +static inline int irq_set_affinity_pending(struct irq_data *data,
1746     + const struct cpumask *dest)
1747     +{
1748     + return -EBUSY;
1749     +}
1750     +#endif
1751     +
1752     +static int irq_try_set_affinity(struct irq_data *data,
1753     + const struct cpumask *dest, bool force)
1754     +{
1755     + int ret = irq_do_set_affinity(data, dest, force);
1756     +
1757     + /*
1758     + * In case that the underlying vector management is busy and the
1759     + * architecture supports the generic pending mechanism then utilize
1760     + * this to avoid returning an error to user space.
1761     + */
1762     + if (ret == -EBUSY && !force)
1763     + ret = irq_set_affinity_pending(data, dest);
1764     + return ret;
1765     +}
1766     +
1767     int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
1768     bool force)
1769     {
1770     @@ -214,8 +247,8 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
1771     if (!chip || !chip->irq_set_affinity)
1772     return -EINVAL;
1773    
1774     - if (irq_can_move_pcntxt(data)) {
1775     - ret = irq_do_set_affinity(data, mask, force);
1776     + if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
1777     + ret = irq_try_set_affinity(data, mask, force);
1778     } else {
1779     irqd_set_move_pending(data);
1780     irq_copy_pending(desc, mask);
1781     diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
1782     index 86ae0eb80b53..def48589ea48 100644
1783     --- a/kernel/irq/migration.c
1784     +++ b/kernel/irq/migration.c
1785     @@ -38,17 +38,18 @@ bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
1786     void irq_move_masked_irq(struct irq_data *idata)
1787     {
1788     struct irq_desc *desc = irq_data_to_desc(idata);
1789     - struct irq_chip *chip = desc->irq_data.chip;
1790     + struct irq_data *data = &desc->irq_data;
1791     + struct irq_chip *chip = data->chip;
1792    
1793     - if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
1794     + if (likely(!irqd_is_setaffinity_pending(data)))
1795     return;
1796    
1797     - irqd_clr_move_pending(&desc->irq_data);
1798     + irqd_clr_move_pending(data);
1799    
1800     /*
1801     * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
1802     */
1803     - if (irqd_is_per_cpu(&desc->irq_data)) {
1804     + if (irqd_is_per_cpu(data)) {
1805     WARN_ON(1);
1806     return;
1807     }
1808     @@ -73,13 +74,24 @@ void irq_move_masked_irq(struct irq_data *idata)
1809     * For correct operation this depends on the caller
1810     * masking the irqs.
1811     */
1812     - if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
1813     - irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
1814     -
1815     + if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
1816     + int ret;
1817     +
1818     + ret = irq_do_set_affinity(data, desc->pending_mask, false);
1819     + /*
1820     + * If the there is a cleanup pending in the underlying
1821     + * vector management, reschedule the move for the next
1822     + * interrupt. Leave desc->pending_mask intact.
1823     + */
1824     + if (ret == -EBUSY) {
1825     + irqd_set_move_pending(data);
1826     + return;
1827     + }
1828     + }
1829     cpumask_clear(desc->pending_mask);
1830     }
1831    
1832     -void irq_move_irq(struct irq_data *idata)
1833     +void __irq_move_irq(struct irq_data *idata)
1834     {
1835     bool masked;
1836    
1837     @@ -90,9 +102,6 @@ void irq_move_irq(struct irq_data *idata)
1838     */
1839     idata = irq_desc_get_irq_data(irq_data_to_desc(idata));
1840    
1841     - if (likely(!irqd_is_setaffinity_pending(idata)))
1842     - return;
1843     -
1844     if (unlikely(irqd_irq_disabled(idata)))
1845     return;
1846    
1847     diff --git a/mm/backing-dev.c b/mm/backing-dev.c
1848     index 7441bd93b732..8fe3ebd6ac00 100644
1849     --- a/mm/backing-dev.c
1850     +++ b/mm/backing-dev.c
1851     @@ -412,6 +412,7 @@ static void wb_exit(struct bdi_writeback *wb)
1852     * protected.
1853     */
1854     static DEFINE_SPINLOCK(cgwb_lock);
1855     +static struct workqueue_struct *cgwb_release_wq;
1856    
1857     /**
1858     * wb_congested_get_create - get or create a wb_congested
1859     @@ -522,7 +523,7 @@ static void cgwb_release(struct percpu_ref *refcnt)
1860     {
1861     struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
1862     refcnt);
1863     - schedule_work(&wb->release_work);
1864     + queue_work(cgwb_release_wq, &wb->release_work);
1865     }
1866    
1867     static void cgwb_kill(struct bdi_writeback *wb)
1868     @@ -784,6 +785,21 @@ static void cgwb_bdi_register(struct backing_dev_info *bdi)
1869     spin_unlock_irq(&cgwb_lock);
1870     }
1871    
1872     +static int __init cgwb_init(void)
1873     +{
1874     + /*
1875     + * There can be many concurrent release work items overwhelming
1876     + * system_wq. Put them in a separate wq and limit concurrency.
1877     + * There's no point in executing many of these in parallel.
1878     + */
1879     + cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
1880     + if (!cgwb_release_wq)
1881     + return -ENOMEM;
1882     +
1883     + return 0;
1884     +}
1885     +subsys_initcall(cgwb_init);
1886     +
1887     #else /* CONFIG_CGROUP_WRITEBACK */
1888    
1889     static int cgwb_bdi_init(struct backing_dev_info *bdi)
1890     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
1891     index 22320ea27489..d2d0eb9536a3 100644
1892     --- a/mm/page_alloc.c
1893     +++ b/mm/page_alloc.c
1894     @@ -4162,7 +4162,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1895     * orientated.
1896     */
1897     if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
1898     - ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
1899     ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
1900     ac->high_zoneidx, ac->nodemask);
1901     }
1902     diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
1903     index 7d20e1f3de28..56197f0d9608 100644
1904     --- a/net/dsa/tag_trailer.c
1905     +++ b/net/dsa/tag_trailer.c
1906     @@ -75,7 +75,8 @@ static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev,
1907     if (!skb->dev)
1908     return NULL;
1909    
1910     - pskb_trim_rcsum(skb, skb->len - 4);
1911     + if (pskb_trim_rcsum(skb, skb->len - 4))
1912     + return NULL;
1913    
1914     return skb;
1915     }
1916     diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
1917     index f70586b50838..ef8cd0f7db89 100644
1918     --- a/net/ipv4/tcp_ipv4.c
1919     +++ b/net/ipv4/tcp_ipv4.c
1920     @@ -1689,6 +1689,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
1921     reqsk_put(req);
1922     goto discard_it;
1923     }
1924     + if (tcp_checksum_complete(skb)) {
1925     + reqsk_put(req);
1926     + goto csum_error;
1927     + }
1928     if (unlikely(sk->sk_state != TCP_LISTEN)) {
1929     inet_csk_reqsk_queue_drop_and_put(sk, req);
1930     goto lookup;
1931     diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
1932     index b61a770884fa..5f7bc5c6366a 100644
1933     --- a/net/ipv4/udp.c
1934     +++ b/net/ipv4/udp.c
1935     @@ -2718,7 +2718,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
1936     " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
1937     bucket, src, srcp, dest, destp, sp->sk_state,
1938     sk_wmem_alloc_get(sp),
1939     - sk_rmem_alloc_get(sp),
1940     + udp_rqueue_get(sp),
1941     0, 0L, 0,
1942     from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
1943     0, sock_i_ino(sp),
1944     diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
1945     index d0390d844ac8..d9ad986c7b2c 100644
1946     --- a/net/ipv4/udp_diag.c
1947     +++ b/net/ipv4/udp_diag.c
1948     @@ -163,7 +163,7 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
1949     static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
1950     void *info)
1951     {
1952     - r->idiag_rqueue = sk_rmem_alloc_get(sk);
1953     + r->idiag_rqueue = udp_rqueue_get(sk);
1954     r->idiag_wqueue = sk_wmem_alloc_get(sk);
1955     }
1956    
1957     diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
1958     index a02ad100f0d7..2ee08b6a86a4 100644
1959     --- a/net/ipv6/datagram.c
1960     +++ b/net/ipv6/datagram.c
1961     @@ -1019,8 +1019,8 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
1962     }
1963     EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
1964    
1965     -void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
1966     - __u16 srcp, __u16 destp, int bucket)
1967     +void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
1968     + __u16 srcp, __u16 destp, int rqueue, int bucket)
1969     {
1970     const struct in6_addr *dest, *src;
1971    
1972     @@ -1036,7 +1036,7 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
1973     dest->s6_addr32[2], dest->s6_addr32[3], destp,
1974     sp->sk_state,
1975     sk_wmem_alloc_get(sp),
1976     - sk_rmem_alloc_get(sp),
1977     + rqueue,
1978     0, 0L, 0,
1979     from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1980     0,
1981     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
1982     index 4530a82aaa2e..b94345e657f7 100644
1983     --- a/net/ipv6/route.c
1984     +++ b/net/ipv6/route.c
1985     @@ -2149,9 +2149,6 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
1986     const struct in6_addr *daddr, *saddr;
1987     struct rt6_info *rt6 = (struct rt6_info *)dst;
1988    
1989     - if (rt6->rt6i_flags & RTF_LOCAL)
1990     - return;
1991     -
1992     if (dst_metric_locked(dst, RTAX_MTU))
1993     return;
1994    
1995     diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
1996     index 6d664d83cd16..5d4eb9d2c3a7 100644
1997     --- a/net/ipv6/tcp_ipv6.c
1998     +++ b/net/ipv6/tcp_ipv6.c
1999     @@ -1475,6 +1475,10 @@ static int tcp_v6_rcv(struct sk_buff *skb)
2000     reqsk_put(req);
2001     goto discard_it;
2002     }
2003     + if (tcp_checksum_complete(skb)) {
2004     + reqsk_put(req);
2005     + goto csum_error;
2006     + }
2007     if (unlikely(sk->sk_state != TCP_LISTEN)) {
2008     inet_csk_reqsk_queue_drop_and_put(sk, req);
2009     goto lookup;
2010     diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
2011     index ea0730028e5d..977bd5a07cab 100644
2012     --- a/net/ipv6/udp.c
2013     +++ b/net/ipv6/udp.c
2014     @@ -1475,7 +1475,8 @@ int udp6_seq_show(struct seq_file *seq, void *v)
2015     struct inet_sock *inet = inet_sk(v);
2016     __u16 srcp = ntohs(inet->inet_sport);
2017     __u16 destp = ntohs(inet->inet_dport);
2018     - ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
2019     + __ip6_dgram_sock_seq_show(seq, v, srcp, destp,
2020     + udp_rqueue_get(v), bucket);
2021     }
2022     return 0;
2023     }
2024     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2025     index 60c2a252bdf5..38d132d007ba 100644
2026     --- a/net/packet/af_packet.c
2027     +++ b/net/packet/af_packet.c
2028     @@ -2037,7 +2037,7 @@ static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2029     return -EINVAL;
2030     *len -= sizeof(vnet_hdr);
2031    
2032     - if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
2033     + if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2034     return -EINVAL;
2035    
2036     return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2037     @@ -2304,7 +2304,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2038     if (do_vnet) {
2039     if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
2040     sizeof(struct virtio_net_hdr),
2041     - vio_le(), true)) {
2042     + vio_le(), true, 0)) {
2043     spin_lock(&sk->sk_receive_queue.lock);
2044     goto drop_n_account;
2045     }
2046     diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
2047     index 9618b4a83cee..98c4afe7c15b 100644
2048     --- a/net/sched/act_simple.c
2049     +++ b/net/sched/act_simple.c
2050     @@ -53,22 +53,22 @@ static void tcf_simp_release(struct tc_action *a)
2051     kfree(d->tcfd_defdata);
2052     }
2053    
2054     -static int alloc_defdata(struct tcf_defact *d, char *defdata)
2055     +static int alloc_defdata(struct tcf_defact *d, const struct nlattr *defdata)
2056     {
2057     d->tcfd_defdata = kzalloc(SIMP_MAX_DATA, GFP_KERNEL);
2058     if (unlikely(!d->tcfd_defdata))
2059     return -ENOMEM;
2060     - strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
2061     + nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
2062     return 0;
2063     }
2064    
2065     -static void reset_policy(struct tcf_defact *d, char *defdata,
2066     +static void reset_policy(struct tcf_defact *d, const struct nlattr *defdata,
2067     struct tc_defact *p)
2068     {
2069     spin_lock_bh(&d->tcf_lock);
2070     d->tcf_action = p->action;
2071     memset(d->tcfd_defdata, 0, SIMP_MAX_DATA);
2072     - strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
2073     + nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
2074     spin_unlock_bh(&d->tcf_lock);
2075     }
2076    
2077     @@ -87,7 +87,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
2078     struct tcf_defact *d;
2079     bool exists = false;
2080     int ret = 0, err;
2081     - char *defdata;
2082    
2083     if (nla == NULL)
2084     return -EINVAL;
2085     @@ -110,8 +109,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
2086     return -EINVAL;
2087     }
2088    
2089     - defdata = nla_data(tb[TCA_DEF_DATA]);
2090     -
2091     if (!exists) {
2092     ret = tcf_idr_create(tn, parm->index, est, a,
2093     &act_simp_ops, bind, false);
2094     @@ -119,7 +116,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
2095     return ret;
2096    
2097     d = to_defact(*a);
2098     - ret = alloc_defdata(d, defdata);
2099     + ret = alloc_defdata(d, tb[TCA_DEF_DATA]);
2100     if (ret < 0) {
2101     tcf_idr_release(*a, bind);
2102     return ret;
2103     @@ -133,7 +130,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
2104     if (!ovr)
2105     return -EEXIST;
2106    
2107     - reset_policy(d, defdata, parm);
2108     + reset_policy(d, tb[TCA_DEF_DATA], parm);
2109     }
2110    
2111     if (ret == ACT_P_CREATED)
2112     diff --git a/net/socket.c b/net/socket.c
2113     index f10f1d947c78..d1b02f161429 100644
2114     --- a/net/socket.c
2115     +++ b/net/socket.c
2116     @@ -537,7 +537,10 @@ static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
2117     if (!err && (iattr->ia_valid & ATTR_UID)) {
2118     struct socket *sock = SOCKET_I(d_inode(dentry));
2119    
2120     - sock->sk->sk_uid = iattr->ia_uid;
2121     + if (sock->sk)
2122     + sock->sk->sk_uid = iattr->ia_uid;
2123     + else
2124     + err = -ENOENT;
2125     }
2126    
2127     return err;
2128     @@ -586,12 +589,16 @@ EXPORT_SYMBOL(sock_alloc);
2129     * an inode not a file.
2130     */
2131    
2132     -void sock_release(struct socket *sock)
2133     +static void __sock_release(struct socket *sock, struct inode *inode)
2134     {
2135     if (sock->ops) {
2136     struct module *owner = sock->ops->owner;
2137    
2138     + if (inode)
2139     + inode_lock(inode);
2140     sock->ops->release(sock);
2141     + if (inode)
2142     + inode_unlock(inode);
2143     sock->ops = NULL;
2144     module_put(owner);
2145     }
2146     @@ -605,6 +612,11 @@ void sock_release(struct socket *sock)
2147     }
2148     sock->file = NULL;
2149     }
2150     +
2151     +void sock_release(struct socket *sock)
2152     +{
2153     + __sock_release(sock, NULL);
2154     +}
2155     EXPORT_SYMBOL(sock_release);
2156    
2157     void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
2158     @@ -1146,7 +1158,7 @@ static int sock_mmap(struct file *file, struct vm_area_struct *vma)
2159    
2160     static int sock_close(struct inode *inode, struct file *filp)
2161     {
2162     - sock_release(SOCKET_I(inode));
2163     + __sock_release(SOCKET_I(inode), inode);
2164     return 0;
2165     }
2166    
2167     diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
2168     index e1c93ce74e0f..5fe29121b9a8 100644
2169     --- a/net/tls/tls_sw.c
2170     +++ b/net/tls/tls_sw.c
2171     @@ -191,18 +191,12 @@ static void tls_free_both_sg(struct sock *sk)
2172     }
2173    
2174     static int tls_do_encryption(struct tls_context *tls_ctx,
2175     - struct tls_sw_context *ctx, size_t data_len,
2176     - gfp_t flags)
2177     + struct tls_sw_context *ctx,
2178     + struct aead_request *aead_req,
2179     + size_t data_len)
2180     {
2181     - unsigned int req_size = sizeof(struct aead_request) +
2182     - crypto_aead_reqsize(ctx->aead_send);
2183     - struct aead_request *aead_req;
2184     int rc;
2185    
2186     - aead_req = kzalloc(req_size, flags);
2187     - if (!aead_req)
2188     - return -ENOMEM;
2189     -
2190     ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
2191     ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;
2192    
2193     @@ -219,7 +213,6 @@ static int tls_do_encryption(struct tls_context *tls_ctx,
2194     ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
2195     ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;
2196    
2197     - kfree(aead_req);
2198     return rc;
2199     }
2200    
2201     @@ -228,8 +221,14 @@ static int tls_push_record(struct sock *sk, int flags,
2202     {
2203     struct tls_context *tls_ctx = tls_get_ctx(sk);
2204     struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
2205     + struct aead_request *req;
2206     int rc;
2207    
2208     + req = kzalloc(sizeof(struct aead_request) +
2209     + crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation);
2210     + if (!req)
2211     + return -ENOMEM;
2212     +
2213     sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
2214     sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);
2215    
2216     @@ -245,15 +244,14 @@ static int tls_push_record(struct sock *sk, int flags,
2217     tls_ctx->pending_open_record_frags = 0;
2218     set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);
2219    
2220     - rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size,
2221     - sk->sk_allocation);
2222     + rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
2223     if (rc < 0) {
2224     /* If we are called from write_space and
2225     * we fail, we need to set this SOCK_NOSPACE
2226     * to trigger another write_space in the future.
2227     */
2228     set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2229     - return rc;
2230     + goto out_req;
2231     }
2232    
2233     free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
2234     @@ -268,6 +266,8 @@ static int tls_push_record(struct sock *sk, int flags,
2235     tls_err_abort(sk, EBADMSG);
2236    
2237     tls_advance_record_sn(sk, &tls_ctx->tx);
2238     +out_req:
2239     + kfree(req);
2240     return rc;
2241     }
2242    
2243     @@ -755,7 +755,7 @@ int tls_sw_recvmsg(struct sock *sk,
2244     struct sk_buff *skb;
2245     ssize_t copied = 0;
2246     bool cmsg = false;
2247     - int err = 0;
2248     + int target, err = 0;
2249     long timeo;
2250    
2251     flags |= nonblock;
2252     @@ -765,6 +765,7 @@ int tls_sw_recvmsg(struct sock *sk,
2253    
2254     lock_sock(sk);
2255    
2256     + target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2257     timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2258     do {
2259     bool zc = false;
2260     @@ -857,6 +858,9 @@ int tls_sw_recvmsg(struct sock *sk,
2261     goto recv_end;
2262     }
2263     }
2264     + /* If we have a new message from strparser, continue now. */
2265     + if (copied >= target && !ctx->recv_pkt)
2266     + break;
2267     } while (len);
2268    
2269     recv_end:
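The new 'target' logic lets tls_sw_recvmsg() return once sock_rcvlowat() worth of data has been copied and no further record has already been parsed, instead of looping until the whole buffer is filled. A small user-space sketch of the receive-side knobs involved (illustrative; assumes fd is an already established kTLS socket and recv_at_least is a hypothetical helper):

	#include <sys/socket.h>
	#include <sys/types.h>

	/* Return after at least 'lowat' bytes (or EOF/error); with MSG_WAITALL
	 * the kernel would instead use 'len' itself as the target. */
	static ssize_t recv_at_least(int fd, void *buf, size_t len, int lowat)
	{
		if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat)) < 0)
			return -1;
		return recv(fd, buf, len, 0);
	}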
2270     diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
2271     index d1eb14842340..a12e594d4e3b 100644
2272     --- a/sound/pci/hda/hda_controller.c
2273     +++ b/sound/pci/hda/hda_controller.c
2274     @@ -748,8 +748,10 @@ int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
2275     return err;
2276     strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
2277     apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
2278     - if (apcm == NULL)
2279     + if (apcm == NULL) {
2280     + snd_device_free(chip->card, pcm);
2281     return -ENOMEM;
2282     + }
2283     apcm->chip = chip;
2284     apcm->pcm = pcm;
2285     apcm->codec = codec;
2286     diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
2287     index 5b4dbcec6de8..ba9a7e552183 100644
2288     --- a/sound/pci/hda/patch_conexant.c
2289     +++ b/sound/pci/hda/patch_conexant.c
2290     @@ -959,12 +959,15 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
2291     SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
2292     SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
2293     SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
2294     + SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
2295     + SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
2296     SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
2297     SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
2298     SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
2299     SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
2300     SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
2301     SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
2302     + SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
2303     SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
2304     SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
2305     SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
2306     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2307     index 01a6643fc7d4..06c2c80a045b 100644
2308     --- a/sound/pci/hda/patch_realtek.c
2309     +++ b/sound/pci/hda/patch_realtek.c
2310     @@ -6580,7 +6580,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2311     SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
2312     SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
2313     SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
2314     - SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
2315     SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
2316     SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
2317     SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
2318     @@ -6752,6 +6751,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
2319     {0x1b, 0x01111010},
2320     {0x1e, 0x01451130},
2321     {0x21, 0x02211020}),
2322     + SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
2323     + {0x12, 0x90a60140},
2324     + {0x14, 0x90170110},
2325     + {0x19, 0x02a11030},
2326     + {0x21, 0x02211020}),
2327     SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
2328     {0x12, 0x90a60140},
2329     {0x14, 0x90170110},
2330     diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
2331     index 754e632a27bd..02b7ad1946db 100644
2332     --- a/sound/usb/quirks-table.h
2333     +++ b/sound/usb/quirks-table.h
2334     @@ -3277,6 +3277,10 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
2335     }
2336     },
2337    
2338     +/* disabled due to regression for other devices;
2339     + * see https://bugzilla.kernel.org/show_bug.cgi?id=199905
2340     + */
2341     +#if 0
2342     {
2343     /*
2344     * Nura's first gen headphones use Cambridge Silicon Radio's vendor
2345     @@ -3324,6 +3328,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
2346     }
2347     }
2348     },
2349     +#endif /* disabled */
2350    
2351     {
2352     /*