Magellan Linux

Contents of /trunk/kernel-alx/patches-4.4/0128-4.4.29-all-fixes.patch

Revision 2864
Mon Mar 27 13:49:08 2017 UTC by niro
File size: 78075 bytes
linux-4.4.29
1 diff --git a/Documentation/x86/exception-tables.txt b/Documentation/x86/exception-tables.txt
2 index 32901aa36f0a..e396bcd8d830 100644
3 --- a/Documentation/x86/exception-tables.txt
4 +++ b/Documentation/x86/exception-tables.txt
5 @@ -290,3 +290,38 @@ Due to the way that the exception table is built and needs to be ordered,
6 only use exceptions for code in the .text section. Any other section
7 will cause the exception table to not be sorted correctly, and the
8 exceptions will fail.
9 +
10 +Things changed when 64-bit support was added to x86 Linux. Rather than
11 +double the size of the exception table by expanding the two entries
12 +from 32-bits to 64 bits, a clever trick was used to store addresses
13 +as relative offsets from the table itself. The assembly code changed
14 +from:
15 + .long 1b,3b
16 +to:
17 + .long (from) - .
18 + .long (to) - .
19 +
20 +and the C-code that uses these values converts back to absolute addresses
21 +like this:
22 +
23 + ex_insn_addr(const struct exception_table_entry *x)
24 + {
25 + return (unsigned long)&x->insn + x->insn;
26 + }
27 +
28 +In v4.6 the exception table entry was expanded with a new field "handler".
29 +This is also 32-bits wide and contains a third relative function
30 +pointer which points to one of:
31 +
32 +1) int ex_handler_default(const struct exception_table_entry *fixup)
33 + This is legacy case that just jumps to the fixup code
34 +2) int ex_handler_fault(const struct exception_table_entry *fixup)
35 + This case provides the fault number of the trap that occurred at
36 + entry->insn. It is used to distinguish page faults from machine
37 + check.
38 +3) int ex_handler_ext(const struct exception_table_entry *fixup)
39 + This case is used for uaccess_err ... we need to set a flag
40 + in the task structure. Before the handler functions existed this
41 + case was handled by adding a large offset to the fixup to tag
42 + it as special.
43 +More functions can easily be added.
44 diff --git a/Makefile b/Makefile
45 index 391294301aaf..19d7d9f68e35 100644
46 --- a/Makefile
47 +++ b/Makefile
48 @@ -1,6 +1,6 @@
49 VERSION = 4
50 PATCHLEVEL = 4
51 -SUBLEVEL = 28
52 +SUBLEVEL = 29
53 EXTRAVERSION =
54 NAME = Blurry Fish Butt
55
56 diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
57 index 03a39fe29246..9d9ba9acdddc 100644
58 --- a/arch/arm/crypto/ghash-ce-glue.c
59 +++ b/arch/arm/crypto/ghash-ce-glue.c
60 @@ -226,6 +226,27 @@ static int ghash_async_digest(struct ahash_request *req)
61 }
62 }
63
64 +static int ghash_async_import(struct ahash_request *req, const void *in)
65 +{
66 + struct ahash_request *cryptd_req = ahash_request_ctx(req);
67 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
68 + struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
69 + struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
70 +
71 + desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm);
72 + desc->flags = req->base.flags;
73 +
74 + return crypto_shash_import(desc, in);
75 +}
76 +
77 +static int ghash_async_export(struct ahash_request *req, void *out)
78 +{
79 + struct ahash_request *cryptd_req = ahash_request_ctx(req);
80 + struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
81 +
82 + return crypto_shash_export(desc, out);
83 +}
84 +
85 static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
86 unsigned int keylen)
87 {
88 @@ -274,7 +295,10 @@ static struct ahash_alg ghash_async_alg = {
89 .final = ghash_async_final,
90 .setkey = ghash_async_setkey,
91 .digest = ghash_async_digest,
92 + .import = ghash_async_import,
93 + .export = ghash_async_export,
94 .halg.digestsize = GHASH_DIGEST_SIZE,
95 + .halg.statesize = sizeof(struct ghash_desc_ctx),
96 .halg.base = {
97 .cra_name = "ghash",
98 .cra_driver_name = "ghash-ce",
99 diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c
100 index 2385052b0ce1..e362f865fcd2 100644
101 --- a/arch/arm/mach-pxa/pxa_cplds_irqs.c
102 +++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c
103 @@ -41,30 +41,35 @@ static irqreturn_t cplds_irq_handler(int in_irq, void *d)
104 unsigned long pending;
105 unsigned int bit;
106
107 - pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;
108 - for_each_set_bit(bit, &pending, CPLDS_NB_IRQ)
109 - generic_handle_irq(irq_find_mapping(fpga->irqdomain, bit));
110 + do {
111 + pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;
112 + for_each_set_bit(bit, &pending, CPLDS_NB_IRQ) {
113 + generic_handle_irq(irq_find_mapping(fpga->irqdomain,
114 + bit));
115 + }
116 + } while (pending);
117
118 return IRQ_HANDLED;
119 }
120
121 -static void cplds_irq_mask_ack(struct irq_data *d)
122 +static void cplds_irq_mask(struct irq_data *d)
123 {
124 struct cplds *fpga = irq_data_get_irq_chip_data(d);
125 unsigned int cplds_irq = irqd_to_hwirq(d);
126 - unsigned int set, bit = BIT(cplds_irq);
127 + unsigned int bit = BIT(cplds_irq);
128
129 fpga->irq_mask &= ~bit;
130 writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
131 - set = readl(fpga->base + FPGA_IRQ_SET_CLR);
132 - writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR);
133 }
134
135 static void cplds_irq_unmask(struct irq_data *d)
136 {
137 struct cplds *fpga = irq_data_get_irq_chip_data(d);
138 unsigned int cplds_irq = irqd_to_hwirq(d);
139 - unsigned int bit = BIT(cplds_irq);
140 + unsigned int set, bit = BIT(cplds_irq);
141 +
142 + set = readl(fpga->base + FPGA_IRQ_SET_CLR);
143 + writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR);
144
145 fpga->irq_mask |= bit;
146 writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
147 @@ -72,7 +77,8 @@ static void cplds_irq_unmask(struct irq_data *d)
148
149 static struct irq_chip cplds_irq_chip = {
150 .name = "pxa_cplds",
151 - .irq_mask_ack = cplds_irq_mask_ack,
152 + .irq_ack = cplds_irq_mask,
153 + .irq_mask = cplds_irq_mask,
154 .irq_unmask = cplds_irq_unmask,
155 .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
156 };
157 diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
158 index 247a0dc012f1..c07bfb52275e 100644
159 --- a/arch/powerpc/kernel/eeh_driver.c
160 +++ b/arch/powerpc/kernel/eeh_driver.c
161 @@ -909,6 +909,14 @@ static void eeh_handle_special_event(void)
162 /* Notify all devices to be down */
163 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
164 bus = eeh_pe_bus_get(phb_pe);
165 + if (!bus) {
166 + pr_err("%s: Cannot find PCI bus for "
167 + "PHB#%d-PE#%x\n",
168 + __func__,
169 + pe->phb->global_number,
170 + pe->addr);
171 + break;
172 + }
173 eeh_pe_dev_traverse(pe,
174 eeh_report_failure, NULL);
175 pcibios_remove_pci_devices(bus);
176 diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
177 index 32e26526f7e4..1eb698f653b4 100644
178 --- a/arch/powerpc/kernel/nvram_64.c
179 +++ b/arch/powerpc/kernel/nvram_64.c
180 @@ -969,7 +969,7 @@ int __init nvram_remove_partition(const char *name, int sig,
181
182 /* Make partition a free partition */
183 part->header.signature = NVRAM_SIG_FREE;
184 - strncpy(part->header.name, "wwwwwwwwwwww", 12);
185 + memset(part->header.name, 'w', 12);
186 part->header.checksum = nvram_checksum(&part->header);
187 rc = nvram_write_header(part);
188 if (rc <= 0) {
189 @@ -987,8 +987,8 @@ int __init nvram_remove_partition(const char *name, int sig,
190 }
191 if (prev) {
192 prev->header.length += part->header.length;
193 - prev->header.checksum = nvram_checksum(&part->header);
194 - rc = nvram_write_header(part);
195 + prev->header.checksum = nvram_checksum(&prev->header);
196 + rc = nvram_write_header(prev);
197 if (rc <= 0) {
198 printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
199 return rc;
200 diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
201 index ba0cae69a396..92736851c795 100644
202 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c
203 +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
204 @@ -956,6 +956,11 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option)
205 }
206
207 bus = eeh_pe_bus_get(pe);
208 + if (!bus) {
209 + pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
210 + __func__, pe->phb->global_number, pe->addr);
211 + return -EIO;
212 + }
213 if (pci_is_root_bus(bus) ||
214 pci_is_root_bus(bus->parent))
215 ret = pnv_eeh_root_reset(hose, option);
216 diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
217 index 189679aba703..f5063b6659eb 100644
218 --- a/arch/x86/include/asm/asm.h
219 +++ b/arch/x86/include/asm/asm.h
220 @@ -44,19 +44,22 @@
221
222 /* Exception table entry */
223 #ifdef __ASSEMBLY__
224 -# define _ASM_EXTABLE(from,to) \
225 +# define _ASM_EXTABLE_HANDLE(from, to, handler) \
226 .pushsection "__ex_table","a" ; \
227 - .balign 8 ; \
228 + .balign 4 ; \
229 .long (from) - . ; \
230 .long (to) - . ; \
231 + .long (handler) - . ; \
232 .popsection
233
234 -# define _ASM_EXTABLE_EX(from,to) \
235 - .pushsection "__ex_table","a" ; \
236 - .balign 8 ; \
237 - .long (from) - . ; \
238 - .long (to) - . + 0x7ffffff0 ; \
239 - .popsection
240 +# define _ASM_EXTABLE(from, to) \
241 + _ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
242 +
243 +# define _ASM_EXTABLE_FAULT(from, to) \
244 + _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
245 +
246 +# define _ASM_EXTABLE_EX(from, to) \
247 + _ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
248
249 # define _ASM_NOKPROBE(entry) \
250 .pushsection "_kprobe_blacklist","aw" ; \
251 @@ -89,19 +92,24 @@
252 .endm
253
254 #else
255 -# define _ASM_EXTABLE(from,to) \
256 +# define _EXPAND_EXTABLE_HANDLE(x) #x
257 +# define _ASM_EXTABLE_HANDLE(from, to, handler) \
258 " .pushsection \"__ex_table\",\"a\"\n" \
259 - " .balign 8\n" \
260 + " .balign 4\n" \
261 " .long (" #from ") - .\n" \
262 " .long (" #to ") - .\n" \
263 + " .long (" _EXPAND_EXTABLE_HANDLE(handler) ") - .\n" \
264 " .popsection\n"
265
266 -# define _ASM_EXTABLE_EX(from,to) \
267 - " .pushsection \"__ex_table\",\"a\"\n" \
268 - " .balign 8\n" \
269 - " .long (" #from ") - .\n" \
270 - " .long (" #to ") - . + 0x7ffffff0\n" \
271 - " .popsection\n"
272 +# define _ASM_EXTABLE(from, to) \
273 + _ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
274 +
275 +# define _ASM_EXTABLE_FAULT(from, to) \
276 + _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
277 +
278 +# define _ASM_EXTABLE_EX(from, to) \
279 + _ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
280 +
281 /* For C file, we already have NOKPROBE_SYMBOL macro */
282 #endif
283
284 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
285 index d42252ce9b4d..3794c7331cfc 100644
286 --- a/arch/x86/include/asm/uaccess.h
287 +++ b/arch/x86/include/asm/uaccess.h
288 @@ -90,12 +90,11 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
289 likely(!__range_not_ok(addr, size, user_addr_max()))
290
291 /*
292 - * The exception table consists of pairs of addresses relative to the
293 - * exception table enty itself: the first is the address of an
294 - * instruction that is allowed to fault, and the second is the address
295 - * at which the program should continue. No registers are modified,
296 - * so it is entirely up to the continuation code to figure out what to
297 - * do.
298 + * The exception table consists of triples of addresses relative to the
299 + * exception table entry itself. The first address is of an instruction
300 + * that is allowed to fault, the second is the target at which the program
301 + * should continue. The third is a handler function to deal with the fault
302 + * caused by the instruction in the first field.
303 *
304 * All the routines below use bits of fixup code that are out of line
305 * with the main instruction path. This means when everything is well,
306 @@ -104,13 +103,14 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
307 */
308
309 struct exception_table_entry {
310 - int insn, fixup;
311 + int insn, fixup, handler;
312 };
313 /* This is not the generic standard exception_table_entry format */
314 #define ARCH_HAS_SORT_EXTABLE
315 #define ARCH_HAS_SEARCH_EXTABLE
316
317 -extern int fixup_exception(struct pt_regs *regs);
318 +extern int fixup_exception(struct pt_regs *regs, int trapnr);
319 +extern bool ex_has_fault_handler(unsigned long ip);
320 extern int early_fixup_exception(unsigned long *ip);
321
322 /*
323 diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
324 index 9fdf1d330727..a257d6077d1b 100644
325 --- a/arch/x86/kernel/early-quirks.c
326 +++ b/arch/x86/kernel/early-quirks.c
327 @@ -331,12 +331,11 @@ static u32 __init i85x_stolen_base(int num, int slot, int func, size_t stolen_si
328
329 static u32 __init i865_stolen_base(int num, int slot, int func, size_t stolen_size)
330 {
331 - /*
332 - * FIXME is the graphics stolen memory region
333 - * always at TOUD? Ie. is it always the last
334 - * one to be allocated by the BIOS?
335 - */
336 - return read_pci_config_16(0, 0, 0, I865_TOUD) << 16;
337 + u16 toud = 0;
338 +
339 + toud = read_pci_config_16(0, 0, 0, I865_TOUD);
340 +
341 + return (phys_addr_t)(toud << 16) + i845_tseg_size();
342 }
343
344 static size_t __init i830_stolen_size(int num, int slot, int func)
345 diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
346 index 023c442c33bb..e1d1f6cbaf11 100644
347 --- a/arch/x86/kernel/kprobes/core.c
348 +++ b/arch/x86/kernel/kprobes/core.c
349 @@ -1000,7 +1000,7 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
350 * In case the user-specified fault handler returned
351 * zero, try to fix up.
352 */
353 - if (fixup_exception(regs))
354 + if (fixup_exception(regs, trapnr))
355 return 1;
356
357 /*
358 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
359 index 679302c312f8..5621f882645e 100644
360 --- a/arch/x86/kernel/traps.c
361 +++ b/arch/x86/kernel/traps.c
362 @@ -199,7 +199,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
363 }
364
365 if (!user_mode(regs)) {
366 - if (!fixup_exception(regs)) {
367 + if (!fixup_exception(regs, trapnr)) {
368 tsk->thread.error_code = error_code;
369 tsk->thread.trap_nr = trapnr;
370 die(str, regs, error_code);
371 @@ -453,7 +453,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
372
373 tsk = current;
374 if (!user_mode(regs)) {
375 - if (fixup_exception(regs))
376 + if (fixup_exception(regs, X86_TRAP_GP))
377 return;
378
379 tsk->thread.error_code = error_code;
380 @@ -699,7 +699,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
381 conditional_sti(regs);
382
383 if (!user_mode(regs)) {
384 - if (!fixup_exception(regs)) {
385 + if (!fixup_exception(regs, trapnr)) {
386 task->thread.error_code = error_code;
387 task->thread.trap_nr = trapnr;
388 die(str, regs, error_code);
389 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
390 index 903ec1e9c326..9dd7e4b7fcde 100644
391 --- a/arch/x86/mm/extable.c
392 +++ b/arch/x86/mm/extable.c
393 @@ -3,6 +3,9 @@
394 #include <linux/sort.h>
395 #include <asm/uaccess.h>
396
397 +typedef bool (*ex_handler_t)(const struct exception_table_entry *,
398 + struct pt_regs *, int);
399 +
400 static inline unsigned long
401 ex_insn_addr(const struct exception_table_entry *x)
402 {
403 @@ -13,11 +16,56 @@ ex_fixup_addr(const struct exception_table_entry *x)
404 {
405 return (unsigned long)&x->fixup + x->fixup;
406 }
407 +static inline ex_handler_t
408 +ex_fixup_handler(const struct exception_table_entry *x)
409 +{
410 + return (ex_handler_t)((unsigned long)&x->handler + x->handler);
411 +}
412
413 -int fixup_exception(struct pt_regs *regs)
414 +bool ex_handler_default(const struct exception_table_entry *fixup,
415 + struct pt_regs *regs, int trapnr)
416 {
417 - const struct exception_table_entry *fixup;
418 - unsigned long new_ip;
419 + regs->ip = ex_fixup_addr(fixup);
420 + return true;
421 +}
422 +EXPORT_SYMBOL(ex_handler_default);
423 +
424 +bool ex_handler_fault(const struct exception_table_entry *fixup,
425 + struct pt_regs *regs, int trapnr)
426 +{
427 + regs->ip = ex_fixup_addr(fixup);
428 + regs->ax = trapnr;
429 + return true;
430 +}
431 +EXPORT_SYMBOL_GPL(ex_handler_fault);
432 +
433 +bool ex_handler_ext(const struct exception_table_entry *fixup,
434 + struct pt_regs *regs, int trapnr)
435 +{
436 + /* Special hack for uaccess_err */
437 + current_thread_info()->uaccess_err = 1;
438 + regs->ip = ex_fixup_addr(fixup);
439 + return true;
440 +}
441 +EXPORT_SYMBOL(ex_handler_ext);
442 +
443 +bool ex_has_fault_handler(unsigned long ip)
444 +{
445 + const struct exception_table_entry *e;
446 + ex_handler_t handler;
447 +
448 + e = search_exception_tables(ip);
449 + if (!e)
450 + return false;
451 + handler = ex_fixup_handler(e);
452 +
453 + return handler == ex_handler_fault;
454 +}
455 +
456 +int fixup_exception(struct pt_regs *regs, int trapnr)
457 +{
458 + const struct exception_table_entry *e;
459 + ex_handler_t handler;
460
461 #ifdef CONFIG_PNPBIOS
462 if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
463 @@ -33,42 +81,34 @@ int fixup_exception(struct pt_regs *regs)
464 }
465 #endif
466
467 - fixup = search_exception_tables(regs->ip);
468 - if (fixup) {
469 - new_ip = ex_fixup_addr(fixup);
470 -
471 - if (fixup->fixup - fixup->insn >= 0x7ffffff0 - 4) {
472 - /* Special hack for uaccess_err */
473 - current_thread_info()->uaccess_err = 1;
474 - new_ip -= 0x7ffffff0;
475 - }
476 - regs->ip = new_ip;
477 - return 1;
478 - }
479 + e = search_exception_tables(regs->ip);
480 + if (!e)
481 + return 0;
482
483 - return 0;
484 + handler = ex_fixup_handler(e);
485 + return handler(e, regs, trapnr);
486 }
487
488 /* Restricted version used during very early boot */
489 int __init early_fixup_exception(unsigned long *ip)
490 {
491 - const struct exception_table_entry *fixup;
492 + const struct exception_table_entry *e;
493 unsigned long new_ip;
494 + ex_handler_t handler;
495
496 - fixup = search_exception_tables(*ip);
497 - if (fixup) {
498 - new_ip = ex_fixup_addr(fixup);
499 + e = search_exception_tables(*ip);
500 + if (!e)
501 + return 0;
502
503 - if (fixup->fixup - fixup->insn >= 0x7ffffff0 - 4) {
504 - /* uaccess handling not supported during early boot */
505 - return 0;
506 - }
507 + new_ip = ex_fixup_addr(e);
508 + handler = ex_fixup_handler(e);
509
510 - *ip = new_ip;
511 - return 1;
512 - }
513 + /* special handling not supported during early boot */
514 + if (handler != ex_handler_default)
515 + return 0;
516
517 - return 0;
518 + *ip = new_ip;
519 + return 1;
520 }
521
522 /*
523 @@ -133,6 +173,8 @@ void sort_extable(struct exception_table_entry *start,
524 i += 4;
525 p->fixup += i;
526 i += 4;
527 + p->handler += i;
528 + i += 4;
529 }
530
531 sort(start, finish - start, sizeof(struct exception_table_entry),
532 @@ -145,6 +187,8 @@ void sort_extable(struct exception_table_entry *start,
533 i += 4;
534 p->fixup -= i;
535 i += 4;
536 + p->handler -= i;
537 + i += 4;
538 }
539 }
540
541 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
542 index e830c71a1323..03898aea6e0f 100644
543 --- a/arch/x86/mm/fault.c
544 +++ b/arch/x86/mm/fault.c
545 @@ -663,7 +663,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
546 int sig;
547
548 /* Are we prepared to handle this kernel fault? */
549 - if (fixup_exception(regs)) {
550 + if (fixup_exception(regs, X86_TRAP_PF)) {
551 /*
552 * Any interrupt that takes a fault gets the fixup. This makes
553 * the below recursive fault logic only apply to a faults from
554 diff --git a/crypto/gcm.c b/crypto/gcm.c
555 index d9ea5f9c0574..1238b3c5a321 100644
556 --- a/crypto/gcm.c
557 +++ b/crypto/gcm.c
558 @@ -117,7 +117,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
559 struct crypto_ablkcipher *ctr = ctx->ctr;
560 struct {
561 be128 hash;
562 - u8 iv[8];
563 + u8 iv[16];
564
565 struct crypto_gcm_setkey_result result;
566
567 diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
568 index 01d4be2c354b..f5c26a5f6875 100644
569 --- a/drivers/char/hw_random/omap-rng.c
570 +++ b/drivers/char/hw_random/omap-rng.c
571 @@ -385,7 +385,7 @@ static int omap_rng_probe(struct platform_device *pdev)
572
573 pm_runtime_enable(&pdev->dev);
574 ret = pm_runtime_get_sync(&pdev->dev);
575 - if (ret) {
576 + if (ret < 0) {
577 dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
578 pm_runtime_put_noidle(&pdev->dev);
579 goto err_ioremap;
580 @@ -443,7 +443,7 @@ static int __maybe_unused omap_rng_resume(struct device *dev)
581 int ret;
582
583 ret = pm_runtime_get_sync(dev);
584 - if (ret) {
585 + if (ret < 0) {
586 dev_err(dev, "Failed to runtime_get device: %d\n", ret);
587 pm_runtime_put_noidle(dev);
588 return ret;
589 diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
590 index bbf206e3da0d..ac9582de64a5 100644
591 --- a/drivers/clk/clk-divider.c
592 +++ b/drivers/clk/clk-divider.c
593 @@ -354,7 +354,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
594
595 /* if read only, just return current value */
596 if (divider->flags & CLK_DIVIDER_READ_ONLY) {
597 - bestdiv = readl(divider->reg) >> divider->shift;
598 + bestdiv = clk_readl(divider->reg) >> divider->shift;
599 bestdiv &= div_mask(divider->width);
600 bestdiv = _get_div(divider->table, bestdiv, divider->flags,
601 divider->width);
602 diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
603 index 7bc1c4527ae4..8b77abb6bc22 100644
604 --- a/drivers/clk/clk-qoriq.c
605 +++ b/drivers/clk/clk-qoriq.c
606 @@ -766,7 +766,11 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
607 if (!hwc)
608 return NULL;
609
610 - hwc->reg = cg->regs + 0x20 * idx;
611 + if (cg->info.flags & CG_VER3)
612 + hwc->reg = cg->regs + 0x70000 + 0x20 * idx;
613 + else
614 + hwc->reg = cg->regs + 0x20 * idx;
615 +
616 hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];
617
618 /*
619 diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c
620 index b0978d3b83e2..d302ed3b8225 100644
621 --- a/drivers/clk/imx/clk-imx35.c
622 +++ b/drivers/clk/imx/clk-imx35.c
623 @@ -115,7 +115,7 @@ static void __init _mx35_clocks_init(void)
624 }
625
626 clk[ckih] = imx_clk_fixed("ckih", 24000000);
627 - clk[ckil] = imx_clk_fixed("ckih", 32768);
628 + clk[ckil] = imx_clk_fixed("ckil", 32768);
629 clk[mpll] = imx_clk_pllv1(IMX_PLLV1_IMX35, "mpll", "ckih", base + MX35_CCM_MPCTL);
630 clk[ppll] = imx_clk_pllv1(IMX_PLLV1_IMX35, "ppll", "ckih", base + MX35_CCM_PPCTL);
631
632 diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
633 index 2bf37e68ad0f..dd184b50e5b4 100644
634 --- a/drivers/dma/ipu/ipu_irq.c
635 +++ b/drivers/dma/ipu/ipu_irq.c
636 @@ -286,22 +286,21 @@ static void ipu_irq_handler(struct irq_desc *desc)
637 raw_spin_unlock(&bank_lock);
638 while ((line = ffs(status))) {
639 struct ipu_irq_map *map;
640 - unsigned int irq = NO_IRQ;
641 + unsigned int irq;
642
643 line--;
644 status &= ~(1UL << line);
645
646 raw_spin_lock(&bank_lock);
647 map = src2map(32 * i + line);
648 - if (map)
649 - irq = map->irq;
650 - raw_spin_unlock(&bank_lock);
651 -
652 if (!map) {
653 + raw_spin_unlock(&bank_lock);
654 pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
655 line, i);
656 continue;
657 }
658 + irq = map->irq;
659 + raw_spin_unlock(&bank_lock);
660 generic_handle_irq(irq);
661 }
662 }
663 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
664 index fe36caf1b7d7..14f57d9915e3 100644
665 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
666 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
667 @@ -113,24 +113,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
668 printk("\n");
669 }
670
671 +
672 u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
673 {
674 struct drm_device *dev = adev->ddev;
675 struct drm_crtc *crtc;
676 struct amdgpu_crtc *amdgpu_crtc;
677 - u32 line_time_us, vblank_lines;
678 + u32 vblank_in_pixels;
679 u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
680
681 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
682 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
683 amdgpu_crtc = to_amdgpu_crtc(crtc);
684 if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
685 - line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
686 - amdgpu_crtc->hw_mode.clock;
687 - vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
688 + vblank_in_pixels =
689 + amdgpu_crtc->hw_mode.crtc_htotal *
690 + (amdgpu_crtc->hw_mode.crtc_vblank_end -
691 amdgpu_crtc->hw_mode.crtc_vdisplay +
692 - (amdgpu_crtc->v_border * 2);
693 - vblank_time_us = vblank_lines * line_time_us;
694 + (amdgpu_crtc->v_border * 2));
695 +
696 + vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
697 break;
698 }
699 }
700 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
701 index 4488e82f87b0..a5c824078472 100644
702 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
703 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
704 @@ -227,7 +227,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
705 type = AMD_IP_BLOCK_TYPE_UVD;
706 ring_mask = adev->uvd.ring.ready ? 1 : 0;
707 ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
708 - ib_size_alignment = 8;
709 + ib_size_alignment = 16;
710 break;
711 case AMDGPU_HW_IP_VCE:
712 type = AMD_IP_BLOCK_TYPE_VCE;
713 diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
714 index 4dcc8fba5792..5b261adb4b69 100644
715 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
716 +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
717 @@ -419,16 +419,6 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
718 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
719 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
720
721 - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
722 - connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
723 - /* don't try to enable hpd on eDP or LVDS avoid breaking the
724 - * aux dp channel on imac and help (but not completely fix)
725 - * https://bugzilla.redhat.com/show_bug.cgi?id=726143
726 - * also avoid interrupt storms during dpms.
727 - */
728 - continue;
729 - }
730 -
731 switch (amdgpu_connector->hpd.hpd) {
732 case AMDGPU_HPD_1:
733 idx = 0;
734 @@ -452,6 +442,19 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
735 continue;
736 }
737
738 + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
739 + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
740 + /* don't try to enable hpd on eDP or LVDS avoid breaking the
741 + * aux dp channel on imac and help (but not completely fix)
742 + * https://bugzilla.redhat.com/show_bug.cgi?id=726143
743 + * also avoid interrupt storms during dpms.
744 + */
745 + tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
746 + tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
747 + WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
748 + continue;
749 + }
750 +
751 tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
752 tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
753 WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
754 diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
755 index 8f1e51128b33..c161eeda417b 100644
756 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
757 +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
758 @@ -409,16 +409,6 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
759 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
760 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
761
762 - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
763 - connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
764 - /* don't try to enable hpd on eDP or LVDS avoid breaking the
765 - * aux dp channel on imac and help (but not completely fix)
766 - * https://bugzilla.redhat.com/show_bug.cgi?id=726143
767 - * also avoid interrupt storms during dpms.
768 - */
769 - continue;
770 - }
771 -
772 switch (amdgpu_connector->hpd.hpd) {
773 case AMDGPU_HPD_1:
774 idx = 0;
775 @@ -442,6 +432,19 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
776 continue;
777 }
778
779 + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
780 + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
781 + /* don't try to enable hpd on eDP or LVDS avoid breaking the
782 + * aux dp channel on imac and help (but not completely fix)
783 + * https://bugzilla.redhat.com/show_bug.cgi?id=726143
784 + * also avoid interrupt storms during dpms.
785 + */
786 + tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
787 + tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
788 + WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
789 + continue;
790 + }
791 +
792 tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
793 tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
794 WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
795 @@ -3030,6 +3033,7 @@ static int dce_v11_0_sw_fini(void *handle)
796
797 dce_v11_0_afmt_fini(adev);
798
799 + drm_mode_config_cleanup(adev->ddev);
800 adev->mode_info.mode_config_initialized = false;
801
802 return 0;
803 diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
804 index 42d954dc436d..9b4dcf76ce6c 100644
805 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
806 +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
807 @@ -392,15 +392,6 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
808 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
809 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
810
811 - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
812 - connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
813 - /* don't try to enable hpd on eDP or LVDS avoid breaking the
814 - * aux dp channel on imac and help (but not completely fix)
815 - * https://bugzilla.redhat.com/show_bug.cgi?id=726143
816 - * also avoid interrupt storms during dpms.
817 - */
818 - continue;
819 - }
820 switch (amdgpu_connector->hpd.hpd) {
821 case AMDGPU_HPD_1:
822 WREG32(mmDC_HPD1_CONTROL, tmp);
823 @@ -423,6 +414,45 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
824 default:
825 break;
826 }
827 +
828 + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
829 + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
830 + /* don't try to enable hpd on eDP or LVDS avoid breaking the
831 + * aux dp channel on imac and help (but not completely fix)
832 + * https://bugzilla.redhat.com/show_bug.cgi?id=726143
833 + * also avoid interrupt storms during dpms.
834 + */
835 + u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
836 +
837 + switch (amdgpu_connector->hpd.hpd) {
838 + case AMDGPU_HPD_1:
839 + dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
840 + break;
841 + case AMDGPU_HPD_2:
842 + dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
843 + break;
844 + case AMDGPU_HPD_3:
845 + dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
846 + break;
847 + case AMDGPU_HPD_4:
848 + dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
849 + break;
850 + case AMDGPU_HPD_5:
851 + dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
852 + break;
853 + case AMDGPU_HPD_6:
854 + dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
855 + break;
856 + default:
857 + continue;
858 + }
859 +
860 + dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
861 + dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
862 + WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
863 + continue;
864 + }
865 +
866 dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
867 amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
868 }
869 diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
870 index 9f935f55d74c..968b31f39884 100644
871 --- a/drivers/gpu/drm/drm_prime.c
872 +++ b/drivers/gpu/drm/drm_prime.c
873 @@ -339,14 +339,17 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
874 * using the PRIME helpers.
875 */
876 struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
877 - struct drm_gem_object *obj, int flags)
878 + struct drm_gem_object *obj,
879 + int flags)
880 {
881 - DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
882 -
883 - exp_info.ops = &drm_gem_prime_dmabuf_ops;
884 - exp_info.size = obj->size;
885 - exp_info.flags = flags;
886 - exp_info.priv = obj;
887 + struct dma_buf_export_info exp_info = {
888 + .exp_name = KBUILD_MODNAME, /* white lie for debug */
889 + .owner = dev->driver->fops->owner,
890 + .ops = &drm_gem_prime_dmabuf_ops,
891 + .size = obj->size,
892 + .flags = flags,
893 + .priv = obj,
894 + };
895
896 if (dev->driver->gem_prime_res_obj)
897 exp_info.resv = dev->driver->gem_prime_res_obj(obj);
898 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
899 index d400d6773bbb..fb9f647bb5cd 100644
900 --- a/drivers/gpu/drm/i915/i915_drv.h
901 +++ b/drivers/gpu/drm/i915/i915_drv.h
902 @@ -2150,21 +2150,19 @@ struct drm_i915_gem_object {
903 /** Record of address bit 17 of each page at last unbind. */
904 unsigned long *bit_17;
905
906 - union {
907 - /** for phy allocated objects */
908 - struct drm_dma_handle *phys_handle;
909 -
910 - struct i915_gem_userptr {
911 - uintptr_t ptr;
912 - unsigned read_only :1;
913 - unsigned workers :4;
914 + struct i915_gem_userptr {
915 + uintptr_t ptr;
916 + unsigned read_only :1;
917 + unsigned workers :4;
918 #define I915_GEM_USERPTR_MAX_WORKERS 15
919
920 - struct i915_mm_struct *mm;
921 - struct i915_mmu_object *mmu_object;
922 - struct work_struct *work;
923 - } userptr;
924 - };
925 + struct i915_mm_struct *mm;
926 + struct i915_mmu_object *mmu_object;
927 + struct work_struct *work;
928 + } userptr;
929 +
930 + /** for phys allocated objects */
931 + struct drm_dma_handle *phys_handle;
932 };
933 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
934
935 diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
936 index 87e919a06b27..5d2323a40c25 100644
937 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
938 +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
939 @@ -108,17 +108,28 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
940 pci_read_config_dword(dev->pdev, 0x5c, &base);
941 base &= ~((1<<20) - 1);
942 } else if (IS_I865G(dev)) {
943 + u32 tseg_size = 0;
944 u16 toud = 0;
945 + u8 tmp;
946 +
947 + pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
948 + I845_ESMRAMC, &tmp);
949 +
950 + if (tmp & TSEG_ENABLE) {
951 + switch (tmp & I845_TSEG_SIZE_MASK) {
952 + case I845_TSEG_SIZE_512K:
953 + tseg_size = KB(512);
954 + break;
955 + case I845_TSEG_SIZE_1M:
956 + tseg_size = MB(1);
957 + break;
958 + }
959 + }
960
961 - /*
962 - * FIXME is the graphics stolen memory region
963 - * always at TOUD? Ie. is it always the last
964 - * one to be allocated by the BIOS?
965 - */
966 pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0),
967 I865_TOUD, &toud);
968
969 - base = toud << 16;
970 + base = (toud << 16) + tseg_size;
971 } else if (IS_I85X(dev)) {
972 u32 tseg_size = 0;
973 u32 tom;
974 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
975 index ebbd23407a80..0f8367da0663 100644
976 --- a/drivers/gpu/drm/i915/intel_dp.c
977 +++ b/drivers/gpu/drm/i915/intel_dp.c
978 @@ -4648,7 +4648,7 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
979 *
980 * Return %true if @port is connected, %false otherwise.
981 */
982 -bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
983 +static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
984 struct intel_digital_port *port)
985 {
986 if (HAS_PCH_IBX(dev_priv))
987 diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
988 index 41442e619595..722aa159cd28 100644
989 --- a/drivers/gpu/drm/i915/intel_drv.h
990 +++ b/drivers/gpu/drm/i915/intel_drv.h
991 @@ -1231,8 +1231,6 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp);
992 void intel_edp_drrs_invalidate(struct drm_device *dev,
993 unsigned frontbuffer_bits);
994 void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
995 -bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
996 - struct intel_digital_port *port);
997 void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
998
999 /* intel_dp_mst.c */
1000 diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
1001 index dff69fef47e0..3b92cad8bef2 100644
1002 --- a/drivers/gpu/drm/i915/intel_hdmi.c
1003 +++ b/drivers/gpu/drm/i915/intel_hdmi.c
1004 @@ -1331,19 +1331,18 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
1005 }
1006
1007 static bool
1008 -intel_hdmi_set_edid(struct drm_connector *connector, bool force)
1009 +intel_hdmi_set_edid(struct drm_connector *connector)
1010 {
1011 struct drm_i915_private *dev_priv = to_i915(connector->dev);
1012 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
1013 - struct edid *edid = NULL;
1014 + struct edid *edid;
1015 bool connected = false;
1016
1017 intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
1018
1019 - if (force)
1020 - edid = drm_get_edid(connector,
1021 - intel_gmbus_get_adapter(dev_priv,
1022 - intel_hdmi->ddc_bus));
1023 + edid = drm_get_edid(connector,
1024 + intel_gmbus_get_adapter(dev_priv,
1025 + intel_hdmi->ddc_bus));
1026
1027 intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
1028
1029 @@ -1371,37 +1370,16 @@ static enum drm_connector_status
1030 intel_hdmi_detect(struct drm_connector *connector, bool force)
1031 {
1032 enum drm_connector_status status;
1033 - struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
1034 struct drm_i915_private *dev_priv = to_i915(connector->dev);
1035 - bool live_status = false;
1036 - unsigned int try;
1037
1038 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1039 connector->base.id, connector->name);
1040
1041 intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
1042
1043 - for (try = 0; !live_status && try < 9; try++) {
1044 - if (try)
1045 - msleep(10);
1046 - live_status = intel_digital_port_connected(dev_priv,
1047 - hdmi_to_dig_port(intel_hdmi));
1048 - }
1049 -
1050 - if (!live_status) {
1051 - DRM_DEBUG_KMS("HDMI live status down\n");
1052 - /*
1053 - * Live status register is not reliable on all intel platforms.
1054 - * So consider live_status only for certain platforms, for
1055 - * others, read EDID to determine presence of sink.
1056 - */
1057 - if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
1058 - live_status = true;
1059 - }
1060 -
1061 intel_hdmi_unset_edid(connector);
1062
1063 - if (intel_hdmi_set_edid(connector, live_status)) {
1064 + if (intel_hdmi_set_edid(connector)) {
1065 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
1066
1067 hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
1068 @@ -1427,7 +1405,7 @@ intel_hdmi_force(struct drm_connector *connector)
1069 if (connector->status != connector_status_connected)
1070 return;
1071
1072 - intel_hdmi_set_edid(connector, true);
1073 + intel_hdmi_set_edid(connector);
1074 hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
1075 }
1076
1077 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
1078 index 1e851e037c29..3f802163f7d4 100644
1079 --- a/drivers/gpu/drm/i915/intel_pm.c
1080 +++ b/drivers/gpu/drm/i915/intel_pm.c
1081 @@ -2097,32 +2097,34 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
1082 GEN9_MEM_LATENCY_LEVEL_MASK;
1083
1084 /*
1085 + * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
1086 + * need to be disabled. We make sure to sanitize the values out
1087 + * of the punit to satisfy this requirement.
1088 + */
1089 + for (level = 1; level <= max_level; level++) {
1090 + if (wm[level] == 0) {
1091 + for (i = level + 1; i <= max_level; i++)
1092 + wm[i] = 0;
1093 + break;
1094 + }
1095 + }
1096 +
1097 + /*
1098 * WaWmMemoryReadLatency:skl
1099 *
1100 * punit doesn't take into account the read latency so we need
1101 - * to add 2us to the various latency levels we retrieve from
1102 - * the punit.
1103 - * - W0 is a bit special in that it's the only level that
1104 - * can't be disabled if we want to have display working, so
1105 - * we always add 2us there.
1106 - * - For levels >=1, punit returns 0us latency when they are
1107 - * disabled, so we respect that and don't add 2us then
1108 - *
1109 - * Additionally, if a level n (n > 1) has a 0us latency, all
1110 - * levels m (m >= n) need to be disabled. We make sure to
1111 - * sanitize the values out of the punit to satisfy this
1112 - * requirement.
1113 + * to add 2us to the various latency levels we retrieve from the
1114 + * punit when level 0 response data us 0us.
1115 */
1116 - wm[0] += 2;
1117 - for (level = 1; level <= max_level; level++)
1118 - if (wm[level] != 0)
1119 + if (wm[0] == 0) {
1120 + wm[0] += 2;
1121 + for (level = 1; level <= max_level; level++) {
1122 + if (wm[level] == 0)
1123 + break;
1124 wm[level] += 2;
1125 - else {
1126 - for (i = level + 1; i <= max_level; i++)
1127 - wm[i] = 0;
1128 -
1129 - break;
1130 }
1131 + }
1132 +
1133 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1134 uint64_t sskpd = I915_READ64(MCH_SSKPD);
1135
1136 diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
1137 index fa2154493cf1..470af4aa4a6a 100644
1138 --- a/drivers/gpu/drm/radeon/r600_dpm.c
1139 +++ b/drivers/gpu/drm/radeon/r600_dpm.c
1140 @@ -156,19 +156,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
1141 struct drm_device *dev = rdev->ddev;
1142 struct drm_crtc *crtc;
1143 struct radeon_crtc *radeon_crtc;
1144 - u32 line_time_us, vblank_lines;
1145 + u32 vblank_in_pixels;
1146 u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
1147
1148 if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1149 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1150 radeon_crtc = to_radeon_crtc(crtc);
1151 if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
1152 - line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
1153 - radeon_crtc->hw_mode.clock;
1154 - vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
1155 - radeon_crtc->hw_mode.crtc_vdisplay +
1156 - (radeon_crtc->v_border * 2);
1157 - vblank_time_us = vblank_lines * line_time_us;
1158 + vblank_in_pixels =
1159 + radeon_crtc->hw_mode.crtc_htotal *
1160 + (radeon_crtc->hw_mode.crtc_vblank_end -
1161 + radeon_crtc->hw_mode.crtc_vdisplay +
1162 + (radeon_crtc->v_border * 2));
1163 +
1164 + vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
1165 break;
1166 }
1167 }
1168 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
1169 index e2dd5d19c32c..4aa2cbe4c85f 100644
1170 --- a/drivers/gpu/drm/radeon/radeon_device.c
1171 +++ b/drivers/gpu/drm/radeon/radeon_device.c
1172 @@ -660,8 +660,9 @@ bool radeon_card_posted(struct radeon_device *rdev)
1173 {
1174 uint32_t reg;
1175
1176 - /* for pass through, always force asic_init */
1177 - if (radeon_device_is_virtual())
1178 + /* for pass through, always force asic_init for CI */
1179 + if (rdev->family >= CHIP_BONAIRE &&
1180 + radeon_device_is_virtual())
1181 return false;
1182
1183 /* required for EFI mode on macbook2,1 which uses an r5xx asic */
1184 diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
1185 index 3aaa07dafc00..472e0771832e 100644
1186 --- a/drivers/gpu/drm/radeon/si_dpm.c
1187 +++ b/drivers/gpu/drm/radeon/si_dpm.c
1188 @@ -4112,7 +4112,7 @@ static int si_populate_smc_voltage_tables(struct radeon_device *rdev,
1189 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
1190 si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
1191
1192 - table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
1193 + table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
1194 cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
1195
1196 si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
1197 diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
1198 index 3c779838d9ab..966e3a556011 100644
1199 --- a/drivers/gpu/drm/radeon/sislands_smc.h
1200 +++ b/drivers/gpu/drm/radeon/sislands_smc.h
1201 @@ -194,6 +194,7 @@ typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
1202 #define SISLANDS_SMC_VOLTAGEMASK_VDDC 0
1203 #define SISLANDS_SMC_VOLTAGEMASK_MVDD 1
1204 #define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
1205 +#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3
1206 #define SISLANDS_SMC_VOLTAGEMASK_MAX 4
1207
1208 struct SISLANDS_SMC_VOLTAGEMASKTABLE
1209 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1210 index 4948c1529836..ecf15cf0c3fd 100644
1211 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1212 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1213 @@ -3830,14 +3830,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
1214 int ret;
1215
1216 *header = NULL;
1217 - if (!dev_priv->cman || kernel_commands)
1218 - return kernel_commands;
1219 -
1220 if (command_size > SVGA_CB_MAX_SIZE) {
1221 DRM_ERROR("Command buffer is too large.\n");
1222 return ERR_PTR(-EINVAL);
1223 }
1224
1225 + if (!dev_priv->cman || kernel_commands)
1226 + return kernel_commands;
1227 +
1228 /* If possible, add a little space for fencing. */
1229 cmdbuf_size = command_size + 512;
1230 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
1231 diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
1232 index 71493d2af912..70a6985334d5 100644
1233 --- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
1234 +++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
1235 @@ -4102,7 +4102,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
1236 (u8 *)&settings->beacon.head[ie_offset],
1237 settings->beacon.head_len - ie_offset,
1238 WLAN_EID_SSID);
1239 - if (!ssid_ie)
1240 + if (!ssid_ie || ssid_ie->len > IEEE80211_MAX_SSID_LEN)
1241 return -EINVAL;
1242
1243 memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
1244 diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
1245 index 3cda1f956f0b..6378dfd3b4e8 100644
1246 --- a/drivers/net/wireless/mwifiex/join.c
1247 +++ b/drivers/net/wireless/mwifiex/join.c
1248 @@ -661,9 +661,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
1249 priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN,
1250 sizeof(priv->assoc_rsp_buf));
1251
1252 - memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
1253 -
1254 assoc_rsp->a_id = cpu_to_le16(aid);
1255 + memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
1256
1257 if (status_code) {
1258 priv->adapter->dbg.num_cmd_assoc_failure++;
1259 diff --git a/drivers/power/bq24257_charger.c b/drivers/power/bq24257_charger.c
1260 index 1fea2c7ef97f..6fc31bdc639b 100644
1261 --- a/drivers/power/bq24257_charger.c
1262 +++ b/drivers/power/bq24257_charger.c
1263 @@ -1068,6 +1068,12 @@ static int bq24257_probe(struct i2c_client *client,
1264 return ret;
1265 }
1266
1267 + ret = bq24257_power_supply_init(bq);
1268 + if (ret < 0) {
1269 + dev_err(dev, "Failed to register power supply\n");
1270 + return ret;
1271 + }
1272 +
1273 ret = devm_request_threaded_irq(dev, client->irq, NULL,
1274 bq24257_irq_handler_thread,
1275 IRQF_TRIGGER_FALLING |
1276 @@ -1078,12 +1084,6 @@ static int bq24257_probe(struct i2c_client *client,
1277 return ret;
1278 }
1279
1280 - ret = bq24257_power_supply_init(bq);
1281 - if (ret < 0) {
1282 - dev_err(dev, "Failed to register power supply\n");
1283 - return ret;
1284 - }
1285 -
1286 ret = sysfs_create_group(&bq->charger->dev.kobj, &bq24257_attr_group);
1287 if (ret < 0) {
1288 dev_err(dev, "Can't create sysfs entries\n");
1289 diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
1290 index 7c511add5aa7..bae98521c808 100644
1291 --- a/drivers/s390/char/con3270.c
1292 +++ b/drivers/s390/char/con3270.c
1293 @@ -124,7 +124,12 @@ con3270_create_status(struct con3270 *cp)
1294 static void
1295 con3270_update_string(struct con3270 *cp, struct string *s, int nr)
1296 {
1297 - if (s->len >= cp->view.cols - 5)
1298 + if (s->len < 4) {
1299 + /* This indicates a bug, but printing a warning would
1300 + * cause a deadlock. */
1301 + return;
1302 + }
1303 + if (s->string[s->len - 4] != TO_RA)
1304 return;
1305 raw3270_buffer_address(cp->view.dev, s->string + s->len - 3,
1306 cp->view.cols * (nr + 1));
1307 @@ -461,11 +466,11 @@ con3270_cline_end(struct con3270 *cp)
1308 cp->cline->len + 4 : cp->view.cols;
1309 s = con3270_alloc_string(cp, size);
1310 memcpy(s->string, cp->cline->string, cp->cline->len);
1311 - if (s->len < cp->view.cols - 5) {
1312 + if (cp->cline->len < cp->view.cols - 5) {
1313 s->string[s->len - 4] = TO_RA;
1314 s->string[s->len - 1] = 0;
1315 } else {
1316 - while (--size > cp->cline->len)
1317 + while (--size >= cp->cline->len)
1318 s->string[size] = cp->view.ascebc[' '];
1319 }
1320 /* Replace cline with allocated line s and reset cline. */
1321 diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
1322 index c424c0c7367e..1e16331891a9 100644
1323 --- a/drivers/s390/cio/chsc.c
1324 +++ b/drivers/s390/cio/chsc.c
1325 @@ -95,12 +95,13 @@ struct chsc_ssd_area {
1326 int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
1327 {
1328 struct chsc_ssd_area *ssd_area;
1329 + unsigned long flags;
1330 int ccode;
1331 int ret;
1332 int i;
1333 int mask;
1334
1335 - spin_lock_irq(&chsc_page_lock);
1336 + spin_lock_irqsave(&chsc_page_lock, flags);
1337 memset(chsc_page, 0, PAGE_SIZE);
1338 ssd_area = chsc_page;
1339 ssd_area->request.length = 0x0010;
1340 @@ -144,7 +145,7 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
1341 ssd->fla[i] = ssd_area->fla[i];
1342 }
1343 out:
1344 - spin_unlock_irq(&chsc_page_lock);
1345 + spin_unlock_irqrestore(&chsc_page_lock, flags);
1346 return ret;
1347 }
1348
1349 @@ -832,9 +833,10 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
1350 u32 fmt : 4;
1351 u32 : 16;
1352 } __attribute__ ((packed)) *secm_area;
1353 + unsigned long flags;
1354 int ret, ccode;
1355
1356 - spin_lock_irq(&chsc_page_lock);
1357 + spin_lock_irqsave(&chsc_page_lock, flags);
1358 memset(chsc_page, 0, PAGE_SIZE);
1359 secm_area = chsc_page;
1360 secm_area->request.length = 0x0050;
1361 @@ -864,7 +866,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
1362 CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
1363 secm_area->response.code);
1364 out:
1365 - spin_unlock_irq(&chsc_page_lock);
1366 + spin_unlock_irqrestore(&chsc_page_lock, flags);
1367 return ret;
1368 }
1369
1370 @@ -993,6 +995,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
1371
1372 int chsc_get_channel_measurement_chars(struct channel_path *chp)
1373 {
1374 + unsigned long flags;
1375 int ccode, ret;
1376
1377 struct {
1378 @@ -1022,7 +1025,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
1379 if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
1380 return 0;
1381
1382 - spin_lock_irq(&chsc_page_lock);
1383 + spin_lock_irqsave(&chsc_page_lock, flags);
1384 memset(chsc_page, 0, PAGE_SIZE);
1385 scmc_area = chsc_page;
1386 scmc_area->request.length = 0x0010;
1387 @@ -1054,7 +1057,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
1388 chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
1389 (struct cmg_chars *) &scmc_area->data);
1390 out:
1391 - spin_unlock_irq(&chsc_page_lock);
1392 + spin_unlock_irqrestore(&chsc_page_lock, flags);
1393 return ret;
1394 }
1395
1396 @@ -1135,6 +1138,7 @@ struct css_chsc_char css_chsc_characteristics;
1397 int __init
1398 chsc_determine_css_characteristics(void)
1399 {
1400 + unsigned long flags;
1401 int result;
1402 struct {
1403 struct chsc_header request;
1404 @@ -1147,7 +1151,7 @@ chsc_determine_css_characteristics(void)
1405 u32 chsc_char[508];
1406 } __attribute__ ((packed)) *scsc_area;
1407
1408 - spin_lock_irq(&chsc_page_lock);
1409 + spin_lock_irqsave(&chsc_page_lock, flags);
1410 memset(chsc_page, 0, PAGE_SIZE);
1411 scsc_area = chsc_page;
1412 scsc_area->request.length = 0x0010;
1413 @@ -1169,7 +1173,7 @@ chsc_determine_css_characteristics(void)
1414 CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
1415 scsc_area->response.code);
1416 exit:
1417 - spin_unlock_irq(&chsc_page_lock);
1418 + spin_unlock_irqrestore(&chsc_page_lock, flags);
1419 return result;
1420 }
1421
1422 diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1423 index 6180f7970bbf..0969cea1089a 100644
1424 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1425 +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1426 @@ -4510,7 +4510,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
1427 le16_to_cpu(mpi_reply->DevHandle));
1428 mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
1429
1430 - if (!(ioc->logging_level & MPT_DEBUG_REPLY) &&
1431 + if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
1432 ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
1433 (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
1434 (scmd->sense_buffer[2] == HARDWARE_ERROR)))
1435 diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
1436 index 39412c9097c6..a3965cac1b34 100644
1437 --- a/drivers/spi/spi-fsl-dspi.c
1438 +++ b/drivers/spi/spi-fsl-dspi.c
1439 @@ -753,7 +753,6 @@ static int dspi_remove(struct platform_device *pdev)
1440 /* Disconnect from the SPI framework */
1441 clk_disable_unprepare(dspi->clk);
1442 spi_unregister_master(dspi->master);
1443 - spi_master_put(dspi->master);
1444
1445 return 0;
1446 }
1447 diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
1448 index 9b7026e7d55b..45d0a87f55d2 100644
1449 --- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
1450 +++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
1451 @@ -718,13 +718,13 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
1452 u8 res = _SUCCESS;
1453
1454
1455 - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
1456 + ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
1457 if (ph2c == NULL) {
1458 res = _FAIL;
1459 goto exit;
1460 }
1461
1462 - paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_KERNEL);
1463 + paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_ATOMIC);
1464 if (paddbareq_parm == NULL) {
1465 kfree(ph2c);
1466 res = _FAIL;
1467 diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
1468 index 915facbf552e..e1134a4d97f3 100644
1469 --- a/drivers/uio/uio_dmem_genirq.c
1470 +++ b/drivers/uio/uio_dmem_genirq.c
1471 @@ -229,7 +229,7 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
1472 ++uiomem;
1473 }
1474
1475 - priv->dmem_region_start = i;
1476 + priv->dmem_region_start = uiomem - &uioinfo->mem[0];
1477 priv->num_dmem_regions = pdata->num_dynamic_regions;
1478
1479 for (i = 0; i < pdata->num_dynamic_regions; ++i) {
1480 diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
1481 index 531e76474983..0e0eb10f82a0 100644
1482 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c
1483 +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
1484 @@ -316,7 +316,7 @@ static int xenbus_write_transaction(unsigned msg_type,
1485 rc = -ENOMEM;
1486 goto out;
1487 }
1488 - } else {
1489 + } else if (msg_type == XS_TRANSACTION_END) {
1490 list_for_each_entry(trans, &u->transactions, list)
1491 if (trans->handle.id == u->u.msg.tx_id)
1492 break;
1493 diff --git a/fs/9p/acl.c b/fs/9p/acl.c
1494 index a7e28890f5ef..929b618da43b 100644
1495 --- a/fs/9p/acl.c
1496 +++ b/fs/9p/acl.c
1497 @@ -282,32 +282,26 @@ static int v9fs_xattr_set_acl(const struct xattr_handler *handler,
1498 switch (handler->flags) {
1499 case ACL_TYPE_ACCESS:
1500 if (acl) {
1501 - umode_t mode = inode->i_mode;
1502 - retval = posix_acl_equiv_mode(acl, &mode);
1503 - if (retval < 0)
1504 + struct iattr iattr;
1505 +
1506 + retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl);
1507 + if (retval)
1508 goto err_out;
1509 - else {
1510 - struct iattr iattr;
1511 - if (retval == 0) {
1512 - /*
1513 - * ACL can be represented
1514 - * by the mode bits. So don't
1515 - * update ACL.
1516 - */
1517 - acl = NULL;
1518 - value = NULL;
1519 - size = 0;
1520 - }
1521 - /* Updte the mode bits */
1522 - iattr.ia_mode = ((mode & S_IALLUGO) |
1523 - (inode->i_mode & ~S_IALLUGO));
1524 - iattr.ia_valid = ATTR_MODE;
1525 - /* FIXME should we update ctime ?
1526 - * What is the following setxattr update the
1527 - * mode ?
1528 + if (!acl) {
1529 + /*
1530 + * ACL can be represented
1531 + * by the mode bits. So don't
1532 + * update ACL.
1533 */
1534 - v9fs_vfs_setattr_dotl(dentry, &iattr);
1535 + value = NULL;
1536 + size = 0;
1537 }
1538 + iattr.ia_valid = ATTR_MODE;
1539 + /* FIXME should we update ctime ?
1540 + * What is the following setxattr update the
1541 + * mode ?
1542 + */
1543 + v9fs_vfs_setattr_dotl(dentry, &iattr);
1544 }
1545 break;
1546 case ACL_TYPE_DEFAULT:
1547 diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
1548 index 9a0124a95851..fb3e64d37cb4 100644
1549 --- a/fs/btrfs/acl.c
1550 +++ b/fs/btrfs/acl.c
1551 @@ -83,11 +83,9 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
1552 case ACL_TYPE_ACCESS:
1553 name = POSIX_ACL_XATTR_ACCESS;
1554 if (acl) {
1555 - ret = posix_acl_equiv_mode(acl, &inode->i_mode);
1556 - if (ret < 0)
1557 + ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
1558 + if (ret)
1559 return ret;
1560 - if (ret == 0)
1561 - acl = NULL;
1562 }
1563 ret = 0;
1564 break;
1565 diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
1566 index 8f84646f10e9..4d8caeb94a11 100644
1567 --- a/fs/ceph/acl.c
1568 +++ b/fs/ceph/acl.c
1569 @@ -94,11 +94,9 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
1570 case ACL_TYPE_ACCESS:
1571 name = POSIX_ACL_XATTR_ACCESS;
1572 if (acl) {
1573 - ret = posix_acl_equiv_mode(acl, &new_mode);
1574 - if (ret < 0)
1575 + ret = posix_acl_update_mode(inode, &new_mode, &acl);
1576 + if (ret)
1577 goto out;
1578 - if (ret == 0)
1579 - acl = NULL;
1580 }
1581 break;
1582 case ACL_TYPE_DEFAULT:
1583 diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
1584 index 27695e6f4e46..d6aeb84e90b6 100644
1585 --- a/fs/ext2/acl.c
1586 +++ b/fs/ext2/acl.c
1587 @@ -193,15 +193,11 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
1588 case ACL_TYPE_ACCESS:
1589 name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
1590 if (acl) {
1591 - error = posix_acl_equiv_mode(acl, &inode->i_mode);
1592 - if (error < 0)
1593 + error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
1594 + if (error)
1595 return error;
1596 - else {
1597 - inode->i_ctime = CURRENT_TIME_SEC;
1598 - mark_inode_dirty(inode);
1599 - if (error == 0)
1600 - acl = NULL;
1601 - }
1602 + inode->i_ctime = CURRENT_TIME_SEC;
1603 + mark_inode_dirty(inode);
1604 }
1605 break;
1606
1607 diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
1608 index 69b1e73026a5..c3fe1e323951 100644
1609 --- a/fs/ext4/acl.c
1610 +++ b/fs/ext4/acl.c
1611 @@ -196,15 +196,11 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
1612 case ACL_TYPE_ACCESS:
1613 name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
1614 if (acl) {
1615 - error = posix_acl_equiv_mode(acl, &inode->i_mode);
1616 - if (error < 0)
1617 + error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
1618 + if (error)
1619 return error;
1620 - else {
1621 - inode->i_ctime = ext4_current_time(inode);
1622 - ext4_mark_inode_dirty(handle, inode);
1623 - if (error == 0)
1624 - acl = NULL;
1625 - }
1626 + inode->i_ctime = ext4_current_time(inode);
1627 + ext4_mark_inode_dirty(handle, inode);
1628 }
1629 break;
1630
1631 diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
1632 index c8f25f7241f0..e9a8d676c6bc 100644
1633 --- a/fs/f2fs/acl.c
1634 +++ b/fs/f2fs/acl.c
1635 @@ -214,12 +214,10 @@ static int __f2fs_set_acl(struct inode *inode, int type,
1636 case ACL_TYPE_ACCESS:
1637 name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
1638 if (acl) {
1639 - error = posix_acl_equiv_mode(acl, &inode->i_mode);
1640 - if (error < 0)
1641 + error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
1642 + if (error)
1643 return error;
1644 set_acl_inode(fi, inode->i_mode);
1645 - if (error == 0)
1646 - acl = NULL;
1647 }
1648 break;
1649
1650 diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
1651 index 1be3b061c05c..ff0ac96a8e7b 100644
1652 --- a/fs/gfs2/acl.c
1653 +++ b/fs/gfs2/acl.c
1654 @@ -79,17 +79,11 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
1655 if (type == ACL_TYPE_ACCESS) {
1656 umode_t mode = inode->i_mode;
1657
1658 - error = posix_acl_equiv_mode(acl, &mode);
1659 - if (error < 0)
1660 + error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
1661 + if (error)
1662 return error;
1663 -
1664 - if (error == 0)
1665 - acl = NULL;
1666 -
1667 - if (mode != inode->i_mode) {
1668 - inode->i_mode = mode;
1669 + if (mode != inode->i_mode)
1670 mark_inode_dirty(inode);
1671 - }
1672 }
1673
1674 if (acl) {
1675 diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c
1676 index df0c9af68d05..71b3087b7e32 100644
1677 --- a/fs/hfsplus/posix_acl.c
1678 +++ b/fs/hfsplus/posix_acl.c
1679 @@ -68,8 +68,8 @@ int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl,
1680 case ACL_TYPE_ACCESS:
1681 xattr_name = POSIX_ACL_XATTR_ACCESS;
1682 if (acl) {
1683 - err = posix_acl_equiv_mode(acl, &inode->i_mode);
1684 - if (err < 0)
1685 + err = posix_acl_update_mode(inode, &inode->i_mode, &acl);
1686 + if (err)
1687 return err;
1688 }
1689 err = 0;
1690 diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
1691 index 2f7a3c090489..f9f86f87d32b 100644
1692 --- a/fs/jffs2/acl.c
1693 +++ b/fs/jffs2/acl.c
1694 @@ -235,9 +235,10 @@ int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
1695 case ACL_TYPE_ACCESS:
1696 xprefix = JFFS2_XPREFIX_ACL_ACCESS;
1697 if (acl) {
1698 - umode_t mode = inode->i_mode;
1699 - rc = posix_acl_equiv_mode(acl, &mode);
1700 - if (rc < 0)
1701 + umode_t mode;
1702 +
1703 + rc = posix_acl_update_mode(inode, &mode, &acl);
1704 + if (rc)
1705 return rc;
1706 if (inode->i_mode != mode) {
1707 struct iattr attr;
1708 @@ -249,8 +250,6 @@ int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
1709 if (rc < 0)
1710 return rc;
1711 }
1712 - if (rc == 0)
1713 - acl = NULL;
1714 }
1715 break;
1716 case ACL_TYPE_DEFAULT:
1717 diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
1718 index 0c8ca830b113..9fad9f4fe883 100644
1719 --- a/fs/jfs/acl.c
1720 +++ b/fs/jfs/acl.c
1721 @@ -84,13 +84,11 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type,
1722 case ACL_TYPE_ACCESS:
1723 ea_name = POSIX_ACL_XATTR_ACCESS;
1724 if (acl) {
1725 - rc = posix_acl_equiv_mode(acl, &inode->i_mode);
1726 - if (rc < 0)
1727 + rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
1728 + if (rc)
1729 return rc;
1730 inode->i_ctime = CURRENT_TIME;
1731 mark_inode_dirty(inode);
1732 - if (rc == 0)
1733 - acl = NULL;
1734 }
1735 break;
1736 case ACL_TYPE_DEFAULT:
1737 diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
1738 index 2162434728c0..164307b99405 100644
1739 --- a/fs/ocfs2/acl.c
1740 +++ b/fs/ocfs2/acl.c
1741 @@ -241,13 +241,11 @@ int ocfs2_set_acl(handle_t *handle,
1742 case ACL_TYPE_ACCESS:
1743 name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS;
1744 if (acl) {
1745 - umode_t mode = inode->i_mode;
1746 - ret = posix_acl_equiv_mode(acl, &mode);
1747 - if (ret < 0)
1748 - return ret;
1749 + umode_t mode;
1750
1751 - if (ret == 0)
1752 - acl = NULL;
1753 + ret = posix_acl_update_mode(inode, &mode, &acl);
1754 + if (ret)
1755 + return ret;
1756
1757 ret = ocfs2_acl_set_mode(inode, di_bh,
1758 handle, mode);
1759 diff --git a/fs/posix_acl.c b/fs/posix_acl.c
1760 index 34bd1bd354e6..a60d3cc5b55d 100644
1761 --- a/fs/posix_acl.c
1762 +++ b/fs/posix_acl.c
1763 @@ -592,6 +592,37 @@ no_mem:
1764 }
1765 EXPORT_SYMBOL_GPL(posix_acl_create);
1766
1767 +/**
1768 + * posix_acl_update_mode - update mode in set_acl
1769 + *
1770 + * Update the file mode when setting an ACL: compute the new file permission
1771 + * bits based on the ACL. In addition, if the ACL is equivalent to the new
1772 + * file mode, set *acl to NULL to indicate that no ACL should be set.
1773 + *
1774 + * As with chmod, clear the setgid bit if the caller is not in the owning group
1775 + * or capable of CAP_FSETID (see inode_change_ok).
1776 + *
1777 + * Called from set_acl inode operations.
1778 + */
1779 +int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
1780 + struct posix_acl **acl)
1781 +{
1782 + umode_t mode = inode->i_mode;
1783 + int error;
1784 +
1785 + error = posix_acl_equiv_mode(*acl, &mode);
1786 + if (error < 0)
1787 + return error;
1788 + if (error == 0)
1789 + *acl = NULL;
1790 + if (!in_group_p(inode->i_gid) &&
1791 + !capable_wrt_inode_uidgid(inode, CAP_FSETID))
1792 + mode &= ~S_ISGID;
1793 + *mode_p = mode;
1794 + return 0;
1795 +}
1796 +EXPORT_SYMBOL(posix_acl_update_mode);
1797 +
1798 /*
1799 * Fix up the uids and gids in posix acl extended attributes in place.
1800 */
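
The posix_acl_update_mode() helper introduced above centralizes the mode recomputation, the equivalent-ACL check and the setgid clearing that each filesystem previously open-coded. A minimal sketch of the call pattern the per-filesystem conversions in this patch follow (example_set_acl() and the trailing xattr-store placeholder are illustrative, not part of the patch):

	#include <linux/fs.h>
	#include <linux/posix_acl.h>

	/* Hedged sketch only: mirrors the ext2/ext4 style conversions above. */
	static int example_set_acl(struct inode *inode, struct posix_acl *acl, int type)
	{
		int error;

		if (type == ACL_TYPE_ACCESS && acl) {
			/* Recompute i_mode from the ACL; acl becomes NULL when it
			 * is fully representable by the mode bits alone. */
			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
			if (error)
				return error;
			inode->i_ctime = CURRENT_TIME_SEC;
			mark_inode_dirty(inode);
		}

		/* ... persist (or remove, if acl is now NULL) the ACL xattr here ... */
		return 0;
	}

Each converted filesystem follows this shape and then stores, or removes once acl has been NULLed, the access ACL xattr.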
1801 diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
1802 index 4b34b9dc03dd..9b1824f35501 100644
1803 --- a/fs/reiserfs/xattr_acl.c
1804 +++ b/fs/reiserfs/xattr_acl.c
1805 @@ -246,13 +246,9 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
1806 case ACL_TYPE_ACCESS:
1807 name = POSIX_ACL_XATTR_ACCESS;
1808 if (acl) {
1809 - error = posix_acl_equiv_mode(acl, &inode->i_mode);
1810 - if (error < 0)
1811 + error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
1812 + if (error)
1813 return error;
1814 - else {
1815 - if (error == 0)
1816 - acl = NULL;
1817 - }
1818 }
1819 break;
1820 case ACL_TYPE_DEFAULT:
1821 diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
1822 index 6bb470fbb8e8..c5101a3295d8 100644
1823 --- a/fs/xfs/xfs_acl.c
1824 +++ b/fs/xfs/xfs_acl.c
1825 @@ -288,16 +288,11 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
1826 return error;
1827
1828 if (type == ACL_TYPE_ACCESS) {
1829 - umode_t mode = inode->i_mode;
1830 - error = posix_acl_equiv_mode(acl, &mode);
1831 -
1832 - if (error <= 0) {
1833 - acl = NULL;
1834 -
1835 - if (error < 0)
1836 - return error;
1837 - }
1838 + umode_t mode;
1839
1840 + error = posix_acl_update_mode(inode, &mode, &acl);
1841 + if (error)
1842 + return error;
1843 error = xfs_set_mode(inode, mode);
1844 if (error)
1845 return error;
1846 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
1847 index 0a271ca1f7c7..a31976c860f6 100644
1848 --- a/include/drm/drmP.h
1849 +++ b/include/drm/drmP.h
1850 @@ -1029,7 +1029,8 @@ static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
1851 #endif
1852
1853 extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
1854 - struct drm_gem_object *obj, int flags);
1855 + struct drm_gem_object *obj,
1856 + int flags);
1857 extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
1858 struct drm_file *file_priv, uint32_t handle, uint32_t flags,
1859 int *prime_fd);
1860 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
1861 index 4e9c75226f07..12b4d54a8ffa 100644
1862 --- a/include/linux/netdevice.h
1863 +++ b/include/linux/netdevice.h
1864 @@ -1986,8 +1986,8 @@ struct napi_gro_cb {
1865 /* This is non-zero if the packet may be of the same flow. */
1866 u8 same_flow:1;
1867
1868 - /* Used in udp_gro_receive */
1869 - u8 udp_mark:1;
1870 + /* Used in tunnel GRO receive */
1871 + u8 encap_mark:1;
1872
1873 /* GRO checksum is valid */
1874 u8 csum_valid:1;
1875 diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
1876 index 3e96a6a76103..d1a8ad7e5ae4 100644
1877 --- a/include/linux/posix_acl.h
1878 +++ b/include/linux/posix_acl.h
1879 @@ -95,6 +95,7 @@ extern int set_posix_acl(struct inode *, int, struct posix_acl *);
1880 extern int posix_acl_chmod(struct inode *, umode_t);
1881 extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **,
1882 struct posix_acl **);
1883 +extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **);
1884
1885 extern int simple_set_acl(struct inode *, struct posix_acl *, int);
1886 extern int simple_acl_create(struct inode *, struct inode *);
1887 diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
1888 index af40bc586a1b..86a7bdd61d1a 100644
1889 --- a/include/net/ip_tunnels.h
1890 +++ b/include/net/ip_tunnels.h
1891 @@ -283,6 +283,22 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
1892 struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
1893 int gso_type_mask);
1894
1895 +static inline int iptunnel_pull_offloads(struct sk_buff *skb)
1896 +{
1897 + if (skb_is_gso(skb)) {
1898 + int err;
1899 +
1900 + err = skb_unclone(skb, GFP_ATOMIC);
1901 + if (unlikely(err))
1902 + return err;
1903 + skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
1904 + NETIF_F_GSO_SHIFT);
1905 + }
1906 +
1907 + skb->encapsulation = 0;
1908 + return 0;
1909 +}
1910 +
1911 static inline void iptunnel_xmit_stats(int err,
1912 struct net_device_stats *err_stats,
1913 struct pcpu_sw_netstats __percpu *stats)
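
iptunnel_pull_offloads() is intended to run right after the outer encapsulation header has been stripped, so that GSO and encapsulation state describing the outer frame does not leak into processing of the inner packet; the fou and ip_tunnel_core hunks later in this patch add exactly such calls. A hedged sketch of a call site, with example_decap_pull() as a made-up name:

	#include <linux/skbuff.h>
	#include <net/ip_tunnels.h>

	static int example_decap_pull(struct sk_buff *skb, unsigned int outer_len)
	{
		/* Drop the outer header that has already been parsed... */
		__skb_pull(skb, outer_len);
		skb_reset_transport_header(skb);

		/* ...then clear encapsulation/GSO state that referred to it. */
		return iptunnel_pull_offloads(skb);
	}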
1914 diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
1915 index abd286afbd27..a4775f3451b9 100644
1916 --- a/kernel/irq/generic-chip.c
1917 +++ b/kernel/irq/generic-chip.c
1918 @@ -411,8 +411,29 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
1919 }
1920 EXPORT_SYMBOL_GPL(irq_map_generic_chip);
1921
1922 +static void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
1923 +{
1924 + struct irq_data *data = irq_domain_get_irq_data(d, virq);
1925 + struct irq_domain_chip_generic *dgc = d->gc;
1926 + unsigned int hw_irq = data->hwirq;
1927 + struct irq_chip_generic *gc;
1928 + int irq_idx;
1929 +
1930 + gc = irq_get_domain_generic_chip(d, hw_irq);
1931 + if (!gc)
1932 + return;
1933 +
1934 + irq_idx = hw_irq % dgc->irqs_per_chip;
1935 +
1936 + clear_bit(irq_idx, &gc->installed);
1937 + irq_domain_set_info(d, virq, hw_irq, &no_irq_chip, NULL, NULL, NULL,
1938 + NULL);
1939 +
1940 +}
1941 +
1942 struct irq_domain_ops irq_generic_chip_ops = {
1943 .map = irq_map_generic_chip,
1944 + .unmap = irq_unmap_generic_chip,
1945 .xlate = irq_domain_xlate_onetwocell,
1946 };
1947 EXPORT_SYMBOL_GPL(irq_generic_chip_ops);
1948 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1949 index 125c7dd55322..4434cdd4cd9a 100644
1950 --- a/mm/hugetlb.c
1951 +++ b/mm/hugetlb.c
1952 @@ -1416,12 +1416,13 @@ static void dissolve_free_huge_page(struct page *page)
1953 {
1954 spin_lock(&hugetlb_lock);
1955 if (PageHuge(page) && !page_count(page)) {
1956 - struct hstate *h = page_hstate(page);
1957 - int nid = page_to_nid(page);
1958 - list_del(&page->lru);
1959 + struct page *head = compound_head(page);
1960 + struct hstate *h = page_hstate(head);
1961 + int nid = page_to_nid(head);
1962 + list_del(&head->lru);
1963 h->free_huge_pages--;
1964 h->free_huge_pages_node[nid]--;
1965 - update_and_free_page(h, page);
1966 + update_and_free_page(h, head);
1967 }
1968 spin_unlock(&hugetlb_lock);
1969 }
1970 @@ -1429,7 +1430,8 @@ static void dissolve_free_huge_page(struct page *page)
1971 /*
1972 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1973 * make specified memory blocks removable from the system.
1974 - * Note that start_pfn should aligned with (minimum) hugepage size.
1975 + * Note that this will dissolve a free gigantic hugepage completely, if any
1976 + * part of it lies within the given range.
1977 */
1978 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1979 {
1980 @@ -1438,7 +1440,6 @@ void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1981 if (!hugepages_supported())
1982 return;
1983
1984 - VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
1985 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
1986 dissolve_free_huge_page(pfn_to_page(pfn));
1987 }
1988 diff --git a/net/core/dev.c b/net/core/dev.c
1989 index de4ed2b5a221..0989fea88c44 100644
1990 --- a/net/core/dev.c
1991 +++ b/net/core/dev.c
1992 @@ -4239,7 +4239,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
1993 NAPI_GRO_CB(skb)->same_flow = 0;
1994 NAPI_GRO_CB(skb)->flush = 0;
1995 NAPI_GRO_CB(skb)->free = 0;
1996 - NAPI_GRO_CB(skb)->udp_mark = 0;
1997 + NAPI_GRO_CB(skb)->encap_mark = 0;
1998 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
1999
2000 /* Setup for GRO checksum validation */
2001 diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
2002 index 5c5db6636704..1a5c1ca3ad3c 100644
2003 --- a/net/ipv4/af_inet.c
2004 +++ b/net/ipv4/af_inet.c
2005 @@ -1383,6 +1383,19 @@ out:
2006 return pp;
2007 }
2008
2009 +static struct sk_buff **ipip_gro_receive(struct sk_buff **head,
2010 + struct sk_buff *skb)
2011 +{
2012 + if (NAPI_GRO_CB(skb)->encap_mark) {
2013 + NAPI_GRO_CB(skb)->flush = 1;
2014 + return NULL;
2015 + }
2016 +
2017 + NAPI_GRO_CB(skb)->encap_mark = 1;
2018 +
2019 + return inet_gro_receive(head, skb);
2020 +}
2021 +
2022 int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
2023 {
2024 if (sk->sk_family == AF_INET)
2025 @@ -1425,6 +1438,13 @@ out_unlock:
2026 return err;
2027 }
2028
2029 +static int ipip_gro_complete(struct sk_buff *skb, int nhoff)
2030 +{
2031 + skb->encapsulation = 1;
2032 + skb_shinfo(skb)->gso_type |= SKB_GSO_IPIP;
2033 + return inet_gro_complete(skb, nhoff);
2034 +}
2035 +
2036 int inet_ctl_sock_create(struct sock **sk, unsigned short family,
2037 unsigned short type, unsigned char protocol,
2038 struct net *net)
2039 @@ -1652,8 +1672,8 @@ static struct packet_offload ip_packet_offload __read_mostly = {
2040 static const struct net_offload ipip_offload = {
2041 .callbacks = {
2042 .gso_segment = inet_gso_segment,
2043 - .gro_receive = inet_gro_receive,
2044 - .gro_complete = inet_gro_complete,
2045 + .gro_receive = ipip_gro_receive,
2046 + .gro_complete = ipip_gro_complete,
2047 },
2048 };
2049
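
The ipip offload wrapper above, together with the gre, udp and sit changes elsewhere in this patch, all apply the same one-shot guard: the first tunnel layer sets encap_mark, and a nested tunnel forces a flush instead of aggregating across two levels of encapsulation. A generic sketch of that wrapper shape (example_tunnel_gro_receive() is a placeholder; the inet_gro_receive() call assumes the wrapper lives next to it, as ipip_gro_receive() above does):

	#include <linux/netdevice.h>

	static struct sk_buff **example_tunnel_gro_receive(struct sk_buff **head,
							   struct sk_buff *skb)
	{
		/* A tunnel layer was already consumed for this skb: flush rather
		 * than trying to aggregate across two encapsulation levels. */
		if (NAPI_GRO_CB(skb)->encap_mark) {
			NAPI_GRO_CB(skb)->flush = 1;
			return NULL;
		}

		/* Claim the single tunnel slot for this receive path. */
		NAPI_GRO_CB(skb)->encap_mark = 1;

		return inet_gro_receive(head, skb);
	}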
2050 diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
2051 index bd903fe0f750..08d7de55e57e 100644
2052 --- a/net/ipv4/fou.c
2053 +++ b/net/ipv4/fou.c
2054 @@ -48,7 +48,7 @@ static inline struct fou *fou_from_sock(struct sock *sk)
2055 return sk->sk_user_data;
2056 }
2057
2058 -static void fou_recv_pull(struct sk_buff *skb, size_t len)
2059 +static int fou_recv_pull(struct sk_buff *skb, size_t len)
2060 {
2061 struct iphdr *iph = ip_hdr(skb);
2062
2063 @@ -59,6 +59,7 @@ static void fou_recv_pull(struct sk_buff *skb, size_t len)
2064 __skb_pull(skb, len);
2065 skb_postpull_rcsum(skb, udp_hdr(skb), len);
2066 skb_reset_transport_header(skb);
2067 + return iptunnel_pull_offloads(skb);
2068 }
2069
2070 static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
2071 @@ -68,9 +69,14 @@ static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
2072 if (!fou)
2073 return 1;
2074
2075 - fou_recv_pull(skb, sizeof(struct udphdr));
2076 + if (fou_recv_pull(skb, sizeof(struct udphdr)))
2077 + goto drop;
2078
2079 return -fou->protocol;
2080 +
2081 +drop:
2082 + kfree_skb(skb);
2083 + return 0;
2084 }
2085
2086 static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
2087 @@ -170,6 +176,9 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
2088 __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
2089 skb_reset_transport_header(skb);
2090
2091 + if (iptunnel_pull_offloads(skb))
2092 + goto drop;
2093 +
2094 return -guehdr->proto_ctype;
2095
2096 drop:
2097 diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
2098 index 5a8ee3282550..e603004c1af8 100644
2099 --- a/net/ipv4/gre_offload.c
2100 +++ b/net/ipv4/gre_offload.c
2101 @@ -128,6 +128,11 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
2102 struct packet_offload *ptype;
2103 __be16 type;
2104
2105 + if (NAPI_GRO_CB(skb)->encap_mark)
2106 + goto out;
2107 +
2108 + NAPI_GRO_CB(skb)->encap_mark = 1;
2109 +
2110 off = skb_gro_offset(skb);
2111 hlen = off + sizeof(*greh);
2112 greh = skb_gro_header_fast(skb, off);
2113 diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
2114 index 6cb9009c3d96..dbda0565781c 100644
2115 --- a/net/ipv4/ip_tunnel_core.c
2116 +++ b/net/ipv4/ip_tunnel_core.c
2117 @@ -116,7 +116,8 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
2118 skb->vlan_tci = 0;
2119 skb_set_queue_mapping(skb, 0);
2120 skb->pkt_type = PACKET_HOST;
2121 - return 0;
2122 +
2123 + return iptunnel_pull_offloads(skb);
2124 }
2125 EXPORT_SYMBOL_GPL(iptunnel_pull_header);
2126
2127 diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
2128 index f9386160cbee..0e36e56dfd22 100644
2129 --- a/net/ipv4/udp_offload.c
2130 +++ b/net/ipv4/udp_offload.c
2131 @@ -299,14 +299,14 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
2132 unsigned int off = skb_gro_offset(skb);
2133 int flush = 1;
2134
2135 - if (NAPI_GRO_CB(skb)->udp_mark ||
2136 + if (NAPI_GRO_CB(skb)->encap_mark ||
2137 (skb->ip_summed != CHECKSUM_PARTIAL &&
2138 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2139 !NAPI_GRO_CB(skb)->csum_valid))
2140 goto out;
2141
2142 - /* mark that this skb passed once through the udp gro layer */
2143 - NAPI_GRO_CB(skb)->udp_mark = 1;
2144 + /* mark that this skb passed once through the tunnel gro layer */
2145 + NAPI_GRO_CB(skb)->encap_mark = 1;
2146
2147 rcu_read_lock();
2148 uo_priv = rcu_dereference(udp_offload_base);
2149 diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
2150 index eeca943f12dc..82e9f3076028 100644
2151 --- a/net/ipv6/ip6_offload.c
2152 +++ b/net/ipv6/ip6_offload.c
2153 @@ -258,6 +258,19 @@ out:
2154 return pp;
2155 }
2156
2157 +static struct sk_buff **sit_gro_receive(struct sk_buff **head,
2158 + struct sk_buff *skb)
2159 +{
2160 + if (NAPI_GRO_CB(skb)->encap_mark) {
2161 + NAPI_GRO_CB(skb)->flush = 1;
2162 + return NULL;
2163 + }
2164 +
2165 + NAPI_GRO_CB(skb)->encap_mark = 1;
2166 +
2167 + return ipv6_gro_receive(head, skb);
2168 +}
2169 +
2170 static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
2171 {
2172 const struct net_offload *ops;
2173 @@ -302,7 +315,7 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
2174 static const struct net_offload sit_offload = {
2175 .callbacks = {
2176 .gso_segment = ipv6_gso_segment,
2177 - .gro_receive = ipv6_gro_receive,
2178 + .gro_receive = sit_gro_receive,
2179 .gro_complete = sit_gro_complete,
2180 },
2181 };
2182 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
2183 index ba3d2f3d66d2..3da2b16356eb 100644
2184 --- a/net/ipv6/sit.c
2185 +++ b/net/ipv6/sit.c
2186 @@ -681,14 +681,15 @@ static int ipip6_rcv(struct sk_buff *skb)
2187 skb->mac_header = skb->network_header;
2188 skb_reset_network_header(skb);
2189 IPCB(skb)->flags = 0;
2190 - skb->protocol = htons(ETH_P_IPV6);
2191 + skb->dev = tunnel->dev;
2192
2193 if (packet_is_spoofed(skb, iph, tunnel)) {
2194 tunnel->dev->stats.rx_errors++;
2195 goto out;
2196 }
2197
2198 - __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
2199 + if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6)))
2200 + goto out;
2201
2202 err = IP_ECN_decapsulate(iph, skb);
2203 if (unlikely(err)) {
2204 diff --git a/scripts/sortextable.c b/scripts/sortextable.c
2205 index c2423d913b46..7b29fb14f870 100644
2206 --- a/scripts/sortextable.c
2207 +++ b/scripts/sortextable.c
2208 @@ -209,6 +209,35 @@ static int compare_relative_table(const void *a, const void *b)
2209 return 0;
2210 }
2211
2212 +static void x86_sort_relative_table(char *extab_image, int image_size)
2213 +{
2214 + int i;
2215 +
2216 + i = 0;
2217 + while (i < image_size) {
2218 + uint32_t *loc = (uint32_t *)(extab_image + i);
2219 +
2220 + w(r(loc) + i, loc);
2221 + w(r(loc + 1) + i + 4, loc + 1);
2222 + w(r(loc + 2) + i + 8, loc + 2);
2223 +
2224 + i += sizeof(uint32_t) * 3;
2225 + }
2226 +
2227 + qsort(extab_image, image_size / 12, 12, compare_relative_table);
2228 +
2229 + i = 0;
2230 + while (i < image_size) {
2231 + uint32_t *loc = (uint32_t *)(extab_image + i);
2232 +
2233 + w(r(loc) - i, loc);
2234 + w(r(loc + 1) - (i + 4), loc + 1);
2235 + w(r(loc + 2) - (i + 8), loc + 2);
2236 +
2237 + i += sizeof(uint32_t) * 3;
2238 + }
2239 +}
2240 +
2241 static void sort_relative_table(char *extab_image, int image_size)
2242 {
2243 int i;
2244 @@ -281,6 +310,9 @@ do_file(char const *const fname)
2245 break;
2246 case EM_386:
2247 case EM_X86_64:
2248 + custom_sort = x86_sort_relative_table;
2249 + break;
2250 +
2251 case EM_S390:
2252 custom_sort = sort_relative_table;
2253 break;
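
x86_sort_relative_table() has to cope with 12-byte entries whose three 32-bit fields are stored relative to their own location, so each field is biased to a common base before qsort() and un-biased against its new slot afterwards. A small stand-alone illustration of that round trip on toy data (not the kernel's real image layout):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Toy 12-byte entry: insn/fixup/handler, each stored relative to itself. */
	struct entry {
		int32_t insn;
		int32_t fixup;
		int32_t handler;
	};

	static int cmp_insn(const void *a, const void *b)
	{
		const struct entry *ea = a, *eb = b;

		/* Only meaningful while insn holds a table-relative value. */
		return (ea->insn > eb->insn) - (ea->insn < eb->insn);
	}

	static void sort_relative(struct entry *tab, int n)
	{
		int i;

		/* Self-relative -> table-relative: add each field's byte offset. */
		for (i = 0; i < n; i++) {
			tab[i].insn    += i * 12 + 0;
			tab[i].fixup   += i * 12 + 4;
			tab[i].handler += i * 12 + 8;
		}

		qsort(tab, n, sizeof(*tab), cmp_insn);

		/* Table-relative -> self-relative again, against the new slots. */
		for (i = 0; i < n; i++) {
			tab[i].insn    -= i * 12 + 0;
			tab[i].fixup   -= i * 12 + 4;
			tab[i].handler -= i * 12 + 8;
		}
	}

	int main(void)
	{
		/* insn targets at table offsets 100, 50, 75 -- deliberately unsorted. */
		const int32_t targets[3] = { 100, 50, 75 };
		struct entry tab[3];
		int i;

		for (i = 0; i < 3; i++) {
			tab[i].insn    = targets[i]       - (i * 12 + 0);
			tab[i].fixup   = targets[i] + 200 - (i * 12 + 4);
			tab[i].handler = targets[i] + 400 - (i * 12 + 8);
		}

		sort_relative(tab, 3);

		for (i = 0; i < 3; i++)
			printf("entry %d -> insn at table+%d\n", i, tab[i].insn + i * 12);
		return 0;
	}

The kernel version does the same thing over the raw ELF section bytes, using the r()/w() accessors instead of a typed array.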
2254 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
2255 index afb70a5d4fd3..b8a256dfed7e 100644
2256 --- a/sound/soc/soc-dapm.c
2257 +++ b/sound/soc/soc-dapm.c
2258 @@ -823,6 +823,7 @@ static int dapm_create_or_share_kcontrol(struct snd_soc_dapm_widget *w,
2259 case snd_soc_dapm_switch:
2260 case snd_soc_dapm_mixer:
2261 case snd_soc_dapm_pga:
2262 + case snd_soc_dapm_out_drv:
2263 wname_in_long_name = true;
2264 kcname_in_long_name = true;
2265 break;
2266 @@ -3015,6 +3016,9 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
2267 }
2268 mutex_unlock(&card->dapm_mutex);
2269
2270 + if (ret)
2271 + return ret;
2272 +
2273 if (invert)
2274 ucontrol->value.integer.value[0] = max - val;
2275 else
2276 @@ -3166,7 +3170,7 @@ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
2277 if (e->shift_l != e->shift_r) {
2278 if (item[1] > e->items)
2279 return -EINVAL;
2280 - val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_l;
2281 + val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_r;
2282 mask |= e->mask << e->shift_r;
2283 }
2284
2285 diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
2286 index 6963ba20991c..70396d3f6472 100644
2287 --- a/sound/soc/soc-topology.c
2288 +++ b/sound/soc/soc-topology.c
2289 @@ -1484,6 +1484,7 @@ widget:
2290 if (widget == NULL) {
2291 dev_err(tplg->dev, "ASoC: failed to create widget %s controls\n",
2292 w->name);
2293 + ret = -ENOMEM;
2294 goto hdr_err;
2295 }
2296
2297 diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
2298 index 3900386a3629..d802938644b5 100644
2299 --- a/tools/perf/ui/browsers/hists.c
2300 +++ b/tools/perf/ui/browsers/hists.c
2301 @@ -684,7 +684,6 @@ static int __hpp__slsmg_color_printf(struct perf_hpp *hpp, const char *fmt, ...)
2302 ret = scnprintf(hpp->buf, hpp->size, fmt, len, percent);
2303 ui_browser__printf(arg->b, "%s", hpp->buf);
2304
2305 - advance_hpp(hpp, ret);
2306 return ret;
2307 }
2308
2309 diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
2310 index 4a3a72cb5805..6ce624cb7001 100644
2311 --- a/tools/perf/util/stat.c
2312 +++ b/tools/perf/util/stat.c
2313 @@ -311,6 +311,16 @@ int perf_stat_process_counter(struct perf_stat_config *config,
2314
2315 aggr->val = aggr->ena = aggr->run = 0;
2316
2317 + /*
2318 + * We calculate counter's data every interval,
2319 + * and the display code shows ps->res_stats
2320 + * avg value. We need to zero the stats for
2321 + * interval mode, otherwise overall avg running
2322 + * averages will be shown for each interval.
2323 + */
2324 + if (config->interval)
2325 + init_stats(ps->res_stats);
2326 +
2327 if (counter->per_pkg)
2328 zero_per_pkg(counter);
2329
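
The added init_stats() call resets the running statistics at the top of every interval so that interval mode reports per-interval values instead of a cumulative average. A rough, stand-alone analogy of that reset using a plain incremental mean (the ravg_* names are invented for illustration and are not perf's API):

	#include <stddef.h>

	struct running_avg {
		double mean;
		unsigned long n;
	};

	static void ravg_reset(struct running_avg *r)
	{
		r->mean = 0.0;
		r->n = 0;
	}

	static void ravg_update(struct running_avg *r, double val)
	{
		r->n++;
		r->mean += (val - r->mean) / (double)r->n;	/* incremental mean */
	}

	/* Per-interval reporting: reset at the top of each interval, otherwise the
	 * reported "average" keeps blending in every earlier interval's samples --
	 * the effect the init_stats(ps->res_stats) call above avoids. */
	static double interval_avg(struct running_avg *r, const double *samples, size_t n)
	{
		size_t i;

		ravg_reset(r);
		for (i = 0; i < n; i++)
			ravg_update(r, samples[i]);
		return r->mean;
	}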
2330 diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
2331 index 475d88d0a1c9..27ae382feb2d 100644
2332 --- a/tools/perf/util/symbol-elf.c
2333 +++ b/tools/perf/util/symbol-elf.c
2334 @@ -1091,9 +1091,8 @@ new_symbol:
2335 * For misannotated, zeroed, ASM function sizes.
2336 */
2337 if (nr > 0) {
2338 - if (!symbol_conf.allow_aliases)
2339 - symbols__fixup_duplicate(&dso->symbols[map->type]);
2340 symbols__fixup_end(&dso->symbols[map->type]);
2341 + symbols__fixup_duplicate(&dso->symbols[map->type]);
2342 if (kmap) {
2343 /*
2344 * We need to fixup this here too because we create new
2345 diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
2346 index cd08027a6d2c..520a32a12f8a 100644
2347 --- a/tools/perf/util/symbol.c
2348 +++ b/tools/perf/util/symbol.c
2349 @@ -151,6 +151,9 @@ void symbols__fixup_duplicate(struct rb_root *symbols)
2350 struct rb_node *nd;
2351 struct symbol *curr, *next;
2352
2353 + if (symbol_conf.allow_aliases)
2354 + return;
2355 +
2356 nd = rb_first(symbols);
2357
2358 while (nd) {
2359 @@ -1275,8 +1278,8 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
2360 if (kallsyms__delta(map, filename, &delta))
2361 return -1;
2362
2363 - symbols__fixup_duplicate(&dso->symbols[map->type]);
2364 symbols__fixup_end(&dso->symbols[map->type]);
2365 + symbols__fixup_duplicate(&dso->symbols[map->type]);
2366
2367 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
2368 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;