Magellan Linux

Contents of /trunk/kernel-alx/patches-3.12/0115-3.12.16-all-fixes.patch



Revision 2430
Tue May 13 11:02:41 2014 UTC by niro
File size: 72316 byte(s)
-linux-3.12.16
1 diff --git a/Makefile b/Makefile
2 index 517391a3093e..4aab3be88e9b 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 12
8 -SUBLEVEL = 15
9 +SUBLEVEL = 16
10 EXTRAVERSION =
11 NAME = One Giant Leap for Frogkind
12
13 diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
14 index 5689c18c85f5..ceb4807ee8b2 100644
15 --- a/arch/arm/include/asm/pgtable-3level.h
16 +++ b/arch/arm/include/asm/pgtable-3level.h
17 @@ -120,11 +120,14 @@
18 /*
19 * 2nd stage PTE definitions for LPAE.
20 */
21 -#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */
22 -#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
23 -#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
24 -#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
25 -#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
26 +#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */
27 +#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
28 +#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
29 +#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */
30 +#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2)
31 +
32 +#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
33 +#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
34
35 /*
36 * Hyp-mode PL2 PTE definitions for LPAE.
37 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
38 index 0e1e2b3afa45..2a767d262c17 100644
39 --- a/arch/arm/kernel/setup.c
40 +++ b/arch/arm/kernel/setup.c
41 @@ -622,6 +622,7 @@ void __init dump_machine_table(void)
42 int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
43 {
44 struct membank *bank = &meminfo.bank[meminfo.nr_banks];
45 + u64 aligned_start;
46
47 if (meminfo.nr_banks >= NR_BANKS) {
48 printk(KERN_CRIT "NR_BANKS too low, "
49 @@ -634,10 +635,16 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
50 * Size is appropriately rounded down, start is rounded up.
51 */
52 size -= start & ~PAGE_MASK;
53 - bank->start = PAGE_ALIGN(start);
54 + aligned_start = PAGE_ALIGN(start);
55
56 -#ifndef CONFIG_ARM_LPAE
57 - if (bank->start + size < bank->start) {
58 +#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
59 + if (aligned_start > ULONG_MAX) {
60 + printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
61 + "32-bit physical address space\n", (long long)start);
62 + return -EINVAL;
63 + }
64 +
65 + if (aligned_start + size > ULONG_MAX) {
66 printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
67 "32-bit physical address space\n", (long long)start);
68 /*
69 @@ -645,10 +652,25 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
70 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
71 * This means we lose a page after masking.
72 */
73 - size = ULONG_MAX - bank->start;
74 + size = ULONG_MAX - aligned_start;
75 }
76 #endif
77
78 + if (aligned_start < PHYS_OFFSET) {
79 + if (aligned_start + size <= PHYS_OFFSET) {
80 + pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
81 + aligned_start, aligned_start + size);
82 + return -EINVAL;
83 + }
84 +
85 + pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
86 + aligned_start, (u64)PHYS_OFFSET);
87 +
88 + size -= PHYS_OFFSET - aligned_start;
89 + aligned_start = PHYS_OFFSET;
90 + }
91 +
92 + bank->start = aligned_start;
93 bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
94
95 /*
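The arm_add_memory() rework above computes the page-aligned start in a 64-bit temporary so the overflow checks hold on LPAE, ignores banks that lie wholly outside the usable range (above the 32-bit limit on !CONFIG_ARCH_PHYS_ADDR_T_64BIT builds, or entirely below PHYS_OFFSET), and clips banks that straddle PHYS_OFFSET. A minimal userspace sketch of the PHYS_OFFSET clipping step, with a stand-in value for PHYS_OFFSET:

    #include <stdint.h>
    #include <stdio.h>

    #define PHYS_OFFSET 0x80000000ULL   /* stand-in for the platform DRAM base */

    /* Returns 0 and updates *start/*size, or -1 if the bank must be ignored. */
    static int clip_bank(uint64_t *start, uint64_t *size)
    {
            if (*start + *size <= PHYS_OFFSET)
                    return -1;                       /* entirely below PHYS_OFFSET */
            if (*start < PHYS_OFFSET) {
                    *size  -= PHYS_OFFSET - *start;  /* drop the low part */
                    *start  = PHYS_OFFSET;
            }
            return 0;
    }

    int main(void)
    {
            uint64_t s = 0x7ff00000, sz = 0x400000;
            if (clip_bank(&s, &sz) == 0)
                    printf("bank 0x%llx + 0x%llx\n",
                           (unsigned long long)s, (unsigned long long)sz);
            return 0;
    }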
96 diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
97 index 94a119a35af8..3c405f43ca39 100644
98 --- a/arch/arm/mach-tegra/common.c
99 +++ b/arch/arm/mach-tegra/common.c
100 @@ -22,6 +22,7 @@
101 #include <linux/io.h>
102 #include <linux/clk.h>
103 #include <linux/delay.h>
104 +#include <linux/of.h>
105 #include <linux/reboot.h>
106 #include <linux/irqchip.h>
107 #include <linux/clk-provider.h>
108 @@ -82,10 +83,20 @@ void tegra_assert_system_reset(enum reboot_mode mode, const char *cmd)
109 static void __init tegra_init_cache(void)
110 {
111 #ifdef CONFIG_CACHE_L2X0
112 + static const struct of_device_id pl310_ids[] __initconst = {
113 + { .compatible = "arm,pl310-cache", },
114 + {}
115 + };
116 +
117 + struct device_node *np;
118 int ret;
119 void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
120 u32 aux_ctrl, cache_type;
121
122 + np = of_find_matching_node(NULL, pl310_ids);
123 + if (!np)
124 + return;
125 +
126 cache_type = readl(p + L2X0_CACHE_TYPE);
127 aux_ctrl = (cache_type & 0x700) << (17-8);
128 aux_ctrl |= 0x7C400001;
129 diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
130 index d5a4e9ad8f0f..33eab618b3f1 100644
131 --- a/arch/arm/mm/mm.h
132 +++ b/arch/arm/mm/mm.h
133 @@ -38,6 +38,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
134
135 struct mem_type {
136 pteval_t prot_pte;
137 + pteval_t prot_pte_s2;
138 pmdval_t prot_l1;
139 pmdval_t prot_sect;
140 unsigned int domain;
141 diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
142 index b1d17eeb59b8..0222ba7603af 100644
143 --- a/arch/arm/mm/mmu.c
144 +++ b/arch/arm/mm/mmu.c
145 @@ -229,12 +229,16 @@ __setup("noalign", noalign_setup);
146 #endif /* ifdef CONFIG_CPU_CP15 / else */
147
148 #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
149 +#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
150 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
151
152 static struct mem_type mem_types[] = {
153 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
154 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
155 L_PTE_SHARED,
156 + .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) |
157 + s2_policy(L_PTE_S2_MT_DEV_SHARED) |
158 + L_PTE_SHARED,
159 .prot_l1 = PMD_TYPE_TABLE,
160 .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
161 .domain = DOMAIN_IO,
162 @@ -456,7 +460,8 @@ static void __init build_mem_type_table(void)
163 cp = &cache_policies[cachepolicy];
164 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
165 s2_pgprot = cp->pte_s2;
166 - hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
167 + hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
168 + s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
169
170 /*
171 * ARMv6 and above have extended page tables.
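Together with the pgtable-3level.h hunk near the top of this patch, MT_DEVICE now carries a separate stage-2 value (prot_pte_s2), so guest (stage-2) device mappings get the stage-2 MemAttr encoding rather than reusing the stage-1 bits. A standalone sketch using the bit values copied from that hunk (illustrative only; the kernel additionally applies the s2_policy() wrapper shown above):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pteval_t;

    /* Values copied from the pgtable-3level.h hunk (illustrative only). */
    #define L_PTE_S2_MT_DEV_SHARED  ((pteval_t)0x1 << 2)   /* MemAttr[3:0] = device */
    #define L_PTE_S2_MT_MASK        ((pteval_t)0xf << 2)
    #define L_PTE_S2_RDWR           ((pteval_t)3 << 6)     /* HAP[2:1] */

    int main(void)
    {
            pteval_t s2_dev = L_PTE_S2_MT_DEV_SHARED | L_PTE_S2_RDWR;
            printf("stage-2 device prot 0x%llx, MemAttr field 0x%llx\n",
                   (unsigned long long)s2_dev,
                   (unsigned long long)((s2_dev & L_PTE_S2_MT_MASK) >> 2));
            return 0;
    }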
172 diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts
173 index 84fcc5018284..519c4b2c0687 100644
174 --- a/arch/arm64/boot/dts/foundation-v8.dts
175 +++ b/arch/arm64/boot/dts/foundation-v8.dts
176 @@ -6,6 +6,8 @@
177
178 /dts-v1/;
179
180 +/memreserve/ 0x80000000 0x00010000;
181 +
182 / {
183 model = "Foundation-v8A";
184 compatible = "arm,foundation-aarch64", "arm,vexpress";
185 diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
186 index 965c28ff7b3b..82d95a7e9466 100644
187 --- a/arch/arm64/include/asm/pgtable.h
188 +++ b/arch/arm64/include/asm/pgtable.h
189 @@ -255,7 +255,7 @@ static inline int has_transparent_hugepage(void)
190 #define pgprot_noncached(prot) \
191 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
192 #define pgprot_writecombine(prot) \
193 - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
194 + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
195 #define pgprot_dmacoherent(prot) \
196 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
197 #define __HAVE_PHYS_MEM_ACCESS_PROT
198 diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
199 index 89c047f9a971..70ba9d4ee978 100644
200 --- a/arch/arm64/include/asm/syscall.h
201 +++ b/arch/arm64/include/asm/syscall.h
202 @@ -59,6 +59,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
203 unsigned int i, unsigned int n,
204 unsigned long *args)
205 {
206 + if (n == 0)
207 + return;
208 +
209 if (i + n > SYSCALL_MAX_ARGS) {
210 unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
211 unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
212 @@ -82,6 +85,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
213 unsigned int i, unsigned int n,
214 const unsigned long *args)
215 {
216 + if (n == 0)
217 + return;
218 +
219 if (i + n > SYSCALL_MAX_ARGS) {
220 pr_warning("%s called with max args %d, handling only %d\n",
221 __func__, i + n, SYSCALL_MAX_ARGS);
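The new n == 0 early returns guard against a caller asking for zero arguments: in the 3.12 version of these helpers (below the context shown in this hunk) the i == 0 branch copies regs->orig_x0 into args[0] and then decrements n, so with n == 0 the write overruns a zero-length buffer and the unsigned decrement wraps, making the following memcpy() copy a huge range. The description of the helper body is an assumption from the surrounding file, but the wrap itself is plain C:

    #include <stdio.h>

    int main(void)
    {
            unsigned int n = 0;
            n--;                                   /* wraps to UINT_MAX */
            printf("n after decrement: %u\n", n);  /* 4294967295 */
            return 0;
    }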
222 diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
223 index d3e5e9bc8f94..e37db7f2a5fa 100644
224 --- a/arch/powerpc/include/asm/eeh.h
225 +++ b/arch/powerpc/include/asm/eeh.h
226 @@ -117,6 +117,16 @@ static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
227 return edev ? edev->pdev : NULL;
228 }
229
230 +/* Return values from eeh_ops::next_error */
231 +enum {
232 + EEH_NEXT_ERR_NONE = 0,
233 + EEH_NEXT_ERR_INF,
234 + EEH_NEXT_ERR_FROZEN_PE,
235 + EEH_NEXT_ERR_FENCED_PHB,
236 + EEH_NEXT_ERR_DEAD_PHB,
237 + EEH_NEXT_ERR_DEAD_IOC
238 +};
239 +
240 /*
241 * The struct is used to trace the registered EEH operation
242 * callback functions. Actually, those operation callback
243 diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
244 index 36bed5a12750..d3a132c9127c 100644
245 --- a/arch/powerpc/kernel/eeh_driver.c
246 +++ b/arch/powerpc/kernel/eeh_driver.c
247 @@ -626,84 +626,90 @@ static void eeh_handle_special_event(void)
248 {
249 struct eeh_pe *pe, *phb_pe;
250 struct pci_bus *bus;
251 - struct pci_controller *hose, *tmp;
252 + struct pci_controller *hose;
253 unsigned long flags;
254 - int rc = 0;
255 + int rc;
256
257 - /*
258 - * The return value from next_error() has been classified as follows.
259 - * It might be good to enumerate them. However, next_error() is only
260 - * supported by PowerNV platform for now. So it would be fine to use
261 - * integer directly:
262 - *
263 - * 4 - Dead IOC 3 - Dead PHB
264 - * 2 - Fenced PHB 1 - Frozen PE
265 - * 0 - No error found
266 - *
267 - */
268 - rc = eeh_ops->next_error(&pe);
269 - if (rc <= 0)
270 - return;
271
272 - switch (rc) {
273 - case 4:
274 - /* Mark all PHBs in dead state */
275 - eeh_serialize_lock(&flags);
276 - list_for_each_entry_safe(hose, tmp,
277 - &hose_list, list_node) {
278 - phb_pe = eeh_phb_pe_get(hose);
279 - if (!phb_pe) continue;
280 -
281 - eeh_pe_state_mark(phb_pe,
282 - EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
283 + do {
284 + rc = eeh_ops->next_error(&pe);
285 +
286 + switch (rc) {
287 + case EEH_NEXT_ERR_DEAD_IOC:
288 + /* Mark all PHBs in dead state */
289 + eeh_serialize_lock(&flags);
290 +
291 + /* Purge all events */
292 + eeh_remove_event(NULL);
293 +
294 + list_for_each_entry(hose, &hose_list, list_node) {
295 + phb_pe = eeh_phb_pe_get(hose);
296 + if (!phb_pe) continue;
297 +
298 + eeh_pe_state_mark(phb_pe,
299 + EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
300 + }
301 +
302 + eeh_serialize_unlock(flags);
303 +
304 + break;
305 + case EEH_NEXT_ERR_FROZEN_PE:
306 + case EEH_NEXT_ERR_FENCED_PHB:
307 + case EEH_NEXT_ERR_DEAD_PHB:
308 + /* Mark the PE in fenced state */
309 + eeh_serialize_lock(&flags);
310 +
311 + /* Purge all events of the PHB */
312 + eeh_remove_event(pe);
313 +
314 + if (rc == EEH_NEXT_ERR_DEAD_PHB)
315 + eeh_pe_state_mark(pe,
316 + EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
317 + else
318 + eeh_pe_state_mark(pe,
319 + EEH_PE_ISOLATED | EEH_PE_RECOVERING);
320 +
321 + eeh_serialize_unlock(flags);
322 +
323 + break;
324 + case EEH_NEXT_ERR_NONE:
325 + return;
326 + default:
327 + pr_warn("%s: Invalid value %d from next_error()\n",
328 + __func__, rc);
329 + return;
330 }
331 - eeh_serialize_unlock(flags);
332 -
333 - /* Purge all events */
334 - eeh_remove_event(NULL);
335 - break;
336 - case 3:
337 - case 2:
338 - case 1:
339 - /* Mark the PE in fenced state */
340 - eeh_serialize_lock(&flags);
341 - if (rc == 3)
342 - eeh_pe_state_mark(pe,
343 - EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
344 - else
345 - eeh_pe_state_mark(pe,
346 - EEH_PE_ISOLATED | EEH_PE_RECOVERING);
347 - eeh_serialize_unlock(flags);
348 -
349 - /* Purge all events of the PHB */
350 - eeh_remove_event(pe);
351 - break;
352 - default:
353 - pr_err("%s: Invalid value %d from next_error()\n",
354 - __func__, rc);
355 - return;
356 - }
357
358 - /*
359 - * For fenced PHB and frozen PE, it's handled as normal
360 - * event. We have to remove the affected PHBs for dead
361 - * PHB and IOC
362 - */
363 - if (rc == 2 || rc == 1)
364 - eeh_handle_normal_event(pe);
365 - else {
366 - list_for_each_entry_safe(hose, tmp,
367 - &hose_list, list_node) {
368 - phb_pe = eeh_phb_pe_get(hose);
369 - if (!phb_pe || !(phb_pe->state & EEH_PE_PHB_DEAD))
370 - continue;
371 -
372 - bus = eeh_pe_bus_get(phb_pe);
373 - /* Notify all devices that they're about to go down. */
374 - eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);
375 - pcibios_remove_pci_devices(bus);
376 + /*
377 + * For fenced PHB and frozen PE, it's handled as normal
378 + * event. We have to remove the affected PHBs for dead
379 + * PHB and IOC
380 + */
381 + if (rc == EEH_NEXT_ERR_FROZEN_PE ||
382 + rc == EEH_NEXT_ERR_FENCED_PHB) {
383 + eeh_handle_normal_event(pe);
384 + } else {
385 + list_for_each_entry(hose, &hose_list, list_node) {
386 + phb_pe = eeh_phb_pe_get(hose);
387 + if (!phb_pe ||
388 + !(phb_pe->state & EEH_PE_PHB_DEAD))
389 + continue;
390 +
391 + /* Notify all devices to be down */
392 + bus = eeh_pe_bus_get(phb_pe);
393 + eeh_pe_dev_traverse(pe,
394 + eeh_report_failure, NULL);
395 + pcibios_remove_pci_devices(bus);
396 + }
397 }
398 - }
399 +
400 + /*
401 + * If we have detected dead IOC, we needn't proceed
402 + * any more since all PHBs would have been removed
403 + */
404 + if (rc == EEH_NEXT_ERR_DEAD_IOC)
405 + break;
406 + } while (rc != EEH_NEXT_ERR_NONE);
407 }
408
409 /**
410 diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
411 index b7eb5d4f4c89..227c7fe4067f 100644
412 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c
413 +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
414 @@ -766,12 +766,12 @@ static int ioda_eeh_get_pe(struct pci_controller *hose,
415 */
416 static int ioda_eeh_next_error(struct eeh_pe **pe)
417 {
418 - struct pci_controller *hose, *tmp;
419 + struct pci_controller *hose;
420 struct pnv_phb *phb;
421 u64 frozen_pe_no;
422 u16 err_type, severity;
423 long rc;
424 - int ret = 1;
425 + int ret = EEH_NEXT_ERR_NONE;
426
427 /*
428 * While running here, it's safe to purge the event queue.
429 @@ -781,7 +781,7 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
430 eeh_remove_event(NULL);
431 opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
432
433 - list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
434 + list_for_each_entry(hose, &hose_list, list_node) {
435 /*
436 * If the subordinate PCI buses of the PHB has been
437 * removed, we needn't take care of it any more.
438 @@ -820,19 +820,19 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
439 switch (err_type) {
440 case OPAL_EEH_IOC_ERROR:
441 if (severity == OPAL_EEH_SEV_IOC_DEAD) {
442 - list_for_each_entry_safe(hose, tmp,
443 - &hose_list, list_node) {
444 + list_for_each_entry(hose, &hose_list,
445 + list_node) {
446 phb = hose->private_data;
447 phb->eeh_state |= PNV_EEH_STATE_REMOVED;
448 }
449
450 pr_err("EEH: dead IOC detected\n");
451 - ret = 4;
452 - goto out;
453 + ret = EEH_NEXT_ERR_DEAD_IOC;
454 } else if (severity == OPAL_EEH_SEV_INF) {
455 pr_info("EEH: IOC informative error "
456 "detected\n");
457 ioda_eeh_hub_diag(hose);
458 + ret = EEH_NEXT_ERR_NONE;
459 }
460
461 break;
462 @@ -844,21 +844,20 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
463 pr_err("EEH: dead PHB#%x detected\n",
464 hose->global_number);
465 phb->eeh_state |= PNV_EEH_STATE_REMOVED;
466 - ret = 3;
467 - goto out;
468 + ret = EEH_NEXT_ERR_DEAD_PHB;
469 } else if (severity == OPAL_EEH_SEV_PHB_FENCED) {
470 if (ioda_eeh_get_phb_pe(hose, pe))
471 break;
472
473 pr_err("EEH: fenced PHB#%x detected\n",
474 hose->global_number);
475 - ret = 2;
476 - goto out;
477 + ret = EEH_NEXT_ERR_FENCED_PHB;
478 } else if (severity == OPAL_EEH_SEV_INF) {
479 pr_info("EEH: PHB#%x informative error "
480 "detected\n",
481 hose->global_number);
482 ioda_eeh_phb_diag(hose);
483 + ret = EEH_NEXT_ERR_NONE;
484 }
485
486 break;
487 @@ -868,13 +867,23 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
488
489 pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
490 (*pe)->addr, (*pe)->phb->global_number);
491 - ret = 1;
492 - goto out;
493 + ret = EEH_NEXT_ERR_FROZEN_PE;
494 + break;
495 + default:
496 + pr_warn("%s: Unexpected error type %d\n",
497 + __func__, err_type);
498 }
499 +
500 + /*
501 + * If we have no errors on the specific PHB or only
502 + * informative error there, we continue poking it.
503 + * Otherwise, we need actions to be taken by upper
504 + * layer.
505 + */
506 + if (ret > EEH_NEXT_ERR_INF)
507 + break;
508 }
509
510 - ret = 0;
511 -out:
512 return ret;
513 }
514
515 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
516 index dce0df8150df..74dd12952ea8 100644
517 --- a/arch/x86/kvm/mmu.c
518 +++ b/arch/x86/kvm/mmu.c
519 @@ -2664,6 +2664,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
520 int emulate = 0;
521 gfn_t pseudo_gfn;
522
523 + if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
524 + return 0;
525 +
526 for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
527 if (iterator.level == level) {
528 mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
529 @@ -2834,6 +2837,9 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
530 bool ret = false;
531 u64 spte = 0ull;
532
533 + if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
534 + return false;
535 +
536 if (!page_fault_can_be_fast(error_code))
537 return false;
538
539 @@ -3229,6 +3235,9 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
540 struct kvm_shadow_walk_iterator iterator;
541 u64 spte = 0ull;
542
543 + if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
544 + return spte;
545 +
546 walk_shadow_page_lockless_begin(vcpu);
547 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
548 if (!is_shadow_present_pte(spte))
549 @@ -4557,6 +4566,9 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
550 u64 spte;
551 int nr_sptes = 0;
552
553 + if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
554 + return nr_sptes;
555 +
556 walk_shadow_page_lockless_begin(vcpu);
557 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
558 sptes[iterator.level-1] = spte;
559 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
560 index ad75d77999d0..cba218a2f08d 100644
561 --- a/arch/x86/kvm/paging_tmpl.h
562 +++ b/arch/x86/kvm/paging_tmpl.h
563 @@ -569,6 +569,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
564 if (FNAME(gpte_changed)(vcpu, gw, top_level))
565 goto out_gpte_changed;
566
567 + if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
568 + goto out_gpte_changed;
569 +
570 for (shadow_walk_init(&it, vcpu, addr);
571 shadow_walk_okay(&it) && it.level > gw->level;
572 shadow_walk_next(&it)) {
573 @@ -820,6 +823,11 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
574 */
575 mmu_topup_memory_caches(vcpu);
576
577 + if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
578 + WARN_ON(1);
579 + return;
580 + }
581 +
582 spin_lock(&vcpu->kvm->mmu_lock);
583 for_each_shadow_entry(vcpu, gva, iterator) {
584 level = iterator.level;
585 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
586 index 6128914ee873..59181e653826 100644
587 --- a/arch/x86/kvm/vmx.c
588 +++ b/arch/x86/kvm/vmx.c
589 @@ -7294,8 +7294,8 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
590 struct vcpu_vmx *vmx = to_vmx(vcpu);
591
592 free_vpid(vmx);
593 - free_nested(vmx);
594 free_loaded_vmcs(vmx->loaded_vmcs);
595 + free_nested(vmx);
596 kfree(vmx->guest_msrs);
597 kvm_vcpu_uninit(vcpu);
598 kmem_cache_free(kvm_vcpu_cache, vmx);
599 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
600 index 877b9a1b2152..01495755701b 100644
601 --- a/arch/x86/net/bpf_jit.S
602 +++ b/arch/x86/net/bpf_jit.S
603 @@ -140,7 +140,7 @@ bpf_slow_path_byte_msh:
604 push %r9; \
605 push SKBDATA; \
606 /* rsi already has offset */ \
607 - mov $SIZE,%ecx; /* size */ \
608 + mov $SIZE,%edx; /* size */ \
609 call bpf_internal_load_pointer_neg_helper; \
610 test %rax,%rax; \
611 pop SKBDATA; \
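The one-register fix above matters because bpf_slow_path_common hands off to a C helper, and under the x86-64 SysV calling convention the third integer argument travels in %rdx/%edx, not %ecx; loading the size into %ecx left the helper's size parameter undefined. The helper's prototype at the time was roughly the following (based on net/core/filter.c of that era; treat the exact spelling as an assumption):

    struct sk_buff;     /* forward declaration, enough for the sketch */

    /*
     * SysV AMD64 integer argument registers, in order:
     *   arg1 -> %rdi (skb), arg2 -> %rsi (offset k), arg3 -> %rdx/%edx (size), ...
     */
    void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
                                               int k, unsigned int size);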
612 diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
613 index 587e0202a70b..e5590953630e 100644
614 --- a/drivers/clocksource/vf_pit_timer.c
615 +++ b/drivers/clocksource/vf_pit_timer.c
616 @@ -54,7 +54,7 @@ static inline void pit_irq_acknowledge(void)
617
618 static unsigned int pit_read_sched_clock(void)
619 {
620 - return __raw_readl(clksrc_base + PITCVAL);
621 + return ~__raw_readl(clksrc_base + PITCVAL);
622 }
623
624 static int __init pit_clocksource_init(unsigned long rate)
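The PIT current-value register counts down from its load value, so returning it directly made the scheduler clock appear to run backwards; the bitwise complement converts the down-count into an equivalent monotonically increasing value. A one-file check of the identity ~cnt == 0xffffffff - cnt:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t cnt = 0xfffffff0;               /* down-counter a few ticks in */
            printf("~cnt = 0x%08x\n", ~cnt);         /* 0x0000000f elapsed ticks */
            printf("matches 0xffffffff - cnt: %d\n", ~cnt == 0xffffffffu - cnt);
            return 0;
    }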
625 diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
626 index fe4a7d16e261..c077df094ae5 100644
627 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
628 +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
629 @@ -201,6 +201,13 @@ int i915_gem_init_stolen(struct drm_device *dev)
630 struct drm_i915_private *dev_priv = dev->dev_private;
631 int bios_reserved = 0;
632
633 +#ifdef CONFIG_INTEL_IOMMU
634 + if (intel_iommu_gfx_mapped) {
635 + DRM_INFO("DMAR active, disabling use of stolen memory\n");
636 + return 0;
637 + }
638 +#endif
639 +
640 if (dev_priv->gtt.stolen_size == 0)
641 return 0;
642
643 diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
644 index 49f6cc0f9919..351805362290 100644
645 --- a/drivers/hid/hid-lg4ff.c
646 +++ b/drivers/hid/hid-lg4ff.c
647 @@ -574,17 +574,6 @@ int lg4ff_init(struct hid_device *hid)
648 if (error)
649 return error;
650
651 - /* Check if autocentering is available and
652 - * set the centering force to zero by default */
653 - if (test_bit(FF_AUTOCENTER, dev->ffbit)) {
654 - if (rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN) /* Formula Force EX expects different autocentering command */
655 - dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex;
656 - else
657 - dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default;
658 -
659 - dev->ff->set_autocenter(dev, 0);
660 - }
661 -
662 /* Get private driver data */
663 drv_data = hid_get_drvdata(hid);
664 if (!drv_data) {
665 @@ -605,6 +594,17 @@ int lg4ff_init(struct hid_device *hid)
666 entry->max_range = lg4ff_devices[i].max_range;
667 entry->set_range = lg4ff_devices[i].set_range;
668
669 + /* Check if autocentering is available and
670 + * set the centering force to zero by default */
671 + if (test_bit(FF_AUTOCENTER, dev->ffbit)) {
672 + if (rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN) /* Formula Force EX expects different autocentering command */
673 + dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex;
674 + else
675 + dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default;
676 +
677 + dev->ff->set_autocenter(dev, 0);
678 + }
679 +
680 /* Create sysfs interface */
681 error = device_create_file(&hid->dev, &dev_attr_range);
682 if (error)
683 diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
684 index 6a6dd5cd7833..d0a0034d1734 100644
685 --- a/drivers/hid/hidraw.c
686 +++ b/drivers/hid/hidraw.c
687 @@ -313,13 +313,13 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit)
688 hid_hw_close(hidraw->hid);
689 wake_up_interruptible(&hidraw->wait);
690 }
691 + device_destroy(hidraw_class,
692 + MKDEV(hidraw_major, hidraw->minor));
693 } else {
694 --hidraw->open;
695 }
696 if (!hidraw->open) {
697 if (!hidraw->exist) {
698 - device_destroy(hidraw_class,
699 - MKDEV(hidraw_major, hidraw->minor));
700 hidraw_table[hidraw->minor] = NULL;
701 kfree(hidraw);
702 } else {
703 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
704 index ba93ef85652d..09c71293ab4b 100644
705 --- a/drivers/infiniband/ulp/isert/ib_isert.c
706 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
707 @@ -1652,7 +1652,6 @@ isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn
708 static void
709 isert_cq_rx_comp_err(struct isert_conn *isert_conn)
710 {
711 - struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
712 struct iscsi_conn *conn = isert_conn->conn;
713
714 if (isert_conn->post_recv_buf_count)
715 diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
716 index 597e9b8fc18d..ef1cf52f8bb9 100644
717 --- a/drivers/input/mouse/elantech.c
718 +++ b/drivers/input/mouse/elantech.c
719 @@ -486,6 +486,7 @@ static void elantech_input_sync_v4(struct psmouse *psmouse)
720 unsigned char *packet = psmouse->packet;
721
722 input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
723 + input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
724 input_mt_report_pointer_emulation(dev, true);
725 input_sync(dev);
726 }
727 @@ -984,6 +985,44 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
728 }
729
730 /*
731 + * Advertise INPUT_PROP_BUTTONPAD for clickpads. The testing of bit 12 in
732 + * fw_version for this is based on the following fw_version & caps table:
733 + *
734 + * Laptop-model: fw_version: caps: buttons:
735 + * Acer S3 0x461f00 10, 13, 0e clickpad
736 + * Acer S7-392 0x581f01 50, 17, 0d clickpad
737 + * Acer V5-131 0x461f02 01, 16, 0c clickpad
738 + * Acer V5-551 0x461f00 ? clickpad
739 + * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
740 + * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
741 + * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
742 + * Asus UX31 0x361f00 20, 15, 0e clickpad
743 + * Asus UX32VD 0x361f02 00, 15, 0e clickpad
744 + * Avatar AVIU-145A2 0x361f00 ? clickpad
745 + * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
746 + * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
747 + * Samsung NF210 0x150b00 78, 14, 0a 2 hw buttons
748 + * Samsung NP770Z5E 0x575f01 10, 15, 0f clickpad
749 + * Samsung NP700Z5B 0x361f06 21, 15, 0f clickpad
750 + * Samsung NP900X3E-A02 0x575f03 ? clickpad
751 + * Samsung NP-QX410 0x851b00 19, 14, 0c clickpad
752 + * Samsung RC512 0x450f00 08, 15, 0c 2 hw buttons
753 + * Samsung RF710 0x450f00 ? 2 hw buttons
754 + * System76 Pangolin 0x250f01 ? 2 hw buttons
755 + * (*) + 3 trackpoint buttons
756 + */
757 +static void elantech_set_buttonpad_prop(struct psmouse *psmouse)
758 +{
759 + struct input_dev *dev = psmouse->dev;
760 + struct elantech_data *etd = psmouse->private;
761 +
762 + if (etd->fw_version & 0x001000) {
763 + __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
764 + __clear_bit(BTN_RIGHT, dev->keybit);
765 + }
766 +}
767 +
768 +/*
769 * Set the appropriate event bits for the input subsystem
770 */
771 static int elantech_set_input_params(struct psmouse *psmouse)
772 @@ -1026,6 +1065,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
773 __set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
774 /* fall through */
775 case 3:
776 + if (etd->hw_version == 3)
777 + elantech_set_buttonpad_prop(psmouse);
778 input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
779 input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
780 if (etd->reports_pressure) {
781 @@ -1047,9 +1088,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
782 */
783 psmouse_warn(psmouse, "couldn't query resolution data.\n");
784 }
785 - /* v4 is clickpad, with only one button. */
786 - __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
787 - __clear_bit(BTN_RIGHT, dev->keybit);
788 + elantech_set_buttonpad_prop(psmouse);
789 __set_bit(BTN_TOOL_QUADTAP, dev->keybit);
790 /* For X to recognize me as touchpad. */
791 input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
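Per the table in the new comment block, bit 12 of fw_version (mask 0x001000) tells clickpads apart from touchpads with separate hardware buttons, and the same test is now applied to both v3 and v4 hardware instead of assuming every v4 device is a clickpad. The check boils down to:

    #include <stdbool.h>
    #include <stdio.h>

    static bool is_clickpad(unsigned int fw_version)
    {
            return (fw_version & 0x001000) != 0;    /* bit 12 set => clickpad */
    }

    int main(void)
    {
            /* fw_version values taken from the comment table above. */
            printf("Acer S3    (0x461f00): %d\n", is_clickpad(0x461f00));  /* 1 */
            printf("Asus K53SV (0x450f01): %d\n", is_clickpad(0x450f01));  /* 0 */
            return 0;
    }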
792 diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
793 index e53416a4d7f3..a6debe13d5b9 100644
794 --- a/drivers/input/tablet/wacom_sys.c
795 +++ b/drivers/input/tablet/wacom_sys.c
796 @@ -304,7 +304,7 @@ static int wacom_parse_hid(struct usb_interface *intf,
797 struct usb_device *dev = interface_to_usbdev(intf);
798 char limit = 0;
799 /* result has to be defined as int for some devices */
800 - int result = 0;
801 + int result = 0, touch_max = 0;
802 int i = 0, usage = WCM_UNDEFINED, finger = 0, pen = 0;
803 unsigned char *report;
804
805 @@ -351,7 +351,8 @@ static int wacom_parse_hid(struct usb_interface *intf,
806 if (usage == WCM_DESKTOP) {
807 if (finger) {
808 features->device_type = BTN_TOOL_FINGER;
809 -
810 + /* touch device at least supports one touch point */
811 + touch_max = 1;
812 switch (features->type) {
813 case TABLETPC2FG:
814 features->pktlen = WACOM_PKGLEN_TPC2FG;
815 @@ -504,6 +505,8 @@ static int wacom_parse_hid(struct usb_interface *intf,
816 }
817
818 out:
819 + if (!features->touch_max && touch_max)
820 + features->touch_max = touch_max;
821 result = 0;
822 kfree(report);
823 return result;
824 diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
825 index 6386ced910c2..91c694ba42f4 100644
826 --- a/drivers/media/pci/cx18/cx18-driver.c
827 +++ b/drivers/media/pci/cx18/cx18-driver.c
828 @@ -327,13 +327,16 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
829 struct i2c_client *c;
830 u8 eedata[256];
831
832 + memset(tv, 0, sizeof(*tv));
833 +
834 c = kzalloc(sizeof(*c), GFP_KERNEL);
835 + if (!c)
836 + return;
837
838 strlcpy(c->name, "cx18 tveeprom tmp", sizeof(c->name));
839 c->adapter = &cx->i2c_adap[0];
840 c->addr = 0xa0 >> 1;
841
842 - memset(tv, 0, sizeof(*tv));
843 if (tveeprom_read(c, eedata, sizeof(eedata)))
844 goto ret;
845
846 diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
847 index 20e345d9fe8f..a1c641e18362 100644
848 --- a/drivers/media/usb/dvb-usb/cxusb.c
849 +++ b/drivers/media/usb/dvb-usb/cxusb.c
850 @@ -149,6 +149,7 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
851 int num)
852 {
853 struct dvb_usb_device *d = i2c_get_adapdata(adap);
854 + int ret;
855 int i;
856
857 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
858 @@ -173,7 +174,8 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
859 if (1 + msg[i].len > sizeof(ibuf)) {
860 warn("i2c rd: len=%d is too big!\n",
861 msg[i].len);
862 - return -EOPNOTSUPP;
863 + ret = -EOPNOTSUPP;
864 + goto unlock;
865 }
866 obuf[0] = 0;
867 obuf[1] = msg[i].len;
868 @@ -193,12 +195,14 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
869 if (3 + msg[i].len > sizeof(obuf)) {
870 warn("i2c wr: len=%d is too big!\n",
871 msg[i].len);
872 - return -EOPNOTSUPP;
873 + ret = -EOPNOTSUPP;
874 + goto unlock;
875 }
876 if (1 + msg[i + 1].len > sizeof(ibuf)) {
877 warn("i2c rd: len=%d is too big!\n",
878 msg[i + 1].len);
879 - return -EOPNOTSUPP;
880 + ret = -EOPNOTSUPP;
881 + goto unlock;
882 }
883 obuf[0] = msg[i].len;
884 obuf[1] = msg[i+1].len;
885 @@ -223,7 +227,8 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
886 if (2 + msg[i].len > sizeof(obuf)) {
887 warn("i2c wr: len=%d is too big!\n",
888 msg[i].len);
889 - return -EOPNOTSUPP;
890 + ret = -EOPNOTSUPP;
891 + goto unlock;
892 }
893 obuf[0] = msg[i].addr;
894 obuf[1] = msg[i].len;
895 @@ -237,8 +242,14 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
896 }
897 }
898
899 + if (i == num)
900 + ret = num;
901 + else
902 + ret = -EREMOTEIO;
903 +
904 +unlock:
905 mutex_unlock(&d->i2c_mutex);
906 - return i == num ? num : -EREMOTEIO;
907 + return ret;
908 }
909
910 static u32 cxusb_i2c_func(struct i2c_adapter *adapter)
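The cxusb change above (and the analogous dw2102.c changes that follow) fixes early returns that left d->i2c_mutex held: every "len is too big" bail-out now funnels through a single unlock label. A self-contained userspace sketch of the same pattern, with a pthread mutex standing in for the kernel mutex:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t i2c_lock = PTHREAD_MUTEX_INITIALIZER;

    static int do_xfer(unsigned int len, unsigned int bufsize)
    {
            int ret;

            pthread_mutex_lock(&i2c_lock);
            if (len > bufsize) {            /* the "len=%d is too big" case */
                    ret = -1;
                    goto unlock;            /* never return with the lock held */
            }
            ret = 0;                        /* the transfer would happen here */
    unlock:
            pthread_mutex_unlock(&i2c_lock);
            return ret;
    }

    int main(void)
    {
            printf("%d %d\n", do_xfer(4, 64), do_xfer(128, 64));   /* 0 -1 */
            return 0;
    }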
911 diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
912 index 71b22f5a05ce..4170a45d17e0 100644
913 --- a/drivers/media/usb/dvb-usb/dw2102.c
914 +++ b/drivers/media/usb/dvb-usb/dw2102.c
915 @@ -301,6 +301,7 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
916 static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
917 {
918 struct dvb_usb_device *d = i2c_get_adapdata(adap);
919 + int ret;
920
921 if (!d)
922 return -ENODEV;
923 @@ -316,7 +317,8 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
924 if (2 + msg[1].len > sizeof(ibuf)) {
925 warn("i2c rd: len=%d is too big!\n",
926 msg[1].len);
927 - return -EOPNOTSUPP;
928 + ret = -EOPNOTSUPP;
929 + goto unlock;
930 }
931
932 obuf[0] = msg[0].addr << 1;
933 @@ -340,7 +342,8 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
934 if (2 + msg[0].len > sizeof(obuf)) {
935 warn("i2c wr: len=%d is too big!\n",
936 msg[1].len);
937 - return -EOPNOTSUPP;
938 + ret = -EOPNOTSUPP;
939 + goto unlock;
940 }
941
942 obuf[0] = msg[0].addr << 1;
943 @@ -357,7 +360,8 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
944 if (2 + msg[0].len > sizeof(obuf)) {
945 warn("i2c wr: len=%d is too big!\n",
946 msg[1].len);
947 - return -EOPNOTSUPP;
948 + ret = -EOPNOTSUPP;
949 + goto unlock;
950 }
951
952 obuf[0] = msg[0].addr << 1;
953 @@ -386,15 +390,17 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
954
955 break;
956 }
957 + ret = num;
958
959 +unlock:
960 mutex_unlock(&d->i2c_mutex);
961 - return num;
962 + return ret;
963 }
964
965 static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
966 {
967 struct dvb_usb_device *d = i2c_get_adapdata(adap);
968 - int len, i, j;
969 + int len, i, j, ret;
970
971 if (!d)
972 return -ENODEV;
973 @@ -430,7 +436,8 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
974 if (2 + msg[j].len > sizeof(ibuf)) {
975 warn("i2c rd: len=%d is too big!\n",
976 msg[j].len);
977 - return -EOPNOTSUPP;
978 + ret = -EOPNOTSUPP;
979 + goto unlock;
980 }
981
982 dw210x_op_rw(d->udev, 0xc3,
983 @@ -466,7 +473,8 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
984 if (2 + msg[j].len > sizeof(obuf)) {
985 warn("i2c wr: len=%d is too big!\n",
986 msg[j].len);
987 - return -EOPNOTSUPP;
988 + ret = -EOPNOTSUPP;
989 + goto unlock;
990 }
991
992 obuf[0] = msg[j].addr << 1;
993 @@ -481,15 +489,18 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
994 }
995
996 }
997 + ret = num;
998
999 +unlock:
1000 mutex_unlock(&d->i2c_mutex);
1001 - return num;
1002 + return ret;
1003 }
1004
1005 static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1006 int num)
1007 {
1008 struct dvb_usb_device *d = i2c_get_adapdata(adap);
1009 + int ret;
1010 int i;
1011
1012 if (!d)
1013 @@ -506,7 +517,8 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1014 if (2 + msg[1].len > sizeof(ibuf)) {
1015 warn("i2c rd: len=%d is too big!\n",
1016 msg[1].len);
1017 - return -EOPNOTSUPP;
1018 + ret = -EOPNOTSUPP;
1019 + goto unlock;
1020 }
1021 obuf[0] = msg[0].addr << 1;
1022 obuf[1] = msg[0].len;
1023 @@ -530,7 +542,8 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1024 if (2 + msg[0].len > sizeof(obuf)) {
1025 warn("i2c wr: len=%d is too big!\n",
1026 msg[0].len);
1027 - return -EOPNOTSUPP;
1028 + ret = -EOPNOTSUPP;
1029 + goto unlock;
1030 }
1031 obuf[0] = msg[0].addr << 1;
1032 obuf[1] = msg[0].len;
1033 @@ -556,9 +569,11 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1034 msg[i].flags == 0 ? ">>>" : "<<<");
1035 debug_dump(msg[i].buf, msg[i].len, deb_xfer);
1036 }
1037 + ret = num;
1038
1039 +unlock:
1040 mutex_unlock(&d->i2c_mutex);
1041 - return num;
1042 + return ret;
1043 }
1044
1045 static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1046 @@ -566,7 +581,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1047 {
1048 struct dvb_usb_device *d = i2c_get_adapdata(adap);
1049 struct usb_device *udev;
1050 - int len, i, j;
1051 + int len, i, j, ret;
1052
1053 if (!d)
1054 return -ENODEV;
1055 @@ -618,7 +633,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1056 if (msg[j].len > sizeof(ibuf)) {
1057 warn("i2c rd: len=%d is too big!\n",
1058 msg[j].len);
1059 - return -EOPNOTSUPP;
1060 + ret = -EOPNOTSUPP;
1061 + goto unlock;
1062 }
1063
1064 dw210x_op_rw(d->udev, 0x91, 0, 0,
1065 @@ -652,7 +668,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1066 if (2 + msg[j].len > sizeof(obuf)) {
1067 warn("i2c wr: len=%d is too big!\n",
1068 msg[j].len);
1069 - return -EOPNOTSUPP;
1070 + ret = -EOPNOTSUPP;
1071 + goto unlock;
1072 }
1073
1074 obuf[0] = msg[j + 1].len;
1075 @@ -671,7 +688,8 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1076 if (2 + msg[j].len > sizeof(obuf)) {
1077 warn("i2c wr: len=%d is too big!\n",
1078 msg[j].len);
1079 - return -EOPNOTSUPP;
1080 + ret = -EOPNOTSUPP;
1081 + goto unlock;
1082 }
1083 obuf[0] = msg[j].len + 1;
1084 obuf[1] = (msg[j].addr << 1);
1085 @@ -685,9 +703,11 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1086 }
1087 }
1088 }
1089 + ret = num;
1090
1091 +unlock:
1092 mutex_unlock(&d->i2c_mutex);
1093 - return num;
1094 + return ret;
1095 }
1096
1097 static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
1098 diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
1099 index 5d41aee69d16..6c0fd8e0f9bf 100644
1100 --- a/drivers/net/ethernet/ibm/ibmveth.c
1101 +++ b/drivers/net/ethernet/ibm/ibmveth.c
1102 @@ -523,10 +523,21 @@ retry:
1103 return rc;
1104 }
1105
1106 +static u64 ibmveth_encode_mac_addr(u8 *mac)
1107 +{
1108 + int i;
1109 + u64 encoded = 0;
1110 +
1111 + for (i = 0; i < ETH_ALEN; i++)
1112 + encoded = (encoded << 8) | mac[i];
1113 +
1114 + return encoded;
1115 +}
1116 +
1117 static int ibmveth_open(struct net_device *netdev)
1118 {
1119 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1120 - u64 mac_address = 0;
1121 + u64 mac_address;
1122 int rxq_entries = 1;
1123 unsigned long lpar_rc;
1124 int rc;
1125 @@ -580,8 +591,7 @@ static int ibmveth_open(struct net_device *netdev)
1126 adapter->rx_queue.num_slots = rxq_entries;
1127 adapter->rx_queue.toggle = 1;
1128
1129 - memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
1130 - mac_address = mac_address >> 16;
1131 + mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);
1132
1133 rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
1134 adapter->rx_queue.queue_len;
1135 @@ -1184,8 +1194,8 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1136 /* add the addresses to the filter table */
1137 netdev_for_each_mc_addr(ha, netdev) {
1138 /* add the multicast address to the filter table */
1139 - unsigned long mcast_addr = 0;
1140 - memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
1141 + u64 mcast_addr;
1142 + mcast_addr = ibmveth_encode_mac_addr(ha->addr);
1143 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1144 IbmVethMcastAddFilter,
1145 mcast_addr);
1146 @@ -1369,9 +1379,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1147
1148 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
1149
1150 - adapter->mac_addr = 0;
1151 - memcpy(&adapter->mac_addr, mac_addr_p, 6);
1152 -
1153 netdev->irq = dev->irq;
1154 netdev->netdev_ops = &ibmveth_netdev_ops;
1155 netdev->ethtool_ops = &netdev_ethtool_ops;
1156 @@ -1380,7 +1387,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1157 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1158 netdev->features |= netdev->hw_features;
1159
1160 - memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
1161 + memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
1162
1163 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1164 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
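ibmveth_encode_mac_addr() replaces memcpy-plus-shift constructions whose result depended on host byte order; it packs the six MAC octets big-endian into the low 48 bits of a u64 on any endianness. A standalone check of the encoding:

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_ALEN 6

    static uint64_t encode_mac_addr(const uint8_t *mac)
    {
            uint64_t encoded = 0;
            int i;

            for (i = 0; i < ETH_ALEN; i++)
                    encoded = (encoded << 8) | mac[i];
            return encoded;
    }

    int main(void)
    {
            uint8_t mac[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
            printf("0x%012llx\n", (unsigned long long)encode_mac_addr(mac));
            return 0;        /* prints 0x021122334455 */
    }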
1165 diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
1166 index 84066bafe057..2c636cbf1341 100644
1167 --- a/drivers/net/ethernet/ibm/ibmveth.h
1168 +++ b/drivers/net/ethernet/ibm/ibmveth.h
1169 @@ -139,7 +139,6 @@ struct ibmveth_adapter {
1170 struct napi_struct napi;
1171 struct net_device_stats stats;
1172 unsigned int mcastFilterSize;
1173 - unsigned long mac_addr;
1174 void * buffer_list_addr;
1175 void * filter_list_addr;
1176 dma_addr_t buffer_list_dma;
1177 diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
1178 index ada6e210279f..48f0b06f4448 100644
1179 --- a/drivers/net/ethernet/intel/e100.c
1180 +++ b/drivers/net/ethernet/intel/e100.c
1181 @@ -3036,7 +3036,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
1182 *enable_wake = false;
1183 }
1184
1185 - pci_disable_device(pdev);
1186 + pci_clear_master(pdev);
1187 }
1188
1189 static int __e100_power_off(struct pci_dev *pdev, bool wake)
1190 diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
1191 index 34d00f5771fe..b6b601cebb9e 100644
1192 --- a/drivers/net/ethernet/sfc/efx.h
1193 +++ b/drivers/net/ethernet/sfc/efx.h
1194 @@ -67,6 +67,9 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
1195 #define EFX_RXQ_MIN_ENT 128U
1196 #define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
1197
1198 +#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \
1199 + EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
1200 +
1201 /* Filters */
1202
1203 /**
1204 diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
1205 index 5b471cf5c323..3b2356bc6fba 100644
1206 --- a/drivers/net/ethernet/sfc/ethtool.c
1207 +++ b/drivers/net/ethernet/sfc/ethtool.c
1208 @@ -582,7 +582,7 @@ static void efx_ethtool_get_ringparam(struct net_device *net_dev,
1209 struct efx_nic *efx = netdev_priv(net_dev);
1210
1211 ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
1212 - ring->tx_max_pending = EFX_MAX_DMAQ_SIZE;
1213 + ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
1214 ring->rx_pending = efx->rxq_entries;
1215 ring->tx_pending = efx->txq_entries;
1216 }
1217 @@ -595,7 +595,7 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
1218
1219 if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
1220 ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
1221 - ring->tx_pending > EFX_MAX_DMAQ_SIZE)
1222 + ring->tx_pending > EFX_TXQ_MAX_ENT(efx))
1223 return -EINVAL;
1224
1225 if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
1226 diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
1227 index 6a32ef9d63ae..2b0aab130205 100644
1228 --- a/drivers/net/ethernet/ti/davinci_emac.c
1229 +++ b/drivers/net/ethernet/ti/davinci_emac.c
1230 @@ -1531,7 +1531,7 @@ static int emac_dev_open(struct net_device *ndev)
1231 struct device *emac_dev = &ndev->dev;
1232 u32 cnt;
1233 struct resource *res;
1234 - int ret;
1235 + int q, m, ret;
1236 int i = 0;
1237 int k = 0;
1238 struct emac_priv *priv = netdev_priv(ndev);
1239 @@ -1566,8 +1566,7 @@ static int emac_dev_open(struct net_device *ndev)
1240
1241 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
1242 for (i = res->start; i <= res->end; i++) {
1243 - if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
1244 - 0, ndev->name, ndev))
1245 + if (request_irq(i, emac_irq, 0, ndev->name, ndev))
1246 goto rollback;
1247 }
1248 k++;
1249 @@ -1640,7 +1639,15 @@ static int emac_dev_open(struct net_device *ndev)
1250
1251 rollback:
1252
1253 - dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed");
1254 + dev_err(emac_dev, "DaVinci EMAC: request_irq() failed");
1255 +
1256 + for (q = k; k >= 0; k--) {
1257 + for (m = i; m >= res->start; m--)
1258 + free_irq(m, ndev);
1259 + res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k-1);
1260 + m = res->end;
1261 + }
1262 +
1263 ret = -EBUSY;
1264 err:
1265 pm_runtime_put(&priv->pdev->dev);
1266 @@ -1658,6 +1665,9 @@ err:
1267 */
1268 static int emac_dev_stop(struct net_device *ndev)
1269 {
1270 + struct resource *res;
1271 + int i = 0;
1272 + int irq_num;
1273 struct emac_priv *priv = netdev_priv(ndev);
1274 struct device *emac_dev = &ndev->dev;
1275
1276 @@ -1673,6 +1683,13 @@ static int emac_dev_stop(struct net_device *ndev)
1277 if (priv->phydev)
1278 phy_disconnect(priv->phydev);
1279
1280 + /* Free IRQ */
1281 + while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
1282 + for (irq_num = res->start; irq_num <= res->end; irq_num++)
1283 + free_irq(irq_num, priv->ndev);
1284 + i++;
1285 + }
1286 +
1287 if (netif_msg_drv(priv))
1288 dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
1289
1290 diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
1291 index 0fad98b85f60..eee2ef6ce248 100644
1292 --- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
1293 +++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
1294 @@ -596,8 +596,11 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1295
1296 mutex_lock(&mvm->mutex);
1297
1298 - /* Rssi update while not associated ?! */
1299 - if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
1300 + /*
1301 + * Rssi update while not associated - can happen since the statistics
1302 + * are handled asynchronously
1303 + */
1304 + if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
1305 goto out_unlock;
1306
1307 /* No open connection - reports should be disabled */
1308 diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
1309 index f95de0d16216..1de59b0f8fa8 100644
1310 --- a/drivers/net/wireless/p54/txrx.c
1311 +++ b/drivers/net/wireless/p54/txrx.c
1312 @@ -587,7 +587,7 @@ static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
1313 chan = priv->curchan;
1314 if (chan) {
1315 struct survey_info *survey = &priv->survey[chan->hw_value];
1316 - survey->noise = clamp_t(s8, priv->noise, -128, 127);
1317 + survey->noise = clamp(priv->noise, -128, 127);
1318 survey->channel_time = priv->survey_raw.active;
1319 survey->channel_time_tx = priv->survey_raw.tx;
1320 survey->channel_time_busy = priv->survey_raw.tx +
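clamp_t(s8, ...) casts the noise value to s8 before comparing, so an out-of-range reading was truncated rather than clamped, while plain clamp() compares in the value's native integer type and only then stores into the s8 survey->noise field. A small demonstration with userspace stand-ins for the <linux/kernel.h> macros:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel's clamp_t()/clamp(). */
    #define clamp_t(type, v, lo, hi) \
            ((type)(v) < (type)(lo) ? (type)(lo) : \
             (type)(v) > (type)(hi) ? (type)(hi) : (type)(v))
    #define clamp(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

    int main(void)
    {
            int noise = 300;                               /* out-of-range sample */
            int8_t a = clamp_t(int8_t, noise, -128, 127);  /* truncates first: 44 */
            int8_t b = clamp(noise, -128, 127);            /* clamps as int: 127 */
            printf("clamp_t: %d, clamp: %d\n", a, b);
            return 0;
    }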
1321 diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
1322 index d97fbf4eb65b..ea83084cb7d9 100644
1323 --- a/drivers/regulator/core.c
1324 +++ b/drivers/regulator/core.c
1325 @@ -1806,8 +1806,6 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
1326
1327 trace_regulator_disable_complete(rdev_get_name(rdev));
1328
1329 - _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
1330 - NULL);
1331 return 0;
1332 }
1333
1334 @@ -1831,6 +1829,8 @@ static int _regulator_disable(struct regulator_dev *rdev)
1335 rdev_err(rdev, "failed to disable\n");
1336 return ret;
1337 }
1338 + _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
1339 + NULL);
1340 }
1341
1342 rdev->use_count = 0;
1343 @@ -1883,20 +1883,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
1344 {
1345 int ret = 0;
1346
1347 - /* force disable */
1348 - if (rdev->desc->ops->disable) {
1349 - /* ah well, who wants to live forever... */
1350 - ret = rdev->desc->ops->disable(rdev);
1351 - if (ret < 0) {
1352 - rdev_err(rdev, "failed to force disable\n");
1353 - return ret;
1354 - }
1355 - /* notify other consumers that power has been forced off */
1356 - _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
1357 - REGULATOR_EVENT_DISABLE, NULL);
1358 + ret = _regulator_do_disable(rdev);
1359 + if (ret < 0) {
1360 + rdev_err(rdev, "failed to force disable\n");
1361 + return ret;
1362 }
1363
1364 - return ret;
1365 + _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
1366 + REGULATOR_EVENT_DISABLE, NULL);
1367 +
1368 + return 0;
1369 }
1370
1371 /**
1372 @@ -3569,8 +3565,6 @@ int regulator_suspend_finish(void)
1373
1374 mutex_lock(&regulator_list_mutex);
1375 list_for_each_entry(rdev, &regulator_list, list) {
1376 - struct regulator_ops *ops = rdev->desc->ops;
1377 -
1378 mutex_lock(&rdev->mutex);
1379 if (rdev->use_count > 0 || rdev->constraints->always_on) {
1380 error = _regulator_do_enable(rdev);
1381 @@ -3579,12 +3573,10 @@ int regulator_suspend_finish(void)
1382 } else {
1383 if (!has_full_constraints)
1384 goto unlock;
1385 - if (!ops->disable)
1386 - goto unlock;
1387 if (!_regulator_is_enabled(rdev))
1388 goto unlock;
1389
1390 - error = ops->disable(rdev);
1391 + error = _regulator_do_disable(rdev);
1392 if (error)
1393 ret = error;
1394 }
1395 @@ -3774,7 +3766,7 @@ static int __init regulator_init_complete(void)
1396 ops = rdev->desc->ops;
1397 c = rdev->constraints;
1398
1399 - if (!ops->disable || (c && c->always_on))
1400 + if (c && c->always_on)
1401 continue;
1402
1403 mutex_lock(&rdev->mutex);
1404 @@ -3795,7 +3787,7 @@ static int __init regulator_init_complete(void)
1405 /* We log since this may kill the system if it
1406 * goes wrong. */
1407 rdev_info(rdev, "disabling\n");
1408 - ret = ops->disable(rdev);
1409 + ret = _regulator_do_disable(rdev);
1410 if (ret != 0) {
1411 rdev_err(rdev, "couldn't disable: %d\n", ret);
1412 }
1413 diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
1414 index 451bf99582ff..846d5c6609d8 100644
1415 --- a/drivers/s390/block/dasd.c
1416 +++ b/drivers/s390/block/dasd.c
1417 @@ -2978,12 +2978,12 @@ static int dasd_alloc_queue(struct dasd_block *block)
1418
1419 elevator_exit(block->request_queue->elevator);
1420 block->request_queue->elevator = NULL;
1421 + mutex_lock(&block->request_queue->sysfs_lock);
1422 rc = elevator_init(block->request_queue, "deadline");
1423 - if (rc) {
1424 + if (rc)
1425 blk_cleanup_queue(block->request_queue);
1426 - return rc;
1427 - }
1428 - return 0;
1429 + mutex_unlock(&block->request_queue->sysfs_lock);
1430 + return rc;
1431 }
1432
1433 /*
1434 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
1435 index 73f5208714a4..1af67a214d33 100644
1436 --- a/drivers/usb/host/xhci-pci.c
1437 +++ b/drivers/usb/host/xhci-pci.c
1438 @@ -142,6 +142,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
1439 "QUIRK: Resetting on resume");
1440 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
1441 }
1442 + if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
1443 + pdev->device == 0x0015 &&
1444 + pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
1445 + pdev->subsystem_device == 0xc0cd)
1446 + xhci->quirks |= XHCI_RESET_ON_RESUME;
1447 if (pdev->vendor == PCI_VENDOR_ID_VIA)
1448 xhci->quirks |= XHCI_RESET_ON_RESUME;
1449 }
1450 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
1451 index a53651743d4d..5a5fb98edb8a 100644
1452 --- a/fs/nfs/nfs4proc.c
1453 +++ b/fs/nfs/nfs4proc.c
1454 @@ -5779,21 +5779,20 @@ struct nfs_release_lockowner_data {
1455 struct nfs4_lock_state *lsp;
1456 struct nfs_server *server;
1457 struct nfs_release_lockowner_args args;
1458 - struct nfs4_sequence_args seq_args;
1459 - struct nfs4_sequence_res seq_res;
1460 + struct nfs_release_lockowner_res res;
1461 };
1462
1463 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
1464 {
1465 struct nfs_release_lockowner_data *data = calldata;
1466 nfs40_setup_sequence(data->server,
1467 - &data->seq_args, &data->seq_res, task);
1468 + &data->args.seq_args, &data->res.seq_res, task);
1469 }
1470
1471 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
1472 {
1473 struct nfs_release_lockowner_data *data = calldata;
1474 - nfs40_sequence_done(task, &data->seq_res);
1475 + nfs40_sequence_done(task, &data->res.seq_res);
1476 }
1477
1478 static void nfs4_release_lockowner_release(void *calldata)
1479 @@ -5822,7 +5821,6 @@ static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_st
1480 data = kmalloc(sizeof(*data), GFP_NOFS);
1481 if (!data)
1482 return -ENOMEM;
1483 - nfs4_init_sequence(&data->seq_args, &data->seq_res, 0);
1484 data->lsp = lsp;
1485 data->server = server;
1486 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
1487 @@ -5830,6 +5828,8 @@ static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_st
1488 data->args.lock_owner.s_dev = server->s_dev;
1489
1490 msg.rpc_argp = &data->args;
1491 + msg.rpc_resp = &data->res;
1492 + nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
1493 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
1494 return 0;
1495 }
1496 diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
1497 index 106a83570630..9fa215473b1d 100644
1498 --- a/fs/proc/proc_devtree.c
1499 +++ b/fs/proc/proc_devtree.c
1500 @@ -235,6 +235,7 @@ void __init proc_device_tree_init(void)
1501 return;
1502 root = of_find_node_by_path("/");
1503 if (root == NULL) {
1504 + remove_proc_entry("device-tree", NULL);
1505 pr_debug("/proc/device-tree: can't find root\n");
1506 return;
1507 }
1508 diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
1509 index 7c1420bb1dce..6ade97de7a85 100644
1510 --- a/include/linux/ceph/messenger.h
1511 +++ b/include/linux/ceph/messenger.h
1512 @@ -157,7 +157,7 @@ struct ceph_msg {
1513 bool front_is_vmalloc;
1514 bool more_to_follow;
1515 bool needs_out_seq;
1516 - int front_max;
1517 + int front_alloc_len;
1518 unsigned long ack_stamp; /* tx: when we were acked */
1519
1520 struct ceph_msgpool *pool;
1521 diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
1522 index 8f47625a0661..4fb6a8938957 100644
1523 --- a/include/linux/ceph/osd_client.h
1524 +++ b/include/linux/ceph/osd_client.h
1525 @@ -138,6 +138,7 @@ struct ceph_osd_request {
1526 __le64 *r_request_pool;
1527 void *r_request_pgid;
1528 __le32 *r_request_attempts;
1529 + bool r_paused;
1530 struct ceph_eversion *r_request_reassert_version;
1531
1532 int r_result;
1533 diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
1534 index 3561d305b1e0..7b3a119c51c2 100644
1535 --- a/include/linux/cgroup.h
1536 +++ b/include/linux/cgroup.h
1537 @@ -169,6 +169,8 @@ struct cgroup {
1538 *
1539 * The ID of the root cgroup is always 0, and a new cgroup
1540 * will be assigned with a smallest available ID.
1541 + *
1542 + * Allocating/Removing ID must be protected by cgroup_mutex.
1543 */
1544 int id;
1545
1546 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
1547 index 5eaa746735ff..20aebdbab9a4 100644
1548 --- a/include/linux/ftrace_event.h
1549 +++ b/include/linux/ftrace_event.h
1550 @@ -325,10 +325,6 @@ enum {
1551 FILTER_TRACE_FN,
1552 };
1553
1554 -#define EVENT_STORAGE_SIZE 128
1555 -extern struct mutex event_storage_mutex;
1556 -extern char event_storage[EVENT_STORAGE_SIZE];
1557 -
1558 extern int trace_event_raw_init(struct ftrace_event_call *call);
1559 extern int trace_define_field(struct ftrace_event_call *call, const char *type,
1560 const char *name, int offset, int size,
1561 diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
1562 index ea9e076a91bf..2b307018979d 100644
1563 --- a/include/linux/nfs_xdr.h
1564 +++ b/include/linux/nfs_xdr.h
1565 @@ -467,9 +467,14 @@ struct nfs_lockt_res {
1566 };
1567
1568 struct nfs_release_lockowner_args {
1569 + struct nfs4_sequence_args seq_args;
1570 struct nfs_lowner lock_owner;
1571 };
1572
1573 +struct nfs_release_lockowner_res {
1574 + struct nfs4_sequence_res seq_res;
1575 +};
1576 +
1577 struct nfs4_delegreturnargs {
1578 struct nfs4_sequence_args seq_args;
1579 const struct nfs_fh *fhandle;
1580 diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
1581 index 712ea36067ab..645d749d3c9c 100644
1582 --- a/include/trace/ftrace.h
1583 +++ b/include/trace/ftrace.h
1584 @@ -303,15 +303,12 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
1585 #undef __array
1586 #define __array(type, item, len) \
1587 do { \
1588 - mutex_lock(&event_storage_mutex); \
1589 + char *type_str = #type"["__stringify(len)"]"; \
1590 BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
1591 - snprintf(event_storage, sizeof(event_storage), \
1592 - "%s[%d]", #type, len); \
1593 - ret = trace_define_field(event_call, event_storage, #item, \
1594 + ret = trace_define_field(event_call, type_str, #item, \
1595 offsetof(typeof(field), item), \
1596 sizeof(field.item), \
1597 is_signed_type(type), FILTER_OTHER); \
1598 - mutex_unlock(&event_storage_mutex); \
1599 if (ret) \
1600 return ret; \
1601 } while (0);
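Instead of formatting "type[len]" into the shared event_storage buffer under event_storage_mutex, the macro now builds that string at compile time by pasting string literals, which is why the buffer and its mutex are deleted later in this patch (ftrace_event.h and trace_events.c). A userspace sketch of the preprocessor trick, with __stringify defined as in linux/stringify.h:

    #include <stdio.h>

    #define __stringify_1(x...) #x
    #define __stringify(x...)   __stringify_1(x)

    #define TYPE_STR(type, len)  #type "[" __stringify(len) "]"

    int main(void)
    {
            /* Adjacent string literals are concatenated at compile time. */
            const char *s = TYPE_STR(char, 16);
            printf("%s\n", s);      /* prints: char[16] */
            return 0;
    }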
1602 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
1603 index c4f8bc79d075..1c204fdb85d8 100644
1604 --- a/kernel/cgroup.c
1605 +++ b/kernel/cgroup.c
1606 @@ -4410,16 +4410,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
1607 rcu_assign_pointer(cgrp->name, name);
1608
1609 /*
1610 - * Temporarily set the pointer to NULL, so idr_find() won't return
1611 - * a half-baked cgroup.
1612 - */
1613 - cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
1614 - if (cgrp->id < 0) {
1615 - err = -ENOMEM;
1616 - goto err_free_name;
1617 - }
1618 -
1619 - /*
1620 * Only live parents can have children. Note that the liveliness
1621 * check isn't strictly necessary because cgroup_mkdir() and
1622 * cgroup_rmdir() are fully synchronized by i_mutex; however, do it
1623 @@ -4428,7 +4418,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
1624 */
1625 if (!cgroup_lock_live_group(parent)) {
1626 err = -ENODEV;
1627 - goto err_free_id;
1628 + goto err_free_name;
1629 }
1630
1631 /* Grab a reference on the superblock so the hierarchy doesn't
1632 @@ -4438,6 +4428,16 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
1633 * fs */
1634 atomic_inc(&sb->s_active);
1635
1636 + /*
1637 + * Temporarily set the pointer to NULL, so idr_find() won't return
1638 + * a half-baked cgroup.
1639 + */
1640 + cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
1641 + if (cgrp->id < 0) {
1642 + err = -ENOMEM;
1643 + goto err_unlock;
1644 + }
1645 +
1646 init_cgroup_housekeeping(cgrp);
1647
1648 dentry->d_fsdata = cgrp;
1649 @@ -4544,11 +4544,11 @@ err_free_all:
1650 ss->css_free(css);
1651 }
1652 }
1653 + idr_remove(&root->cgroup_idr, cgrp->id);
1654 +err_unlock:
1655 mutex_unlock(&cgroup_mutex);
1656 /* Release the reference count that we took on the superblock */
1657 deactivate_super(sb);
1658 -err_free_id:
1659 - idr_remove(&root->cgroup_idr, cgrp->id);
1660 err_free_name:
1661 kfree(rcu_dereference_raw(cgrp->name));
1662 err_free_cgrp:
1663 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
1664 index b4e8500afdb3..c59896c65ac3 100644
1665 --- a/kernel/printk/printk.c
1666 +++ b/kernel/printk/printk.c
1667 @@ -1080,7 +1080,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
1668 next_seq = log_next_seq;
1669
1670 len = 0;
1671 - prev = 0;
1672 while (len >= 0 && seq < next_seq) {
1673 struct printk_log *msg = log_from_idx(idx);
1674 int textlen;
1675 @@ -2790,7 +2789,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
1676 next_idx = idx;
1677
1678 l = 0;
1679 - prev = 0;
1680 while (seq < dumper->next_seq) {
1681 struct printk_log *msg = log_from_idx(idx);
1682
1683 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
1684 index b03b1f897b5e..bc1bd20f7942 100644
1685 --- a/kernel/trace/trace_events.c
1686 +++ b/kernel/trace/trace_events.c
1687 @@ -27,12 +27,6 @@
1688
1689 DEFINE_MUTEX(event_mutex);
1690
1691 -DEFINE_MUTEX(event_storage_mutex);
1692 -EXPORT_SYMBOL_GPL(event_storage_mutex);
1693 -
1694 -char event_storage[EVENT_STORAGE_SIZE];
1695 -EXPORT_SYMBOL_GPL(event_storage);
1696 -
1697 LIST_HEAD(ftrace_events);
1698 static LIST_HEAD(ftrace_common_fields);
1699
1700 diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
1701 index d21a74670088..d7d0b50b1b70 100644
1702 --- a/kernel/trace/trace_export.c
1703 +++ b/kernel/trace/trace_export.c
1704 @@ -95,15 +95,12 @@ static void __always_unused ____ftrace_check_##name(void) \
1705 #undef __array
1706 #define __array(type, item, len) \
1707 do { \
1708 + char *type_str = #type"["__stringify(len)"]"; \
1709 BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
1710 - mutex_lock(&event_storage_mutex); \
1711 - snprintf(event_storage, sizeof(event_storage), \
1712 - "%s[%d]", #type, len); \
1713 - ret = trace_define_field(event_call, event_storage, #item, \
1714 + ret = trace_define_field(event_call, type_str, #item, \
1715 offsetof(typeof(field), item), \
1716 sizeof(field.item), \
1717 is_signed_type(type), filter_type); \
1718 - mutex_unlock(&event_storage_mutex); \
1719 if (ret) \
1720 return ret; \
1721 } while (0);
1722 diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
1723 index 4a5df7b1cc9f..464303f61730 100644
1724 --- a/net/ceph/messenger.c
1725 +++ b/net/ceph/messenger.c
1726 @@ -3126,7 +3126,6 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
1727 INIT_LIST_HEAD(&m->data);
1728
1729 /* front */
1730 - m->front_max = front_len;
1731 if (front_len) {
1732 if (front_len > PAGE_CACHE_SIZE) {
1733 m->front.iov_base = __vmalloc(front_len, flags,
1734 @@ -3143,7 +3142,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
1735 } else {
1736 m->front.iov_base = NULL;
1737 }
1738 - m->front.iov_len = front_len;
1739 + m->front_alloc_len = m->front.iov_len = front_len;
1740
1741 dout("ceph_msg_new %p front %d\n", m, front_len);
1742 return m;
1743 @@ -3301,8 +3300,8 @@ EXPORT_SYMBOL(ceph_msg_last_put);
1744
1745 void ceph_msg_dump(struct ceph_msg *msg)
1746 {
1747 - pr_debug("msg_dump %p (front_max %d length %zd)\n", msg,
1748 - msg->front_max, msg->data_length);
1749 + pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
1750 + msg->front_alloc_len, msg->data_length);
1751 print_hex_dump(KERN_DEBUG, "header: ",
1752 DUMP_PREFIX_OFFSET, 16, 1,
1753 &msg->hdr, sizeof(msg->hdr), true);
1754 diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
1755 index 1fe25cd29d0e..2ac9ef35110b 100644
1756 --- a/net/ceph/mon_client.c
1757 +++ b/net/ceph/mon_client.c
1758 @@ -152,7 +152,7 @@ static int __open_session(struct ceph_mon_client *monc)
1759 /* initiatiate authentication handshake */
1760 ret = ceph_auth_build_hello(monc->auth,
1761 monc->m_auth->front.iov_base,
1762 - monc->m_auth->front_max);
1763 + monc->m_auth->front_alloc_len);
1764 __send_prepared_auth_request(monc, ret);
1765 } else {
1766 dout("open_session mon%d already open\n", monc->cur_mon);
1767 @@ -196,7 +196,7 @@ static void __send_subscribe(struct ceph_mon_client *monc)
1768 int num;
1769
1770 p = msg->front.iov_base;
1771 - end = p + msg->front_max;
1772 + end = p + msg->front_alloc_len;
1773
1774 num = 1 + !!monc->want_next_osdmap + !!monc->want_mdsmap;
1775 ceph_encode_32(&p, num);
1776 @@ -897,7 +897,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
1777 ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
1778 msg->front.iov_len,
1779 monc->m_auth->front.iov_base,
1780 - monc->m_auth->front_max);
1781 + monc->m_auth->front_alloc_len);
1782 if (ret < 0) {
1783 monc->client->auth_err = ret;
1784 wake_up_all(&monc->client->auth_wq);
1785 @@ -939,7 +939,7 @@ static int __validate_auth(struct ceph_mon_client *monc)
1786 return 0;
1787
1788 ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
1789 - monc->m_auth->front_max);
1790 + monc->m_auth->front_alloc_len);
1791 if (ret <= 0)
1792 return ret; /* either an error, or no need to authenticate */
1793 __send_prepared_auth_request(monc, ret);
1794 diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
1795 index 2b4b32aaa893..e6b2db68b4fa 100644
1796 --- a/net/ceph/osd_client.c
1797 +++ b/net/ceph/osd_client.c
1798 @@ -1232,6 +1232,22 @@ void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
1799 EXPORT_SYMBOL(ceph_osdc_set_request_linger);
1800
1801 /*
1802 + * Returns whether a request should be blocked from being sent
1803 + * based on the current osdmap and osd_client settings.
1804 + *
1805 + * Caller should hold map_sem for read.
1806 + */
1807 +static bool __req_should_be_paused(struct ceph_osd_client *osdc,
1808 + struct ceph_osd_request *req)
1809 +{
1810 + bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
1811 + bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
1812 + ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
1813 + return (req->r_flags & CEPH_OSD_FLAG_READ && pauserd) ||
1814 + (req->r_flags & CEPH_OSD_FLAG_WRITE && pausewr);
1815 +}
1816 +
1817 +/*
1818 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
1819 * (as needed), and set the request r_osd appropriately. If there is
1820 * no up osd, set r_osd to NULL. Move the request to the appropriate list
1821 @@ -1248,6 +1264,7 @@ static int __map_request(struct ceph_osd_client *osdc,
1822 int acting[CEPH_PG_MAX_SIZE];
1823 int o = -1, num = 0;
1824 int err;
1825 + bool was_paused;
1826
1827 dout("map_request %p tid %lld\n", req, req->r_tid);
1828 err = ceph_calc_ceph_pg(&pgid, req->r_oid, osdc->osdmap,
1829 @@ -1264,12 +1281,18 @@ static int __map_request(struct ceph_osd_client *osdc,
1830 num = err;
1831 }
1832
1833 + was_paused = req->r_paused;
1834 + req->r_paused = __req_should_be_paused(osdc, req);
1835 + if (was_paused && !req->r_paused)
1836 + force_resend = 1;
1837 +
1838 if ((!force_resend &&
1839 req->r_osd && req->r_osd->o_osd == o &&
1840 req->r_sent >= req->r_osd->o_incarnation &&
1841 req->r_num_pg_osds == num &&
1842 memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
1843 - (req->r_osd == NULL && o == -1))
1844 + (req->r_osd == NULL && o == -1) ||
1845 + req->r_paused)
1846 return 0; /* no change */
1847
1848 dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
1849 @@ -1613,14 +1636,17 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
1850 *
1851 * Caller should hold map_sem for read.
1852 */
1853 -static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
1854 +static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
1855 + bool force_resend_writes)
1856 {
1857 struct ceph_osd_request *req, *nreq;
1858 struct rb_node *p;
1859 int needmap = 0;
1860 int err;
1861 + bool force_resend_req;
1862
1863 - dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
1864 + dout("kick_requests %s %s\n", force_resend ? " (force resend)" : "",
1865 + force_resend_writes ? " (force resend writes)" : "");
1866 mutex_lock(&osdc->request_mutex);
1867 for (p = rb_first(&osdc->requests); p; ) {
1868 req = rb_entry(p, struct ceph_osd_request, r_node);
1869 @@ -1645,7 +1671,10 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
1870 continue;
1871 }
1872
1873 - err = __map_request(osdc, req, force_resend);
1874 + force_resend_req = force_resend ||
1875 + (force_resend_writes &&
1876 + req->r_flags & CEPH_OSD_FLAG_WRITE);
1877 + err = __map_request(osdc, req, force_resend_req);
1878 if (err < 0)
1879 continue; /* error */
1880 if (req->r_osd == NULL) {
1881 @@ -1665,7 +1694,8 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
1882 r_linger_item) {
1883 dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
1884
1885 - err = __map_request(osdc, req, force_resend);
1886 + err = __map_request(osdc, req,
1887 + force_resend || force_resend_writes);
1888 dout("__map_request returned %d\n", err);
1889 if (err == 0)
1890 continue; /* no change and no osd was specified */
1891 @@ -1707,6 +1737,7 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1892 struct ceph_osdmap *newmap = NULL, *oldmap;
1893 int err;
1894 struct ceph_fsid fsid;
1895 + bool was_full;
1896
1897 dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
1898 p = msg->front.iov_base;
1899 @@ -1720,6 +1751,8 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1900
1901 down_write(&osdc->map_sem);
1902
1903 + was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
1904 +
1905 /* incremental maps */
1906 ceph_decode_32_safe(&p, end, nr_maps, bad);
1907 dout(" %d inc maps\n", nr_maps);
1908 @@ -1744,7 +1777,10 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1909 ceph_osdmap_destroy(osdc->osdmap);
1910 osdc->osdmap = newmap;
1911 }
1912 - kick_requests(osdc, 0);
1913 + was_full = was_full ||
1914 + ceph_osdmap_flag(osdc->osdmap,
1915 + CEPH_OSDMAP_FULL);
1916 + kick_requests(osdc, 0, was_full);
1917 } else {
1918 dout("ignoring incremental map %u len %d\n",
1919 epoch, maplen);
1920 @@ -1787,7 +1823,10 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1921 skipped_map = 1;
1922 ceph_osdmap_destroy(oldmap);
1923 }
1924 - kick_requests(osdc, skipped_map);
1925 + was_full = was_full ||
1926 + ceph_osdmap_flag(osdc->osdmap,
1927 + CEPH_OSDMAP_FULL);
1928 + kick_requests(osdc, skipped_map, was_full);
1929 }
1930 p += maplen;
1931 nr_maps--;
1932 @@ -1804,7 +1843,9 @@ done:
1933 * we find out when we are no longer full and stop returning
1934 * ENOSPC.
1935 */
1936 - if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
1937 + if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
1938 + ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) ||
1939 + ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR))
1940 ceph_monc_request_next_osdmap(&osdc->client->monc);
1941
1942 mutex_lock(&osdc->request_mutex);
1943 @@ -2454,7 +2495,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
1944 struct ceph_osd_client *osdc = osd->o_osdc;
1945 struct ceph_msg *m;
1946 struct ceph_osd_request *req;
1947 - int front = le32_to_cpu(hdr->front_len);
1948 + int front_len = le32_to_cpu(hdr->front_len);
1949 int data_len = le32_to_cpu(hdr->data_len);
1950 u64 tid;
1951
1952 @@ -2474,12 +2515,13 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
1953 req->r_reply, req->r_reply->con);
1954 ceph_msg_revoke_incoming(req->r_reply);
1955
1956 - if (front > req->r_reply->front.iov_len) {
1957 + if (front_len > req->r_reply->front_alloc_len) {
1958 pr_warning("get_reply front %d > preallocated %d (%u#%llu)\n",
1959 - front, (int)req->r_reply->front.iov_len,
1960 + front_len, req->r_reply->front_alloc_len,
1961 (unsigned int)con->peer_name.type,
1962 le64_to_cpu(con->peer_name.num));
1963 - m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
1964 + m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
1965 + false);
1966 if (!m)
1967 goto out;
1968 ceph_msg_put(req->r_reply);
1969 diff --git a/scripts/package/builddeb b/scripts/package/builddeb
1970 index 90e521fde35f..c1bb9be00fa0 100644
1971 --- a/scripts/package/builddeb
1972 +++ b/scripts/package/builddeb
1973 @@ -41,9 +41,9 @@ create_package() {
1974 parisc*)
1975 debarch=hppa ;;
1976 mips*)
1977 - debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el) ;;
1978 + debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el || true) ;;
1979 arm*)
1980 - debarch=arm$(grep -q CONFIG_AEABI=y $KCONFIG_CONFIG && echo el) ;;
1981 + debarch=arm$(grep -q CONFIG_AEABI=y $KCONFIG_CONFIG && echo el || true) ;;
1982 *)
1983 echo "" >&2
1984 echo "** ** ** WARNING ** ** **" >&2
1985 @@ -62,7 +62,7 @@ create_package() {
1986 fi
1987
1988 # Create the package
1989 - dpkg-gencontrol -isp $forcearch -p$pname -P"$pdir"
1990 + dpkg-gencontrol -isp $forcearch -Vkernel:debarch="${debarch:-$(dpkg --print-architecture)}" -p$pname -P"$pdir"
1991 dpkg --build "$pdir" ..
1992 }
1993
1994 @@ -288,15 +288,14 @@ mkdir -p "$destdir"
1995 (cd $objtree; cp $KCONFIG_CONFIG $destdir/.config) # copy .config manually to be where it's expected to be
1996 ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
1997 rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
1998 -arch=$(dpkg --print-architecture)
1999
2000 cat <<EOF >> debian/control
2001
2002 Package: $kernel_headers_packagename
2003 Provides: linux-headers, linux-headers-2.6
2004 -Architecture: $arch
2005 -Description: Linux kernel headers for $KERNELRELEASE on $arch
2006 - This package provides kernel header files for $KERNELRELEASE on $arch
2007 +Architecture: any
2008 +Description: Linux kernel headers for $KERNELRELEASE on \${kernel:debarch}
2009 + This package provides kernel header files for $KERNELRELEASE on \${kernel:debarch}
2010 .
2011 This is useful for people who need to build external modules
2012 EOF
2013 diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
2014 index d9af6387f37c..dac296a7faad 100644
2015 --- a/sound/core/compress_offload.c
2016 +++ b/sound/core/compress_offload.c
2017 @@ -133,7 +133,7 @@ static int snd_compr_open(struct inode *inode, struct file *f)
2018 kfree(data);
2019 }
2020 snd_card_unref(compr->card);
2021 - return 0;
2022 + return ret;
2023 }
2024
2025 static int snd_compr_free(struct inode *inode, struct file *f)
2026 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
2027 index ccf5eb6b3d37..b5c4c2e4360b 100644
2028 --- a/sound/pci/hda/hda_intel.c
2029 +++ b/sound/pci/hda/hda_intel.c
2030 @@ -4007,6 +4007,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
2031 .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
2032 { PCI_DEVICE(0x8086, 0x0d0c),
2033 .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
2034 + /* Broadwell */
2035 + { PCI_DEVICE(0x8086, 0x160c),
2036 + .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
2037 /* 5 Series/3400 */
2038 { PCI_DEVICE(0x8086, 0x3b56),
2039 .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
2040 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
2041 index adb374babd18..23e0bc6d6568 100644
2042 --- a/sound/pci/hda/patch_hdmi.c
2043 +++ b/sound/pci/hda/patch_hdmi.c
2044 @@ -45,6 +45,9 @@ module_param(static_hdmi_pcm, bool, 0644);
2045 MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
2046
2047 #define is_haswell(codec) ((codec)->vendor_id == 0x80862807)
2048 +#define is_broadwell(codec) ((codec)->vendor_id == 0x80862808)
2049 +#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec))
2050 +
2051 #define is_valleyview(codec) ((codec)->vendor_id == 0x80862882)
2052
2053 struct hdmi_spec_per_cvt {
2054 @@ -1014,7 +1017,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
2055 if (!channels)
2056 return;
2057
2058 - if (is_haswell(codec))
2059 + if (is_haswell_plus(codec))
2060 snd_hda_codec_write(codec, pin_nid, 0,
2061 AC_VERB_SET_AMP_GAIN_MUTE,
2062 AMP_OUT_UNMUTE);
2063 @@ -1196,7 +1199,7 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
2064 int pinctl;
2065 int new_pinctl = 0;
2066
2067 - if (is_haswell(codec))
2068 + if (is_haswell_plus(codec))
2069 haswell_verify_D0(codec, cvt_nid, pin_nid);
2070
2071 if (snd_hda_query_pin_caps(codec, pin_nid) & AC_PINCAP_HBR) {
2072 @@ -1357,7 +1360,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
2073 mux_idx);
2074
2075 /* configure unused pins to choose other converters */
2076 - if (is_haswell(codec) || is_valleyview(codec))
2077 + if (is_haswell_plus(codec) || is_valleyview(codec))
2078 intel_not_share_assigned_cvt(codec, per_pin->pin_nid, mux_idx);
2079
2080 snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
2081 @@ -1543,7 +1546,7 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
2082 if (get_defcfg_connect(config) == AC_JACK_PORT_NONE)
2083 return 0;
2084
2085 - if (is_haswell(codec))
2086 + if (is_haswell_plus(codec))
2087 intel_haswell_fixup_connect_list(codec, pin_nid);
2088
2089 pin_idx = spec->num_pins;
2090 @@ -2169,7 +2172,7 @@ static int patch_generic_hdmi(struct hda_codec *codec)
2091 codec->spec = spec;
2092 hdmi_array_init(spec, 4);
2093
2094 - if (is_haswell(codec)) {
2095 + if (is_haswell_plus(codec)) {
2096 intel_haswell_enable_all_pins(codec, true);
2097 intel_haswell_fixup_enable_dp12(codec);
2098 }
2099 @@ -2180,7 +2183,7 @@ static int patch_generic_hdmi(struct hda_codec *codec)
2100 return -EINVAL;
2101 }
2102 codec->patch_ops = generic_hdmi_patch_ops;
2103 - if (is_haswell(codec)) {
2104 + if (is_haswell_plus(codec)) {
2105 codec->patch_ops.set_power_state = haswell_set_power_state;
2106 codec->dp_mst = true;
2107 }
2108 @@ -2846,6 +2849,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
2109 { .id = 0x80862805, .name = "CougarPoint HDMI", .patch = patch_generic_hdmi },
2110 { .id = 0x80862806, .name = "PantherPoint HDMI", .patch = patch_generic_hdmi },
2111 { .id = 0x80862807, .name = "Haswell HDMI", .patch = patch_generic_hdmi },
2112 +{ .id = 0x80862808, .name = "Broadwell HDMI", .patch = patch_generic_hdmi },
2113 { .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
2114 { .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi },
2115 { .id = 0x808629fb, .name = "Crestline HDMI", .patch = patch_generic_hdmi },
2116 @@ -2901,6 +2905,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862804");
2117 MODULE_ALIAS("snd-hda-codec-id:80862805");
2118 MODULE_ALIAS("snd-hda-codec-id:80862806");
2119 MODULE_ALIAS("snd-hda-codec-id:80862807");
2120 +MODULE_ALIAS("snd-hda-codec-id:80862808");
2121 MODULE_ALIAS("snd-hda-codec-id:80862880");
2122 MODULE_ALIAS("snd-hda-codec-id:80862882");
2123 MODULE_ALIAS("snd-hda-codec-id:808629fb");
2124 diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
2125 index 5f728808eed4..8bddf3f20a5e 100644
2126 --- a/sound/soc/codecs/max98090.c
2127 +++ b/sound/soc/codecs/max98090.c
2128 @@ -336,6 +336,7 @@ static bool max98090_readable_register(struct device *dev, unsigned int reg)
2129 case M98090_REG_RECORD_TDM_SLOT:
2130 case M98090_REG_SAMPLE_RATE:
2131 case M98090_REG_DMIC34_BIQUAD_BASE ... M98090_REG_DMIC34_BIQUAD_BASE + 0x0E:
2132 + case M98090_REG_REVISION_ID:
2133 return true;
2134 default:
2135 return false;