Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.1.2-r2/0100-3.1.1-all-fixes.patch



Revision 1591
Thu Dec 1 15:53:51 2011 UTC by niro
File size: 403626 bytes
-3.1.2-magellan-r2
1 diff --git a/Documentation/hwspinlock.txt b/Documentation/hwspinlock.txt
2 index 7dcd1a4..6996681 100644
3 --- a/Documentation/hwspinlock.txt
4 +++ b/Documentation/hwspinlock.txt
5 @@ -39,23 +39,20 @@ independent, drivers.
6 in case an unused hwspinlock isn't available. Users of this
7 API will usually want to communicate the lock's id to the remote core
8 before it can be used to achieve synchronization.
9 - Can be called from an atomic context (this function will not sleep) but
10 - not from within interrupt context.
11 + Should be called from a process context (might sleep).
12
13 struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
14 - assign a specific hwspinlock id and return its address, or NULL
15 if that hwspinlock is already in use. Usually board code will
16 be calling this function in order to reserve specific hwspinlock
17 ids for predefined purposes.
18 - Can be called from an atomic context (this function will not sleep) but
19 - not from within interrupt context.
20 + Should be called from a process context (might sleep).
21
22 int hwspin_lock_free(struct hwspinlock *hwlock);
23 - free a previously-assigned hwspinlock; returns 0 on success, or an
24 appropriate error code on failure (e.g. -EINVAL if the hwspinlock
25 is already free).
26 - Can be called from an atomic context (this function will not sleep) but
27 - not from within interrupt context.
28 + Should be called from a process context (might sleep).
29
30 int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout);
31 - lock a previously-assigned hwspinlock with a timeout limit (specified in
32 @@ -232,15 +229,14 @@ int hwspinlock_example2(void)
33
34 int hwspin_lock_register(struct hwspinlock *hwlock);
35 - to be called from the underlying platform-specific implementation, in
36 - order to register a new hwspinlock instance. Can be called from an atomic
37 - context (this function will not sleep) but not from within interrupt
38 - context. Returns 0 on success, or appropriate error code on failure.
39 + order to register a new hwspinlock instance. Should be called from
40 + a process context (this function might sleep).
41 + Returns 0 on success, or appropriate error code on failure.
42
43 struct hwspinlock *hwspin_lock_unregister(unsigned int id);
44 - to be called from the underlying vendor-specific implementation, in order
45 to unregister an existing (and unused) hwspinlock instance.
46 - Can be called from an atomic context (will not sleep) but not from
47 - within interrupt context.
48 + Should be called from a process context (this function might sleep).
49 Returns the address of hwspinlock on success, or NULL on error (e.g.
50 if the hwspinlock is sill in use).
51
52 diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
53 index 6066e3a..d3710dc 100644
54 --- a/Documentation/power/runtime_pm.txt
55 +++ b/Documentation/power/runtime_pm.txt
56 @@ -782,6 +782,16 @@ will behave normally, not taking the autosuspend delay into account.
57 Similarly, if the power.use_autosuspend field isn't set then the autosuspend
58 helper functions will behave just like the non-autosuspend counterparts.
59
60 +Under some circumstances a driver or subsystem may want to prevent a device
61 +from autosuspending immediately, even though the usage counter is zero and the
62 +autosuspend delay time has expired. If the ->runtime_suspend() callback
63 +returns -EAGAIN or -EBUSY, and if the next autosuspend delay expiration time is
64 +in the future (as it normally would be if the callback invoked
65 +pm_runtime_mark_last_busy()), the PM core will automatically reschedule the
66 +autosuspend. The ->runtime_suspend() callback can't do this rescheduling
67 +itself because no suspend requests of any kind are accepted while the device is
68 +suspending (i.e., while the callback is running).
69 +
70 The implementation is well suited for asynchronous use in interrupt contexts.
71 However such use inevitably involves races, because the PM core can't
72 synchronize ->runtime_suspend() callbacks with the arrival of I/O requests.
73 diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
74 index e213f45..21fd05c 100644
75 --- a/Documentation/stable_kernel_rules.txt
76 +++ b/Documentation/stable_kernel_rules.txt
77 @@ -24,10 +24,10 @@ Rules on what kind of patches are accepted, and which ones are not, into the
78 Procedure for submitting patches to the -stable tree:
79
80 - Send the patch, after verifying that it follows the above rules, to
81 - stable@kernel.org. You must note the upstream commit ID in the changelog
82 - of your submission.
83 + stable@vger.kernel.org. You must note the upstream commit ID in the
84 + changelog of your submission.
85 - To have the patch automatically included in the stable tree, add the tag
86 - Cc: stable@kernel.org
87 + Cc: stable@vger.kernel.org
88 in the sign-off area. Once the patch is merged it will be applied to
89 the stable tree without anything else needing to be done by the author
90 or subsystem maintainer.
91 @@ -35,10 +35,10 @@ Procedure for submitting patches to the -stable tree:
92 cherry-picked than this can be specified in the following format in
93 the sign-off area:
94
95 - Cc: <stable@kernel.org> # .32.x: a1f84a3: sched: Check for idle
96 - Cc: <stable@kernel.org> # .32.x: 1b9508f: sched: Rate-limit newidle
97 - Cc: <stable@kernel.org> # .32.x: fd21073: sched: Fix affinity logic
98 - Cc: <stable@kernel.org> # .32.x
99 + Cc: <stable@vger.kernel.org> # .32.x: a1f84a3: sched: Check for idle
100 + Cc: <stable@vger.kernel.org> # .32.x: 1b9508f: sched: Rate-limit newidle
101 + Cc: <stable@vger.kernel.org> # .32.x: fd21073: sched: Fix affinity logic
102 + Cc: <stable@vger.kernel.org> # .32.x
103 Signed-off-by: Ingo Molnar <mingo@elte.hu>
104
105 The tag sequence has the meaning of:
106 diff --git a/MAINTAINERS b/MAINTAINERS
107 index 6185d05..e608038 100644
108 --- a/MAINTAINERS
109 +++ b/MAINTAINERS
110 @@ -1230,7 +1230,7 @@ F: Documentation/aoe/
111 F: drivers/block/aoe/
112
113 ATHEROS ATH GENERIC UTILITIES
114 -M: "Luis R. Rodriguez" <lrodriguez@atheros.com>
115 +M: "Luis R. Rodriguez" <mcgrof@qca.qualcomm.com>
116 L: linux-wireless@vger.kernel.org
117 S: Supported
118 F: drivers/net/wireless/ath/*
119 @@ -1238,7 +1238,7 @@ F: drivers/net/wireless/ath/*
120 ATHEROS ATH5K WIRELESS DRIVER
121 M: Jiri Slaby <jirislaby@gmail.com>
122 M: Nick Kossifidis <mickflemm@gmail.com>
123 -M: "Luis R. Rodriguez" <lrodriguez@atheros.com>
124 +M: "Luis R. Rodriguez" <mcgrof@qca.qualcomm.com>
125 M: Bob Copeland <me@bobcopeland.com>
126 L: linux-wireless@vger.kernel.org
127 L: ath5k-devel@lists.ath5k.org
128 @@ -1247,10 +1247,10 @@ S: Maintained
129 F: drivers/net/wireless/ath/ath5k/
130
131 ATHEROS ATH9K WIRELESS DRIVER
132 -M: "Luis R. Rodriguez" <lrodriguez@atheros.com>
133 -M: Jouni Malinen <jmalinen@atheros.com>
134 -M: Vasanthakumar Thiagarajan <vasanth@atheros.com>
135 -M: Senthil Balasubramanian <senthilkumar@atheros.com>
136 +M: "Luis R. Rodriguez" <mcgrof@qca.qualcomm.com>
137 +M: Jouni Malinen <jouni@qca.qualcomm.com>
138 +M: Vasanthakumar Thiagarajan <vthiagar@qca.qualcomm.com>
139 +M: Senthil Balasubramanian <senthilb@qca.qualcomm.com>
140 L: linux-wireless@vger.kernel.org
141 L: ath9k-devel@lists.ath9k.org
142 W: http://wireless.kernel.org/en/users/Drivers/ath9k
143 diff --git a/arch/arm/mach-exynos4/platsmp.c b/arch/arm/mach-exynos4/platsmp.c
144 index df6ef1b..0c90896 100644
145 --- a/arch/arm/mach-exynos4/platsmp.c
146 +++ b/arch/arm/mach-exynos4/platsmp.c
147 @@ -193,12 +193,10 @@ void __init smp_init_cpus(void)
148 ncores = scu_base ? scu_get_core_count(scu_base) : 1;
149
150 /* sanity check */
151 - if (ncores > NR_CPUS) {
152 - printk(KERN_WARNING
153 - "EXYNOS4: no. of cores (%d) greater than configured "
154 - "maximum of %d - clipping\n",
155 - ncores, NR_CPUS);
156 - ncores = NR_CPUS;
157 + if (ncores > nr_cpu_ids) {
158 + pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
159 + ncores, nr_cpu_ids);
160 + ncores = nr_cpu_ids;
161 }
162
163 for (i = 0; i < ncores; i++)
164 diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
165 index 1a1af9e..7276595 100644
166 --- a/arch/arm/mach-msm/platsmp.c
167 +++ b/arch/arm/mach-msm/platsmp.c
168 @@ -156,6 +156,12 @@ void __init smp_init_cpus(void)
169 {
170 unsigned int i, ncores = get_core_count();
171
172 + if (ncores > nr_cpu_ids) {
173 + pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
174 + ncores, nr_cpu_ids);
175 + ncores = nr_cpu_ids;
176 + }
177 +
178 for (i = 0; i < ncores; i++)
179 set_cpu_possible(i, true);
180
181 diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
182 index ce65e93..889464d 100644
183 --- a/arch/arm/mach-omap2/omap-smp.c
184 +++ b/arch/arm/mach-omap2/omap-smp.c
185 @@ -109,12 +109,10 @@ void __init smp_init_cpus(void)
186 ncores = scu_get_core_count(scu_base);
187
188 /* sanity check */
189 - if (ncores > NR_CPUS) {
190 - printk(KERN_WARNING
191 - "OMAP4: no. of cores (%d) greater than configured "
192 - "maximum of %d - clipping\n",
193 - ncores, NR_CPUS);
194 - ncores = NR_CPUS;
195 + if (ncores > nr_cpu_ids) {
196 + pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
197 + ncores, nr_cpu_ids);
198 + ncores = nr_cpu_ids;
199 }
200
201 for (i = 0; i < ncores; i++)
202 diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
203 index b6a5134..3814e12 100644
204 --- a/arch/arm/mach-pxa/cm-x300.c
205 +++ b/arch/arm/mach-pxa/cm-x300.c
206 @@ -775,7 +775,6 @@ static struct gpio cm_x300_wi2wi_gpios[] __initdata = {
207
208 static void __init cm_x300_init_wi2wi(void)
209 {
210 - int bt_reset, wlan_en;
211 int err;
212
213 if (system_rev < 130) {
214 @@ -791,12 +790,11 @@ static void __init cm_x300_init_wi2wi(void)
215 }
216
217 udelay(10);
218 - gpio_set_value(bt_reset, 0);
219 + gpio_set_value(cm_x300_wi2wi_gpios[1].gpio, 0);
220 udelay(10);
221 - gpio_set_value(bt_reset, 1);
222 + gpio_set_value(cm_x300_wi2wi_gpios[1].gpio, 1);
223
224 - gpio_free(wlan_en);
225 - gpio_free(bt_reset);
226 + gpio_free_array(ARRAY_AND_SIZE(cm_x300_wi2wi_gpios));
227 }
228
229 /* MFP */
230 diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
231 index 4ae943b..e83c654 100644
232 --- a/arch/arm/mach-realview/platsmp.c
233 +++ b/arch/arm/mach-realview/platsmp.c
234 @@ -52,12 +52,10 @@ void __init smp_init_cpus(void)
235 ncores = scu_base ? scu_get_core_count(scu_base) : 1;
236
237 /* sanity check */
238 - if (ncores > NR_CPUS) {
239 - printk(KERN_WARNING
240 - "Realview: no. of cores (%d) greater than configured "
241 - "maximum of %d - clipping\n",
242 - ncores, NR_CPUS);
243 - ncores = NR_CPUS;
244 + if (ncores > nr_cpu_ids) {
245 + pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
246 + ncores, nr_cpu_ids);
247 + ncores = nr_cpu_ids;
248 }
249
250 for (i = 0; i < ncores; i++)
251 diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
252 index 66f9806..e4e485f 100644
253 --- a/arch/arm/mach-shmobile/platsmp.c
254 +++ b/arch/arm/mach-shmobile/platsmp.c
255 @@ -56,6 +56,12 @@ void __init smp_init_cpus(void)
256 unsigned int ncores = shmobile_smp_get_core_count();
257 unsigned int i;
258
259 + if (ncores > nr_cpu_ids) {
260 + pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
261 + ncores, nr_cpu_ids);
262 + ncores = nr_cpu_ids;
263 + }
264 +
265 for (i = 0; i < ncores; i++)
266 set_cpu_possible(i, true);
267
268 diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c
269 index 0886cbc..7d2b5d0 100644
270 --- a/arch/arm/mach-tegra/platsmp.c
271 +++ b/arch/arm/mach-tegra/platsmp.c
272 @@ -114,10 +114,10 @@ void __init smp_init_cpus(void)
273 {
274 unsigned int i, ncores = scu_get_core_count(scu_base);
275
276 - if (ncores > NR_CPUS) {
277 - printk(KERN_ERR "Tegra: no. of cores (%u) greater than configured (%u), clipping\n",
278 - ncores, NR_CPUS);
279 - ncores = NR_CPUS;
280 + if (ncores > nr_cpu_ids) {
281 + pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
282 + ncores, nr_cpu_ids);
283 + ncores = nr_cpu_ids;
284 }
285
286 for (i = 0; i < ncores; i++)
287 diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
288 index 1da23bb..8aa104a 100644
289 --- a/arch/arm/mach-ux500/cpu.c
290 +++ b/arch/arm/mach-ux500/cpu.c
291 @@ -99,7 +99,27 @@ static void ux500_l2x0_inv_all(void)
292 ux500_cache_sync();
293 }
294
295 -static int ux500_l2x0_init(void)
296 +static int __init ux500_l2x0_unlock(void)
297 +{
298 + int i;
299 +
300 + /*
301 + * Unlock Data and Instruction Lock if locked. Ux500 U-Boot versions
302 + * apparently locks both caches before jumping to the kernel. The
303 + * l2x0 core will not touch the unlock registers if the l2x0 is
304 + * already enabled, so we do it right here instead. The PL310 has
305 + * 8 sets of registers, one per possible CPU.
306 + */
307 + for (i = 0; i < 8; i++) {
308 + writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
309 + i * L2X0_LOCKDOWN_STRIDE);
310 + writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
311 + i * L2X0_LOCKDOWN_STRIDE);
312 + }
313 + return 0;
314 +}
315 +
316 +static int __init ux500_l2x0_init(void)
317 {
318 if (cpu_is_u5500())
319 l2x0_base = __io_address(U5500_L2CC_BASE);
320 @@ -108,6 +128,9 @@ static int ux500_l2x0_init(void)
321 else
322 ux500_unknown_soc();
323
324 + /* Unlock before init */
325 + ux500_l2x0_unlock();
326 +
327 /* 64KB way size, 8 way associativity, force WA */
328 l2x0_init(l2x0_base, 0x3e060000, 0xc0000fff);
329
330 diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
331 index a33df5f..eb51991 100644
332 --- a/arch/arm/mach-ux500/platsmp.c
333 +++ b/arch/arm/mach-ux500/platsmp.c
334 @@ -156,12 +156,10 @@ void __init smp_init_cpus(void)
335 ncores = scu_base ? scu_get_core_count(scu_base) : 1;
336
337 /* sanity check */
338 - if (ncores > NR_CPUS) {
339 - printk(KERN_WARNING
340 - "U8500: no. of cores (%d) greater than configured "
341 - "maximum of %d - clipping\n",
342 - ncores, NR_CPUS);
343 - ncores = NR_CPUS;
344 + if (ncores > nr_cpu_ids) {
345 + pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
346 + ncores, nr_cpu_ids);
347 + ncores = nr_cpu_ids;
348 }
349
350 for (i = 0; i < ncores; i++)
351 diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
352 index bfd32f5..2b1e836 100644
353 --- a/arch/arm/mach-vexpress/ct-ca9x4.c
354 +++ b/arch/arm/mach-vexpress/ct-ca9x4.c
355 @@ -221,6 +221,12 @@ static void ct_ca9x4_init_cpu_map(void)
356 {
357 int i, ncores = scu_get_core_count(MMIO_P2V(A9_MPCORE_SCU));
358
359 + if (ncores > nr_cpu_ids) {
360 + pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
361 + ncores, nr_cpu_ids);
362 + ncores = nr_cpu_ids;
363 + }
364 +
365 for (i = 0; i < ncores; ++i)
366 set_cpu_possible(i, true);
367
368 diff --git a/arch/arm/plat-mxc/include/mach/iomux-v3.h b/arch/arm/plat-mxc/include/mach/iomux-v3.h
369 index ebbce33..4509956 100644
370 --- a/arch/arm/plat-mxc/include/mach/iomux-v3.h
371 +++ b/arch/arm/plat-mxc/include/mach/iomux-v3.h
372 @@ -89,11 +89,11 @@ typedef u64 iomux_v3_cfg_t;
373 #define PAD_CTL_HYS (1 << 8)
374
375 #define PAD_CTL_PKE (1 << 7)
376 -#define PAD_CTL_PUE (1 << 6)
377 -#define PAD_CTL_PUS_100K_DOWN (0 << 4)
378 -#define PAD_CTL_PUS_47K_UP (1 << 4)
379 -#define PAD_CTL_PUS_100K_UP (2 << 4)
380 -#define PAD_CTL_PUS_22K_UP (3 << 4)
381 +#define PAD_CTL_PUE (1 << 6 | PAD_CTL_PKE)
382 +#define PAD_CTL_PUS_100K_DOWN (0 << 4 | PAD_CTL_PUE)
383 +#define PAD_CTL_PUS_47K_UP (1 << 4 | PAD_CTL_PUE)
384 +#define PAD_CTL_PUS_100K_UP (2 << 4 | PAD_CTL_PUE)
385 +#define PAD_CTL_PUS_22K_UP (3 << 4 | PAD_CTL_PUE)
386
387 #define PAD_CTL_ODE (1 << 3)
388
389 diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
390 index 54a47ea..0c5fa31 100644
391 --- a/arch/powerpc/include/asm/sparsemem.h
392 +++ b/arch/powerpc/include/asm/sparsemem.h
393 @@ -16,7 +16,7 @@
394 #endif /* CONFIG_SPARSEMEM */
395
396 #ifdef CONFIG_MEMORY_HOTPLUG
397 -extern void create_section_mapping(unsigned long start, unsigned long end);
398 +extern int create_section_mapping(unsigned long start, unsigned long end);
399 extern int remove_section_mapping(unsigned long start, unsigned long end);
400 #ifdef CONFIG_NUMA
401 extern int hot_add_scn_to_nid(unsigned long scn_addr);
402 diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
403 index fec1320..d7efdbf 100644
404 --- a/arch/powerpc/mm/gup.c
405 +++ b/arch/powerpc/mm/gup.c
406 @@ -16,16 +16,6 @@
407
408 #ifdef __HAVE_ARCH_PTE_SPECIAL
409
410 -static inline void get_huge_page_tail(struct page *page)
411 -{
412 - /*
413 - * __split_huge_page_refcount() cannot run
414 - * from under us.
415 - */
416 - VM_BUG_ON(atomic_read(&page->_count) < 0);
417 - atomic_inc(&page->_count);
418 -}
419 -
420 /*
421 * The performance critical leaf functions are made noinline otherwise gcc
422 * inlines everything into a single function which results in too much
423 @@ -57,8 +47,6 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
424 put_page(page);
425 return 0;
426 }
427 - if (PageTail(page))
428 - get_huge_page_tail(page);
429 pages[*nr] = page;
430 (*nr)++;
431
432 diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
433 index 26b2872..07f9e9f 100644
434 --- a/arch/powerpc/mm/hash_utils_64.c
435 +++ b/arch/powerpc/mm/hash_utils_64.c
436 @@ -534,11 +534,11 @@ static unsigned long __init htab_get_table_size(void)
437 }
438
439 #ifdef CONFIG_MEMORY_HOTPLUG
440 -void create_section_mapping(unsigned long start, unsigned long end)
441 +int create_section_mapping(unsigned long start, unsigned long end)
442 {
443 - BUG_ON(htab_bolt_mapping(start, end, __pa(start),
444 + return htab_bolt_mapping(start, end, __pa(start),
445 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
446 - mmu_kernel_ssize));
447 + mmu_kernel_ssize);
448 }
449
450 int remove_section_mapping(unsigned long start, unsigned long end)
451 diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
452 index 0b9a5c1..da5eb38 100644
453 --- a/arch/powerpc/mm/hugetlbpage.c
454 +++ b/arch/powerpc/mm/hugetlbpage.c
455 @@ -390,7 +390,7 @@ static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long add
456 {
457 unsigned long mask;
458 unsigned long pte_end;
459 - struct page *head, *page;
460 + struct page *head, *page, *tail;
461 pte_t pte;
462 int refs;
463
464 @@ -413,6 +413,7 @@ static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long add
465 head = pte_page(pte);
466
467 page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
468 + tail = page;
469 do {
470 VM_BUG_ON(compound_head(page) != head);
471 pages[*nr] = page;
472 @@ -428,10 +429,20 @@ static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long add
473
474 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
475 /* Could be optimized better */
476 - while (*nr) {
477 - put_page(page);
478 - (*nr)--;
479 - }
480 + *nr -= refs;
481 + while (refs--)
482 + put_page(head);
483 + return 0;
484 + }
485 +
486 + /*
487 + * Any tail page need their mapcount reference taken before we
488 + * return.
489 + */
490 + while (refs--) {
491 + if (PageTail(tail))
492 + get_huge_page_tail(tail);
493 + tail++;
494 }
495
496 return 1;
497 diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
498 index c781bbc..95985f2 100644
499 --- a/arch/powerpc/mm/mem.c
500 +++ b/arch/powerpc/mm/mem.c
501 @@ -123,7 +123,8 @@ int arch_add_memory(int nid, u64 start, u64 size)
502 pgdata = NODE_DATA(nid);
503
504 start = (unsigned long)__va(start);
505 - create_section_mapping(start, start + size);
506 + if (create_section_mapping(start, start + size))
507 + return -EINVAL;
508
509 /* this should work for most non-highmem platforms */
510 zone = pgdata->node_zones;
511 diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
512 index 3bafc3d..4ff587e 100644
513 --- a/arch/powerpc/mm/mmu_context_hash64.c
514 +++ b/arch/powerpc/mm/mmu_context_hash64.c
515 @@ -136,8 +136,8 @@ int use_cop(unsigned long acop, struct mm_struct *mm)
516 if (!mm || !acop)
517 return -EINVAL;
518
519 - /* We need to make sure mm_users doesn't change */
520 - down_read(&mm->mmap_sem);
521 + /* The page_table_lock ensures mm_users won't change under us */
522 + spin_lock(&mm->page_table_lock);
523 spin_lock(mm->context.cop_lockp);
524
525 if (mm->context.cop_pid == COP_PID_NONE) {
526 @@ -164,7 +164,7 @@ int use_cop(unsigned long acop, struct mm_struct *mm)
527
528 out:
529 spin_unlock(mm->context.cop_lockp);
530 - up_read(&mm->mmap_sem);
531 + spin_unlock(&mm->page_table_lock);
532
533 return ret;
534 }
535 @@ -185,8 +185,8 @@ void drop_cop(unsigned long acop, struct mm_struct *mm)
536 if (WARN_ON_ONCE(!mm))
537 return;
538
539 - /* We need to make sure mm_users doesn't change */
540 - down_read(&mm->mmap_sem);
541 + /* The page_table_lock ensures mm_users won't change under us */
542 + spin_lock(&mm->page_table_lock);
543 spin_lock(mm->context.cop_lockp);
544
545 mm->context.acop &= ~acop;
546 @@ -213,7 +213,7 @@ void drop_cop(unsigned long acop, struct mm_struct *mm)
547 }
548
549 spin_unlock(mm->context.cop_lockp);
550 - up_read(&mm->mmap_sem);
551 + spin_unlock(&mm->page_table_lock);
552 }
553 EXPORT_SYMBOL_GPL(drop_cop);
554
555 diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
556 index 2164006..2c1ae7a 100644
557 --- a/arch/powerpc/mm/numa.c
558 +++ b/arch/powerpc/mm/numa.c
559 @@ -1214,11 +1214,12 @@ int hot_add_node_scn_to_nid(unsigned long scn_addr)
560 break;
561 }
562
563 - of_node_put(memory);
564 if (nid >= 0)
565 break;
566 }
567
568 + of_node_put(memory);
569 +
570 return nid;
571 }
572
573 diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
574 index e9be25b..0f1b706 100644
575 --- a/arch/powerpc/platforms/pseries/dlpar.c
576 +++ b/arch/powerpc/platforms/pseries/dlpar.c
577 @@ -112,6 +112,7 @@ void dlpar_free_cc_nodes(struct device_node *dn)
578 dlpar_free_one_cc_node(dn);
579 }
580
581 +#define COMPLETE 0
582 #define NEXT_SIBLING 1
583 #define NEXT_CHILD 2
584 #define NEXT_PROPERTY 3
585 @@ -158,6 +159,9 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
586 spin_unlock(&rtas_data_buf_lock);
587
588 switch (rc) {
589 + case COMPLETE:
590 + break;
591 +
592 case NEXT_SIBLING:
593 dn = dlpar_parse_cc_node(ccwa);
594 if (!dn)
595 diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
596 index ada6e07..d42f37d 100644
597 --- a/arch/powerpc/platforms/pseries/eeh.c
598 +++ b/arch/powerpc/platforms/pseries/eeh.c
599 @@ -1338,7 +1338,7 @@ static const struct file_operations proc_eeh_operations = {
600 static int __init eeh_init_proc(void)
601 {
602 if (machine_is(pseries))
603 - proc_create("ppc64/eeh", 0, NULL, &proc_eeh_operations);
604 + proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
605 return 0;
606 }
607 __initcall(eeh_init_proc);
608 diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
609 index ef86ad2..ae0e14b 100644
610 --- a/arch/s390/kernel/ptrace.c
611 +++ b/arch/s390/kernel/ptrace.c
612 @@ -47,29 +47,31 @@ enum s390_regset {
613
614 void update_per_regs(struct task_struct *task)
615 {
616 - static const struct per_regs per_single_step = {
617 - .control = PER_EVENT_IFETCH,
618 - .start = 0,
619 - .end = PSW_ADDR_INSN,
620 - };
621 struct pt_regs *regs = task_pt_regs(task);
622 struct thread_struct *thread = &task->thread;
623 - const struct per_regs *new;
624 - struct per_regs old;
625 + struct per_regs old, new;
626
627 - /* TIF_SINGLE_STEP overrides the user specified PER registers. */
628 - new = test_tsk_thread_flag(task, TIF_SINGLE_STEP) ?
629 - &per_single_step : &thread->per_user;
630 + /* Copy user specified PER registers */
631 + new.control = thread->per_user.control;
632 + new.start = thread->per_user.start;
633 + new.end = thread->per_user.end;
634 +
635 + /* merge TIF_SINGLE_STEP into user specified PER registers. */
636 + if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
637 + new.control |= PER_EVENT_IFETCH;
638 + new.start = 0;
639 + new.end = PSW_ADDR_INSN;
640 + }
641
642 /* Take care of the PER enablement bit in the PSW. */
643 - if (!(new->control & PER_EVENT_MASK)) {
644 + if (!(new.control & PER_EVENT_MASK)) {
645 regs->psw.mask &= ~PSW_MASK_PER;
646 return;
647 }
648 regs->psw.mask |= PSW_MASK_PER;
649 __ctl_store(old, 9, 11);
650 - if (memcmp(new, &old, sizeof(struct per_regs)) != 0)
651 - __ctl_load(*new, 9, 11);
652 + if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
653 + __ctl_load(new, 9, 11);
654 }
655
656 void user_enable_single_step(struct task_struct *task)
657 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
658 index dc2b580..0cba935 100644
659 --- a/arch/s390/kvm/kvm-s390.c
660 +++ b/arch/s390/kvm/kvm-s390.c
661 @@ -312,11 +312,17 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
662 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
663 unsigned int id)
664 {
665 - struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
666 - int rc = -ENOMEM;
667 + struct kvm_vcpu *vcpu;
668 + int rc = -EINVAL;
669 +
670 + if (id >= KVM_MAX_VCPUS)
671 + goto out;
672 +
673 + rc = -ENOMEM;
674
675 + vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
676 if (!vcpu)
677 - goto out_nomem;
678 + goto out;
679
680 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
681 get_zeroed_page(GFP_KERNEL);
682 @@ -352,7 +358,7 @@ out_free_sie_block:
683 free_page((unsigned long)(vcpu->arch.sie_block));
684 out_free_cpu:
685 kfree(vcpu);
686 -out_nomem:
687 +out:
688 return ERR_PTR(rc);
689 }
690
691 diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
692 index 45b405c..65cb06e 100644
693 --- a/arch/s390/mm/gup.c
694 +++ b/arch/s390/mm/gup.c
695 @@ -52,7 +52,7 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
696 unsigned long end, int write, struct page **pages, int *nr)
697 {
698 unsigned long mask, result;
699 - struct page *head, *page;
700 + struct page *head, *page, *tail;
701 int refs;
702
703 result = write ? 0 : _SEGMENT_ENTRY_RO;
704 @@ -64,6 +64,7 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
705 refs = 0;
706 head = pmd_page(pmd);
707 page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
708 + tail = page;
709 do {
710 VM_BUG_ON(compound_head(page) != head);
711 pages[*nr] = page;
712 @@ -81,6 +82,17 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
713 *nr -= refs;
714 while (refs--)
715 put_page(head);
716 + return 0;
717 + }
718 +
719 + /*
720 + * Any tail page need their mapcount reference taken before we
721 + * return.
722 + */
723 + while (refs--) {
724 + if (PageTail(tail))
725 + get_huge_page_tail(tail);
726 + tail++;
727 }
728
729 return 1;
730 diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
731 index 5d56c2b..529a088 100644
732 --- a/arch/s390/mm/pgtable.c
733 +++ b/arch/s390/mm/pgtable.c
734 @@ -662,8 +662,9 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
735
736 void __tlb_remove_table(void *_table)
737 {
738 - void *table = (void *)((unsigned long) _table & PAGE_MASK);
739 - unsigned type = (unsigned long) _table & ~PAGE_MASK;
740 + const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
741 + void *table = (void *)((unsigned long) _table & ~mask);
742 + unsigned type = (unsigned long) _table & mask;
743
744 if (type)
745 __page_table_free_rcu(table, type);
746 diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
747 index a986b5d..42c55df 100644
748 --- a/arch/sparc/mm/gup.c
749 +++ b/arch/sparc/mm/gup.c
750 @@ -56,6 +56,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
751 put_page(head);
752 return 0;
753 }
754 + if (head != page)
755 + get_huge_page_tail(page);
756
757 pages[*nr] = page;
758 (*nr)++;
759 diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
760 index 620f5b7..0491e40 100644
761 --- a/arch/um/drivers/ubd_kern.c
762 +++ b/arch/um/drivers/ubd_kern.c
763 @@ -513,8 +513,37 @@ __uml_exitcall(kill_io_thread);
764 static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out)
765 {
766 char *file;
767 + int fd;
768 + int err;
769 +
770 + __u32 version;
771 + __u32 align;
772 + char *backing_file;
773 + time_t mtime;
774 + unsigned long long size;
775 + int sector_size;
776 + int bitmap_offset;
777 +
778 + if (ubd_dev->file && ubd_dev->cow.file) {
779 + file = ubd_dev->cow.file;
780 +
781 + goto out;
782 + }
783
784 - file = ubd_dev->cow.file ? ubd_dev->cow.file : ubd_dev->file;
785 + fd = os_open_file(ubd_dev->file, global_openflags, 0);
786 + if (fd < 0)
787 + return fd;
788 +
789 + err = read_cow_header(file_reader, &fd, &version, &backing_file, \
790 + &mtime, &size, &sector_size, &align, &bitmap_offset);
791 + os_close_file(fd);
792 +
793 + if(err == -EINVAL)
794 + file = ubd_dev->file;
795 + else
796 + file = backing_file;
797 +
798 +out:
799 return os_file_size(file, size_out);
800 }
801
802 diff --git a/arch/um/sys-x86_64/vdso/vma.c b/arch/um/sys-x86_64/vdso/vma.c
803 index 9495c8d..91f4ec9 100644
804 --- a/arch/um/sys-x86_64/vdso/vma.c
805 +++ b/arch/um/sys-x86_64/vdso/vma.c
806 @@ -28,7 +28,7 @@ static int __init init_vdso(void)
807
808 um_vdso_addr = task_size - PAGE_SIZE;
809
810 - vdsop = kmalloc(GFP_KERNEL, sizeof(struct page *));
811 + vdsop = kmalloc(sizeof(struct page *), GFP_KERNEL);
812 if (!vdsop)
813 goto oom;
814
815 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
816 index 7b3ca83..9b7273c 100644
817 --- a/arch/x86/include/asm/apic.h
818 +++ b/arch/x86/include/asm/apic.h
819 @@ -495,7 +495,7 @@ static inline void default_wait_for_init_deassert(atomic_t *deassert)
820 return;
821 }
822
823 -extern struct apic *generic_bigsmp_probe(void);
824 +extern void generic_bigsmp_probe(void);
825
826
827 #ifdef CONFIG_X86_LOCAL_APIC
828 diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
829 index 37d3698..0c767a8 100644
830 --- a/arch/x86/include/asm/uv/uv_bau.h
831 +++ b/arch/x86/include/asm/uv/uv_bau.h
832 @@ -55,6 +55,7 @@
833 #define UV_BAU_TUNABLES_DIR "sgi_uv"
834 #define UV_BAU_TUNABLES_FILE "bau_tunables"
835 #define WHITESPACE " \t\n"
836 +#define uv_mmask ((1UL << uv_hub_info->m_val) - 1)
837 #define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
838 #define cpubit_isset(cpu, bau_local_cpumask) \
839 test_bit((cpu), (bau_local_cpumask).bits)
840 diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
841 index f26544a..54a13aa 100644
842 --- a/arch/x86/include/asm/uv/uv_hub.h
843 +++ b/arch/x86/include/asm/uv/uv_hub.h
844 @@ -46,6 +46,13 @@
845 * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
846 * of the nasid for socket usage.
847 *
848 + * GPA - (global physical address) a socket physical address converted
849 + * so that it can be used by the GRU as a global address. Socket
850 + * physical addresses 1) need additional NASID (node) bits added
851 + * to the high end of the address, and 2) unaliased if the
852 + * partition does not have a physical address 0. In addition, on
853 + * UV2 rev 1, GPAs need the gnode left shifted to bits 39 or 40.
854 + *
855 *
856 * NumaLink Global Physical Address Format:
857 * +--------------------------------+---------------------+
858 @@ -141,6 +148,8 @@ struct uv_hub_info_s {
859 unsigned int gnode_extra;
860 unsigned char hub_revision;
861 unsigned char apic_pnode_shift;
862 + unsigned char m_shift;
863 + unsigned char n_lshift;
864 unsigned long gnode_upper;
865 unsigned long lowmem_remap_top;
866 unsigned long lowmem_remap_base;
867 @@ -177,6 +186,16 @@ static inline int is_uv2_hub(void)
868 return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
869 }
870
871 +static inline int is_uv2_1_hub(void)
872 +{
873 + return uv_hub_info->hub_revision == UV2_HUB_REVISION_BASE;
874 +}
875 +
876 +static inline int is_uv2_2_hub(void)
877 +{
878 + return uv_hub_info->hub_revision == UV2_HUB_REVISION_BASE + 1;
879 +}
880 +
881 union uvh_apicid {
882 unsigned long v;
883 struct uvh_apicid_s {
884 @@ -276,7 +295,10 @@ static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
885 {
886 if (paddr < uv_hub_info->lowmem_remap_top)
887 paddr |= uv_hub_info->lowmem_remap_base;
888 - return paddr | uv_hub_info->gnode_upper;
889 + paddr |= uv_hub_info->gnode_upper;
890 + paddr = ((paddr << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
891 + ((paddr >> uv_hub_info->m_val) << uv_hub_info->n_lshift);
892 + return paddr;
893 }
894
895
896 @@ -300,16 +322,19 @@ static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
897 unsigned long remap_base = uv_hub_info->lowmem_remap_base;
898 unsigned long remap_top = uv_hub_info->lowmem_remap_top;
899
900 + gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
901 + ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);
902 + gpa = gpa & uv_hub_info->gpa_mask;
903 if (paddr >= remap_base && paddr < remap_base + remap_top)
904 paddr -= remap_base;
905 return paddr;
906 }
907
908
909 -/* gnode -> pnode */
910 +/* gpa -> pnode */
911 static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
912 {
913 - return gpa >> uv_hub_info->m_val;
914 + return gpa >> uv_hub_info->n_lshift;
915 }
916
917 /* gpa -> pnode */
918 @@ -320,6 +345,12 @@ static inline int uv_gpa_to_pnode(unsigned long gpa)
919 return uv_gpa_to_gnode(gpa) & n_mask;
920 }
921
922 +/* gpa -> node offset*/
923 +static inline unsigned long uv_gpa_to_offset(unsigned long gpa)
924 +{
925 + return (gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift;
926 +}
927 +
928 /* pnode, offset --> socket virtual */
929 static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
930 {
931 diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
932 index efd737e..521bead 100644
933 --- a/arch/x86/kernel/apic/bigsmp_32.c
934 +++ b/arch/x86/kernel/apic/bigsmp_32.c
935 @@ -255,12 +255,24 @@ static struct apic apic_bigsmp = {
936 .x86_32_early_logical_apicid = bigsmp_early_logical_apicid,
937 };
938
939 -struct apic * __init generic_bigsmp_probe(void)
940 +void __init generic_bigsmp_probe(void)
941 {
942 - if (probe_bigsmp())
943 - return &apic_bigsmp;
944 + unsigned int cpu;
945
946 - return NULL;
947 + if (!probe_bigsmp())
948 + return;
949 +
950 + apic = &apic_bigsmp;
951 +
952 + for_each_possible_cpu(cpu) {
953 + if (early_per_cpu(x86_cpu_to_logical_apicid,
954 + cpu) == BAD_APICID)
955 + continue;
956 + early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
957 + bigsmp_early_logical_apicid(cpu);
958 + }
959 +
960 + pr_info("Overriding APIC driver with %s\n", apic_bigsmp.name);
961 }
962
963 apic_driver(apic_bigsmp);
964 diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
965 index b5254ad..0787bb3 100644
966 --- a/arch/x86/kernel/apic/probe_32.c
967 +++ b/arch/x86/kernel/apic/probe_32.c
968 @@ -200,14 +200,8 @@ void __init default_setup_apic_routing(void)
969 * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
970 */
971
972 - if (!cmdline_apic && apic == &apic_default) {
973 - struct apic *bigsmp = generic_bigsmp_probe();
974 - if (bigsmp) {
975 - apic = bigsmp;
976 - printk(KERN_INFO "Overriding APIC driver with %s\n",
977 - apic->name);
978 - }
979 - }
980 + if (!cmdline_apic && apic == &apic_default)
981 + generic_bigsmp_probe();
982 #endif
983
984 if (apic->setup_apic_routing)
985 diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
986 index 34b1859..cfeb978 100644
987 --- a/arch/x86/kernel/apic/x2apic_uv_x.c
988 +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
989 @@ -832,6 +832,10 @@ void __init uv_system_init(void)
990 uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
991 uv_cpu_hub_info(cpu)->hub_revision = uv_hub_info->hub_revision;
992
993 + uv_cpu_hub_info(cpu)->m_shift = 64 - m_val;
994 + uv_cpu_hub_info(cpu)->n_lshift = is_uv2_1_hub() ?
995 + (m_val == 40 ? 40 : 39) : m_val;
996 +
997 pnode = uv_apicid_to_pnode(apicid);
998 blade = boot_pnode_to_blade(pnode);
999 lcpu = uv_blade_info[blade].nr_possible_cpus;
1000 @@ -862,8 +866,7 @@ void __init uv_system_init(void)
1001 if (uv_node_to_blade[nid] >= 0)
1002 continue;
1003 paddr = node_start_pfn(nid) << PAGE_SHIFT;
1004 - paddr = uv_soc_phys_ram_to_gpa(paddr);
1005 - pnode = (paddr >> m_val) & pnode_mask;
1006 + pnode = uv_gpa_to_pnode(uv_soc_phys_ram_to_gpa(paddr));
1007 blade = boot_pnode_to_blade(pnode);
1008 uv_node_to_blade[nid] = blade;
1009 }
1010 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
1011 index f1a6244..794bc95 100644
1012 --- a/arch/x86/kernel/kprobes.c
1013 +++ b/arch/x86/kernel/kprobes.c
1014 @@ -75,8 +75,10 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
1015 /*
1016 * Undefined/reserved opcodes, conditional jump, Opcode Extension
1017 * Groups, and some special opcodes can not boost.
1018 + * This is non-const to keep gcc from statically optimizing it out, as
1019 + * variable_test_bit makes gcc think only *(unsigned long*) is used.
1020 */
1021 -static const u32 twobyte_is_boostable[256 / 32] = {
1022 +static u32 twobyte_is_boostable[256 / 32] = {
1023 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1024 /* ---------------------------------------------- */
1025 W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
1026 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
1027 index dbe34b9..ea30585 100644
1028 --- a/arch/x86/mm/gup.c
1029 +++ b/arch/x86/mm/gup.c
1030 @@ -108,16 +108,6 @@ static inline void get_head_page_multiple(struct page *page, int nr)
1031 SetPageReferenced(page);
1032 }
1033
1034 -static inline void get_huge_page_tail(struct page *page)
1035 -{
1036 - /*
1037 - * __split_huge_page_refcount() cannot run
1038 - * from under us.
1039 - */
1040 - VM_BUG_ON(atomic_read(&page->_count) < 0);
1041 - atomic_inc(&page->_count);
1042 -}
1043 -
1044 static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
1045 unsigned long end, int write, struct page **pages, int *nr)
1046 {
1047 diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
1048 index db8b915..5b55219 100644
1049 --- a/arch/x86/platform/uv/tlb_uv.c
1050 +++ b/arch/x86/platform/uv/tlb_uv.c
1051 @@ -115,9 +115,6 @@ early_param("nobau", setup_nobau);
1052
1053 /* base pnode in this partition */
1054 static int uv_base_pnode __read_mostly;
1055 -/* position of pnode (which is nasid>>1): */
1056 -static int uv_nshift __read_mostly;
1057 -static unsigned long uv_mmask __read_mostly;
1058
1059 static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
1060 static DEFINE_PER_CPU(struct bau_control, bau_control);
1061 @@ -1435,7 +1432,7 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
1062 {
1063 int i;
1064 int cpu;
1065 - unsigned long pa;
1066 + unsigned long gpa;
1067 unsigned long m;
1068 unsigned long n;
1069 size_t dsize;
1070 @@ -1451,9 +1448,9 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
1071 bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
1072 BUG_ON(!bau_desc);
1073
1074 - pa = uv_gpa(bau_desc); /* need the real nasid*/
1075 - n = pa >> uv_nshift;
1076 - m = pa & uv_mmask;
1077 + gpa = uv_gpa(bau_desc);
1078 + n = uv_gpa_to_gnode(gpa);
1079 + m = uv_gpa_to_offset(gpa);
1080
1081 /* the 14-bit pnode */
1082 write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
1083 @@ -1525,9 +1522,9 @@ static void pq_init(int node, int pnode)
1084 bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
1085 }
1086 /*
1087 - * need the pnode of where the memory was really allocated
1088 + * need the gnode of where the memory was really allocated
1089 */
1090 - pn = uv_gpa(pqp) >> uv_nshift;
1091 + pn = uv_gpa_to_gnode(uv_gpa(pqp));
1092 first = uv_physnodeaddr(pqp);
1093 pn_first = ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | first;
1094 last = uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1));
1095 @@ -1837,8 +1834,6 @@ static int __init uv_bau_init(void)
1096 zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
1097 }
1098
1099 - uv_nshift = uv_hub_info->m_val;
1100 - uv_mmask = (1UL << uv_hub_info->m_val) - 1;
1101 nuvhubs = uv_num_possible_blades();
1102 spin_lock_init(&disable_lock);
1103 congested_cycles = usec_2_cycles(congested_respns_us);
1104 diff --git a/block/blk-core.c b/block/blk-core.c
1105 index d34433a..795154e 100644
1106 --- a/block/blk-core.c
1107 +++ b/block/blk-core.c
1108 @@ -1725,6 +1725,8 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1109 where = ELEVATOR_INSERT_FLUSH;
1110
1111 add_acct_request(q, rq, where);
1112 + if (where == ELEVATOR_INSERT_FLUSH)
1113 + __blk_run_queue(q);
1114 spin_unlock_irqrestore(q->queue_lock, flags);
1115
1116 return 0;
1117 diff --git a/block/blk-flush.c b/block/blk-flush.c
1118 index 491eb30..720ad60 100644
1119 --- a/block/blk-flush.c
1120 +++ b/block/blk-flush.c
1121 @@ -320,7 +320,7 @@ void blk_insert_flush(struct request *rq)
1122 return;
1123 }
1124
1125 - BUG_ON(!rq->bio || rq->bio != rq->biotail);
1126 + BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */
1127
1128 /*
1129 * If there's data but flush is not necessary, the request can be
1130 @@ -330,7 +330,6 @@ void blk_insert_flush(struct request *rq)
1131 if ((policy & REQ_FSEQ_DATA) &&
1132 !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
1133 list_add_tail(&rq->queuelist, &q->queue_head);
1134 - blk_run_queue_async(q);
1135 return;
1136 }
1137
1138 diff --git a/block/genhd.c b/block/genhd.c
1139 index e2f6790..d261b73 100644
1140 --- a/block/genhd.c
1141 +++ b/block/genhd.c
1142 @@ -611,6 +611,12 @@ void add_disk(struct gendisk *disk)
1143 register_disk(disk);
1144 blk_register_queue(disk);
1145
1146 + /*
1147 + * Take an extra ref on queue which will be put on disk_release()
1148 + * so that it sticks around as long as @disk is there.
1149 + */
1150 + WARN_ON_ONCE(blk_get_queue(disk->queue));
1151 +
1152 retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
1153 "bdi");
1154 WARN_ON(retval);
1155 @@ -1095,6 +1101,8 @@ static void disk_release(struct device *dev)
1156 disk_replace_part_tbl(disk, NULL);
1157 free_part_stats(&disk->part0);
1158 free_part_info(&disk->part0);
1159 + if (disk->queue)
1160 + blk_put_queue(disk->queue);
1161 kfree(disk);
1162 }
1163 struct class block_class = {
1164 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
1165 index e46d21a..671d4d6 100644
1166 --- a/crypto/cryptd.c
1167 +++ b/crypto/cryptd.c
1168 @@ -945,7 +945,7 @@ static void __exit cryptd_exit(void)
1169 crypto_unregister_template(&cryptd_tmpl);
1170 }
1171
1172 -module_init(cryptd_init);
1173 +subsys_initcall(cryptd_init);
1174 module_exit(cryptd_exit);
1175
1176 MODULE_LICENSE("GPL");
1177 diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
1178 index 7489b89..f151afe 100644
1179 --- a/drivers/acpi/atomicio.c
1180 +++ b/drivers/acpi/atomicio.c
1181 @@ -76,7 +76,7 @@ static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
1182 {
1183 struct acpi_iomap *map;
1184
1185 - map = __acpi_find_iomap(paddr, size);
1186 + map = __acpi_find_iomap(paddr, size/8);
1187 if (map)
1188 return map->vaddr + (paddr - map->paddr);
1189 else
1190 diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
1191 index 43107e9..cc431d6 100644
1192 --- a/drivers/ata/ata_piix.c
1193 +++ b/drivers/ata/ata_piix.c
1194 @@ -113,6 +113,8 @@ enum {
1195 PIIX_PATA_FLAGS = ATA_FLAG_SLAVE_POSS,
1196 PIIX_SATA_FLAGS = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
1197
1198 + PIIX_FLAG_PIO16 = (1 << 30), /*support 16bit PIO only*/
1199 +
1200 PIIX_80C_PRI = (1 << 5) | (1 << 4),
1201 PIIX_80C_SEC = (1 << 7) | (1 << 6),
1202
1203 @@ -147,6 +149,7 @@ enum piix_controller_ids {
1204 ich8m_apple_sata, /* locks up on second port enable */
1205 tolapai_sata,
1206 piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
1207 + ich8_sata_snb,
1208 };
1209
1210 struct piix_map_db {
1211 @@ -177,6 +180,7 @@ static int piix_sidpr_scr_write(struct ata_link *link,
1212 static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
1213 unsigned hints);
1214 static bool piix_irq_check(struct ata_port *ap);
1215 +static int piix_port_start(struct ata_port *ap);
1216 #ifdef CONFIG_PM
1217 static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
1218 static int piix_pci_device_resume(struct pci_dev *pdev);
1219 @@ -298,21 +302,21 @@ static const struct pci_device_id piix_pci_tbl[] = {
1220 /* SATA Controller IDE (PCH) */
1221 { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
1222 /* SATA Controller IDE (CPT) */
1223 - { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
1224 + { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
1225 /* SATA Controller IDE (CPT) */
1226 - { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
1227 + { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
1228 /* SATA Controller IDE (CPT) */
1229 { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
1230 /* SATA Controller IDE (CPT) */
1231 { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
1232 /* SATA Controller IDE (PBG) */
1233 - { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
1234 + { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
1235 /* SATA Controller IDE (PBG) */
1236 { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
1237 /* SATA Controller IDE (Panther Point) */
1238 - { 0x8086, 0x1e00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
1239 + { 0x8086, 0x1e00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
1240 /* SATA Controller IDE (Panther Point) */
1241 - { 0x8086, 0x1e01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
1242 + { 0x8086, 0x1e01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
1243 /* SATA Controller IDE (Panther Point) */
1244 { 0x8086, 0x1e08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
1245 /* SATA Controller IDE (Panther Point) */
1246 @@ -338,6 +342,7 @@ static struct scsi_host_template piix_sht = {
1247 static struct ata_port_operations piix_sata_ops = {
1248 .inherits = &ata_bmdma32_port_ops,
1249 .sff_irq_check = piix_irq_check,
1250 + .port_start = piix_port_start,
1251 };
1252
1253 static struct ata_port_operations piix_pata_ops = {
1254 @@ -478,6 +483,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
1255 [ich8_2port_sata] = &ich8_2port_map_db,
1256 [ich8m_apple_sata] = &ich8m_apple_map_db,
1257 [tolapai_sata] = &tolapai_map_db,
1258 + [ich8_sata_snb] = &ich8_map_db,
1259 };
1260
1261 static struct ata_port_info piix_port_info[] = {
1262 @@ -606,6 +612,19 @@ static struct ata_port_info piix_port_info[] = {
1263 .port_ops = &piix_vmw_ops,
1264 },
1265
1266 + /*
1267 + * some Sandybridge chipsets have broken 32 mode up to now,
1268 + * see https://bugzilla.kernel.org/show_bug.cgi?id=40592
1269 + */
1270 + [ich8_sata_snb] =
1271 + {
1272 + .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
1273 + .pio_mask = ATA_PIO4,
1274 + .mwdma_mask = ATA_MWDMA2,
1275 + .udma_mask = ATA_UDMA6,
1276 + .port_ops = &piix_sata_ops,
1277 + },
1278 +
1279 };
1280
1281 static struct pci_bits piix_enable_bits[] = {
1282 @@ -649,6 +668,14 @@ static const struct ich_laptop ich_laptop[] = {
1283 { 0, }
1284 };
1285
1286 +static int piix_port_start(struct ata_port *ap)
1287 +{
1288 + if (!(ap->flags & PIIX_FLAG_PIO16))
1289 + ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
1290 +
1291 + return ata_bmdma_port_start(ap);
1292 +}
1293 +
1294 /**
1295 * ich_pata_cable_detect - Probe host controller cable detect info
1296 * @ap: Port for which cable detect info is desired
1297 diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
1298 index acb3f83..6a7f7b0 100644
1299 --- a/drivers/base/power/runtime.c
1300 +++ b/drivers/base/power/runtime.c
1301 @@ -285,6 +285,9 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
1302 * If a deferred resume was requested while the callback was running then carry
1303 * it out; otherwise send an idle notification for the device (if the suspend
1304 * failed) or for its parent (if the suspend succeeded).
1305 + * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
1306 + * flag is set and the next autosuspend-delay expiration time is in the
1307 + * future, schedule another autosuspend attempt.
1308 *
1309 * This function must be called under dev->power.lock with interrupts disabled.
1310 */
1311 @@ -396,10 +399,21 @@ static int rpm_suspend(struct device *dev, int rpmflags)
1312 if (retval) {
1313 __update_runtime_status(dev, RPM_ACTIVE);
1314 dev->power.deferred_resume = false;
1315 - if (retval == -EAGAIN || retval == -EBUSY)
1316 + if (retval == -EAGAIN || retval == -EBUSY) {
1317 dev->power.runtime_error = 0;
1318 - else
1319 +
1320 + /*
1321 + * If the callback routine failed an autosuspend, and
1322 + * if the last_busy time has been updated so that there
1323 + * is a new autosuspend expiration time, automatically
1324 + * reschedule another autosuspend.
1325 + */
1326 + if ((rpmflags & RPM_AUTO) &&
1327 + pm_runtime_autosuspend_expiration(dev) != 0)
1328 + goto repeat;
1329 + } else {
1330 pm_runtime_cancel_pending(dev);
1331 + }
1332 } else {
1333 no_callback:
1334 __update_runtime_status(dev, RPM_SUSPENDED);
1335 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
1336 index 8f4ef65..c2f9b3e 100644
1337 --- a/drivers/block/cciss.c
1338 +++ b/drivers/block/cciss.c
1339 @@ -4533,6 +4533,13 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev,
1340 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1341 pmcsr |= PCI_D0;
1342 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
1343 +
1344 + /*
1345 + * The P600 requires a small delay when changing states.
1346 + * Otherwise we may think the board did not reset and we bail.
1347 + * This for kdump only and is particular to the P600.
1348 + */
1349 + msleep(500);
1350 }
1351 return 0;
1352 }
1353 diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
1354 index 2330a9a..2f01073 100644
1355 --- a/drivers/block/xen-blkback/blkback.c
1356 +++ b/drivers/block/xen-blkback/blkback.c
1357 @@ -685,7 +685,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
1358
1359 if (operation == READ)
1360 blkif->st_rd_sect += preq.nr_sects;
1361 - else if (operation == WRITE || operation == WRITE_FLUSH)
1362 + else if (operation & WRITE)
1363 blkif->st_wr_sect += preq.nr_sects;
1364
1365 return 0;
1366 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
1367 index 44fef5e..7c64db4 100644
1368 --- a/drivers/gpu/drm/i915/intel_dp.c
1369 +++ b/drivers/gpu/drm/i915/intel_dp.c
1370 @@ -1683,6 +1683,31 @@ g4x_dp_detect(struct intel_dp *intel_dp)
1371 return intel_dp_detect_dpcd(intel_dp);
1372 }
1373
1374 +static struct edid *
1375 +intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
1376 +{
1377 + struct intel_dp *intel_dp = intel_attached_dp(connector);
1378 + struct edid *edid;
1379 +
1380 + ironlake_edp_panel_vdd_on(intel_dp);
1381 + edid = drm_get_edid(connector, adapter);
1382 + ironlake_edp_panel_vdd_off(intel_dp);
1383 + return edid;
1384 +}
1385 +
1386 +static int
1387 +intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
1388 +{
1389 + struct intel_dp *intel_dp = intel_attached_dp(connector);
1390 + int ret;
1391 +
1392 + ironlake_edp_panel_vdd_on(intel_dp);
1393 + ret = intel_ddc_get_modes(connector, adapter);
1394 + ironlake_edp_panel_vdd_off(intel_dp);
1395 + return ret;
1396 +}
1397 +
1398 +
1399 /**
1400 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
1401 *
1402 @@ -1715,7 +1740,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
1403 if (intel_dp->force_audio) {
1404 intel_dp->has_audio = intel_dp->force_audio > 0;
1405 } else {
1406 - edid = drm_get_edid(connector, &intel_dp->adapter);
1407 + edid = intel_dp_get_edid(connector, &intel_dp->adapter);
1408 if (edid) {
1409 intel_dp->has_audio = drm_detect_monitor_audio(edid);
1410 connector->display_info.raw_edid = NULL;
1411 @@ -1736,7 +1761,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
1412 /* We should parse the EDID data and find out if it has an audio sink
1413 */
1414
1415 - ret = intel_ddc_get_modes(connector, &intel_dp->adapter);
1416 + ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
1417 if (ret) {
1418 if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) {
1419 struct drm_display_mode *newmode;
1420 @@ -1772,7 +1797,7 @@ intel_dp_detect_audio(struct drm_connector *connector)
1421 struct edid *edid;
1422 bool has_audio = false;
1423
1424 - edid = drm_get_edid(connector, &intel_dp->adapter);
1425 + edid = intel_dp_get_edid(connector, &intel_dp->adapter);
1426 if (edid) {
1427 has_audio = drm_detect_monitor_audio(edid);
1428
1429 diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
1430 index fe1099d..2480cfa 100644
1431 --- a/drivers/gpu/drm/i915/intel_drv.h
1432 +++ b/drivers/gpu/drm/i915/intel_drv.h
1433 @@ -184,7 +184,7 @@ struct intel_crtc {
1434 #define DIP_VERSION_AVI 0x2
1435 #define DIP_LEN_AVI 13
1436
1437 -#define DIP_TYPE_SPD 0x3
1438 +#define DIP_TYPE_SPD 0x83
1439 #define DIP_VERSION_SPD 0x1
1440 #define DIP_LEN_SPD 25
1441 #define DIP_SPD_UNKNOWN 0
1442 diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
1443 index a9e0c7b..af08ff3 100644
1444 --- a/drivers/gpu/drm/i915/intel_panel.c
1445 +++ b/drivers/gpu/drm/i915/intel_panel.c
1446 @@ -226,7 +226,7 @@ static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
1447 I915_WRITE(BLC_PWM_CPU_CTL, val | level);
1448 }
1449
1450 -void intel_panel_set_backlight(struct drm_device *dev, u32 level)
1451 +static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level)
1452 {
1453 struct drm_i915_private *dev_priv = dev->dev_private;
1454 u32 tmp;
1455 @@ -254,16 +254,21 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
1456 I915_WRITE(BLC_PWM_CTL, tmp | level);
1457 }
1458
1459 -void intel_panel_disable_backlight(struct drm_device *dev)
1460 +void intel_panel_set_backlight(struct drm_device *dev, u32 level)
1461 {
1462 struct drm_i915_private *dev_priv = dev->dev_private;
1463
1464 - if (dev_priv->backlight_enabled) {
1465 - dev_priv->backlight_level = intel_panel_get_backlight(dev);
1466 - dev_priv->backlight_enabled = false;
1467 - }
1468 + dev_priv->backlight_level = level;
1469 + if (dev_priv->backlight_enabled)
1470 + intel_panel_actually_set_backlight(dev, level);
1471 +}
1472 +
1473 +void intel_panel_disable_backlight(struct drm_device *dev)
1474 +{
1475 + struct drm_i915_private *dev_priv = dev->dev_private;
1476
1477 - intel_panel_set_backlight(dev, 0);
1478 + dev_priv->backlight_enabled = false;
1479 + intel_panel_actually_set_backlight(dev, 0);
1480 }
1481
1482 void intel_panel_enable_backlight(struct drm_device *dev)
1483 @@ -273,8 +278,8 @@ void intel_panel_enable_backlight(struct drm_device *dev)
1484 if (dev_priv->backlight_level == 0)
1485 dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
1486
1487 - intel_panel_set_backlight(dev, dev_priv->backlight_level);
1488 dev_priv->backlight_enabled = true;
1489 + intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
1490 }
1491
1492 static void intel_panel_init_backlight(struct drm_device *dev)
1493 diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
1494 index 79e8ebc..b5628ce 100644
1495 --- a/drivers/gpu/drm/radeon/atombios_dp.c
1496 +++ b/drivers/gpu/drm/radeon/atombios_dp.c
1497 @@ -553,6 +553,7 @@ static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
1498 {
1499 struct drm_device *dev = encoder->dev;
1500 struct radeon_device *rdev = dev->dev_private;
1501 + struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1502 int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
1503
1504 if (!ASIC_IS_DCE4(rdev))
1505 @@ -560,10 +561,20 @@ static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
1506
1507 if (radeon_connector_encoder_is_dp_bridge(connector))
1508 panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
1509 + else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
1510 + u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
1511 + if (tmp & 1)
1512 + panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
1513 + }
1514
1515 atombios_dig_encoder_setup(encoder,
1516 ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
1517 panel_mode);
1518 +
1519 + if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
1520 + (panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
1521 + radeon_write_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
1522 + }
1523 }
1524
1525 void radeon_dp_set_link_config(struct drm_connector *connector,
1526 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
1527 index c4ffa14f..fb0a00a 100644
1528 --- a/drivers/gpu/drm/radeon/evergreen.c
1529 +++ b/drivers/gpu/drm/radeon/evergreen.c
1530 @@ -353,6 +353,7 @@ void evergreen_hpd_init(struct radeon_device *rdev)
1531 default:
1532 break;
1533 }
1534 + radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
1535 }
1536 if (rdev->irq.installed)
1537 evergreen_irq_set(rdev);
1538 diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
1539 index 7fcdbbb..c9a0dae 100644
1540 --- a/drivers/gpu/drm/radeon/r100.c
1541 +++ b/drivers/gpu/drm/radeon/r100.c
1542 @@ -434,6 +434,7 @@ void r100_hpd_init(struct radeon_device *rdev)
1543 default:
1544 break;
1545 }
1546 + radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
1547 }
1548 if (rdev->irq.installed)
1549 r100_irq_set(rdev);
1550 diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
1551 index 720dd99..9b62a97 100644
1552 --- a/drivers/gpu/drm/radeon/r600.c
1553 +++ b/drivers/gpu/drm/radeon/r600.c
1554 @@ -762,13 +762,14 @@ void r600_hpd_init(struct radeon_device *rdev)
1555 struct drm_device *dev = rdev->ddev;
1556 struct drm_connector *connector;
1557
1558 - if (ASIC_IS_DCE3(rdev)) {
1559 - u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
1560 - if (ASIC_IS_DCE32(rdev))
1561 - tmp |= DC_HPDx_EN;
1562 + list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1563 + struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1564 +
1565 + if (ASIC_IS_DCE3(rdev)) {
1566 + u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
1567 + if (ASIC_IS_DCE32(rdev))
1568 + tmp |= DC_HPDx_EN;
1569
1570 - list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1571 - struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1572 switch (radeon_connector->hpd.hpd) {
1573 case RADEON_HPD_1:
1574 WREG32(DC_HPD1_CONTROL, tmp);
1575 @@ -798,10 +799,7 @@ void r600_hpd_init(struct radeon_device *rdev)
1576 default:
1577 break;
1578 }
1579 - }
1580 - } else {
1581 - list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1582 - struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1583 + } else {
1584 switch (radeon_connector->hpd.hpd) {
1585 case RADEON_HPD_1:
1586 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
1587 @@ -819,6 +817,7 @@ void r600_hpd_init(struct radeon_device *rdev)
1588 break;
1589 }
1590 }
1591 + radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
1592 }
1593 if (rdev->irq.installed)
1594 r600_irq_set(rdev);
1595 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
1596 index c1e056b..184628c 100644
1597 --- a/drivers/gpu/drm/radeon/radeon.h
1598 +++ b/drivers/gpu/drm/radeon/radeon.h
1599 @@ -93,6 +93,7 @@ extern int radeon_audio;
1600 extern int radeon_disp_priority;
1601 extern int radeon_hw_i2c;
1602 extern int radeon_pcie_gen2;
1603 +extern int radeon_msi;
1604
1605 /*
1606 * Copy from radeon_drv.h so we don't have to include both and have conflicting
1607 diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
1608 index 6367524..8bf83c4 100644
1609 --- a/drivers/gpu/drm/radeon/radeon_combios.c
1610 +++ b/drivers/gpu/drm/radeon/radeon_combios.c
1611 @@ -620,8 +620,8 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
1612 i2c.y_data_mask = 0x80;
1613 } else {
1614 /* default masks for ddc pads */
1615 - i2c.mask_clk_mask = RADEON_GPIO_EN_1;
1616 - i2c.mask_data_mask = RADEON_GPIO_EN_0;
1617 + i2c.mask_clk_mask = RADEON_GPIO_MASK_1;
1618 + i2c.mask_data_mask = RADEON_GPIO_MASK_0;
1619 i2c.a_clk_mask = RADEON_GPIO_A_1;
1620 i2c.a_data_mask = RADEON_GPIO_A_0;
1621 i2c.en_clk_mask = RADEON_GPIO_EN_1;
1622 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
1623 index 449c3d8..ff6a2e0 100644
1624 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
1625 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
1626 @@ -724,6 +724,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
1627 dret = radeon_ddc_probe(radeon_connector,
1628 radeon_connector->requires_extended_probe);
1629 if (dret) {
1630 + radeon_connector->detected_by_load = false;
1631 if (radeon_connector->edid) {
1632 kfree(radeon_connector->edid);
1633 radeon_connector->edid = NULL;
1634 @@ -750,12 +751,21 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
1635 } else {
1636
1637 /* if we aren't forcing don't do destructive polling */
1638 - if (!force)
1639 - return connector->status;
1640 + if (!force) {
1641 + /* only return the previous status if we last
1642 + * detected a monitor via load.
1643 + */
1644 + if (radeon_connector->detected_by_load)
1645 + return connector->status;
1646 + else
1647 + return ret;
1648 + }
1649
1650 if (radeon_connector->dac_load_detect && encoder) {
1651 encoder_funcs = encoder->helper_private;
1652 ret = encoder_funcs->detect(encoder, connector);
1653 + if (ret != connector_status_disconnected)
1654 + radeon_connector->detected_by_load = true;
1655 }
1656 }
1657
1658 @@ -897,6 +907,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
1659 dret = radeon_ddc_probe(radeon_connector,
1660 radeon_connector->requires_extended_probe);
1661 if (dret) {
1662 + radeon_connector->detected_by_load = false;
1663 if (radeon_connector->edid) {
1664 kfree(radeon_connector->edid);
1665 radeon_connector->edid = NULL;
1666 @@ -959,8 +970,18 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
1667 if ((ret == connector_status_connected) && (radeon_connector->use_digital == true))
1668 goto out;
1669
1670 + /* DVI-D and HDMI-A are digital only */
1671 + if ((connector->connector_type == DRM_MODE_CONNECTOR_DVID) ||
1672 + (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA))
1673 + goto out;
1674 +
1675 + /* if we aren't forcing don't do destructive polling */
1676 if (!force) {
1677 - ret = connector->status;
1678 + /* only return the previous status if we last
1679 + * detected a monitor via load.
1680 + */
1681 + if (radeon_connector->detected_by_load)
1682 + ret = connector->status;
1683 goto out;
1684 }
1685
1686 @@ -985,6 +1006,8 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
1687 if (ret == connector_status_connected) {
1688 radeon_connector->use_digital = false;
1689 }
1690 + if (ret != connector_status_disconnected)
1691 + radeon_connector->detected_by_load = true;
1692 }
1693 break;
1694 }
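
The radeon_connectors.c hunks add a detected_by_load flag to struct radeon_connector (see the radeon_mode.h hunk further down): a successful DDC probe clears it, a non-disconnected load-detect result sets it, and during non-forced polling the previous status is reused only when that flag is set, so a stale load-detect result does not stick around after the monitor is gone. A condensed sketch of that decision follows; detect(), its flags and the stand-in probes are invented for illustration (the real paths also handle EDID caching and DVI analog/digital selection).

#include <stdbool.h>
#include <stdio.h>

enum status { DISCONNECTED, CONNECTED };

struct connector {
        enum status status;
        bool detected_by_load;   /* last status came from load detection */
};

static enum status detect(struct connector *c, bool force,
                          bool ddc_ok, bool load_ok)
{
        if (ddc_ok) {
                /* A live DDC probe is authoritative. */
                c->detected_by_load = false;
                c->status = CONNECTED;
                return c->status;
        }

        /* Non-forced poll: only reuse a status that came from load detect. */
        if (!force)
                return c->detected_by_load ? c->status : DISCONNECTED;

        /* Forced probe: fall back to (destructive) load detection. */
        c->status = load_ok ? CONNECTED : DISCONNECTED;
        if (c->status != DISCONNECTED)
                c->detected_by_load = true;
        return c->status;
}

int main(void)
{
        struct connector c = { DISCONNECTED, false };

        /* Forced probe, no DDC, load detection finds a CRT. */
        printf("forced probe: %d\n", detect(&c, true, false, true));   /* 1 */
        /* Later background poll: the load-detect result is reused. */
        printf("poll:         %d\n", detect(&c, false, false, true));  /* 1 */
        return 0;
}
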
1695 diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
1696 index e71d2ed..c12b077 100644
1697 --- a/drivers/gpu/drm/radeon/radeon_drv.c
1698 +++ b/drivers/gpu/drm/radeon/radeon_drv.c
1699 @@ -118,6 +118,7 @@ int radeon_audio = 0;
1700 int radeon_disp_priority = 0;
1701 int radeon_hw_i2c = 0;
1702 int radeon_pcie_gen2 = 0;
1703 +int radeon_msi = -1;
1704
1705 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
1706 module_param_named(no_wb, radeon_no_wb, int, 0444);
1707 @@ -164,6 +165,9 @@ module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
1708 MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)");
1709 module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
1710
1711 +MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
1712 +module_param_named(msi, radeon_msi, int, 0444);
1713 +
1714 static int radeon_suspend(struct drm_device *dev, pm_message_t state)
1715 {
1716 drm_radeon_private_t *dev_priv = dev->dev_private;
1717 diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
1718 index 9ec830c..fecc1aa 100644
1719 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
1720 +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
1721 @@ -108,6 +108,46 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
1722 radeon_irq_set(rdev);
1723 }
1724
1725 +static bool radeon_msi_ok(struct radeon_device *rdev)
1726 +{
1727 + /* RV370/RV380 was first asic with MSI support */
1728 + if (rdev->family < CHIP_RV380)
1729 + return false;
1730 +
1731 + /* MSIs don't work on AGP */
1732 + if (rdev->flags & RADEON_IS_AGP)
1733 + return false;
1734 +
1735 + /* force MSI on */
1736 + if (radeon_msi == 1)
1737 + return true;
1738 + else if (radeon_msi == 0)
1739 + return false;
1740 +
1741 + /* Quirks */
1742 + /* HP RS690 only seems to work with MSIs. */
1743 + if ((rdev->pdev->device == 0x791f) &&
1744 + (rdev->pdev->subsystem_vendor == 0x103c) &&
1745 + (rdev->pdev->subsystem_device == 0x30c2))
1746 + return true;
1747 +
1748 + /* Dell RS690 only seems to work with MSIs. */
1749 + if ((rdev->pdev->device == 0x791f) &&
1750 + (rdev->pdev->subsystem_vendor == 0x1028) &&
1751 + (rdev->pdev->subsystem_device == 0x01fd))
1752 + return true;
1753 +
1754 + if (rdev->flags & RADEON_IS_IGP) {
1755 + /* APUs work fine with MSIs */
1756 + if (rdev->family >= CHIP_PALM)
1757 + return true;
1758 + /* lots of IGPs have problems with MSIs */
1759 + return false;
1760 + }
1761 +
1762 + return true;
1763 +}
1764 +
1765 int radeon_irq_kms_init(struct radeon_device *rdev)
1766 {
1767 int i;
1768 @@ -124,12 +164,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
1769 }
1770 /* enable msi */
1771 rdev->msi_enabled = 0;
1772 - /* MSIs don't seem to work reliably on all IGP
1773 - * chips. Disable MSI on them for now.
1774 - */
1775 - if ((rdev->family >= CHIP_RV380) &&
1776 - ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) &&
1777 - (!(rdev->flags & RADEON_IS_AGP))) {
1778 +
1779 + if (radeon_msi_ok(rdev)) {
1780 int ret = pci_enable_msi(rdev->pdev);
1781 if (!ret) {
1782 rdev->msi_enabled = 1;
1783 diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
1784 index 68820f5..ed0178f 100644
1785 --- a/drivers/gpu/drm/radeon/radeon_mode.h
1786 +++ b/drivers/gpu/drm/radeon/radeon_mode.h
1787 @@ -447,6 +447,7 @@ struct radeon_connector {
1788 struct edid *edid;
1789 void *con_priv;
1790 bool dac_load_detect;
1791 + bool detected_by_load; /* if the connection status was determined by load */
1792 uint16_t connector_object_id;
1793 struct radeon_hpd hpd;
1794 struct radeon_router router;
1795 diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
1796 index 4b5d0e6..29d85cf 100644
1797 --- a/drivers/gpu/drm/radeon/rs600.c
1798 +++ b/drivers/gpu/drm/radeon/rs600.c
1799 @@ -287,6 +287,7 @@ void rs600_hpd_init(struct radeon_device *rdev)
1800 default:
1801 break;
1802 }
1803 + radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
1804 }
1805 if (rdev->irq.installed)
1806 rs600_irq_set(rdev);
1807 diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
1808 index 1130a89..7978c55 100644
1809 --- a/drivers/hid/Kconfig
1810 +++ b/drivers/hid/Kconfig
1811 @@ -69,7 +69,7 @@ config HID_ACRUX
1812 Say Y here if you want to enable support for ACRUX game controllers.
1813
1814 config HID_ACRUX_FF
1815 - tristate "ACRUX force feedback support"
1816 + bool "ACRUX force feedback support"
1817 depends on HID_ACRUX
1818 select INPUT_FF_MEMLESS
1819 ---help---
1820 @@ -328,6 +328,7 @@ config HID_MULTITOUCH
1821 - Hanvon dual touch panels
1822 - Ilitek dual touch panels
1823 - IrTouch Infrared USB panels
1824 + - LG Display panels (Dell ST2220Tc)
1825 - Lumio CrystalTouch panels
1826 - MosArt dual-touch panels
1827 - PenMount dual touch panels
1828 diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
1829 index 18b3bc6..299d238 100644
1830 --- a/drivers/hid/hid-apple.c
1831 +++ b/drivers/hid/hid-apple.c
1832 @@ -455,6 +455,9 @@ static const struct hid_device_id apple_devices[] = {
1833 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO),
1834 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
1835 APPLE_ISO_KEYBOARD },
1836 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
1837 + .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
1838 + APPLE_ISO_KEYBOARD },
1839 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS),
1840 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
1841 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
1842 @@ -493,6 +496,24 @@ static const struct hid_device_id apple_devices[] = {
1843 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
1844 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
1845 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
1846 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
1847 + .driver_data = APPLE_HAS_FN },
1848 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
1849 + .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
1850 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS),
1851 + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
1852 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI),
1853 + .driver_data = APPLE_HAS_FN },
1854 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO),
1855 + .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
1856 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),
1857 + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
1858 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
1859 + .driver_data = APPLE_HAS_FN },
1860 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
1861 + .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
1862 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
1863 + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
1864 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
1865 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
1866 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
1867 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1868 index 242353d..5be9f47 100644
1869 --- a/drivers/hid/hid-core.c
1870 +++ b/drivers/hid/hid-core.c
1871 @@ -1340,12 +1340,22 @@ static const struct hid_device_id hid_have_special_driver[] = {
1872 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
1873 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
1874 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
1875 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
1876 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
1877 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
1878 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
1879 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
1880 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
1881 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
1882 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
1883 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
1884 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
1885 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
1886 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
1887 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
1888 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
1889 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
1890 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
1891 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
1892 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
1893 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
1894 @@ -1399,6 +1409,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1895 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
1896 { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
1897 { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
1898 + { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MULTITOUCH) },
1899 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
1900 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
1901 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
1902 @@ -1892,6 +1903,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
1903 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
1904 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
1905 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
1906 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
1907 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
1908 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
1909 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
1910 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
1911 { }
1912 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1913 index 7484e1b..0d87d98 100644
1914 --- a/drivers/hid/hid-ids.h
1915 +++ b/drivers/hid/hid-ids.h
1916 @@ -109,12 +109,22 @@
1917 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245
1918 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246
1919 #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247
1920 +#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249
1921 +#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a
1922 +#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b
1923 +#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c
1924 +#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d
1925 +#define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e
1926 #define USB_DEVICE_ID_APPLE_ALU_REVB_ANSI 0x024f
1927 #define USB_DEVICE_ID_APPLE_ALU_REVB_ISO 0x0250
1928 #define USB_DEVICE_ID_APPLE_ALU_REVB_JIS 0x0251
1929 +#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252
1930 +#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253
1931 +#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254
1932 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
1933 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
1934 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
1935 +#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
1936 #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
1937 #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
1938 #define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241
1939 @@ -423,6 +433,9 @@
1940 #define USB_DEVICE_ID_LD_HYBRID 0x2090
1941 #define USB_DEVICE_ID_LD_HEATCONTROL 0x20A0
1942
1943 +#define USB_VENDOR_ID_LG 0x1fd2
1944 +#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
1945 +
1946 #define USB_VENDOR_ID_LOGITECH 0x046d
1947 #define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
1948 #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110
1949 diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
1950 index 58d0e7a..9fc15e1 100644
1951 --- a/drivers/hid/hid-multitouch.c
1952 +++ b/drivers/hid/hid-multitouch.c
1953 @@ -672,6 +672,11 @@ static const struct hid_device_id mt_devices[] = {
1954 HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS,
1955 USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },
1956
1957 + /* LG Display panels */
1958 + { .driver_data = MT_CLS_DEFAULT,
1959 + HID_USB_DEVICE(USB_VENDOR_ID_LG,
1960 + USB_DEVICE_ID_LG_MULTITOUCH) },
1961 +
1962 /* Lumio panels */
1963 { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
1964 HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
1965 diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
1966 index 9323837..62800de 100644
1967 --- a/drivers/hwmon/coretemp.c
1968 +++ b/drivers/hwmon/coretemp.c
1969 @@ -60,14 +60,13 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
1970 #ifdef CONFIG_SMP
1971 #define TO_PHYS_ID(cpu) cpu_data(cpu).phys_proc_id
1972 #define TO_CORE_ID(cpu) cpu_data(cpu).cpu_core_id
1973 -#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
1974 #define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu))
1975 #else
1976 #define TO_PHYS_ID(cpu) (cpu)
1977 #define TO_CORE_ID(cpu) (cpu)
1978 -#define TO_ATTR_NO(cpu) (cpu)
1979 #define for_each_sibling(i, cpu) for (i = 0; false; )
1980 #endif
1981 +#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
1982
1983 /*
1984 * Per-Core Temperature Data
1985 diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
1986 index 36d7f27..4b2fc50 100644
1987 --- a/drivers/hwmon/w83627ehf.c
1988 +++ b/drivers/hwmon/w83627ehf.c
1989 @@ -1756,7 +1756,17 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data,
1990 diode = 0x70;
1991 }
1992 for (i = 0; i < 3; i++) {
1993 - if ((tmp & (0x02 << i)))
1994 + const char *label = NULL;
1995 +
1996 + if (data->temp_label)
1997 + label = data->temp_label[data->temp_src[i]];
1998 +
1999 + /* Digital source overrides analog type */
2000 + if (label && strncmp(label, "PECI", 4) == 0)
2001 + data->temp_type[i] = 6;
2002 + else if (label && strncmp(label, "AMD", 3) == 0)
2003 + data->temp_type[i] = 5;
2004 + else if ((tmp & (0x02 << i)))
2005 data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 3;
2006 else
2007 data->temp_type[i] = 4; /* thermistor */
2008 diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
2009 index 43a6271..12f7c83 100644
2010 --- a/drivers/hwspinlock/hwspinlock_core.c
2011 +++ b/drivers/hwspinlock/hwspinlock_core.c
2012 @@ -26,6 +26,7 @@
2013 #include <linux/radix-tree.h>
2014 #include <linux/hwspinlock.h>
2015 #include <linux/pm_runtime.h>
2016 +#include <linux/mutex.h>
2017
2018 #include "hwspinlock_internal.h"
2019
2020 @@ -52,10 +53,12 @@
2021 static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
2022
2023 /*
2024 - * Synchronization of access to the tree is achieved using this spinlock,
2025 + * Synchronization of access to the tree is achieved using this mutex,
2026 * as the radix-tree API requires that users provide all synchronisation.
2027 + * A mutex is needed because we're using non-atomic radix tree allocations.
2028 */
2029 -static DEFINE_SPINLOCK(hwspinlock_tree_lock);
2030 +static DEFINE_MUTEX(hwspinlock_tree_lock);
2031 +
2032
2033 /**
2034 * __hwspin_trylock() - attempt to lock a specific hwspinlock
2035 @@ -261,8 +264,7 @@ EXPORT_SYMBOL_GPL(__hwspin_unlock);
2036 * This function should be called from the underlying platform-specific
2037 * implementation, to register a new hwspinlock instance.
2038 *
2039 - * Can be called from an atomic context (will not sleep) but not from
2040 - * within interrupt context.
2041 + * Should be called from a process context (might sleep)
2042 *
2043 * Returns 0 on success, or an appropriate error code on failure
2044 */
2045 @@ -279,7 +281,7 @@ int hwspin_lock_register(struct hwspinlock *hwlock)
2046
2047 spin_lock_init(&hwlock->lock);
2048
2049 - spin_lock(&hwspinlock_tree_lock);
2050 + mutex_lock(&hwspinlock_tree_lock);
2051
2052 ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
2053 if (ret)
2054 @@ -293,7 +295,7 @@ int hwspin_lock_register(struct hwspinlock *hwlock)
2055 WARN_ON(tmp != hwlock);
2056
2057 out:
2058 - spin_unlock(&hwspinlock_tree_lock);
2059 + mutex_unlock(&hwspinlock_tree_lock);
2060 return ret;
2061 }
2062 EXPORT_SYMBOL_GPL(hwspin_lock_register);
2063 @@ -305,8 +307,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_register);
2064 * This function should be called from the underlying platform-specific
2065 * implementation, to unregister an existing (and unused) hwspinlock.
2066 *
2067 - * Can be called from an atomic context (will not sleep) but not from
2068 - * within interrupt context.
2069 + * Should be called from a process context (might sleep)
2070 *
2071 * Returns the address of hwspinlock @id on success, or NULL on failure
2072 */
2073 @@ -315,7 +316,7 @@ struct hwspinlock *hwspin_lock_unregister(unsigned int id)
2074 struct hwspinlock *hwlock = NULL;
2075 int ret;
2076
2077 - spin_lock(&hwspinlock_tree_lock);
2078 + mutex_lock(&hwspinlock_tree_lock);
2079
2080 /* make sure the hwspinlock is not in use (tag is set) */
2081 ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
2082 @@ -331,7 +332,7 @@ struct hwspinlock *hwspin_lock_unregister(unsigned int id)
2083 }
2084
2085 out:
2086 - spin_unlock(&hwspinlock_tree_lock);
2087 + mutex_unlock(&hwspinlock_tree_lock);
2088 return hwlock;
2089 }
2090 EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
2091 @@ -400,9 +401,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
2092 * to the remote core before it can be used for synchronization (to get the
2093 * id of a given hwlock, use hwspin_lock_get_id()).
2094 *
2095 - * Can be called from an atomic context (will not sleep) but not from
2096 - * within interrupt context (simply because there is no use case for
2097 - * that yet).
2098 + * Should be called from a process context (might sleep)
2099 *
2100 * Returns the address of the assigned hwspinlock, or NULL on error
2101 */
2102 @@ -411,7 +410,7 @@ struct hwspinlock *hwspin_lock_request(void)
2103 struct hwspinlock *hwlock;
2104 int ret;
2105
2106 - spin_lock(&hwspinlock_tree_lock);
2107 + mutex_lock(&hwspinlock_tree_lock);
2108
2109 /* look for an unused lock */
2110 ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
2111 @@ -431,7 +430,7 @@ struct hwspinlock *hwspin_lock_request(void)
2112 hwlock = NULL;
2113
2114 out:
2115 - spin_unlock(&hwspinlock_tree_lock);
2116 + mutex_unlock(&hwspinlock_tree_lock);
2117 return hwlock;
2118 }
2119 EXPORT_SYMBOL_GPL(hwspin_lock_request);
2120 @@ -445,9 +444,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request);
2121 * Usually early board code will be calling this function in order to
2122 * reserve specific hwspinlock ids for predefined purposes.
2123 *
2124 - * Can be called from an atomic context (will not sleep) but not from
2125 - * within interrupt context (simply because there is no use case for
2126 - * that yet).
2127 + * Should be called from a process context (might sleep)
2128 *
2129 * Returns the address of the assigned hwspinlock, or NULL on error
2130 */
2131 @@ -456,7 +453,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
2132 struct hwspinlock *hwlock;
2133 int ret;
2134
2135 - spin_lock(&hwspinlock_tree_lock);
2136 + mutex_lock(&hwspinlock_tree_lock);
2137
2138 /* make sure this hwspinlock exists */
2139 hwlock = radix_tree_lookup(&hwspinlock_tree, id);
2140 @@ -482,7 +479,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
2141 hwlock = NULL;
2142
2143 out:
2144 - spin_unlock(&hwspinlock_tree_lock);
2145 + mutex_unlock(&hwspinlock_tree_lock);
2146 return hwlock;
2147 }
2148 EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
2149 @@ -495,9 +492,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
2150 * Should only be called with an @hwlock that was retrieved from
2151 * an earlier call to omap_hwspin_lock_request{_specific}.
2152 *
2153 - * Can be called from an atomic context (will not sleep) but not from
2154 - * within interrupt context (simply because there is no use case for
2155 - * that yet).
2156 + * Should be called from a process context (might sleep)
2157 *
2158 * Returns 0 on success, or an appropriate error code on failure
2159 */
2160 @@ -511,7 +506,7 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
2161 return -EINVAL;
2162 }
2163
2164 - spin_lock(&hwspinlock_tree_lock);
2165 + mutex_lock(&hwspinlock_tree_lock);
2166
2167 /* make sure the hwspinlock is used */
2168 ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
2169 @@ -538,7 +533,7 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
2170 module_put(hwlock->owner);
2171
2172 out:
2173 - spin_unlock(&hwspinlock_tree_lock);
2174 + mutex_unlock(&hwspinlock_tree_lock);
2175 return ret;
2176 }
2177 EXPORT_SYMBOL_GPL(hwspin_lock_free);
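
The hwspinlock_core.c change converts hwspinlock_tree_lock from a spinlock to a mutex because radix_tree_insert() is called with GFP_KERNEL and may therefore sleep, which is not allowed while holding a spinlock; the function comments are updated to say the API should now be called from process context. The shape of the registration path is illustrated below as a rough userspace analogy only: pthread_mutex_t stands in for the kernel mutex, malloc for the allocation that may block, and a linked list for the radix tree; all names are invented for the sketch.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * A sleeping lock (mutex) is the right tool when the critical section
 * itself may block, for example because it allocates memory.
 */
static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

struct entry {
        unsigned id;
        struct entry *next;
};

static struct entry *registry;

/* Register a new id; returns 0 on success, -1 on failure. */
static int registry_add(unsigned id)
{
        struct entry *e;
        int ret = 0;

        pthread_mutex_lock(&registry_lock);

        /* The allocation may block; that is fine under a mutex. */
        e = malloc(sizeof(*e));
        if (!e) {
                ret = -1;
                goto out;
        }
        e->id = id;
        e->next = registry;
        registry = e;
out:
        pthread_mutex_unlock(&registry_lock);
        return ret;
}

int main(void)
{
        registry_add(0);
        registry_add(1);
        for (struct entry *e = registry; e; e = e->next)
                printf("registered id %u\n", e->id);
        return 0;
}
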
2178 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
2179 index 0e4227f..cc79045 100644
2180 --- a/drivers/iommu/amd_iommu.c
2181 +++ b/drivers/iommu/amd_iommu.c
2182 @@ -1283,7 +1283,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
2183 if (!pte || !IOMMU_PTE_PRESENT(*pte))
2184 continue;
2185
2186 - dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
2187 + dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
2188 }
2189
2190 update_domain(&dma_dom->domain);
2191 diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
2192 index dc3d3d8..661b692 100644
2193 --- a/drivers/leds/led-class.c
2194 +++ b/drivers/leds/led-class.c
2195 @@ -267,9 +267,14 @@ void led_blink_set(struct led_classdev *led_cdev,
2196 unsigned long *delay_on,
2197 unsigned long *delay_off)
2198 {
2199 + del_timer_sync(&led_cdev->blink_timer);
2200 +
2201 if (led_cdev->blink_set &&
2202 - !led_cdev->blink_set(led_cdev, delay_on, delay_off))
2203 + !led_cdev->blink_set(led_cdev, delay_on, delay_off)) {
2204 + led_cdev->blink_delay_on = *delay_on;
2205 + led_cdev->blink_delay_off = *delay_off;
2206 return;
2207 + }
2208
2209 /* blink with 1 Hz as default if nothing specified */
2210 if (!*delay_on && !*delay_off)
2211 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
2212 index 0cd9672..1d44228 100644
2213 --- a/drivers/md/raid10.c
2214 +++ b/drivers/md/raid10.c
2215 @@ -1337,7 +1337,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
2216 mirror_info_t *p = &conf->mirrors[mirror];
2217 if (p->recovery_disabled == mddev->recovery_disabled)
2218 continue;
2219 - if (!p->rdev)
2220 + if (p->rdev)
2221 continue;
2222
2223 disk_stack_limits(mddev->gendisk, rdev->bdev,
2224 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2225 index ac5e8b5..b6200c3 100644
2226 --- a/drivers/md/raid5.c
2227 +++ b/drivers/md/raid5.c
2228 @@ -3069,7 +3069,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
2229 }
2230 } else if (test_bit(In_sync, &rdev->flags))
2231 set_bit(R5_Insync, &dev->flags);
2232 - else {
2233 + else if (!test_bit(Faulty, &rdev->flags)) {
2234 /* in sync if before recovery_offset */
2235 if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
2236 set_bit(R5_Insync, &dev->flags);
2237 @@ -3116,7 +3116,7 @@ static void handle_stripe(struct stripe_head *sh)
2238 struct r5dev *pdev, *qdev;
2239
2240 clear_bit(STRIPE_HANDLE, &sh->state);
2241 - if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) {
2242 + if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
2243 /* already being handled, ensure it gets handled
2244 * again when current action finishes */
2245 set_bit(STRIPE_HANDLE, &sh->state);
2246 @@ -3165,10 +3165,14 @@ static void handle_stripe(struct stripe_head *sh)
2247 /* check if the array has lost more than max_degraded devices and,
2248 * if so, some requests might need to be failed.
2249 */
2250 - if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written)
2251 - handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
2252 - if (s.failed > conf->max_degraded && s.syncing)
2253 - handle_failed_sync(conf, sh, &s);
2254 + if (s.failed > conf->max_degraded) {
2255 + sh->check_state = 0;
2256 + sh->reconstruct_state = 0;
2257 + if (s.to_read+s.to_write+s.written)
2258 + handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
2259 + if (s.syncing)
2260 + handle_failed_sync(conf, sh, &s);
2261 + }
2262
2263 /*
2264 * might be able to return some write requests if the parity blocks
2265 @@ -3377,7 +3381,7 @@ finish:
2266
2267 return_io(s.return_bi);
2268
2269 - clear_bit(STRIPE_ACTIVE, &sh->state);
2270 + clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
2271 }
2272
2273 static void raid5_activate_delayed(raid5_conf_t *conf)
2274 diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
2275 index 5eb91b4..a224e94 100644
2276 --- a/drivers/media/dvb/dvb-usb/dib0700_core.c
2277 +++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
2278 @@ -30,6 +30,11 @@ int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion,
2279 struct dib0700_state *st = d->priv;
2280 int ret;
2281
2282 + if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
2283 + deb_info("could not acquire lock");
2284 + return 0;
2285 + }
2286 +
2287 ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
2288 REQUEST_GET_VERSION,
2289 USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
2290 @@ -46,6 +51,7 @@ int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion,
2291 if (fwtype != NULL)
2292 *fwtype = (st->buf[12] << 24) | (st->buf[13] << 16) |
2293 (st->buf[14] << 8) | st->buf[15];
2294 + mutex_unlock(&d->usb_mutex);
2295 return ret;
2296 }
2297
2298 @@ -108,7 +114,12 @@ int dib0700_ctrl_rd(struct dvb_usb_device *d, u8 *tx, u8 txlen, u8 *rx, u8 rxlen
2299 int dib0700_set_gpio(struct dvb_usb_device *d, enum dib07x0_gpios gpio, u8 gpio_dir, u8 gpio_val)
2300 {
2301 struct dib0700_state *st = d->priv;
2302 - s16 ret;
2303 + int ret;
2304 +
2305 + if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
2306 + deb_info("could not acquire lock");
2307 + return 0;
2308 + }
2309
2310 st->buf[0] = REQUEST_SET_GPIO;
2311 st->buf[1] = gpio;
2312 @@ -116,6 +127,7 @@ int dib0700_set_gpio(struct dvb_usb_device *d, enum dib07x0_gpios gpio, u8 gpio_
2313
2314 ret = dib0700_ctrl_wr(d, st->buf, 3);
2315
2316 + mutex_unlock(&d->usb_mutex);
2317 return ret;
2318 }
2319
2320 @@ -125,6 +137,11 @@ static int dib0700_set_usb_xfer_len(struct dvb_usb_device *d, u16 nb_ts_packets)
2321 int ret;
2322
2323 if (st->fw_version >= 0x10201) {
2324 + if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
2325 + deb_info("could not acquire lock");
2326 + return 0;
2327 + }
2328 +
2329 st->buf[0] = REQUEST_SET_USB_XFER_LEN;
2330 st->buf[1] = (nb_ts_packets >> 8) & 0xff;
2331 st->buf[2] = nb_ts_packets & 0xff;
2332 @@ -132,6 +149,7 @@ static int dib0700_set_usb_xfer_len(struct dvb_usb_device *d, u16 nb_ts_packets)
2333 deb_info("set the USB xfer len to %i Ts packet\n", nb_ts_packets);
2334
2335 ret = dib0700_ctrl_wr(d, st->buf, 3);
2336 + mutex_unlock(&d->usb_mutex);
2337 } else {
2338 deb_info("this firmware does not allow to change the USB xfer len\n");
2339 ret = -EIO;
2340 @@ -208,6 +226,10 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
2341
2342 } else {
2343 /* Write request */
2344 + if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
2345 + deb_info("could not acquire lock");
2346 + return 0;
2347 + }
2348 st->buf[0] = REQUEST_NEW_I2C_WRITE;
2349 st->buf[1] = msg[i].addr << 1;
2350 st->buf[2] = (en_start << 7) | (en_stop << 6) |
2351 @@ -227,6 +249,7 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
2352 USB_TYPE_VENDOR | USB_DIR_OUT,
2353 0, 0, st->buf, msg[i].len + 4,
2354 USB_CTRL_GET_TIMEOUT);
2355 + mutex_unlock(&d->usb_mutex);
2356 if (result < 0) {
2357 deb_info("i2c write error (status = %d)\n", result);
2358 break;
2359 @@ -249,6 +272,10 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
2360
2361 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
2362 return -EAGAIN;
2363 + if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
2364 + deb_info("could not acquire lock");
2365 + return 0;
2366 + }
2367
2368 for (i = 0; i < num; i++) {
2369 /* fill in the address */
2370 @@ -279,6 +306,7 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
2371 break;
2372 }
2373 }
2374 + mutex_unlock(&d->usb_mutex);
2375 mutex_unlock(&d->i2c_mutex);
2376
2377 return i;
2378 @@ -337,7 +365,12 @@ static int dib0700_set_clock(struct dvb_usb_device *d, u8 en_pll,
2379 u16 pll_loopdiv, u16 free_div, u16 dsuScaler)
2380 {
2381 struct dib0700_state *st = d->priv;
2382 - s16 ret;
2383 + int ret;
2384 +
2385 + if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
2386 + deb_info("could not acquire lock");
2387 + return 0;
2388 + }
2389
2390 st->buf[0] = REQUEST_SET_CLOCK;
2391 st->buf[1] = (en_pll << 7) | (pll_src << 6) |
2392 @@ -352,6 +385,7 @@ static int dib0700_set_clock(struct dvb_usb_device *d, u8 en_pll,
2393 st->buf[9] = dsuScaler & 0xff; /* LSB */
2394
2395 ret = dib0700_ctrl_wr(d, st->buf, 10);
2396 + mutex_unlock(&d->usb_mutex);
2397
2398 return ret;
2399 }
2400 @@ -360,10 +394,16 @@ int dib0700_set_i2c_speed(struct dvb_usb_device *d, u16 scl_kHz)
2401 {
2402 struct dib0700_state *st = d->priv;
2403 u16 divider;
2404 + int ret;
2405
2406 if (scl_kHz == 0)
2407 return -EINVAL;
2408
2409 + if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
2410 + deb_info("could not acquire lock");
2411 + return 0;
2412 + }
2413 +
2414 st->buf[0] = REQUEST_SET_I2C_PARAM;
2415 divider = (u16) (30000 / scl_kHz);
2416 st->buf[1] = 0;
2417 @@ -379,7 +419,11 @@ int dib0700_set_i2c_speed(struct dvb_usb_device *d, u16 scl_kHz)
2418 deb_info("setting I2C speed: %04x %04x %04x (%d kHz).",
2419 (st->buf[2] << 8) | (st->buf[3]), (st->buf[4] << 8) |
2420 st->buf[5], (st->buf[6] << 8) | st->buf[7], scl_kHz);
2421 - return dib0700_ctrl_wr(d, st->buf, 8);
2422 +
2423 + ret = dib0700_ctrl_wr(d, st->buf, 8);
2424 + mutex_unlock(&d->usb_mutex);
2425 +
2426 + return ret;
2427 }
2428
2429
2430 @@ -515,6 +559,11 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
2431 }
2432 }
2433
2434 + if (mutex_lock_interruptible(&adap->dev->usb_mutex) < 0) {
2435 + deb_info("could not acquire lock");
2436 + return 0;
2437 + }
2438 +
2439 st->buf[0] = REQUEST_ENABLE_VIDEO;
2440 /* this bit gives a kind of command,
2441 * rather than enabling something or not */
2442 @@ -548,7 +597,10 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
2443
2444 deb_info("data for streaming: %x %x\n", st->buf[1], st->buf[2]);
2445
2446 - return dib0700_ctrl_wr(adap->dev, st->buf, 4);
2447 + ret = dib0700_ctrl_wr(adap->dev, st->buf, 4);
2448 + mutex_unlock(&adap->dev->usb_mutex);
2449 +
2450 + return ret;
2451 }
2452
2453 int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
2454 @@ -557,6 +609,11 @@ int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
2455 struct dib0700_state *st = d->priv;
2456 int new_proto, ret;
2457
2458 + if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
2459 + deb_info("could not acquire lock");
2460 + return 0;
2461 + }
2462 +
2463 st->buf[0] = REQUEST_SET_RC;
2464 st->buf[1] = 0;
2465 st->buf[2] = 0;
2466 @@ -567,23 +624,29 @@ int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
2467 else if (rc_type == RC_TYPE_NEC)
2468 new_proto = 0;
2469 else if (rc_type == RC_TYPE_RC6) {
2470 - if (st->fw_version < 0x10200)
2471 - return -EINVAL;
2472 + if (st->fw_version < 0x10200) {
2473 + ret = -EINVAL;
2474 + goto out;
2475 + }
2476
2477 new_proto = 2;
2478 - } else
2479 - return -EINVAL;
2480 + } else {
2481 + ret = -EINVAL;
2482 + goto out;
2483 + }
2484
2485 st->buf[1] = new_proto;
2486
2487 ret = dib0700_ctrl_wr(d, st->buf, 3);
2488 if (ret < 0) {
2489 err("ir protocol setup failed");
2490 - return ret;
2491 + goto out;
2492 }
2493
2494 d->props.rc.core.protocol = rc_type;
2495
2496 +out:
2497 + mutex_unlock(&d->usb_mutex);
2498 return ret;
2499 }
2500
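
The dib0700_core.c hunks serialize every user of the shared st->buf control buffer behind d->usb_mutex, taken with mutex_lock_interruptible() and dropped on every exit path; dib0700_change_protocol() in particular is reworked around a single out: label so its early error returns cannot leak the lock. A compact userspace sketch of that single-unlock-path shape follows, with a pthread mutex in place of the kernel mutex and do_request() standing in for the control transfer; pthreads has no interruptible lock, so only the exit-path structure is shown.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t io_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the control transfer that uses the shared buffer. */
static int do_request(int proto)
{
        printf("sending protocol %d\n", proto);
        return 0;
}

/* Every return path after the lock is taken funnels through "out". */
static int change_protocol(int proto)
{
        int ret;

        if (pthread_mutex_lock(&io_lock) != 0)
                return -EAGAIN;        /* could not take the lock */

        if (proto < 0 || proto > 2) {
                ret = -EINVAL;
                goto out;
        }

        ret = do_request(proto);
        if (ret < 0)
                goto out;

        /* commit the new protocol here */
out:
        pthread_mutex_unlock(&io_lock);
        return ret;
}

int main(void)
{
        printf("valid:   %d\n", change_protocol(1));
        printf("invalid: %d\n", change_protocol(7));
        return 0;
}
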
2501 diff --git a/drivers/media/dvb/frontends/dib0070.c b/drivers/media/dvb/frontends/dib0070.c
2502 index 1d47d4d..dc1cb17 100644
2503 --- a/drivers/media/dvb/frontends/dib0070.c
2504 +++ b/drivers/media/dvb/frontends/dib0070.c
2505 @@ -27,6 +27,7 @@
2506 #include <linux/kernel.h>
2507 #include <linux/slab.h>
2508 #include <linux/i2c.h>
2509 +#include <linux/mutex.h>
2510
2511 #include "dvb_frontend.h"
2512
2513 @@ -78,10 +79,18 @@ struct dib0070_state {
2514 struct i2c_msg msg[2];
2515 u8 i2c_write_buffer[3];
2516 u8 i2c_read_buffer[2];
2517 + struct mutex i2c_buffer_lock;
2518 };
2519
2520 -static uint16_t dib0070_read_reg(struct dib0070_state *state, u8 reg)
2521 +static u16 dib0070_read_reg(struct dib0070_state *state, u8 reg)
2522 {
2523 + u16 ret;
2524 +
2525 + if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
2526 + dprintk("could not acquire lock");
2527 + return 0;
2528 + }
2529 +
2530 state->i2c_write_buffer[0] = reg;
2531
2532 memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
2533 @@ -96,13 +105,23 @@ static uint16_t dib0070_read_reg(struct dib0070_state *state, u8 reg)
2534
2535 if (i2c_transfer(state->i2c, state->msg, 2) != 2) {
2536 printk(KERN_WARNING "DiB0070 I2C read failed\n");
2537 - return 0;
2538 - }
2539 - return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
2540 + ret = 0;
2541 + } else
2542 + ret = (state->i2c_read_buffer[0] << 8)
2543 + | state->i2c_read_buffer[1];
2544 +
2545 + mutex_unlock(&state->i2c_buffer_lock);
2546 + return ret;
2547 }
2548
2549 static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
2550 {
2551 + int ret;
2552 +
2553 + if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
2554 + dprintk("could not acquire lock");
2555 + return -EINVAL;
2556 + }
2557 state->i2c_write_buffer[0] = reg;
2558 state->i2c_write_buffer[1] = val >> 8;
2559 state->i2c_write_buffer[2] = val & 0xff;
2560 @@ -115,9 +134,12 @@ static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
2561
2562 if (i2c_transfer(state->i2c, state->msg, 1) != 1) {
2563 printk(KERN_WARNING "DiB0070 I2C write failed\n");
2564 - return -EREMOTEIO;
2565 - }
2566 - return 0;
2567 + ret = -EREMOTEIO;
2568 + } else
2569 + ret = 0;
2570 +
2571 + mutex_unlock(&state->i2c_buffer_lock);
2572 + return ret;
2573 }
2574
2575 #define HARD_RESET(state) do { \
2576 @@ -734,6 +756,7 @@ struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter
2577 state->cfg = cfg;
2578 state->i2c = i2c;
2579 state->fe = fe;
2580 + mutex_init(&state->i2c_buffer_lock);
2581 fe->tuner_priv = state;
2582
2583 if (dib0070_reset(fe) != 0)
2584 diff --git a/drivers/media/dvb/frontends/dib0090.c b/drivers/media/dvb/frontends/dib0090.c
2585 index c9c935a..b174d1c 100644
2586 --- a/drivers/media/dvb/frontends/dib0090.c
2587 +++ b/drivers/media/dvb/frontends/dib0090.c
2588 @@ -27,6 +27,7 @@
2589 #include <linux/kernel.h>
2590 #include <linux/slab.h>
2591 #include <linux/i2c.h>
2592 +#include <linux/mutex.h>
2593
2594 #include "dvb_frontend.h"
2595
2596 @@ -196,6 +197,7 @@ struct dib0090_state {
2597 struct i2c_msg msg[2];
2598 u8 i2c_write_buffer[3];
2599 u8 i2c_read_buffer[2];
2600 + struct mutex i2c_buffer_lock;
2601 };
2602
2603 struct dib0090_fw_state {
2604 @@ -208,10 +210,18 @@ struct dib0090_fw_state {
2605 struct i2c_msg msg;
2606 u8 i2c_write_buffer[2];
2607 u8 i2c_read_buffer[2];
2608 + struct mutex i2c_buffer_lock;
2609 };
2610
2611 static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
2612 {
2613 + u16 ret;
2614 +
2615 + if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
2616 + dprintk("could not acquire lock");
2617 + return 0;
2618 + }
2619 +
2620 state->i2c_write_buffer[0] = reg;
2621
2622 memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
2623 @@ -226,14 +236,24 @@ static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
2624
2625 if (i2c_transfer(state->i2c, state->msg, 2) != 2) {
2626 printk(KERN_WARNING "DiB0090 I2C read failed\n");
2627 - return 0;
2628 - }
2629 + ret = 0;
2630 + } else
2631 + ret = (state->i2c_read_buffer[0] << 8)
2632 + | state->i2c_read_buffer[1];
2633
2634 - return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
2635 + mutex_unlock(&state->i2c_buffer_lock);
2636 + return ret;
2637 }
2638
2639 static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
2640 {
2641 + int ret;
2642 +
2643 + if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
2644 + dprintk("could not acquire lock");
2645 + return -EINVAL;
2646 + }
2647 +
2648 state->i2c_write_buffer[0] = reg & 0xff;
2649 state->i2c_write_buffer[1] = val >> 8;
2650 state->i2c_write_buffer[2] = val & 0xff;
2651 @@ -246,13 +266,23 @@ static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
2652
2653 if (i2c_transfer(state->i2c, state->msg, 1) != 1) {
2654 printk(KERN_WARNING "DiB0090 I2C write failed\n");
2655 - return -EREMOTEIO;
2656 - }
2657 - return 0;
2658 + ret = -EREMOTEIO;
2659 + } else
2660 + ret = 0;
2661 +
2662 + mutex_unlock(&state->i2c_buffer_lock);
2663 + return ret;
2664 }
2665
2666 static u16 dib0090_fw_read_reg(struct dib0090_fw_state *state, u8 reg)
2667 {
2668 + u16 ret;
2669 +
2670 + if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
2671 + dprintk("could not acquire lock");
2672 + return 0;
2673 + }
2674 +
2675 state->i2c_write_buffer[0] = reg;
2676
2677 memset(&state->msg, 0, sizeof(struct i2c_msg));
2678 @@ -262,13 +292,24 @@ static u16 dib0090_fw_read_reg(struct dib0090_fw_state *state, u8 reg)
2679 state->msg.len = 2;
2680 if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
2681 printk(KERN_WARNING "DiB0090 I2C read failed\n");
2682 - return 0;
2683 - }
2684 - return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
2685 + ret = 0;
2686 + } else
2687 + ret = (state->i2c_read_buffer[0] << 8)
2688 + | state->i2c_read_buffer[1];
2689 +
2690 + mutex_unlock(&state->i2c_buffer_lock);
2691 + return ret;
2692 }
2693
2694 static int dib0090_fw_write_reg(struct dib0090_fw_state *state, u8 reg, u16 val)
2695 {
2696 + int ret;
2697 +
2698 + if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
2699 + dprintk("could not acquire lock");
2700 + return -EINVAL;
2701 + }
2702 +
2703 state->i2c_write_buffer[0] = val >> 8;
2704 state->i2c_write_buffer[1] = val & 0xff;
2705
2706 @@ -279,9 +320,12 @@ static int dib0090_fw_write_reg(struct dib0090_fw_state *state, u8 reg, u16 val)
2707 state->msg.len = 2;
2708 if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
2709 printk(KERN_WARNING "DiB0090 I2C write failed\n");
2710 - return -EREMOTEIO;
2711 - }
2712 - return 0;
2713 + ret = -EREMOTEIO;
2714 + } else
2715 + ret = 0;
2716 +
2717 + mutex_unlock(&state->i2c_buffer_lock);
2718 + return ret;
2719 }
2720
2721 #define HARD_RESET(state) do { if (cfg->reset) { if (cfg->sleep) cfg->sleep(fe, 0); msleep(10); cfg->reset(fe, 1); msleep(10); cfg->reset(fe, 0); msleep(10); } } while (0)
2722 @@ -2440,6 +2484,7 @@ struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapte
2723 st->config = config;
2724 st->i2c = i2c;
2725 st->fe = fe;
2726 + mutex_init(&st->i2c_buffer_lock);
2727 fe->tuner_priv = st;
2728
2729 if (config->wbd == NULL)
2730 @@ -2471,6 +2516,7 @@ struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_ada
2731 st->config = config;
2732 st->i2c = i2c;
2733 st->fe = fe;
2734 + mutex_init(&st->i2c_buffer_lock);
2735 fe->tuner_priv = st;
2736
2737 if (dib0090_fw_reset_digital(fe, st->config) != 0)
2738 diff --git a/drivers/media/dvb/frontends/dib7000m.c b/drivers/media/dvb/frontends/dib7000m.c
2739 index 79cb1c2..dbb76d7 100644
2740 --- a/drivers/media/dvb/frontends/dib7000m.c
2741 +++ b/drivers/media/dvb/frontends/dib7000m.c
2742 @@ -11,6 +11,7 @@
2743 #include <linux/kernel.h>
2744 #include <linux/slab.h>
2745 #include <linux/i2c.h>
2746 +#include <linux/mutex.h>
2747
2748 #include "dvb_frontend.h"
2749
2750 @@ -55,6 +56,7 @@ struct dib7000m_state {
2751 struct i2c_msg msg[2];
2752 u8 i2c_write_buffer[4];
2753 u8 i2c_read_buffer[2];
2754 + struct mutex i2c_buffer_lock;
2755 };
2756
2757 enum dib7000m_power_mode {
2758 @@ -69,6 +71,13 @@ enum dib7000m_power_mode {
2759
2760 static u16 dib7000m_read_word(struct dib7000m_state *state, u16 reg)
2761 {
2762 + u16 ret;
2763 +
2764 + if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
2765 + dprintk("could not acquire lock");
2766 + return 0;
2767 + }
2768 +
2769 state->i2c_write_buffer[0] = (reg >> 8) | 0x80;
2770 state->i2c_write_buffer[1] = reg & 0xff;
2771
2772 @@ -85,11 +94,21 @@ static u16 dib7000m_read_word(struct dib7000m_state *state, u16 reg)
2773 if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
2774 dprintk("i2c read error on %d",reg);
2775
2776 - return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
2777 + ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
2778 + mutex_unlock(&state->i2c_buffer_lock);
2779 +
2780 + return ret;
2781 }
2782
2783 static int dib7000m_write_word(struct dib7000m_state *state, u16 reg, u16 val)
2784 {
2785 + int ret;
2786 +
2787 + if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
2788 + dprintk("could not acquire lock");
2789 + return -EINVAL;
2790 + }
2791 +
2792 state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
2793 state->i2c_write_buffer[1] = reg & 0xff;
2794 state->i2c_write_buffer[2] = (val >> 8) & 0xff;
2795 @@ -101,7 +120,10 @@ static int dib7000m_write_word(struct dib7000m_state *state, u16 reg, u16 val)
2796 state->msg[0].buf = state->i2c_write_buffer;
2797 state->msg[0].len = 4;
2798
2799 - return i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;
2800 + ret = (i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ?
2801 + -EREMOTEIO : 0);
2802 + mutex_unlock(&state->i2c_buffer_lock);
2803 + return ret;
2804 }
2805 static void dib7000m_write_tab(struct dib7000m_state *state, u16 *buf)
2806 {
2807 @@ -1385,6 +1407,7 @@ struct dvb_frontend * dib7000m_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr,
2808 demod = &st->demod;
2809 demod->demodulator_priv = st;
2810 memcpy(&st->demod.ops, &dib7000m_ops, sizeof(struct dvb_frontend_ops));
2811 + mutex_init(&st->i2c_buffer_lock);
2812
2813 st->timf_default = cfg->bw->timf;
2814
2815 diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
2816 index a64a538..4eb9c2b 100644
2817 --- a/drivers/media/dvb/frontends/dib7000p.c
2818 +++ b/drivers/media/dvb/frontends/dib7000p.c
2819 @@ -10,6 +10,7 @@
2820 #include <linux/kernel.h>
2821 #include <linux/slab.h>
2822 #include <linux/i2c.h>
2823 +#include <linux/mutex.h>
2824
2825 #include "dvb_math.h"
2826 #include "dvb_frontend.h"
2827 @@ -68,6 +69,7 @@ struct dib7000p_state {
2828 struct i2c_msg msg[2];
2829 u8 i2c_write_buffer[4];
2830 u8 i2c_read_buffer[2];
2831 + struct mutex i2c_buffer_lock;
2832 };
2833
2834 enum dib7000p_power_mode {
2835 @@ -81,6 +83,13 @@ static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff);
2836
2837 static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg)
2838 {
2839 + u16 ret;
2840 +
2841 + if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
2842 + dprintk("could not acquire lock");
2843 + return 0;
2844 + }
2845 +
2846 state->i2c_write_buffer[0] = reg >> 8;
2847 state->i2c_write_buffer[1] = reg & 0xff;
2848
2849 @@ -97,11 +106,20 @@ static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg)
2850 if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
2851 dprintk("i2c read error on %d", reg);
2852
2853 - return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
2854 + ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
2855 + mutex_unlock(&state->i2c_buffer_lock);
2856 + return ret;
2857 }
2858
2859 static int dib7000p_write_word(struct dib7000p_state *state, u16 reg, u16 val)
2860 {
2861 + int ret;
2862 +
2863 + if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
2864 + dprintk("could not acquire lock");
2865 + return -EINVAL;
2866 + }
2867 +
2868 state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
2869 state->i2c_write_buffer[1] = reg & 0xff;
2870 state->i2c_write_buffer[2] = (val >> 8) & 0xff;
2871 @@ -113,7 +131,10 @@ static int dib7000p_write_word(struct dib7000p_state *state, u16 reg, u16 val)
2872 state->msg[0].buf = state->i2c_write_buffer;
2873 state->msg[0].len = 4;
2874
2875 - return i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;
2876 + ret = (i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ?
2877 + -EREMOTEIO : 0);
2878 + mutex_unlock(&state->i2c_buffer_lock);
2879 + return ret;
2880 }
2881
2882 static void dib7000p_write_tab(struct dib7000p_state *state, u16 * buf)
2883 @@ -1646,6 +1667,7 @@ int dib7000p_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defau
2884 return -ENOMEM;
2885
2886 dpst->i2c_adap = i2c;
2887 + mutex_init(&dpst->i2c_buffer_lock);
2888
2889 for (k = no_of_demods - 1; k >= 0; k--) {
2890 dpst->cfg = cfg[k];
2891 @@ -2324,6 +2346,7 @@ struct dvb_frontend *dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr,
2892 demod = &st->demod;
2893 demod->demodulator_priv = st;
2894 memcpy(&st->demod.ops, &dib7000p_ops, sizeof(struct dvb_frontend_ops));
2895 + mutex_init(&st->i2c_buffer_lock);
2896
2897 dib7000p_write_word(st, 1287, 0x0003); /* sram lead in, rdy */
2898
2899 @@ -2333,8 +2356,9 @@ struct dvb_frontend *dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr,
2900 st->version = dib7000p_read_word(st, 897);
2901
2902 /* FIXME: make sure the dev.parent field is initialized, or else
2903 - request_firmware() will hit an OOPS (this should be moved somewhere
2904 - more common) */
2905 + request_firmware() will hit an OOPS (this should be moved somewhere
2906 + more common) */
2907 + st->i2c_master.gated_tuner_i2c_adap.dev.parent = i2c_adap->dev.parent;
2908
2909 /* FIXME: make sure the dev.parent field is initialized, or else
2910 request_firmware() will hit an OOPS (this should be moved somewhere
2911 diff --git a/drivers/media/dvb/frontends/dib8000.c b/drivers/media/dvb/frontends/dib8000.c
2912 index 7d2ea11..fe284d5 100644
2913 --- a/drivers/media/dvb/frontends/dib8000.c
2914 +++ b/drivers/media/dvb/frontends/dib8000.c
2915 @@ -10,6 +10,8 @@
2916 #include <linux/kernel.h>
2917 #include <linux/slab.h>
2918 #include <linux/i2c.h>
2919 +#include <linux/mutex.h>
2920 +
2921 #include "dvb_math.h"
2922
2923 #include "dvb_frontend.h"
2924 @@ -37,6 +39,7 @@ struct i2c_device {
2925 u8 addr;
2926 u8 *i2c_write_buffer;
2927 u8 *i2c_read_buffer;
2928 + struct mutex *i2c_buffer_lock;
2929 };
2930
2931 struct dib8000_state {
2932 @@ -77,6 +80,7 @@ struct dib8000_state {
2933 struct i2c_msg msg[2];
2934 u8 i2c_write_buffer[4];
2935 u8 i2c_read_buffer[2];
2936 + struct mutex i2c_buffer_lock;
2937 };
2938
2939 enum dib8000_power_mode {
2940 @@ -86,24 +90,39 @@ enum dib8000_power_mode {
2941
2942 static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
2943 {
2944 + u16 ret;
2945 struct i2c_msg msg[2] = {
2946 - {.addr = i2c->addr >> 1, .flags = 0,
2947 - .buf = i2c->i2c_write_buffer, .len = 2},
2948 - {.addr = i2c->addr >> 1, .flags = I2C_M_RD,
2949 - .buf = i2c->i2c_read_buffer, .len = 2},
2950 + {.addr = i2c->addr >> 1, .flags = 0, .len = 2},
2951 + {.addr = i2c->addr >> 1, .flags = I2C_M_RD, .len = 2},
2952 };
2953
2954 + if (mutex_lock_interruptible(i2c->i2c_buffer_lock) < 0) {
2955 + dprintk("could not acquire lock");
2956 + return 0;
2957 + }
2958 +
2959 + msg[0].buf = i2c->i2c_write_buffer;
2960 msg[0].buf[0] = reg >> 8;
2961 msg[0].buf[1] = reg & 0xff;
2962 + msg[1].buf = i2c->i2c_read_buffer;
2963
2964 if (i2c_transfer(i2c->adap, msg, 2) != 2)
2965 dprintk("i2c read error on %d", reg);
2966
2967 - return (msg[1].buf[0] << 8) | msg[1].buf[1];
2968 + ret = (msg[1].buf[0] << 8) | msg[1].buf[1];
2969 + mutex_unlock(i2c->i2c_buffer_lock);
2970 + return ret;
2971 }
2972
2973 static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
2974 {
2975 + u16 ret;
2976 +
2977 + if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
2978 + dprintk("could not acquire lock");
2979 + return 0;
2980 + }
2981 +
2982 state->i2c_write_buffer[0] = reg >> 8;
2983 state->i2c_write_buffer[1] = reg & 0xff;
2984
2985 @@ -120,7 +139,10 @@ static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
2986 if (i2c_transfer(state->i2c.adap, state->msg, 2) != 2)
2987 dprintk("i2c read error on %d", reg);
2988
2989 - return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
2990 + ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
2991 + mutex_unlock(&state->i2c_buffer_lock);
2992 +
2993 + return ret;
2994 }
2995
2996 static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
2997 @@ -135,22 +157,35 @@ static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
2998
2999 static int dib8000_i2c_write16(struct i2c_device *i2c, u16 reg, u16 val)
3000 {
3001 - struct i2c_msg msg = {.addr = i2c->addr >> 1, .flags = 0,
3002 - .buf = i2c->i2c_write_buffer, .len = 4};
3003 + struct i2c_msg msg = {.addr = i2c->addr >> 1, .flags = 0, .len = 4};
3004 int ret = 0;
3005
3006 + if (mutex_lock_interruptible(i2c->i2c_buffer_lock) < 0) {
3007 + dprintk("could not acquire lock");
3008 + return -EINVAL;
3009 + }
3010 +
3011 + msg.buf = i2c->i2c_write_buffer;
3012 msg.buf[0] = (reg >> 8) & 0xff;
3013 msg.buf[1] = reg & 0xff;
3014 msg.buf[2] = (val >> 8) & 0xff;
3015 msg.buf[3] = val & 0xff;
3016
3017 ret = i2c_transfer(i2c->adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
3018 + mutex_unlock(i2c->i2c_buffer_lock);
3019
3020 return ret;
3021 }
3022
3023 static int dib8000_write_word(struct dib8000_state *state, u16 reg, u16 val)
3024 {
3025 + int ret;
3026 +
3027 + if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
3028 + dprintk("could not acquire lock");
3029 + return -EINVAL;
3030 + }
3031 +
3032 state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
3033 state->i2c_write_buffer[1] = reg & 0xff;
3034 state->i2c_write_buffer[2] = (val >> 8) & 0xff;
3035 @@ -162,7 +197,11 @@ static int dib8000_write_word(struct dib8000_state *state, u16 reg, u16 val)
3036 state->msg[0].buf = state->i2c_write_buffer;
3037 state->msg[0].len = 4;
3038
3039 - return i2c_transfer(state->i2c.adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;
3040 + ret = (i2c_transfer(state->i2c.adap, state->msg, 1) != 1 ?
3041 + -EREMOTEIO : 0);
3042 + mutex_unlock(&state->i2c_buffer_lock);
3043 +
3044 + return ret;
3045 }
3046
3047 static const s16 coeff_2k_sb_1seg_dqpsk[8] = {
3048 @@ -2434,8 +2473,15 @@ int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 defau
3049 if (!client.i2c_read_buffer) {
3050 dprintk("%s: not enough memory", __func__);
3051 ret = -ENOMEM;
3052 - goto error_memory;
3053 + goto error_memory_read;
3054 + }
3055 + client.i2c_buffer_lock = kzalloc(sizeof(struct mutex), GFP_KERNEL);
3056 + if (!client.i2c_buffer_lock) {
3057 + dprintk("%s: not enough memory", __func__);
3058 + ret = -ENOMEM;
3059 + goto error_memory_lock;
3060 }
3061 + mutex_init(client.i2c_buffer_lock);
3062
3063 for (k = no_of_demods - 1; k >= 0; k--) {
3064 /* designated i2c address */
3065 @@ -2476,8 +2522,10 @@ int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 defau
3066 }
3067
3068 error:
3069 + kfree(client.i2c_buffer_lock);
3070 +error_memory_lock:
3071 kfree(client.i2c_read_buffer);
3072 -error_memory:
3073 +error_memory_read:
3074 kfree(client.i2c_write_buffer);
3075
3076 return ret;
3077 @@ -2581,6 +2629,8 @@ struct dvb_frontend *dib8000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, s
3078 state->i2c.addr = i2c_addr;
3079 state->i2c.i2c_write_buffer = state->i2c_write_buffer;
3080 state->i2c.i2c_read_buffer = state->i2c_read_buffer;
3081 + mutex_init(&state->i2c_buffer_lock);
3082 + state->i2c.i2c_buffer_lock = &state->i2c_buffer_lock;
3083 state->gpio_val = cfg->gpio_val;
3084 state->gpio_dir = cfg->gpio_dir;
3085
3086 diff --git a/drivers/media/dvb/frontends/dib9000.c b/drivers/media/dvb/frontends/dib9000.c
3087 index a085588..b931074 100644
3088 --- a/drivers/media/dvb/frontends/dib9000.c
3089 +++ b/drivers/media/dvb/frontends/dib9000.c
3090 @@ -38,6 +38,15 @@ struct i2c_device {
3091 #define DibInitLock(lock) mutex_init(lock)
3092 #define DibFreeLock(lock)
3093
3094 +struct dib9000_pid_ctrl {
3095 +#define DIB9000_PID_FILTER_CTRL 0
3096 +#define DIB9000_PID_FILTER 1
3097 + u8 cmd;
3098 + u8 id;
3099 + u16 pid;
3100 + u8 onoff;
3101 +};
3102 +
3103 struct dib9000_state {
3104 struct i2c_device i2c;
3105
3106 @@ -99,6 +108,10 @@ struct dib9000_state {
3107 struct i2c_msg msg[2];
3108 u8 i2c_write_buffer[255];
3109 u8 i2c_read_buffer[255];
3110 + DIB_LOCK demod_lock;
3111 + u8 get_frontend_internal;
3112 + struct dib9000_pid_ctrl pid_ctrl[10];
3113 + s8 pid_ctrl_index; /* -1: empty list; -2: do not use the list */
3114 };
3115
3116 static const u32 fe_info[44] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3117 @@ -1743,19 +1756,56 @@ EXPORT_SYMBOL(dib9000_set_gpio);
3118 int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
3119 {
3120 struct dib9000_state *state = fe->demodulator_priv;
3121 - u16 val = dib9000_read_word(state, 294 + 1) & 0xffef;
3122 + u16 val;
3123 + int ret;
3124 +
3125 + if ((state->pid_ctrl_index != -2) && (state->pid_ctrl_index < 9)) {
3126 + /* postpone the pid filtering cmd */
3127 + dprintk("pid filter cmd postpone");
3128 + state->pid_ctrl_index++;
3129 + state->pid_ctrl[state->pid_ctrl_index].cmd = DIB9000_PID_FILTER_CTRL;
3130 + state->pid_ctrl[state->pid_ctrl_index].onoff = onoff;
3131 + return 0;
3132 + }
3133 +
3134 + DibAcquireLock(&state->demod_lock);
3135 +
3136 + val = dib9000_read_word(state, 294 + 1) & 0xffef;
3137 val |= (onoff & 0x1) << 4;
3138
3139 dprintk("PID filter enabled %d", onoff);
3140 - return dib9000_write_word(state, 294 + 1, val);
3141 + ret = dib9000_write_word(state, 294 + 1, val);
3142 + DibReleaseLock(&state->demod_lock);
3143 + return ret;
3144 +
3145 }
3146 EXPORT_SYMBOL(dib9000_fw_pid_filter_ctrl);
3147
3148 int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
3149 {
3150 struct dib9000_state *state = fe->demodulator_priv;
3151 + int ret;
3152 +
3153 + if (state->pid_ctrl_index != -2) {
3154 + /* postpone the pid filtering cmd */
3155 + dprintk("pid filter postpone");
3156 + if (state->pid_ctrl_index < 9) {
3157 + state->pid_ctrl_index++;
3158 + state->pid_ctrl[state->pid_ctrl_index].cmd = DIB9000_PID_FILTER;
3159 + state->pid_ctrl[state->pid_ctrl_index].id = id;
3160 + state->pid_ctrl[state->pid_ctrl_index].pid = pid;
3161 + state->pid_ctrl[state->pid_ctrl_index].onoff = onoff;
3162 + } else
3163 + dprintk("can not add any more pid ctrl cmd");
3164 + return 0;
3165 + }
3166 +
3167 + DibAcquireLock(&state->demod_lock);
3168 dprintk("Index %x, PID %d, OnOff %d", id, pid, onoff);
3169 - return dib9000_write_word(state, 300 + 1 + id, onoff ? (1 << 13) | pid : 0);
3170 + ret = dib9000_write_word(state, 300 + 1 + id,
3171 + onoff ? (1 << 13) | pid : 0);
3172 + DibReleaseLock(&state->demod_lock);
3173 + return ret;
3174 }
3175 EXPORT_SYMBOL(dib9000_fw_pid_filter);
3176
3177 @@ -1778,6 +1828,7 @@ static void dib9000_release(struct dvb_frontend *demod)
3178 DibFreeLock(&state->platform.risc.mbx_lock);
3179 DibFreeLock(&state->platform.risc.mem_lock);
3180 DibFreeLock(&state->platform.risc.mem_mbx_lock);
3181 + DibFreeLock(&state->demod_lock);
3182 dibx000_exit_i2c_master(&st->i2c_master);
3183
3184 i2c_del_adapter(&st->tuner_adap);
3185 @@ -1795,14 +1846,19 @@ static int dib9000_sleep(struct dvb_frontend *fe)
3186 {
3187 struct dib9000_state *state = fe->demodulator_priv;
3188 u8 index_frontend;
3189 - int ret;
3190 + int ret = 0;
3191
3192 + DibAcquireLock(&state->demod_lock);
3193 for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
3194 ret = state->fe[index_frontend]->ops.sleep(state->fe[index_frontend]);
3195 if (ret < 0)
3196 - return ret;
3197 + goto error;
3198 }
3199 - return dib9000_mbx_send(state, OUT_MSG_FE_SLEEP, NULL, 0);
3200 + ret = dib9000_mbx_send(state, OUT_MSG_FE_SLEEP, NULL, 0);
3201 +
3202 +error:
3203 + DibReleaseLock(&state->demod_lock);
3204 + return ret;
3205 }
3206
3207 static int dib9000_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune)
3208 @@ -1816,7 +1872,10 @@ static int dib9000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
3209 struct dib9000_state *state = fe->demodulator_priv;
3210 u8 index_frontend, sub_index_frontend;
3211 fe_status_t stat;
3212 - int ret;
3213 + int ret = 0;
3214 +
3215 + if (state->get_frontend_internal == 0)
3216 + DibAcquireLock(&state->demod_lock);
3217
3218 for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
3219 state->fe[index_frontend]->ops.read_status(state->fe[index_frontend], &stat);
3220 @@ -1846,14 +1905,15 @@ static int dib9000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
3221 state->fe[index_frontend]->dtv_property_cache.rolloff;
3222 }
3223 }
3224 - return 0;
3225 + ret = 0;
3226 + goto return_value;
3227 }
3228 }
3229
3230 /* get the channel from master chip */
3231 ret = dib9000_fw_get_channel(fe, fep);
3232 if (ret != 0)
3233 - return ret;
3234 + goto return_value;
3235
3236 /* synchronize the cache with the other frontends */
3237 for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
3238 @@ -1866,8 +1926,12 @@ static int dib9000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
3239 state->fe[index_frontend]->dtv_property_cache.code_rate_LP = fe->dtv_property_cache.code_rate_LP;
3240 state->fe[index_frontend]->dtv_property_cache.rolloff = fe->dtv_property_cache.rolloff;
3241 }
3242 + ret = 0;
3243
3244 - return 0;
3245 +return_value:
3246 + if (state->get_frontend_internal == 0)
3247 + DibReleaseLock(&state->demod_lock);
3248 + return ret;
3249 }
3250
3251 static int dib9000_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state)
3252 @@ -1912,6 +1976,10 @@ static int dib9000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
3253 dprintk("dib9000: must specify bandwidth ");
3254 return 0;
3255 }
3256 +
3257 + state->pid_ctrl_index = -1; /* postpone the pid filtering cmd */
3258 + DibAcquireLock(&state->demod_lock);
3259 +
3260 fe->dtv_property_cache.delivery_system = SYS_DVBT;
3261
3262 /* set the master status */
3263 @@ -1974,13 +2042,18 @@ static int dib9000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
3264 /* check the tune result */
3265 if (exit_condition == 1) { /* tune failed */
3266 dprintk("tune failed");
3267 + DibReleaseLock(&state->demod_lock);
3268 + /* tune failed; put all the pid filtering cmd to junk */
3269 + state->pid_ctrl_index = -1;
3270 return 0;
3271 }
3272
3273 dprintk("tune success on frontend%i", index_frontend_success);
3274
3275 /* synchronize all the channel cache */
3276 + state->get_frontend_internal = 1;
3277 dib9000_get_frontend(state->fe[0], fep);
3278 + state->get_frontend_internal = 0;
3279
3280 /* retune the other frontends with the found channel */
3281 channel_status.status = CHANNEL_STATUS_PARAMETERS_SET;
3282 @@ -2025,6 +2098,28 @@ static int dib9000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
3283 /* turn off the diversity for the last frontend */
3284 dib9000_fw_set_diversity_in(state->fe[index_frontend - 1], 0);
3285
3286 + DibReleaseLock(&state->demod_lock);
3287 + if (state->pid_ctrl_index >= 0) {
3288 + u8 index_pid_filter_cmd;
3289 + u8 pid_ctrl_index = state->pid_ctrl_index;
3290 +
3291 + state->pid_ctrl_index = -2;
3292 + for (index_pid_filter_cmd = 0;
3293 + index_pid_filter_cmd <= pid_ctrl_index;
3294 + index_pid_filter_cmd++) {
3295 + if (state->pid_ctrl[index_pid_filter_cmd].cmd == DIB9000_PID_FILTER_CTRL)
3296 + dib9000_fw_pid_filter_ctrl(state->fe[0],
3297 + state->pid_ctrl[index_pid_filter_cmd].onoff);
3298 + else if (state->pid_ctrl[index_pid_filter_cmd].cmd == DIB9000_PID_FILTER)
3299 + dib9000_fw_pid_filter(state->fe[0],
3300 + state->pid_ctrl[index_pid_filter_cmd].id,
3301 + state->pid_ctrl[index_pid_filter_cmd].pid,
3302 + state->pid_ctrl[index_pid_filter_cmd].onoff);
3303 + }
3304 + }
3305 + /* do not postpone any more the pid filtering */
3306 + state->pid_ctrl_index = -2;
3307 +
3308 return 0;
3309 }
3310
3311 @@ -2041,6 +2136,7 @@ static int dib9000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
3312 u8 index_frontend;
3313 u16 lock = 0, lock_slave = 0;
3314
3315 + DibAcquireLock(&state->demod_lock);
3316 for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
3317 lock_slave |= dib9000_read_lock(state->fe[index_frontend]);
3318
3319 @@ -2059,6 +2155,8 @@ static int dib9000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
3320 if ((lock & 0x0008) || (lock_slave & 0x0008))
3321 *stat |= FE_HAS_LOCK;
3322
3323 + DibReleaseLock(&state->demod_lock);
3324 +
3325 return 0;
3326 }
3327
3328 @@ -2066,10 +2164,14 @@ static int dib9000_read_ber(struct dvb_frontend *fe, u32 * ber)
3329 {
3330 struct dib9000_state *state = fe->demodulator_priv;
3331 u16 *c;
3332 + int ret = 0;
3333
3334 + DibAcquireLock(&state->demod_lock);
3335 DibAcquireLock(&state->platform.risc.mem_mbx_lock);
3336 - if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
3337 - return -EIO;
3338 + if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
3339 + ret = -EIO;
3340 + goto error;
3341 + }
3342 dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR,
3343 state->i2c_read_buffer, 16 * 2);
3344 DibReleaseLock(&state->platform.risc.mem_mbx_lock);
3345 @@ -2077,7 +2179,10 @@ static int dib9000_read_ber(struct dvb_frontend *fe, u32 * ber)
3346 c = (u16 *)state->i2c_read_buffer;
3347
3348 *ber = c[10] << 16 | c[11];
3349 - return 0;
3350 +
3351 +error:
3352 + DibReleaseLock(&state->demod_lock);
3353 + return ret;
3354 }
3355
3356 static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
3357 @@ -2086,7 +2191,9 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
3358 u8 index_frontend;
3359 u16 *c = (u16 *)state->i2c_read_buffer;
3360 u16 val;
3361 + int ret = 0;
3362
3363 + DibAcquireLock(&state->demod_lock);
3364 *strength = 0;
3365 for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
3366 state->fe[index_frontend]->ops.read_signal_strength(state->fe[index_frontend], &val);
3367 @@ -2097,8 +2204,10 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
3368 }
3369
3370 DibAcquireLock(&state->platform.risc.mem_mbx_lock);
3371 - if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
3372 - return -EIO;
3373 + if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
3374 + ret = -EIO;
3375 + goto error;
3376 + }
3377 dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
3378 DibReleaseLock(&state->platform.risc.mem_mbx_lock);
3379
3380 @@ -2107,7 +2216,10 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
3381 *strength = 65535;
3382 else
3383 *strength += val;
3384 - return 0;
3385 +
3386 +error:
3387 + DibReleaseLock(&state->demod_lock);
3388 + return ret;
3389 }
3390
3391 static u32 dib9000_get_snr(struct dvb_frontend *fe)
3392 @@ -2151,6 +2263,7 @@ static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr)
3393 u8 index_frontend;
3394 u32 snr_master;
3395
3396 + DibAcquireLock(&state->demod_lock);
3397 snr_master = dib9000_get_snr(fe);
3398 for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
3399 snr_master += dib9000_get_snr(state->fe[index_frontend]);
3400 @@ -2161,6 +2274,8 @@ static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr)
3401 } else
3402 *snr = 0;
3403
3404 + DibReleaseLock(&state->demod_lock);
3405 +
3406 return 0;
3407 }
3408
3409 @@ -2168,15 +2283,22 @@ static int dib9000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
3410 {
3411 struct dib9000_state *state = fe->demodulator_priv;
3412 u16 *c = (u16 *)state->i2c_read_buffer;
3413 + int ret = 0;
3414
3415 + DibAcquireLock(&state->demod_lock);
3416 DibAcquireLock(&state->platform.risc.mem_mbx_lock);
3417 - if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
3418 - return -EIO;
3419 + if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
3420 + ret = -EIO;
3421 + goto error;
3422 + }
3423 dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
3424 DibReleaseLock(&state->platform.risc.mem_mbx_lock);
3425
3426 *unc = c[12];
3427 - return 0;
3428 +
3429 +error:
3430 + DibReleaseLock(&state->demod_lock);
3431 + return ret;
3432 }
3433
3434 int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, u8 first_addr)
3435 @@ -2322,6 +2444,10 @@ struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, c
3436 DibInitLock(&st->platform.risc.mbx_lock);
3437 DibInitLock(&st->platform.risc.mem_lock);
3438 DibInitLock(&st->platform.risc.mem_mbx_lock);
3439 + DibInitLock(&st->demod_lock);
3440 + st->get_frontend_internal = 0;
3441 +
3442 + st->pid_ctrl_index = -2;
3443
3444 st->fe[0] = fe;
3445 fe->demodulator_priv = st;
3446 diff --git a/drivers/media/dvb/frontends/dibx000_common.c b/drivers/media/dvb/frontends/dibx000_common.c
3447 index dc5d17a..774d507 100644
3448 --- a/drivers/media/dvb/frontends/dibx000_common.c
3449 +++ b/drivers/media/dvb/frontends/dibx000_common.c
3450 @@ -1,4 +1,5 @@
3451 #include <linux/i2c.h>
3452 +#include <linux/mutex.h>
3453
3454 #include "dibx000_common.h"
3455
3456 @@ -10,6 +11,13 @@ MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
3457
3458 static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val)
3459 {
3460 + int ret;
3461 +
3462 + if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
3463 + dprintk("could not acquire lock");
3464 + return -EINVAL;
3465 + }
3466 +
3467 mst->i2c_write_buffer[0] = (reg >> 8) & 0xff;
3468 mst->i2c_write_buffer[1] = reg & 0xff;
3469 mst->i2c_write_buffer[2] = (val >> 8) & 0xff;
3470 @@ -21,11 +29,21 @@ static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val)
3471 mst->msg[0].buf = mst->i2c_write_buffer;
3472 mst->msg[0].len = 4;
3473
3474 - return i2c_transfer(mst->i2c_adap, mst->msg, 1) != 1 ? -EREMOTEIO : 0;
3475 + ret = i2c_transfer(mst->i2c_adap, mst->msg, 1) != 1 ? -EREMOTEIO : 0;
3476 + mutex_unlock(&mst->i2c_buffer_lock);
3477 +
3478 + return ret;
3479 }
3480
3481 static u16 dibx000_read_word(struct dibx000_i2c_master *mst, u16 reg)
3482 {
3483 + u16 ret;
3484 +
3485 + if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
3486 + dprintk("could not acquire lock");
3487 + return 0;
3488 + }
3489 +
3490 mst->i2c_write_buffer[0] = reg >> 8;
3491 mst->i2c_write_buffer[1] = reg & 0xff;
3492
3493 @@ -42,7 +60,10 @@ static u16 dibx000_read_word(struct dibx000_i2c_master *mst, u16 reg)
3494 if (i2c_transfer(mst->i2c_adap, mst->msg, 2) != 2)
3495 dprintk("i2c read error on %d", reg);
3496
3497 - return (mst->i2c_read_buffer[0] << 8) | mst->i2c_read_buffer[1];
3498 + ret = (mst->i2c_read_buffer[0] << 8) | mst->i2c_read_buffer[1];
3499 + mutex_unlock(&mst->i2c_buffer_lock);
3500 +
3501 + return ret;
3502 }
3503
3504 static int dibx000_is_i2c_done(struct dibx000_i2c_master *mst)
3505 @@ -257,6 +278,7 @@ static int dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap,
3506 struct i2c_msg msg[], int num)
3507 {
3508 struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
3509 + int ret;
3510
3511 if (num > 32) {
3512 dprintk("%s: too much I2C message to be transmitted (%i).\
3513 @@ -264,10 +286,15 @@ static int dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap,
3514 return -ENOMEM;
3515 }
3516
3517 - memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
3518 -
3519 dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_GPIO_6_7);
3520
3521 + if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
3522 + dprintk("could not acquire lock");
3523 + return -EINVAL;
3524 + }
3525 +
3526 + memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
3527 +
3528 /* open the gate */
3529 dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[0], msg[0].addr, 1);
3530 mst->msg[0].addr = mst->i2c_addr;
3531 @@ -282,7 +309,11 @@ static int dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap,
3532 mst->msg[num + 1].buf = &mst->i2c_write_buffer[4];
3533 mst->msg[num + 1].len = 4;
3534
3535 - return i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? num : -EIO;
3536 + ret = (i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ?
3537 + num : -EIO);
3538 +
3539 + mutex_unlock(&mst->i2c_buffer_lock);
3540 + return ret;
3541 }
3542
3543 static struct i2c_algorithm dibx000_i2c_gated_gpio67_algo = {
3544 @@ -294,6 +325,7 @@ static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
3545 struct i2c_msg msg[], int num)
3546 {
3547 struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
3548 + int ret;
3549
3550 if (num > 32) {
3551 dprintk("%s: too much I2C message to be transmitted (%i).\
3552 @@ -301,10 +333,14 @@ static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
3553 return -ENOMEM;
3554 }
3555
3556 - memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
3557 -
3558 dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_TUNER);
3559
3560 + if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
3561 + dprintk("could not acquire lock");
3562 + return -EINVAL;
3563 + }
3564 + memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
3565 +
3566 /* open the gate */
3567 dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[0], msg[0].addr, 1);
3568 mst->msg[0].addr = mst->i2c_addr;
3569 @@ -319,7 +355,10 @@ static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
3570 mst->msg[num + 1].buf = &mst->i2c_write_buffer[4];
3571 mst->msg[num + 1].len = 4;
3572
3573 - return i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? num : -EIO;
3574 + ret = (i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ?
3575 + num : -EIO);
3576 + mutex_unlock(&mst->i2c_buffer_lock);
3577 + return ret;
3578 }
3579
3580 static struct i2c_algorithm dibx000_i2c_gated_tuner_algo = {
3581 @@ -390,8 +429,18 @@ static int i2c_adapter_init(struct i2c_adapter *i2c_adap,
3582 int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev,
3583 struct i2c_adapter *i2c_adap, u8 i2c_addr)
3584 {
3585 - u8 tx[4];
3586 - struct i2c_msg m = {.addr = i2c_addr >> 1,.buf = tx,.len = 4 };
3587 + int ret;
3588 +
3589 + mutex_init(&mst->i2c_buffer_lock);
3590 + if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
3591 + dprintk("could not acquire lock");
3592 + return -EINVAL;
3593 + }
3594 + memset(mst->msg, 0, sizeof(struct i2c_msg));
3595 + mst->msg[0].addr = i2c_addr >> 1;
3596 + mst->msg[0].flags = 0;
3597 + mst->msg[0].buf = mst->i2c_write_buffer;
3598 + mst->msg[0].len = 4;
3599
3600 mst->device_rev = device_rev;
3601 mst->i2c_adap = i2c_adap;
3602 @@ -431,9 +480,12 @@ int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev,
3603 "DiBX000: could not initialize the master i2c_adapter\n");
3604
3605 /* initialize the i2c-master by closing the gate */
3606 - dibx000_i2c_gate_ctrl(mst, tx, 0, 0);
3607 + dibx000_i2c_gate_ctrl(mst, mst->i2c_write_buffer, 0, 0);
3608 +
3609 + ret = (i2c_transfer(i2c_adap, mst->msg, 1) == 1);
3610 + mutex_unlock(&mst->i2c_buffer_lock);
3611
3612 - return i2c_transfer(i2c_adap, &m, 1) == 1;
3613 + return ret;
3614 }
3615
3616 EXPORT_SYMBOL(dibx000_init_i2c_master);
3617 diff --git a/drivers/media/dvb/frontends/dibx000_common.h b/drivers/media/dvb/frontends/dibx000_common.h
3618 index f031165..5e01147 100644
3619 --- a/drivers/media/dvb/frontends/dibx000_common.h
3620 +++ b/drivers/media/dvb/frontends/dibx000_common.h
3621 @@ -33,6 +33,7 @@ struct dibx000_i2c_master {
3622 struct i2c_msg msg[34];
3623 u8 i2c_write_buffer[8];
3624 u8 i2c_read_buffer[2];
3625 + struct mutex i2c_buffer_lock;
3626 };
3627
3628 extern int dibx000_init_i2c_master(struct dibx000_i2c_master *mst,
3629 diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
3630 index aa83f07..bcb45be 100644
3631 --- a/drivers/media/video/cx23885/cx23885-dvb.c
3632 +++ b/drivers/media/video/cx23885/cx23885-dvb.c
3633 @@ -844,7 +844,7 @@ static int dvb_register(struct cx23885_tsport *port)
3634 static struct xc2028_ctrl ctl = {
3635 .fname = XC3028L_DEFAULT_FIRMWARE,
3636 .max_len = 64,
3637 - .demod = 5000,
3638 + .demod = XC3028_FE_DIBCOM52,
3639 /* This is true for all demods with
3640 v36 firmware? */
3641 .type = XC2028_D2633,
3642 diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
3643 index b27b940..d637982 100644
3644 --- a/drivers/mmc/core/core.c
3645 +++ b/drivers/mmc/core/core.c
3646 @@ -1151,7 +1151,7 @@ static void mmc_power_up(struct mmc_host *host)
3647 mmc_host_clk_release(host);
3648 }
3649
3650 -static void mmc_power_off(struct mmc_host *host)
3651 +void mmc_power_off(struct mmc_host *host)
3652 {
3653 mmc_host_clk_hold(host);
3654
3655 @@ -1241,8 +1241,7 @@ void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
3656 }
3657
3658 /*
3659 - * Remove the current bus handler from a host. Assumes that there are
3660 - * no interesting cards left, so the bus is powered down.
3661 + * Remove the current bus handler from a host.
3662 */
3663 void mmc_detach_bus(struct mmc_host *host)
3664 {
3665 @@ -1259,8 +1258,6 @@ void mmc_detach_bus(struct mmc_host *host)
3666
3667 spin_unlock_irqrestore(&host->lock, flags);
3668
3669 - mmc_power_off(host);
3670 -
3671 mmc_bus_put(host);
3672 }
3673
3674 @@ -1845,6 +1842,7 @@ void mmc_stop_host(struct mmc_host *host)
3675
3676 mmc_claim_host(host);
3677 mmc_detach_bus(host);
3678 + mmc_power_off(host);
3679 mmc_release_host(host);
3680 mmc_bus_put(host);
3681 return;
3682 @@ -1974,6 +1972,7 @@ int mmc_suspend_host(struct mmc_host *host)
3683 host->bus_ops->remove(host);
3684 mmc_claim_host(host);
3685 mmc_detach_bus(host);
3686 + mmc_power_off(host);
3687 mmc_release_host(host);
3688 host->pm_flags = 0;
3689 err = 0;
3690 @@ -2061,6 +2060,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
3691 host->bus_ops->remove(host);
3692
3693 mmc_detach_bus(host);
3694 + mmc_power_off(host);
3695 mmc_release_host(host);
3696 host->pm_flags = 0;
3697 break;
3698 diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
3699 index d9411ed..14664f1 100644
3700 --- a/drivers/mmc/core/core.h
3701 +++ b/drivers/mmc/core/core.h
3702 @@ -43,6 +43,7 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage,
3703 bool cmd11);
3704 void mmc_set_timing(struct mmc_host *host, unsigned int timing);
3705 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
3706 +void mmc_power_off(struct mmc_host *host);
3707
3708 static inline void mmc_delay(unsigned int ms)
3709 {
3710 diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
3711 index 5700b1c..6952f77 100644
3712 --- a/drivers/mmc/core/mmc.c
3713 +++ b/drivers/mmc/core/mmc.c
3714 @@ -359,6 +359,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
3715 * card has the Enhanced area enabled. If so, export enhanced
3716 * area offset and size to user by adding sysfs interface.
3717 */
3718 + card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
3719 if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
3720 (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
3721 u8 hc_erase_grp_sz =
3722 @@ -405,6 +406,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
3723 if (card->ext_csd.rev >= 5)
3724 card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
3725
3726 + card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
3727 if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
3728 card->erased_byte = 0xFF;
3729 else
3730 @@ -891,6 +893,7 @@ static void mmc_detect(struct mmc_host *host)
3731
3732 mmc_claim_host(host);
3733 mmc_detach_bus(host);
3734 + mmc_power_off(host);
3735 mmc_release_host(host);
3736 }
3737 }
3738 diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
3739 index 0370e03..4c281a4 100644
3740 --- a/drivers/mmc/core/sd.c
3741 +++ b/drivers/mmc/core/sd.c
3742 @@ -1043,6 +1043,7 @@ static void mmc_sd_detect(struct mmc_host *host)
3743
3744 mmc_claim_host(host);
3745 mmc_detach_bus(host);
3746 + mmc_power_off(host);
3747 mmc_release_host(host);
3748 }
3749 }
3750 diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
3751 index 262fff0..ac492ac 100644
3752 --- a/drivers/mmc/core/sdio.c
3753 +++ b/drivers/mmc/core/sdio.c
3754 @@ -597,6 +597,7 @@ out:
3755
3756 mmc_claim_host(host);
3757 mmc_detach_bus(host);
3758 + mmc_power_off(host);
3759 mmc_release_host(host);
3760 }
3761 }
3762 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
3763 index f1af222..49e20a4 100644
3764 --- a/drivers/mtd/mtdchar.c
3765 +++ b/drivers/mtd/mtdchar.c
3766 @@ -320,6 +320,7 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
3767 ops.mode = MTD_OOB_RAW;
3768 ops.datbuf = kbuf;
3769 ops.oobbuf = NULL;
3770 + ops.ooboffs = 0;
3771 ops.len = len;
3772
3773 ret = mtd->write_oob(mtd, *ppos, &ops);
3774 diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
3775 index a46e9bb..86f05f4 100644
3776 --- a/drivers/mtd/nand/nand_base.c
3777 +++ b/drivers/mtd/nand/nand_base.c
3778 @@ -2097,14 +2097,22 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
3779
3780 /**
3781 * nand_fill_oob - [Internal] Transfer client buffer to oob
3782 - * @chip: nand chip structure
3783 + * @mtd: MTD device structure
3784 * @oob: oob data buffer
3785 * @len: oob data write length
3786 * @ops: oob ops structure
3787 */
3788 -static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
3789 - struct mtd_oob_ops *ops)
3790 +static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
3791 + struct mtd_oob_ops *ops)
3792 {
3793 + struct nand_chip *chip = mtd->priv;
3794 +
3795 + /*
3796 + * Initialise to all 0xFF, to avoid the possibility of left over OOB
3797 + * data from a previous OOB read.
3798 + */
3799 + memset(chip->oob_poi, 0xff, mtd->oobsize);
3800 +
3801 switch (ops->mode) {
3802
3803 case MTD_OOB_PLACE:
3804 @@ -2201,10 +2209,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
3805 (chip->pagebuf << chip->page_shift) < (to + ops->len))
3806 chip->pagebuf = -1;
3807
3808 - /* If we're not given explicit OOB data, let it be 0xFF */
3809 - if (likely(!oob))
3810 - memset(chip->oob_poi, 0xff, mtd->oobsize);
3811 -
3812 /* Don't allow multipage oob writes with offset */
3813 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
3814 return -EINVAL;
3815 @@ -2226,8 +2230,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
3816
3817 if (unlikely(oob)) {
3818 size_t len = min(oobwritelen, oobmaxlen);
3819 - oob = nand_fill_oob(chip, oob, len, ops);
3820 + oob = nand_fill_oob(mtd, oob, len, ops);
3821 oobwritelen -= len;
3822 + } else {
3823 + /* We still need to erase leftover OOB data */
3824 + memset(chip->oob_poi, 0xff, mtd->oobsize);
3825 }
3826
3827 ret = chip->write_page(mtd, chip, wbuf, page, cached,
3828 @@ -2401,10 +2408,8 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
3829 if (page == chip->pagebuf)
3830 chip->pagebuf = -1;
3831
3832 - memset(chip->oob_poi, 0xff, mtd->oobsize);
3833 - nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
3834 + nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
3835 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
3836 - memset(chip->oob_poi, 0xff, mtd->oobsize);
3837
3838 if (status)
3839 return status;
3840 diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
3841 index 1fb3b3a..30689cc 100644
3842 --- a/drivers/mtd/nand/pxa3xx_nand.c
3843 +++ b/drivers/mtd/nand/pxa3xx_nand.c
3844 @@ -685,6 +685,8 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
3845 * OOB, ignore such double bit errors
3846 */
3847 if (is_buf_blank(buf, mtd->writesize))
3848 + info->retcode = ERR_NONE;
3849 + else
3850 mtd->ecc_stats.failed++;
3851 }
3852
3853 @@ -813,7 +815,7 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
3854 info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
3855 /* set info fields needed to read id */
3856 info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
3857 - info->reg_ndcr = ndcr;
3858 + info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
3859 info->cmdset = &default_cmdset;
3860
3861 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
3862 @@ -882,7 +884,7 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
3863 struct pxa3xx_nand_info *info = mtd->priv;
3864 struct platform_device *pdev = info->pdev;
3865 struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
3866 - struct nand_flash_dev pxa3xx_flash_ids[2] = { {NULL,}, {NULL,} };
3867 + struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
3868 const struct pxa3xx_nand_flash *f = NULL;
3869 struct nand_chip *chip = mtd->priv;
3870 uint32_t id = -1;
3871 @@ -942,8 +944,10 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
3872 pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
3873 if (f->flash_width == 16)
3874 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
3875 + pxa3xx_flash_ids[1].name = NULL;
3876 + def = pxa3xx_flash_ids;
3877 KEEP_CONFIG:
3878 - if (nand_scan_ident(mtd, 1, pxa3xx_flash_ids))
3879 + if (nand_scan_ident(mtd, 1, def))
3880 return -ENODEV;
3881 /* calculate addressing information */
3882 info->col_addr_cycles = (mtd->writesize >= 2048) ? 2 : 1;
3883 @@ -954,9 +958,9 @@ KEEP_CONFIG:
3884 info->row_addr_cycles = 2;
3885 mtd->name = mtd_names[0];
3886 chip->ecc.mode = NAND_ECC_HW;
3887 - chip->ecc.size = f->page_size;
3888 + chip->ecc.size = info->page_size;
3889
3890 - chip->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0;
3891 + chip->options = (info->reg_ndcr & NDCR_DWIDTH_M) ? NAND_BUSWIDTH_16 : 0;
3892 chip->options |= NAND_NO_AUTOINCR;
3893 chip->options |= NAND_NO_READRDY;
3894
3895 diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
3896 index 7a87d07..4938bd0 100644
3897 --- a/drivers/mtd/redboot.c
3898 +++ b/drivers/mtd/redboot.c
3899 @@ -297,6 +297,9 @@ static struct mtd_part_parser redboot_parser = {
3900 .name = "RedBoot",
3901 };
3902
3903 +/* mtd parsers will request the module by parser name */
3904 +MODULE_ALIAS("RedBoot");
3905 +
3906 static int __init redboot_parser_init(void)
3907 {
3908 return register_mtd_parser(&redboot_parser);
3909 diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
3910 index edd7304..dc44b73 100644
3911 --- a/drivers/net/phy/dp83640.c
3912 +++ b/drivers/net/phy/dp83640.c
3913 @@ -875,6 +875,7 @@ static void dp83640_remove(struct phy_device *phydev)
3914 struct dp83640_clock *clock;
3915 struct list_head *this, *next;
3916 struct dp83640_private *tmp, *dp83640 = phydev->priv;
3917 + struct sk_buff *skb;
3918
3919 if (phydev->addr == BROADCAST_ADDR)
3920 return;
3921 @@ -882,6 +883,12 @@ static void dp83640_remove(struct phy_device *phydev)
3922 enable_status_frames(phydev, false);
3923 cancel_work_sync(&dp83640->ts_work);
3924
3925 + while ((skb = skb_dequeue(&dp83640->rx_queue)) != NULL)
3926 + kfree_skb(skb);
3927 +
3928 + while ((skb = skb_dequeue(&dp83640->tx_queue)) != NULL)
3929 + skb_complete_tx_timestamp(skb, NULL);
3930 +
3931 clock = dp83640_clock_get(dp83640->clock);
3932
3933 if (dp83640 == clock->chosen) {
3934 @@ -1060,7 +1067,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
3935 struct dp83640_private *dp83640 = phydev->priv;
3936
3937 if (!dp83640->hwts_tx_en) {
3938 - kfree_skb(skb);
3939 + skb_complete_tx_timestamp(skb, NULL);
3940 return;
3941 }
3942 skb_queue_tail(&dp83640->tx_queue, skb);
3943 diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
3944 index 3bb1311..7145714 100644
3945 --- a/drivers/net/rionet.c
3946 +++ b/drivers/net/rionet.c
3947 @@ -88,8 +88,8 @@ static struct rio_dev **rionet_active;
3948 #define dev_rionet_capable(dev) \
3949 is_rionet_capable(dev->src_ops, dev->dst_ops)
3950
3951 -#define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001)
3952 -#define RIONET_GET_DESTID(x) (*(u16 *)(x + 4))
3953 +#define RIONET_MAC_MATCH(x) (!memcmp((x), "\00\01\00\01", 4))
3954 +#define RIONET_GET_DESTID(x) ((*((u8 *)x + 4) << 8) | *((u8 *)x + 5))
3955
3956 static int rionet_rx_clean(struct net_device *ndev)
3957 {
3958 diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
3959 index c11a2b8..d469004 100644
3960 --- a/drivers/net/tg3.c
3961 +++ b/drivers/net/tg3.c
3962 @@ -6029,12 +6029,12 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
3963
3964 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3965 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
3966 - struct sk_buff *skb,
3967 + struct sk_buff **pskb,
3968 u32 *entry, u32 *budget,
3969 u32 base_flags, u32 mss, u32 vlan)
3970 {
3971 struct tg3 *tp = tnapi->tp;
3972 - struct sk_buff *new_skb;
3973 + struct sk_buff *new_skb, *skb = *pskb;
3974 dma_addr_t new_addr = 0;
3975 int ret = 0;
3976
3977 @@ -6076,7 +6076,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
3978 }
3979
3980 dev_kfree_skb(skb);
3981 -
3982 + *pskb = new_skb;
3983 return ret;
3984 }
3985
3986 @@ -6305,7 +6305,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3987 */
3988 entry = tnapi->tx_prod;
3989 budget = tg3_tx_avail(tnapi);
3990 - if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
3991 + if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
3992 base_flags, mss, vlan))
3993 goto out_unlock;
3994 }
3995 diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
3996 index bfb6481..4e4e7c3 100644
3997 --- a/drivers/net/wireless/ath/ath9k/ani.c
3998 +++ b/drivers/net/wireless/ath/ath9k/ani.c
3999 @@ -502,9 +502,6 @@ static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
4000 ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
4001 ATH9K_ANI_CCK_WEAK_SIG_THR);
4002
4003 - ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) |
4004 - ATH9K_RX_FILTER_PHYERR);
4005 -
4006 ath9k_ani_restart(ah);
4007 return;
4008 }
4009 @@ -525,8 +522,6 @@ static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
4010 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
4011 aniState->firstepLevel);
4012
4013 - ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) &
4014 - ~ATH9K_RX_FILTER_PHYERR);
4015 ath9k_ani_restart(ah);
4016
4017 ENABLE_REGWRITE_BUFFER(ah);
4018 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
4019 index f48051c..7c2aaad 100644
4020 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
4021 +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
4022 @@ -643,8 +643,9 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
4023 outlier_idx = max_idx;
4024 else
4025 outlier_idx = min_idx;
4026 +
4027 + mp_coeff[outlier_idx] = mp_avg;
4028 }
4029 - mp_coeff[outlier_idx] = mp_avg;
4030 }
4031
4032 static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
4033 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
4034 index 8ff0b88..048f6af 100644
4035 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
4036 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
4037 @@ -253,8 +253,6 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
4038 return -EIO;
4039 }
4040
4041 - if (status & AR_TxOpExceeded)
4042 - ts->ts_status |= ATH9K_TXERR_XTXOP;
4043 ts->ts_rateindex = MS(status, AR_FinalTxIdx);
4044 ts->ts_seqnum = MS(status, AR_SeqNum);
4045 ts->tid = MS(status, AR_TxTid);
4046 @@ -264,6 +262,8 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
4047 ts->ts_status = 0;
4048 ts->ts_flags = 0;
4049
4050 + if (status & AR_TxOpExceeded)
4051 + ts->ts_status |= ATH9K_TXERR_XTXOP;
4052 status = ACCESS_ONCE(ads->status2);
4053 ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00);
4054 ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01);
4055 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
4056 index 5c59042..32ac05f 100644
4057 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
4058 +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
4059 @@ -572,12 +572,12 @@
4060
4061 #define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300)
4062
4063 -#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + AR_SREV_9485(ah) ? \
4064 - 0x3c8 : 0x448)
4065 -#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + AR_SREV_9485(ah) ? \
4066 - 0x3c4 : 0x440)
4067 -#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + AR_SREV_9485(ah) ? \
4068 - 0x3f0 : 0x48c)
4069 +#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + (AR_SREV_9485(ah) ? \
4070 + 0x3c8 : 0x448))
4071 +#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + (AR_SREV_9485(ah) ? \
4072 + 0x3c4 : 0x440))
4073 +#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + (AR_SREV_9485(ah) ? \
4074 + 0x3f0 : 0x48c))
4075 #define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_i) (AR_SM_BASE + \
4076 (AR_SREV_9485(ah) ? \
4077 0x3d0 : 0x450) + ((_i) << 2))
4078 diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
4079 index 611ea6c..d16d029 100644
4080 --- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
4081 +++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
4082 @@ -521,7 +521,7 @@ static const u32 ar9485_1_1_radio_postamble[][2] = {
4083 {0x000160ac, 0x24611800},
4084 {0x000160b0, 0x03284f3e},
4085 {0x0001610c, 0x00170000},
4086 - {0x00016140, 0x10804008},
4087 + {0x00016140, 0x50804008},
4088 };
4089
4090 static const u32 ar9485_1_1_mac_postamble[][5] = {
4091 @@ -603,7 +603,7 @@ static const u32 ar9485_1_1_radio_core[][2] = {
4092
4093 static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
4094 /* Addr allmodes */
4095 - {0x00018c00, 0x10052e5e},
4096 + {0x00018c00, 0x18052e5e},
4097 {0x00018c04, 0x000801d8},
4098 {0x00018c08, 0x0000080c},
4099 };
4100 @@ -776,7 +776,7 @@ static const u32 ar9485_modes_green_ob_db_tx_gain_1_1[][5] = {
4101
4102 static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
4103 /* Addr allmodes */
4104 - {0x00018c00, 0x10013e5e},
4105 + {0x00018c00, 0x18013e5e},
4106 {0x00018c04, 0x000801d8},
4107 {0x00018c08, 0x0000080c},
4108 };
4109 @@ -882,7 +882,7 @@ static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = {
4110
4111 static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
4112 /* Addr allmodes */
4113 - {0x00018c00, 0x10012e5e},
4114 + {0x00018c00, 0x18012e5e},
4115 {0x00018c04, 0x000801d8},
4116 {0x00018c08, 0x0000080c},
4117 };
4118 @@ -1021,7 +1021,7 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
4119
4120 static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = {
4121 /* Addr allmodes */
4122 - {0x00018c00, 0x10053e5e},
4123 + {0x00018c00, 0x18053e5e},
4124 {0x00018c04, 0x000801d8},
4125 {0x00018c08, 0x0000080c},
4126 };
4127 diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
4128 index d3f4a59..77c8ded 100644
4129 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c
4130 +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
4131 @@ -38,6 +38,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
4132 { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
4133 { USB_DEVICE(0x040D, 0x3801) }, /* VIA */
4134 { USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */
4135 + { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
4136
4137 { USB_DEVICE(0x0cf3, 0x7015),
4138 .driver_info = AR9287_USB }, /* Atheros */
4139 diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
4140 index 8dcefe7..0be84b1 100644
4141 --- a/drivers/net/wireless/ath/ath9k/hw.c
4142 +++ b/drivers/net/wireless/ath/ath9k/hw.c
4143 @@ -2101,6 +2101,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
4144 pCap->num_gpio_pins = AR9271_NUM_GPIO;
4145 else if (AR_DEVID_7010(ah))
4146 pCap->num_gpio_pins = AR7010_NUM_GPIO;
4147 + else if (AR_SREV_9300_20_OR_LATER(ah))
4148 + pCap->num_gpio_pins = AR9300_NUM_GPIO;
4149 + else if (AR_SREV_9287_11_OR_LATER(ah))
4150 + pCap->num_gpio_pins = AR9287_NUM_GPIO;
4151 else if (AR_SREV_9285_12_OR_LATER(ah))
4152 pCap->num_gpio_pins = AR9285_NUM_GPIO;
4153 else if (AR_SREV_9280_20_OR_LATER(ah))
4154 diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
4155 index 4c21f8c..60a3bb2 100644
4156 --- a/drivers/net/wireless/ath/ath9k/recv.c
4157 +++ b/drivers/net/wireless/ath/ath9k/recv.c
4158 @@ -433,12 +433,9 @@ void ath_rx_cleanup(struct ath_softc *sc)
4159
4160 u32 ath_calcrxfilter(struct ath_softc *sc)
4161 {
4162 -#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
4163 -
4164 u32 rfilt;
4165
4166 - rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
4167 - | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
4168 + rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
4169 | ATH9K_RX_FILTER_MCAST;
4170
4171 if (sc->rx.rxfilter & FIF_PROBE_REQ)
4172 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
4173 index d42ef17..a7ddc98 100644
4174 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
4175 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
4176 @@ -295,8 +295,8 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
4177 return ret;
4178 }
4179
4180 - if ((ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION) &&
4181 - priv->cfg->ht_params->smps_mode)
4182 + if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
4183 + priv->cfg->ht_params && priv->cfg->ht_params->smps_mode)
4184 ieee80211_request_smps(ctx->vif,
4185 priv->cfg->ht_params->smps_mode);
4186
4187 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
4188 index 5621100..a5c5a0a 100644
4189 --- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
4190 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
4191 @@ -113,13 +113,8 @@ static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
4192 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
4193
4194 IWL_DEBUG_FW(priv, "%s uCode section being loaded...\n", name);
4195 - ret = wait_event_interruptible_timeout(priv->wait_command_queue,
4196 - priv->ucode_write_complete, 5 * HZ);
4197 - if (ret == -ERESTARTSYS) {
4198 - IWL_ERR(priv, "Could not load the %s uCode section due "
4199 - "to interrupt\n", name);
4200 - return ret;
4201 - }
4202 + ret = wait_event_timeout(priv->wait_command_queue,
4203 + priv->ucode_write_complete, 5 * HZ);
4204 if (!ret) {
4205 IWL_ERR(priv, "Could not load the %s uCode section\n",
4206 name);
4207 diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
4208 index cf376f6..d652778 100644
4209 --- a/drivers/net/wireless/iwlwifi/iwl-core.c
4210 +++ b/drivers/net/wireless/iwlwifi/iwl-core.c
4211 @@ -867,7 +867,7 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
4212 * commands by clearing the ready bit */
4213 clear_bit(STATUS_READY, &priv->status);
4214
4215 - wake_up_interruptible(&priv->wait_command_queue);
4216 + wake_up(&priv->wait_command_queue);
4217
4218 if (!ondemand) {
4219 /*
4220 @@ -918,7 +918,7 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
4221 */
4222 clear_bit(STATUS_READY, &priv->status);
4223 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4224 - wake_up_interruptible(&priv->wait_command_queue);
4225 + wake_up(&priv->wait_command_queue);
4226 IWL_ERR(priv, "RF is used by WiMAX\n");
4227 return;
4228 }
4229 diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
4230 index 8e31400..732f01b 100644
4231 --- a/drivers/net/wireless/iwlwifi/iwl-rx.c
4232 +++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
4233 @@ -561,7 +561,7 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
4234 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
4235 test_bit(STATUS_RF_KILL_HW, &priv->status));
4236 else
4237 - wake_up_interruptible(&priv->wait_command_queue);
4238 + wake_up(&priv->wait_command_queue);
4239 }
4240
4241 static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
4242 diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
4243 index 4748602..f9f0df0 100644
4244 --- a/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
4245 +++ b/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
4246 @@ -671,7 +671,7 @@ void iwl_irq_tasklet(struct iwl_priv *priv)
4247 handled |= CSR_INT_BIT_FH_TX;
4248 /* Wake up uCode load routine, now that load is complete */
4249 priv->ucode_write_complete = 1;
4250 - wake_up_interruptible(&priv->wait_command_queue);
4251 + wake_up(&priv->wait_command_queue);
4252 }
4253
4254 if (inta & ~handled) {
4255 diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
4256 index 222d410..2bf3107 100644
4257 --- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
4258 +++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
4259 @@ -790,7 +790,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
4260 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4261 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
4262 get_cmd_string(cmd->hdr.cmd));
4263 - wake_up_interruptible(&priv->wait_command_queue);
4264 + wake_up(&priv->wait_command_queue);
4265 }
4266
4267 meta->flags = 0;
4268 @@ -957,7 +957,7 @@ static int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
4269 return ret;
4270 }
4271
4272 - ret = wait_event_interruptible_timeout(priv->wait_command_queue,
4273 + ret = wait_event_timeout(priv->wait_command_queue,
4274 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
4275 HOST_COMPLETE_TIMEOUT);
4276 if (!ret) {
4277 diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
4278 index edfe01c..afb7356 100644
4279 --- a/drivers/net/wireless/wl12xx/scan.c
4280 +++ b/drivers/net/wireless/wl12xx/scan.c
4281 @@ -83,14 +83,18 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
4282 for (i = 0, j = 0;
4283 i < req->n_channels && j < WL1271_SCAN_MAX_CHANNELS;
4284 i++) {
4285 -
4286 flags = req->channels[i]->flags;
4287
4288 if (!test_bit(i, wl->scan.scanned_ch) &&
4289 !(flags & IEEE80211_CHAN_DISABLED) &&
4290 - ((!!(flags & IEEE80211_CHAN_PASSIVE_SCAN)) == passive) &&
4291 - (req->channels[i]->band == band)) {
4292 -
4293 + (req->channels[i]->band == band) &&
4294 + /*
4295 + * In passive scans, we scan all remaining
4296 + * channels, even if not marked as such.
4297 + * In active scans, we only scan channels not
4298 + * marked as passive.
4299 + */
4300 + (passive || !(flags & IEEE80211_CHAN_PASSIVE_SCAN))) {
4301 wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
4302 req->channels[i]->band,
4303 req->channels[i]->center_freq);
4304 @@ -142,6 +146,10 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
4305 int ret;
4306 u16 scan_options = 0;
4307
4308 + /* skip active scans if we don't have SSIDs */
4309 + if (!passive && wl->scan.req->n_ssids == 0)
4310 + return WL1271_NOTHING_TO_SCAN;
4311 +
4312 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
4313 trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
4314 if (!cmd || !trigger) {
4315 @@ -152,8 +160,7 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
4316 /* We always use high priority scans */
4317 scan_options = WL1271_SCAN_OPT_PRIORITY_HIGH;
4318
4319 - /* No SSIDs means that we have a forced passive scan */
4320 - if (passive || wl->scan.req->n_ssids == 0)
4321 + if (passive)
4322 scan_options |= WL1271_SCAN_OPT_PASSIVE;
4323
4324 cmd->params.scan_options = cpu_to_le16(scan_options);
4325 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
4326 index 1196f61..cec4629 100644
4327 --- a/drivers/pci/quirks.c
4328 +++ b/drivers/pci/quirks.c
4329 @@ -2745,20 +2745,6 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
4330 /* disable must be done via function #0 */
4331 if (PCI_FUNC(dev->devfn))
4332 return;
4333 -
4334 - pci_read_config_byte(dev, 0xCB, &disable);
4335 -
4336 - if (disable & 0x02)
4337 - return;
4338 -
4339 - pci_read_config_byte(dev, 0xCA, &write_enable);
4340 - pci_write_config_byte(dev, 0xCA, 0x57);
4341 - pci_write_config_byte(dev, 0xCB, disable | 0x02);
4342 - pci_write_config_byte(dev, 0xCA, write_enable);
4343 -
4344 - dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
4345 - dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
4346 -
4347 /*
4348 * RICOH 0xe823 SD/MMC card reader fails to recognize
4349 * certain types of SD/MMC cards. Lowering the SD base
4350 @@ -2781,6 +2767,20 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
4351
4352 dev_notice(&dev->dev, "MMC controller base frequency changed to 50Mhz.\n");
4353 }
4354 +
4355 + pci_read_config_byte(dev, 0xCB, &disable);
4356 +
4357 + if (disable & 0x02)
4358 + return;
4359 +
4360 + pci_read_config_byte(dev, 0xCA, &write_enable);
4361 + pci_write_config_byte(dev, 0xCA, 0x57);
4362 + pci_write_config_byte(dev, 0xCB, disable | 0x02);
4363 + pci_write_config_byte(dev, 0xCA, write_enable);
4364 +
4365 + dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
4366 + dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
4367 +
4368 }
4369 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
4370 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
4371 diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
4372 index 6fa215a..90832a9 100644
4373 --- a/drivers/pci/xen-pcifront.c
4374 +++ b/drivers/pci/xen-pcifront.c
4375 @@ -400,9 +400,8 @@ static int pcifront_claim_resource(struct pci_dev *dev, void *data)
4376 dev_info(&pdev->xdev->dev, "claiming resource %s/%d\n",
4377 pci_name(dev), i);
4378 if (pci_claim_resource(dev, i)) {
4379 - dev_err(&pdev->xdev->dev, "Could not claim "
4380 - "resource %s/%d! Device offline. Try "
4381 - "giving less than 4GB to domain.\n",
4382 + dev_err(&pdev->xdev->dev, "Could not claim resource %s/%d! "
4383 + "Device offline. Try using e820_host=1 in the guest config.\n",
4384 pci_name(dev), i);
4385 }
4386 }
4387 diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
4388 index 3591630..59ac26c 100644
4389 --- a/drivers/platform/x86/samsung-laptop.c
4390 +++ b/drivers/platform/x86/samsung-laptop.c
4391 @@ -370,15 +370,17 @@ static u8 read_brightness(void)
4392 &sretval);
4393 if (!retval) {
4394 user_brightness = sretval.retval[0];
4395 - if (user_brightness != 0)
4396 + if (user_brightness > sabi_config->min_brightness)
4397 user_brightness -= sabi_config->min_brightness;
4398 + else
4399 + user_brightness = 0;
4400 }
4401 return user_brightness;
4402 }
4403
4404 static void set_brightness(u8 user_brightness)
4405 {
4406 - u8 user_level = user_brightness - sabi_config->min_brightness;
4407 + u8 user_level = user_brightness + sabi_config->min_brightness;
4408
4409 sabi_set_command(sabi_config->commands.set_brightness, user_level);
4410 }
4411 @@ -641,6 +643,15 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
4412 .callback = dmi_check_cb,
4413 },
4414 {
4415 + .ident = "R700",
4416 + .matches = {
4417 + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
4418 + DMI_MATCH(DMI_PRODUCT_NAME, "SR700"),
4419 + DMI_MATCH(DMI_BOARD_NAME, "SR700"),
4420 + },
4421 + .callback = dmi_check_cb,
4422 + },
4423 + {
4424 .ident = "R530/R730",
4425 .matches = {
4426 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
4427 @@ -686,6 +697,24 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
4428 },
4429 .callback = dmi_check_cb,
4430 },
4431 + {
4432 + .ident = "X520",
4433 + .matches = {
4434 + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
4435 + DMI_MATCH(DMI_PRODUCT_NAME, "X520"),
4436 + DMI_MATCH(DMI_BOARD_NAME, "X520"),
4437 + },
4438 + .callback = dmi_check_cb,
4439 + },
4440 + {
4441 + .ident = "R528/R728",
4442 + .matches = {
4443 + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
4444 + DMI_MATCH(DMI_PRODUCT_NAME, "R528/R728"),
4445 + DMI_MATCH(DMI_BOARD_NAME, "R528/R728"),
4446 + },
4447 + .callback = dmi_check_cb,
4448 + },
4449 { },
4450 };
4451 MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
4452 @@ -770,7 +799,7 @@ static int __init samsung_init(void)
4453 sabi_iface = ioremap_nocache(ifaceP, 16);
4454 if (!sabi_iface) {
4455 pr_err("Can't remap %x\n", ifaceP);
4456 - goto exit;
4457 + goto error_no_signature;
4458 }
4459 if (debug) {
4460 printk(KERN_DEBUG "ifaceP = 0x%08x\n", ifaceP);
4461 @@ -802,7 +831,8 @@ static int __init samsung_init(void)
4462 /* create a backlight device to talk to this one */
4463 memset(&props, 0, sizeof(struct backlight_properties));
4464 props.type = BACKLIGHT_PLATFORM;
4465 - props.max_brightness = sabi_config->max_brightness;
4466 + props.max_brightness = sabi_config->max_brightness -
4467 + sabi_config->min_brightness;
4468 backlight_device = backlight_device_register("samsung", &sdev->dev,
4469 NULL, &backlight_ops,
4470 &props);
4471 @@ -821,7 +851,6 @@ static int __init samsung_init(void)
4472 if (retval)
4473 goto error_file_create;
4474
4475 -exit:
4476 return 0;
4477
4478 error_file_create:
4479 diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
4480 index f23d5a8..9b88be4 100644
4481 --- a/drivers/platform/x86/wmi.c
4482 +++ b/drivers/platform/x86/wmi.c
4483 @@ -754,9 +754,13 @@ static void wmi_free_devices(void)
4484 struct wmi_block *wblock, *next;
4485
4486 /* Delete devices for all the GUIDs */
4487 - list_for_each_entry_safe(wblock, next, &wmi_block_list, list)
4488 + list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
4489 + list_del(&wblock->list);
4490 if (wblock->dev.class)
4491 device_unregister(&wblock->dev);
4492 + else
4493 + kfree(wblock);
4494 + }
4495 }
4496
4497 static bool guid_already_parsed(const char *guid_string)
4498 diff --git a/drivers/power/ds2780_battery.c b/drivers/power/ds2780_battery.c
4499 index 1fefe82..91a783d 100644
4500 --- a/drivers/power/ds2780_battery.c
4501 +++ b/drivers/power/ds2780_battery.c
4502 @@ -39,6 +39,7 @@ struct ds2780_device_info {
4503 struct device *dev;
4504 struct power_supply bat;
4505 struct device *w1_dev;
4506 + struct task_struct *mutex_holder;
4507 };
4508
4509 enum current_types {
4510 @@ -49,8 +50,8 @@ enum current_types {
4511 static const char model[] = "DS2780";
4512 static const char manufacturer[] = "Maxim/Dallas";
4513
4514 -static inline struct ds2780_device_info *to_ds2780_device_info(
4515 - struct power_supply *psy)
4516 +static inline struct ds2780_device_info *
4517 +to_ds2780_device_info(struct power_supply *psy)
4518 {
4519 return container_of(psy, struct ds2780_device_info, bat);
4520 }
4521 @@ -60,17 +61,28 @@ static inline struct power_supply *to_power_supply(struct device *dev)
4522 return dev_get_drvdata(dev);
4523 }
4524
4525 -static inline int ds2780_read8(struct device *dev, u8 *val, int addr)
4526 +static inline int ds2780_battery_io(struct ds2780_device_info *dev_info,
4527 + char *buf, int addr, size_t count, int io)
4528 {
4529 - return w1_ds2780_io(dev, val, addr, sizeof(u8), 0);
4530 + if (dev_info->mutex_holder == current)
4531 + return w1_ds2780_io_nolock(dev_info->w1_dev, buf, addr, count, io);
4532 + else
4533 + return w1_ds2780_io(dev_info->w1_dev, buf, addr, count, io);
4534 +}
4535 +
4536 +static inline int ds2780_read8(struct ds2780_device_info *dev_info, u8 *val,
4537 + int addr)
4538 +{
4539 + return ds2780_battery_io(dev_info, val, addr, sizeof(u8), 0);
4540 }
4541
4542 -static int ds2780_read16(struct device *dev, s16 *val, int addr)
4543 +static int ds2780_read16(struct ds2780_device_info *dev_info, s16 *val,
4544 + int addr)
4545 {
4546 int ret;
4547 u8 raw[2];
4548
4549 - ret = w1_ds2780_io(dev, raw, addr, sizeof(u8) * 2, 0);
4550 + ret = ds2780_battery_io(dev_info, raw, addr, sizeof(raw), 0);
4551 if (ret < 0)
4552 return ret;
4553
4554 @@ -79,16 +91,16 @@ static int ds2780_read16(struct device *dev, s16 *val, int addr)
4555 return 0;
4556 }
4557
4558 -static inline int ds2780_read_block(struct device *dev, u8 *val, int addr,
4559 - size_t count)
4560 +static inline int ds2780_read_block(struct ds2780_device_info *dev_info,
4561 + u8 *val, int addr, size_t count)
4562 {
4563 - return w1_ds2780_io(dev, val, addr, count, 0);
4564 + return ds2780_battery_io(dev_info, val, addr, count, 0);
4565 }
4566
4567 -static inline int ds2780_write(struct device *dev, u8 *val, int addr,
4568 - size_t count)
4569 +static inline int ds2780_write(struct ds2780_device_info *dev_info, u8 *val,
4570 + int addr, size_t count)
4571 {
4572 - return w1_ds2780_io(dev, val, addr, count, 1);
4573 + return ds2780_battery_io(dev_info, val, addr, count, 1);
4574 }
4575
4576 static inline int ds2780_store_eeprom(struct device *dev, int addr)
4577 @@ -122,7 +134,7 @@ static int ds2780_set_sense_register(struct ds2780_device_info *dev_info,
4578 {
4579 int ret;
4580
4581 - ret = ds2780_write(dev_info->w1_dev, &conductance,
4582 + ret = ds2780_write(dev_info, &conductance,
4583 DS2780_RSNSP_REG, sizeof(u8));
4584 if (ret < 0)
4585 return ret;
4586 @@ -134,7 +146,7 @@ static int ds2780_set_sense_register(struct ds2780_device_info *dev_info,
4587 static int ds2780_get_rsgain_register(struct ds2780_device_info *dev_info,
4588 u16 *rsgain)
4589 {
4590 - return ds2780_read16(dev_info->w1_dev, rsgain, DS2780_RSGAIN_MSB_REG);
4591 + return ds2780_read16(dev_info, rsgain, DS2780_RSGAIN_MSB_REG);
4592 }
4593
4594 /* Set RSGAIN value from 0 to 1.999 in steps of 0.001 */
4595 @@ -144,8 +156,8 @@ static int ds2780_set_rsgain_register(struct ds2780_device_info *dev_info,
4596 int ret;
4597 u8 raw[] = {rsgain >> 8, rsgain & 0xFF};
4598
4599 - ret = ds2780_write(dev_info->w1_dev, raw,
4600 - DS2780_RSGAIN_MSB_REG, sizeof(u8) * 2);
4601 + ret = ds2780_write(dev_info, raw,
4602 + DS2780_RSGAIN_MSB_REG, sizeof(raw));
4603 if (ret < 0)
4604 return ret;
4605
4606 @@ -167,7 +179,7 @@ static int ds2780_get_voltage(struct ds2780_device_info *dev_info,
4607 * Bits 2 - 0 of the voltage value are in bits 7 - 5 of the
4608 * voltage LSB register
4609 */
4610 - ret = ds2780_read16(dev_info->w1_dev, &voltage_raw,
4611 + ret = ds2780_read16(dev_info, &voltage_raw,
4612 DS2780_VOLT_MSB_REG);
4613 if (ret < 0)
4614 return ret;
4615 @@ -196,7 +208,7 @@ static int ds2780_get_temperature(struct ds2780_device_info *dev_info,
4616 * Bits 2 - 0 of the temperature value are in bits 7 - 5 of the
4617 * temperature LSB register
4618 */
4619 - ret = ds2780_read16(dev_info->w1_dev, &temperature_raw,
4620 + ret = ds2780_read16(dev_info, &temperature_raw,
4621 DS2780_TEMP_MSB_REG);
4622 if (ret < 0)
4623 return ret;
4624 @@ -222,13 +234,13 @@ static int ds2780_get_current(struct ds2780_device_info *dev_info,
4625 * The units of measurement for current are dependent on the value of
4626 * the sense resistor.
4627 */
4628 - ret = ds2780_read8(dev_info->w1_dev, &sense_res_raw, DS2780_RSNSP_REG);
4629 + ret = ds2780_read8(dev_info, &sense_res_raw, DS2780_RSNSP_REG);
4630 if (ret < 0)
4631 return ret;
4632
4633 if (sense_res_raw == 0) {
4634 dev_err(dev_info->dev, "sense resistor value is 0\n");
4635 - return -ENXIO;
4636 + return -EINVAL;
4637 }
4638 sense_res = 1000 / sense_res_raw;
4639
4640 @@ -248,7 +260,7 @@ static int ds2780_get_current(struct ds2780_device_info *dev_info,
4641 * Bits 7 - 0 of the current value are in bits 7 - 0 of the current
4642 * LSB register
4643 */
4644 - ret = ds2780_read16(dev_info->w1_dev, &current_raw, reg_msb);
4645 + ret = ds2780_read16(dev_info, &current_raw, reg_msb);
4646 if (ret < 0)
4647 return ret;
4648
4649 @@ -267,7 +279,7 @@ static int ds2780_get_accumulated_current(struct ds2780_device_info *dev_info,
4650 * The units of measurement for accumulated current are dependent on
4651 * the value of the sense resistor.
4652 */
4653 - ret = ds2780_read8(dev_info->w1_dev, &sense_res_raw, DS2780_RSNSP_REG);
4654 + ret = ds2780_read8(dev_info, &sense_res_raw, DS2780_RSNSP_REG);
4655 if (ret < 0)
4656 return ret;
4657
4658 @@ -285,7 +297,7 @@ static int ds2780_get_accumulated_current(struct ds2780_device_info *dev_info,
4659 * Bits 7 - 0 of the ACR value are in bits 7 - 0 of the ACR
4660 * LSB register
4661 */
4662 - ret = ds2780_read16(dev_info->w1_dev, &current_raw, DS2780_ACR_MSB_REG);
4663 + ret = ds2780_read16(dev_info, &current_raw, DS2780_ACR_MSB_REG);
4664 if (ret < 0)
4665 return ret;
4666
4667 @@ -299,7 +311,7 @@ static int ds2780_get_capacity(struct ds2780_device_info *dev_info,
4668 int ret;
4669 u8 raw;
4670
4671 - ret = ds2780_read8(dev_info->w1_dev, &raw, DS2780_RARC_REG);
4672 + ret = ds2780_read8(dev_info, &raw, DS2780_RARC_REG);
4673 if (ret < 0)
4674 return ret;
4675
4676 @@ -345,7 +357,7 @@ static int ds2780_get_charge_now(struct ds2780_device_info *dev_info,
4677 * Bits 7 - 0 of the RAAC value are in bits 7 - 0 of the RAAC
4678 * LSB register
4679 */
4680 - ret = ds2780_read16(dev_info->w1_dev, &charge_raw, DS2780_RAAC_MSB_REG);
4681 + ret = ds2780_read16(dev_info, &charge_raw, DS2780_RAAC_MSB_REG);
4682 if (ret < 0)
4683 return ret;
4684
4685 @@ -356,7 +368,7 @@ static int ds2780_get_charge_now(struct ds2780_device_info *dev_info,
4686 static int ds2780_get_control_register(struct ds2780_device_info *dev_info,
4687 u8 *control_reg)
4688 {
4689 - return ds2780_read8(dev_info->w1_dev, control_reg, DS2780_CONTROL_REG);
4690 + return ds2780_read8(dev_info, control_reg, DS2780_CONTROL_REG);
4691 }
4692
4693 static int ds2780_set_control_register(struct ds2780_device_info *dev_info,
4694 @@ -364,7 +376,7 @@ static int ds2780_set_control_register(struct ds2780_device_info *dev_info,
4695 {
4696 int ret;
4697
4698 - ret = ds2780_write(dev_info->w1_dev, &control_reg,
4699 + ret = ds2780_write(dev_info, &control_reg,
4700 DS2780_CONTROL_REG, sizeof(u8));
4701 if (ret < 0)
4702 return ret;
4703 @@ -503,7 +515,7 @@ static ssize_t ds2780_get_sense_resistor_value(struct device *dev,
4704 struct power_supply *psy = to_power_supply(dev);
4705 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
4706
4707 - ret = ds2780_read8(dev_info->w1_dev, &sense_resistor, DS2780_RSNSP_REG);
4708 + ret = ds2780_read8(dev_info, &sense_resistor, DS2780_RSNSP_REG);
4709 if (ret < 0)
4710 return ret;
4711
4712 @@ -584,7 +596,7 @@ static ssize_t ds2780_get_pio_pin(struct device *dev,
4713 struct power_supply *psy = to_power_supply(dev);
4714 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
4715
4716 - ret = ds2780_read8(dev_info->w1_dev, &sfr, DS2780_SFR_REG);
4717 + ret = ds2780_read8(dev_info, &sfr, DS2780_SFR_REG);
4718 if (ret < 0)
4719 return ret;
4720
4721 @@ -611,7 +623,7 @@ static ssize_t ds2780_set_pio_pin(struct device *dev,
4722 return -EINVAL;
4723 }
4724
4725 - ret = ds2780_write(dev_info->w1_dev, &new_setting,
4726 + ret = ds2780_write(dev_info, &new_setting,
4727 DS2780_SFR_REG, sizeof(u8));
4728 if (ret < 0)
4729 return ret;
4730 @@ -632,7 +644,7 @@ static ssize_t ds2780_read_param_eeprom_bin(struct file *filp,
4731 DS2780_EEPROM_BLOCK1_END -
4732 DS2780_EEPROM_BLOCK1_START + 1 - off);
4733
4734 - return ds2780_read_block(dev_info->w1_dev, buf,
4735 + return ds2780_read_block(dev_info, buf,
4736 DS2780_EEPROM_BLOCK1_START + off, count);
4737 }
4738
4739 @@ -650,7 +662,7 @@ static ssize_t ds2780_write_param_eeprom_bin(struct file *filp,
4740 DS2780_EEPROM_BLOCK1_END -
4741 DS2780_EEPROM_BLOCK1_START + 1 - off);
4742
4743 - ret = ds2780_write(dev_info->w1_dev, buf,
4744 + ret = ds2780_write(dev_info, buf,
4745 DS2780_EEPROM_BLOCK1_START + off, count);
4746 if (ret < 0)
4747 return ret;
4748 @@ -685,9 +697,8 @@ static ssize_t ds2780_read_user_eeprom_bin(struct file *filp,
4749 DS2780_EEPROM_BLOCK0_END -
4750 DS2780_EEPROM_BLOCK0_START + 1 - off);
4751
4752 - return ds2780_read_block(dev_info->w1_dev, buf,
4753 + return ds2780_read_block(dev_info, buf,
4754 DS2780_EEPROM_BLOCK0_START + off, count);
4755 -
4756 }
4757
4758 static ssize_t ds2780_write_user_eeprom_bin(struct file *filp,
4759 @@ -704,7 +715,7 @@ static ssize_t ds2780_write_user_eeprom_bin(struct file *filp,
4760 DS2780_EEPROM_BLOCK0_END -
4761 DS2780_EEPROM_BLOCK0_START + 1 - off);
4762
4763 - ret = ds2780_write(dev_info->w1_dev, buf,
4764 + ret = ds2780_write(dev_info, buf,
4765 DS2780_EEPROM_BLOCK0_START + off, count);
4766 if (ret < 0)
4767 return ret;
4768 @@ -768,6 +779,7 @@ static int __devinit ds2780_battery_probe(struct platform_device *pdev)
4769 dev_info->bat.properties = ds2780_battery_props;
4770 dev_info->bat.num_properties = ARRAY_SIZE(ds2780_battery_props);
4771 dev_info->bat.get_property = ds2780_battery_get_property;
4772 + dev_info->mutex_holder = current;
4773
4774 ret = power_supply_register(&pdev->dev, &dev_info->bat);
4775 if (ret) {
4776 @@ -797,6 +809,8 @@ static int __devinit ds2780_battery_probe(struct platform_device *pdev)
4777 goto fail_remove_bin_file;
4778 }
4779
4780 + dev_info->mutex_holder = NULL;
4781 +
4782 return 0;
4783
4784 fail_remove_bin_file:
4785 @@ -816,6 +830,8 @@ static int __devexit ds2780_battery_remove(struct platform_device *pdev)
4786 {
4787 struct ds2780_device_info *dev_info = platform_get_drvdata(pdev);
4788
4789 + dev_info->mutex_holder = current;
4790 +
4791 /* remove attributes */
4792 sysfs_remove_group(&dev_info->bat.dev->kobj, &ds2780_attr_group);
4793
4794 diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
4795 index 5c56741..cda9bd6 100644
4796 --- a/drivers/s390/cio/ccwgroup.c
4797 +++ b/drivers/s390/cio/ccwgroup.c
4798 @@ -87,6 +87,12 @@ static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
4799 }
4800 }
4801
4802 +static ssize_t ccwgroup_online_store(struct device *dev,
4803 + struct device_attribute *attr,
4804 + const char *buf, size_t count);
4805 +static ssize_t ccwgroup_online_show(struct device *dev,
4806 + struct device_attribute *attr,
4807 + char *buf);
4808 /*
4809 * Provide an 'ungroup' attribute so the user can remove group devices no
4810 * longer needed or accidentially created. Saves memory :)
4811 @@ -134,6 +140,20 @@ out:
4812 }
4813
4814 static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
4815 +static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
4816 +
4817 +static struct attribute *ccwgroup_attrs[] = {
4818 + &dev_attr_online.attr,
4819 + &dev_attr_ungroup.attr,
4820 + NULL,
4821 +};
4822 +static struct attribute_group ccwgroup_attr_group = {
4823 + .attrs = ccwgroup_attrs,
4824 +};
4825 +static const struct attribute_group *ccwgroup_attr_groups[] = {
4826 + &ccwgroup_attr_group,
4827 + NULL,
4828 +};
4829
4830 static void
4831 ccwgroup_release (struct device *dev)
4832 @@ -293,25 +313,17 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
4833 }
4834
4835 dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
4836 -
4837 + gdev->dev.groups = ccwgroup_attr_groups;
4838 rc = device_add(&gdev->dev);
4839 if (rc)
4840 goto error;
4841 get_device(&gdev->dev);
4842 - rc = device_create_file(&gdev->dev, &dev_attr_ungroup);
4843 -
4844 - if (rc) {
4845 - device_unregister(&gdev->dev);
4846 - goto error;
4847 - }
4848 -
4849 rc = __ccwgroup_create_symlinks(gdev);
4850 if (!rc) {
4851 mutex_unlock(&gdev->reg_mutex);
4852 put_device(&gdev->dev);
4853 return 0;
4854 }
4855 - device_remove_file(&gdev->dev, &dev_attr_ungroup);
4856 device_unregister(&gdev->dev);
4857 error:
4858 for (i = 0; i < num_devices; i++)
4859 @@ -423,7 +435,7 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const
4860 int ret;
4861
4862 if (!dev->driver)
4863 - return -ENODEV;
4864 + return -EINVAL;
4865
4866 gdev = to_ccwgroupdev(dev);
4867 gdrv = to_ccwgroupdrv(dev->driver);
4868 @@ -456,8 +468,6 @@ ccwgroup_online_show (struct device *dev, struct device_attribute *attr, char *b
4869 return sprintf(buf, online ? "1\n" : "0\n");
4870 }
4871
4872 -static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
4873 -
4874 static int
4875 ccwgroup_probe (struct device *dev)
4876 {
4877 @@ -469,12 +479,7 @@ ccwgroup_probe (struct device *dev)
4878 gdev = to_ccwgroupdev(dev);
4879 gdrv = to_ccwgroupdrv(dev->driver);
4880
4881 - if ((ret = device_create_file(dev, &dev_attr_online)))
4882 - return ret;
4883 -
4884 ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
4885 - if (ret)
4886 - device_remove_file(dev, &dev_attr_online);
4887
4888 return ret;
4889 }
4890 @@ -485,9 +490,6 @@ ccwgroup_remove (struct device *dev)
4891 struct ccwgroup_device *gdev;
4892 struct ccwgroup_driver *gdrv;
4893
4894 - device_remove_file(dev, &dev_attr_online);
4895 - device_remove_file(dev, &dev_attr_ungroup);
4896 -
4897 if (!dev->driver)
4898 return 0;
4899
4900 diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
4901 index 0119b81..d973325 100644
4902 --- a/drivers/scsi/device_handler/scsi_dh.c
4903 +++ b/drivers/scsi/device_handler/scsi_dh.c
4904 @@ -398,7 +398,15 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
4905
4906 spin_lock_irqsave(q->queue_lock, flags);
4907 sdev = q->queuedata;
4908 - if (sdev && sdev->scsi_dh_data)
4909 + if (!sdev) {
4910 + spin_unlock_irqrestore(q->queue_lock, flags);
4911 + err = SCSI_DH_NOSYS;
4912 + if (fn)
4913 + fn(data, err);
4914 + return err;
4915 + }
4916 +
4917 + if (sdev->scsi_dh_data)
4918 scsi_dh = sdev->scsi_dh_data->scsi_dh;
4919 dev = get_device(&sdev->sdev_gendev);
4920 if (!scsi_dh || !dev ||
4921 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
4922 index 4f7a582..351dc0b 100644
4923 --- a/drivers/scsi/hosts.c
4924 +++ b/drivers/scsi/hosts.c
4925 @@ -286,6 +286,7 @@ static void scsi_host_dev_release(struct device *dev)
4926 {
4927 struct Scsi_Host *shost = dev_to_shost(dev);
4928 struct device *parent = dev->parent;
4929 + struct request_queue *q;
4930
4931 scsi_proc_hostdir_rm(shost->hostt);
4932
4933 @@ -293,9 +294,11 @@ static void scsi_host_dev_release(struct device *dev)
4934 kthread_stop(shost->ehandler);
4935 if (shost->work_q)
4936 destroy_workqueue(shost->work_q);
4937 - if (shost->uspace_req_q) {
4938 - kfree(shost->uspace_req_q->queuedata);
4939 - scsi_free_queue(shost->uspace_req_q);
4940 + q = shost->uspace_req_q;
4941 + if (q) {
4942 + kfree(q->queuedata);
4943 + q->queuedata = NULL;
4944 + scsi_free_queue(q);
4945 }
4946
4947 scsi_destroy_command_freelist(shost);
4948 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
4949 index b200b73..8c713d3 100644
4950 --- a/drivers/scsi/hpsa.c
4951 +++ b/drivers/scsi/hpsa.c
4952 @@ -3300,6 +3300,13 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
4953 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
4954 pmcsr |= PCI_D0;
4955 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
4956 +
4957 + /*
4958 + * The P600 requires a small delay when changing states.
4959 + * Otherwise we may think the board did not reset and we bail.
4960 + * This is for kdump only and is particular to the P600.
4961 + */
4962 + msleep(500);
4963 }
4964 return 0;
4965 }
4966 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
4967 index 8d63630..acbb924 100644
4968 --- a/drivers/scsi/ipr.c
4969 +++ b/drivers/scsi/ipr.c
4970 @@ -8812,7 +8812,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
4971 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
4972 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
4973 ioa_cfg->needs_hard_reset = 1;
4974 - if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
4975 + if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
4976 ioa_cfg->needs_hard_reset = 1;
4977 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
4978 ioa_cfg->ioa_unit_checked = 1;
4979 diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
4980 index d1de633..8efeb6b 100644
4981 --- a/drivers/scsi/isci/isci.h
4982 +++ b/drivers/scsi/isci/isci.h
4983 @@ -97,7 +97,7 @@
4984 #define SCU_MAX_COMPLETION_QUEUE_SHIFT (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))
4985
4986 #define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096)
4987 -#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024)
4988 +#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024U)
4989 #define SCU_INVALID_FRAME_INDEX (0xFFFF)
4990
4991 #define SCU_IO_REQUEST_MAX_SGE_SIZE (0x00FFFFFF)
4992 diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
4993 index 486b113..38a99d2 100644
4994 --- a/drivers/scsi/isci/port_config.c
4995 +++ b/drivers/scsi/isci/port_config.c
4996 @@ -678,7 +678,7 @@ static void apc_agent_timeout(unsigned long data)
4997 configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
4998
4999 if (!configure_phy_mask)
5000 - return;
5001 + goto done;
5002
5003 for (index = 0; index < SCI_MAX_PHYS; index++) {
5004 if ((configure_phy_mask & (1 << index)) == 0)
5005 diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
5006 index b5d3a8c..225b196 100644
5007 --- a/drivers/scsi/isci/request.c
5008 +++ b/drivers/scsi/isci/request.c
5009 @@ -1490,29 +1490,30 @@ sci_io_request_frame_handler(struct isci_request *ireq,
5010 return SCI_SUCCESS;
5011
5012 case SCI_REQ_SMP_WAIT_RESP: {
5013 - struct smp_resp *rsp_hdr = &ireq->smp.rsp;
5014 - void *frame_header;
5015 + struct sas_task *task = isci_request_access_task(ireq);
5016 + struct scatterlist *sg = &task->smp_task.smp_resp;
5017 + void *frame_header, *kaddr;
5018 + u8 *rsp;
5019
5020 sci_unsolicited_frame_control_get_header(&ihost->uf_control,
5021 - frame_index,
5022 - &frame_header);
5023 -
5024 - /* byte swap the header. */
5025 - word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
5026 - sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
5027 + frame_index,
5028 + &frame_header);
5029 + kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
5030 + rsp = kaddr + sg->offset;
5031 + sci_swab32_cpy(rsp, frame_header, 1);
5032
5033 - if (rsp_hdr->frame_type == SMP_RESPONSE) {
5034 + if (rsp[0] == SMP_RESPONSE) {
5035 void *smp_resp;
5036
5037 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
5038 - frame_index,
5039 - &smp_resp);
5040 + frame_index,
5041 + &smp_resp);
5042
5043 - word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
5044 - sizeof(u32);
5045 -
5046 - sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
5047 - smp_resp, word_cnt);
5048 + word_cnt = (sg->length/4)-1;
5049 + if (word_cnt > 0)
5050 + word_cnt = min_t(unsigned int, word_cnt,
5051 + SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4);
5052 + sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);
5053
5054 ireq->scu_status = SCU_TASK_DONE_GOOD;
5055 ireq->sci_status = SCI_SUCCESS;
5056 @@ -1528,12 +1529,13 @@ sci_io_request_frame_handler(struct isci_request *ireq,
5057 __func__,
5058 ireq,
5059 frame_index,
5060 - rsp_hdr->frame_type);
5061 + rsp[0]);
5062
5063 ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
5064 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
5065 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
5066 }
5067 + kunmap_atomic(kaddr, KM_IRQ0);
5068
5069 sci_controller_release_frame(ihost, frame_index);
5070
5071 @@ -2603,18 +2605,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
5072 status = SAM_STAT_GOOD;
5073 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
5074
5075 - if (task->task_proto == SAS_PROTOCOL_SMP) {
5076 - void *rsp = &request->smp.rsp;
5077 -
5078 - dev_dbg(&ihost->pdev->dev,
5079 - "%s: SMP protocol completion\n",
5080 - __func__);
5081 -
5082 - sg_copy_from_buffer(
5083 - &task->smp_task.smp_resp, 1,
5084 - rsp, sizeof(struct smp_resp));
5085 - } else if (completion_status
5086 - == SCI_IO_SUCCESS_IO_DONE_EARLY) {
5087 + if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
5088
5089 /* This was an SSP / STP / SATA transfer.
5090 * There is a possibility that less data than
5091 diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
5092 index 7a1d5a9..58d70b6 100644
5093 --- a/drivers/scsi/isci/request.h
5094 +++ b/drivers/scsi/isci/request.h
5095 @@ -174,9 +174,6 @@ struct isci_request {
5096 };
5097 } ssp;
5098 struct {
5099 - struct smp_resp rsp;
5100 - } smp;
5101 - struct {
5102 struct isci_stp_request req;
5103 struct host_to_dev_fis cmd;
5104 struct dev_to_host_fis rsp;
5105 diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h
5106 index 462b151..dc26b4a 100644
5107 --- a/drivers/scsi/isci/sas.h
5108 +++ b/drivers/scsi/isci/sas.h
5109 @@ -204,8 +204,6 @@ struct smp_req {
5110 u8 req_data[0];
5111 } __packed;
5112
5113 -#define SMP_RESP_HDR_SZ 4
5114 -
5115 /*
5116 * struct sci_sas_address - This structure depicts how a SAS address is
5117 * represented by SCI.
5118 diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
5119 index 16ad97d..37cbe4d 100644
5120 --- a/drivers/scsi/libsas/sas_expander.c
5121 +++ b/drivers/scsi/libsas/sas_expander.c
5122 @@ -199,6 +199,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
5123 phy->virtual = dr->virtual;
5124 phy->last_da_index = -1;
5125
5126 + phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr);
5127 + phy->phy->identify.device_type = phy->attached_dev_type;
5128 phy->phy->identify.initiator_port_protocols = phy->attached_iproto;
5129 phy->phy->identify.target_port_protocols = phy->attached_tproto;
5130 phy->phy->identify.phy_identifier = phy_id;
5131 diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
5132 index 776d019..839ad7b 100644
5133 --- a/drivers/scsi/megaraid/megaraid_sas_base.c
5134 +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
5135 @@ -1907,7 +1907,6 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
5136 static enum
5137 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
5138 {
5139 - struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
5140 struct megasas_instance *instance;
5141 unsigned long flags;
5142
5143 @@ -1916,7 +1915,7 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
5144 return BLK_EH_NOT_HANDLED;
5145 }
5146
5147 - instance = cmd->instance;
5148 + instance = (struct megasas_instance *)scmd->device->host->hostdata;
5149 if (!(instance->flag & MEGASAS_FW_BUSY)) {
5150 /* FW is busy, throttle IO */
5151 spin_lock_irqsave(instance->host->host_lock, flags);
5152 diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
5153 index 6abd2fc..97aac82 100644
5154 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
5155 +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
5156 @@ -7461,22 +7461,27 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
5157 /* SAS Device List */
5158 list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list,
5159 list) {
5160 - spin_lock_irqsave(&ioc->sas_device_lock, flags);
5161 - list_move_tail(&sas_device->list, &ioc->sas_device_list);
5162 - spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5163
5164 if (ioc->hide_drives)
5165 continue;
5166
5167 if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
5168 sas_device->sas_address_parent)) {
5169 - _scsih_sas_device_remove(ioc, sas_device);
5170 + list_del(&sas_device->list);
5171 + kfree(sas_device);
5172 + continue;
5173 } else if (!sas_device->starget) {
5174 mpt2sas_transport_port_remove(ioc,
5175 sas_device->sas_address,
5176 sas_device->sas_address_parent);
5177 - _scsih_sas_device_remove(ioc, sas_device);
5178 + list_del(&sas_device->list);
5179 + kfree(sas_device);
5180 + continue;
5181 +
5182 }
5183 + spin_lock_irqsave(&ioc->sas_device_lock, flags);
5184 + list_move_tail(&sas_device->list, &ioc->sas_device_list);
5185 + spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5186 }
5187 }
5188
5189 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
5190 index fc3f168..b4d43ae 100644
5191 --- a/drivers/scsi/scsi_lib.c
5192 +++ b/drivers/scsi/scsi_lib.c
5193 @@ -1698,6 +1698,15 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
5194
5195 void scsi_free_queue(struct request_queue *q)
5196 {
5197 + unsigned long flags;
5198 +
5199 + WARN_ON(q->queuedata);
5200 +
5201 + /* cause scsi_request_fn() to kill all non-finished requests */
5202 + spin_lock_irqsave(q->queue_lock, flags);
5203 + q->request_fn(q);
5204 + spin_unlock_irqrestore(q->queue_lock, flags);
5205 +
5206 blk_cleanup_queue(q);
5207 }
5208
5209 diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
5210 index 44e8ca3..72273a0 100644
5211 --- a/drivers/scsi/scsi_scan.c
5212 +++ b/drivers/scsi/scsi_scan.c
5213 @@ -322,6 +322,7 @@ out_device_destroy:
5214 scsi_device_set_state(sdev, SDEV_DEL);
5215 transport_destroy_device(&sdev->sdev_gendev);
5216 put_device(&sdev->sdev_dev);
5217 + scsi_free_queue(sdev->request_queue);
5218 put_device(&sdev->sdev_gendev);
5219 out:
5220 if (display_failure_msg)
5221 diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
5222 index 1871b8a..9b28f39 100644
5223 --- a/drivers/scsi/st.c
5224 +++ b/drivers/scsi/st.c
5225 @@ -462,14 +462,16 @@ static void st_scsi_execute_end(struct request *req, int uptodate)
5226 {
5227 struct st_request *SRpnt = req->end_io_data;
5228 struct scsi_tape *STp = SRpnt->stp;
5229 + struct bio *tmp;
5230
5231 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
5232 STp->buffer->cmdstat.residual = req->resid_len;
5233
5234 + tmp = SRpnt->bio;
5235 if (SRpnt->waiting)
5236 complete(SRpnt->waiting);
5237
5238 - blk_rq_unmap_user(SRpnt->bio);
5239 + blk_rq_unmap_user(tmp);
5240 __blk_put_request(req->q, req);
5241 }
5242
5243 diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
5244 index fde3a2d..322be7a 100644
5245 --- a/drivers/spi/spi-omap2-mcspi.c
5246 +++ b/drivers/spi/spi-omap2-mcspi.c
5247 @@ -1116,15 +1116,16 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
5248 status = -ENODEV;
5249 goto err1;
5250 }
5251 +
5252 + r->start += pdata->regs_offset;
5253 + r->end += pdata->regs_offset;
5254 + mcspi->phys = r->start;
5255 if (!request_mem_region(r->start, resource_size(r),
5256 dev_name(&pdev->dev))) {
5257 status = -EBUSY;
5258 goto err1;
5259 }
5260
5261 - r->start += pdata->regs_offset;
5262 - r->end += pdata->regs_offset;
5263 - mcspi->phys = r->start;
5264 mcspi->base = ioremap(r->start, resource_size(r));
5265 if (!mcspi->base) {
5266 dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
5267 diff --git a/drivers/staging/hv/hyperv_storage.h b/drivers/staging/hv/hyperv_storage.h
5268 index a01f9a0..5af82f4 100644
5269 --- a/drivers/staging/hv/hyperv_storage.h
5270 +++ b/drivers/staging/hv/hyperv_storage.h
5271 @@ -218,6 +218,7 @@ struct vstor_packet {
5272 #define STORVSC_MAX_LUNS_PER_TARGET 64
5273 #define STORVSC_MAX_TARGETS 1
5274 #define STORVSC_MAX_CHANNELS 1
5275 +#define STORVSC_MAX_CMD_LEN 16
5276
5277 struct hv_storvsc_request;
5278
5279 diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
5280 index 61989f0..88d5193 100644
5281 --- a/drivers/staging/hv/netvsc_drv.c
5282 +++ b/drivers/staging/hv/netvsc_drv.c
5283 @@ -217,8 +217,8 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
5284 if (status == 1) {
5285 netif_carrier_on(net);
5286 netif_wake_queue(net);
5287 - netif_notify_peers(net);
5288 ndev_ctx = netdev_priv(net);
5289 + schedule_delayed_work(&ndev_ctx->dwork, 0);
5290 schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
5291 } else {
5292 netif_carrier_off(net);
5293 diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
5294 index 7effaf3..26983ac 100644
5295 --- a/drivers/staging/hv/storvsc_drv.c
5296 +++ b/drivers/staging/hv/storvsc_drv.c
5297 @@ -701,6 +701,8 @@ static int storvsc_probe(struct hv_device *device)
5298 host->max_id = STORVSC_MAX_TARGETS;
5299 /* max # of channels */
5300 host->max_channel = STORVSC_MAX_CHANNELS - 1;
5301 + /* max cmd length */
5302 + host->max_cmd_len = STORVSC_MAX_CMD_LEN;
5303
5304 /* Register the HBA and start the scsi bus scan */
5305 ret = scsi_add_host(host, &device->device);
5306 diff --git a/drivers/staging/quatech_usb2/quatech_usb2.c b/drivers/staging/quatech_usb2/quatech_usb2.c
5307 index ca098ca..02fafec 100644
5308 --- a/drivers/staging/quatech_usb2/quatech_usb2.c
5309 +++ b/drivers/staging/quatech_usb2/quatech_usb2.c
5310 @@ -916,9 +916,10 @@ static int qt2_ioctl(struct tty_struct *tty,
5311 dbg("%s() port %d, cmd == TIOCMIWAIT enter",
5312 __func__, port->number);
5313 prev_msr_value = port_extra->shadowMSR & QT2_SERIAL_MSR_MASK;
5314 + barrier();
5315 + __set_current_state(TASK_INTERRUPTIBLE);
5316 while (1) {
5317 add_wait_queue(&port_extra->wait, &wait);
5318 - set_current_state(TASK_INTERRUPTIBLE);
5319 schedule();
5320 dbg("%s(): port %d, cmd == TIOCMIWAIT here\n",
5321 __func__, port->number);
5322 @@ -926,9 +927,12 @@ static int qt2_ioctl(struct tty_struct *tty,
5323 /* see if a signal woke us up */
5324 if (signal_pending(current))
5325 return -ERESTARTSYS;
5326 + set_current_state(TASK_INTERRUPTIBLE);
5327 msr_value = port_extra->shadowMSR & QT2_SERIAL_MSR_MASK;
5328 - if (msr_value == prev_msr_value)
5329 + if (msr_value == prev_msr_value) {
5330 + __set_current_state(TASK_RUNNING);
5331 return -EIO; /* no change - error */
5332 + }
5333 if ((arg & TIOCM_RNG &&
5334 ((prev_msr_value & QT2_SERIAL_MSR_RI) ==
5335 (msr_value & QT2_SERIAL_MSR_RI))) ||
5336 @@ -941,6 +945,7 @@ static int qt2_ioctl(struct tty_struct *tty,
5337 (arg & TIOCM_CTS &&
5338 ((prev_msr_value & QT2_SERIAL_MSR_CTS) ==
5339 (msr_value & QT2_SERIAL_MSR_CTS)))) {
5340 + __set_current_state(TASK_RUNNING);
5341 return 0;
5342 }
5343 } /* end inifinite while */
5344 diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
5345 index 12f5eba..48aa61e 100644
5346 --- a/drivers/staging/serqt_usb2/serqt_usb2.c
5347 +++ b/drivers/staging/serqt_usb2/serqt_usb2.c
5348 @@ -24,7 +24,6 @@ static int debug;
5349 #define DRIVER_DESC "Quatech USB to Serial Driver"
5350
5351 #define USB_VENDOR_ID_QUATECH 0x061d /* Quatech VID */
5352 -#define QUATECH_SSU100 0xC020 /* SSU100 */
5353 #define QUATECH_SSU200 0xC030 /* SSU200 */
5354 #define QUATECH_DSU100 0xC040 /* DSU100 */
5355 #define QUATECH_DSU200 0xC050 /* DSU200 */
5356 @@ -127,7 +126,6 @@ static int debug;
5357 #define RS232_MODE 0x00
5358
5359 static const struct usb_device_id serqt_id_table[] = {
5360 - {USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_SSU100)},
5361 {USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_SSU200)},
5362 {USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU100)},
5363 {USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU200)},
5364 @@ -775,7 +773,6 @@ static int qt_startup(struct usb_serial *serial)
5365 }
5366
5367 switch (serial->dev->descriptor.idProduct) {
5368 - case QUATECH_SSU100:
5369 case QUATECH_DSU100:
5370 case QUATECH_QSU100:
5371 case QUATECH_ESU100A:
5372 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
5373 index 074ac42..be21617 100644
5374 --- a/drivers/staging/usbip/usbip_common.h
5375 +++ b/drivers/staging/usbip/usbip_common.h
5376 @@ -126,12 +126,12 @@ extern struct device_attribute dev_attr_usbip_debug;
5377 *
5378 */
5379 #define USBIP_CMD_SUBMIT 0x0001
5380 -#define USBIP_RET_SUBMIT 0x0002
5381 -#define USBIP_CMD_UNLINK 0x0003
5382 +#define USBIP_CMD_UNLINK 0x0002
5383 +#define USBIP_RET_SUBMIT 0x0003
5384 #define USBIP_RET_UNLINK 0x0004
5385
5386 -#define USBIP_DIR_IN 0x00
5387 -#define USBIP_DIR_OUT 0x01
5388 +#define USBIP_DIR_OUT 0x00
5389 +#define USBIP_DIR_IN 0x01
5390
5391 /**
5392 * struct usbip_header_basic - data pertinent to every request
5393 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
5394 index 6a4ea29..26a5d8b 100644
5395 --- a/drivers/target/iscsi/iscsi_target.c
5396 +++ b/drivers/target/iscsi/iscsi_target.c
5397 @@ -1079,7 +1079,9 @@ attach_cmd:
5398 */
5399 if (!cmd->immediate_data) {
5400 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
5401 - if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
5402 + if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
5403 + return 0;
5404 + else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
5405 return iscsit_add_reject_from_cmd(
5406 ISCSI_REASON_PROTOCOL_ERROR,
5407 1, 0, buf, cmd);
5408 @@ -1819,17 +1821,16 @@ attach:
5409 int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
5410 if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
5411 out_of_order_cmdsn = 1;
5412 - else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
5413 + else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
5414 return 0;
5415 - } else { /* (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) */
5416 + else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
5417 return iscsit_add_reject_from_cmd(
5418 ISCSI_REASON_PROTOCOL_ERROR,
5419 1, 0, buf, cmd);
5420 - }
5421 }
5422 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
5423
5424 - if (out_of_order_cmdsn)
5425 + if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
5426 return 0;
5427 /*
5428 * Found the referenced task, send to transport for processing.
5429 diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
5430 index aa2d6799..fb85b35 100644
5431 --- a/drivers/target/loopback/tcm_loop.c
5432 +++ b/drivers/target/loopback/tcm_loop.c
5433 @@ -174,6 +174,24 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
5434 sgl_bidi = sdb->table.sgl;
5435 sgl_bidi_count = sdb->table.nents;
5436 }
5437 + /*
5438 + * Because some userspace code via scsi-generic does not memset its
5439 + * associated read buffers, go ahead and do that here for type
5440 + * SCF_SCSI_CONTROL_SG_IO_CDB. Also note that this is currently
5441 + * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
5442 + * by target core in transport_generic_allocate_tasks() ->
5443 + * transport_generic_cmd_sequencer().
5444 + */
5445 + if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
5446 + se_cmd->data_direction == DMA_FROM_DEVICE) {
5447 + struct scatterlist *sg = scsi_sglist(sc);
5448 + unsigned char *buf = kmap(sg_page(sg)) + sg->offset;
5449 +
5450 + if (buf != NULL) {
5451 + memset(buf, 0, sg->length);
5452 + kunmap(sg_page(sg));
5453 + }
5454 + }
5455
5456 /* Tell the core about our preallocated memory */
5457 ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
5458 diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
5459 index 98c98a3..8badcb4 100644
5460 --- a/drivers/target/target_core_alua.c
5461 +++ b/drivers/target/target_core_alua.c
5462 @@ -68,6 +68,15 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
5463 unsigned char *buf;
5464 u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
5465 Target port group descriptor */
5466 + /*
5467 + * Need at least 4 bytes of response data or else we can't
5468 + * even fit the return data length.
5469 + */
5470 + if (cmd->data_length < 4) {
5471 + pr_warn("REPORT TARGET PORT GROUPS allocation length %u"
5472 + " too small\n", cmd->data_length);
5473 + return -EINVAL;
5474 + }
5475
5476 buf = transport_kmap_first_data_page(cmd);
5477
5478 @@ -75,6 +84,17 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
5479 list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
5480 tg_pt_gp_list) {
5481 /*
5482 + * Check if the Target port group and Target port descriptor list
5483 + * based on tg_pt_gp_members count will fit into the response payload.
5484 + * Otherwise, bump rd_len to let the initiator know we have exceeded
5485 + * the allocation length and the response is truncated.
5486 + */
5487 + if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
5488 + cmd->data_length) {
5489 + rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
5490 + continue;
5491 + }
5492 + /*
5493 * PREF: Preferred target port bit, determine if this
5494 * bit should be set for port group.
5495 */
5496 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
5497 index 27d4925..5c1b8c5 100644
5498 --- a/drivers/target/target_core_tmr.c
5499 +++ b/drivers/target/target_core_tmr.c
5500 @@ -67,15 +67,16 @@ void core_tmr_release_req(
5501 struct se_tmr_req *tmr)
5502 {
5503 struct se_device *dev = tmr->tmr_dev;
5504 + unsigned long flags;
5505
5506 if (!dev) {
5507 kmem_cache_free(se_tmr_req_cache, tmr);
5508 return;
5509 }
5510
5511 - spin_lock_irq(&dev->se_tmr_lock);
5512 + spin_lock_irqsave(&dev->se_tmr_lock, flags);
5513 list_del(&tmr->tmr_list);
5514 - spin_unlock_irq(&dev->se_tmr_lock);
5515 + spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
5516
5517 kmem_cache_free(se_tmr_req_cache, tmr);
5518 }
5519 @@ -100,54 +101,20 @@ static void core_tmr_handle_tas_abort(
5520 transport_cmd_finish_abort(cmd, 0);
5521 }
5522
5523 -int core_tmr_lun_reset(
5524 +static void core_tmr_drain_tmr_list(
5525 struct se_device *dev,
5526 struct se_tmr_req *tmr,
5527 - struct list_head *preempt_and_abort_list,
5528 - struct se_cmd *prout_cmd)
5529 + struct list_head *preempt_and_abort_list)
5530 {
5531 - struct se_cmd *cmd, *tcmd;
5532 - struct se_node_acl *tmr_nacl = NULL;
5533 - struct se_portal_group *tmr_tpg = NULL;
5534 - struct se_queue_obj *qobj = &dev->dev_queue_obj;
5535 + LIST_HEAD(drain_tmr_list);
5536 struct se_tmr_req *tmr_p, *tmr_pp;
5537 - struct se_task *task, *task_tmp;
5538 + struct se_cmd *cmd;
5539 unsigned long flags;
5540 - int fe_count, tas;
5541 - /*
5542 - * TASK_ABORTED status bit, this is configurable via ConfigFS
5543 - * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
5544 - *
5545 - * A task aborted status (TAS) bit set to zero specifies that aborted
5546 - * tasks shall be terminated by the device server without any response
5547 - * to the application client. A TAS bit set to one specifies that tasks
5548 - * aborted by the actions of an I_T nexus other than the I_T nexus on
5549 - * which the command was received shall be completed with TASK ABORTED
5550 - * status (see SAM-4).
5551 - */
5552 - tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
5553 - /*
5554 - * Determine if this se_tmr is coming from a $FABRIC_MOD
5555 - * or struct se_device passthrough..
5556 - */
5557 - if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
5558 - tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
5559 - tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
5560 - if (tmr_nacl && tmr_tpg) {
5561 - pr_debug("LUN_RESET: TMR caller fabric: %s"
5562 - " initiator port %s\n",
5563 - tmr_tpg->se_tpg_tfo->get_fabric_name(),
5564 - tmr_nacl->initiatorname);
5565 - }
5566 - }
5567 - pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
5568 - (preempt_and_abort_list) ? "Preempt" : "TMR",
5569 - dev->transport->name, tas);
5570 /*
5571 * Release all pending and outgoing TMRs aside from the received
5572 * LUN_RESET tmr..
5573 */
5574 - spin_lock_irq(&dev->se_tmr_lock);
5575 + spin_lock_irqsave(&dev->se_tmr_lock, flags);
5576 list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
5577 /*
5578 * Allow the received TMR to return with FUNCTION_COMPLETE.
5579 @@ -169,29 +136,48 @@ int core_tmr_lun_reset(
5580 (core_scsi3_check_cdb_abort_and_preempt(
5581 preempt_and_abort_list, cmd) != 0))
5582 continue;
5583 - spin_unlock_irq(&dev->se_tmr_lock);
5584
5585 - spin_lock_irqsave(&cmd->t_state_lock, flags);
5586 + spin_lock(&cmd->t_state_lock);
5587 if (!atomic_read(&cmd->t_transport_active)) {
5588 - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
5589 - spin_lock_irq(&dev->se_tmr_lock);
5590 + spin_unlock(&cmd->t_state_lock);
5591 continue;
5592 }
5593 if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
5594 - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
5595 - spin_lock_irq(&dev->se_tmr_lock);
5596 + spin_unlock(&cmd->t_state_lock);
5597 continue;
5598 }
5599 + spin_unlock(&cmd->t_state_lock);
5600 +
5601 + list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
5602 + }
5603 + spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
5604 +
5605 + while (!list_empty(&drain_tmr_list)) {
5606 + tmr = list_entry(drain_tmr_list.next, struct se_tmr_req, tmr_list);
5607 + list_del(&tmr->tmr_list);
5608 + cmd = tmr->task_cmd;
5609 +
5610 pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
5611 " Response: 0x%02x, t_state: %d\n",
5612 - (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
5613 - tmr_p->function, tmr_p->response, cmd->t_state);
5614 - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
5615 + (preempt_and_abort_list) ? "Preempt" : "", tmr,
5616 + tmr->function, tmr->response, cmd->t_state);
5617
5618 transport_cmd_finish_abort_tmr(cmd);
5619 - spin_lock_irq(&dev->se_tmr_lock);
5620 }
5621 - spin_unlock_irq(&dev->se_tmr_lock);
5622 +}
5623 +
5624 +static void core_tmr_drain_task_list(
5625 + struct se_device *dev,
5626 + struct se_cmd *prout_cmd,
5627 + struct se_node_acl *tmr_nacl,
5628 + int tas,
5629 + struct list_head *preempt_and_abort_list)
5630 +{
5631 + LIST_HEAD(drain_task_list);
5632 + struct se_cmd *cmd;
5633 + struct se_task *task, *task_tmp;
5634 + unsigned long flags;
5635 + int fe_count;
5636 /*
5637 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
5638 * This is following sam4r17, section 5.6 Aborting commands, Table 38
5639 @@ -236,9 +222,23 @@ int core_tmr_lun_reset(
5640 if (prout_cmd == cmd)
5641 continue;
5642
5643 - list_del(&task->t_state_list);
5644 + list_move_tail(&task->t_state_list, &drain_task_list);
5645 atomic_set(&task->task_state_active, 0);
5646 - spin_unlock_irqrestore(&dev->execute_task_lock, flags);
5647 + /*
5648 + * Remove from task execute list before processing drain_task_list
5649 + */
5650 + if (atomic_read(&task->task_execute_queue) != 0) {
5651 + list_del(&task->t_execute_list);
5652 + atomic_set(&task->task_execute_queue, 0);
5653 + atomic_dec(&dev->execute_tasks);
5654 + }
5655 + }
5656 + spin_unlock_irqrestore(&dev->execute_task_lock, flags);
5657 +
5658 + while (!list_empty(&drain_task_list)) {
5659 + task = list_entry(drain_task_list.next, struct se_task, t_state_list);
5660 + list_del(&task->t_state_list);
5661 + cmd = task->task_se_cmd;
5662
5663 spin_lock_irqsave(&cmd->t_state_lock, flags);
5664 pr_debug("LUN_RESET: %s cmd: %p task: %p"
5665 @@ -275,20 +275,14 @@ int core_tmr_lun_reset(
5666
5667 atomic_set(&task->task_active, 0);
5668 atomic_set(&task->task_stop, 0);
5669 - } else {
5670 - if (atomic_read(&task->task_execute_queue) != 0)
5671 - transport_remove_task_from_execute_queue(task, dev);
5672 }
5673 __transport_stop_task_timer(task, &flags);
5674
5675 if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
5676 - spin_unlock_irqrestore(
5677 - &cmd->t_state_lock, flags);
5678 + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
5679 pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
5680 " t_task_cdbs_ex_left: %d\n", task, dev,
5681 atomic_read(&cmd->t_task_cdbs_ex_left));
5682 -
5683 - spin_lock_irqsave(&dev->execute_task_lock, flags);
5684 continue;
5685 }
5686 fe_count = atomic_read(&cmd->t_fe_count);
5687 @@ -298,22 +292,31 @@ int core_tmr_lun_reset(
5688 " task: %p, t_fe_count: %d dev: %p\n", task,
5689 fe_count, dev);
5690 atomic_set(&cmd->t_transport_aborted, 1);
5691 - spin_unlock_irqrestore(&cmd->t_state_lock,
5692 - flags);
5693 - core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
5694 + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
5695
5696 - spin_lock_irqsave(&dev->execute_task_lock, flags);
5697 + core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
5698 continue;
5699 }
5700 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
5701 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
5702 atomic_set(&cmd->t_transport_aborted, 1);
5703 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
5704 - core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
5705
5706 - spin_lock_irqsave(&dev->execute_task_lock, flags);
5707 + core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
5708 }
5709 - spin_unlock_irqrestore(&dev->execute_task_lock, flags);
5710 +}
5711 +
5712 +static void core_tmr_drain_cmd_list(
5713 + struct se_device *dev,
5714 + struct se_cmd *prout_cmd,
5715 + struct se_node_acl *tmr_nacl,
5716 + int tas,
5717 + struct list_head *preempt_and_abort_list)
5718 +{
5719 + LIST_HEAD(drain_cmd_list);
5720 + struct se_queue_obj *qobj = &dev->dev_queue_obj;
5721 + struct se_cmd *cmd, *tcmd;
5722 + unsigned long flags;
5723 /*
5724 * Release all commands remaining in the struct se_device cmd queue.
5725 *
5726 @@ -337,11 +340,26 @@ int core_tmr_lun_reset(
5727 */
5728 if (prout_cmd == cmd)
5729 continue;
5730 + /*
5731 + * Skip direct processing of TRANSPORT_FREE_CMD_INTR for
5732 + * HW target mode fabrics.
5733 + */
5734 + spin_lock(&cmd->t_state_lock);
5735 + if (cmd->t_state == TRANSPORT_FREE_CMD_INTR) {
5736 + spin_unlock(&cmd->t_state_lock);
5737 + continue;
5738 + }
5739 + spin_unlock(&cmd->t_state_lock);
5740
5741 - atomic_dec(&cmd->t_transport_queue_active);
5742 + atomic_set(&cmd->t_transport_queue_active, 0);
5743 atomic_dec(&qobj->queue_cnt);
5744 - list_del(&cmd->se_queue_node);
5745 - spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
5746 + list_move_tail(&cmd->se_queue_node, &drain_cmd_list);
5747 + }
5748 + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
5749 +
5750 + while (!list_empty(&drain_cmd_list)) {
5751 + cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node);
5752 + list_del_init(&cmd->se_queue_node);
5753
5754 pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
5755 " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
5756 @@ -354,9 +372,53 @@ int core_tmr_lun_reset(
5757
5758 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
5759 atomic_read(&cmd->t_fe_count));
5760 - spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
5761 }
5762 - spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
5763 +}
5764 +
5765 +int core_tmr_lun_reset(
5766 + struct se_device *dev,
5767 + struct se_tmr_req *tmr,
5768 + struct list_head *preempt_and_abort_list,
5769 + struct se_cmd *prout_cmd)
5770 +{
5771 + struct se_node_acl *tmr_nacl = NULL;
5772 + struct se_portal_group *tmr_tpg = NULL;
5773 + int tas;
5774 + /*
5775 + * TASK_ABORTED status bit, this is configurable via ConfigFS
5776 + * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
5777 + *
5778 + * A task aborted status (TAS) bit set to zero specifies that aborted
5779 + * tasks shall be terminated by the device server without any response
5780 + * to the application client. A TAS bit set to one specifies that tasks
5781 + * aborted by the actions of an I_T nexus other than the I_T nexus on
5782 + * which the command was received shall be completed with TASK ABORTED
5783 + * status (see SAM-4).
5784 + */
5785 + tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
5786 + /*
5787 + * Determine if this se_tmr is coming from a $FABRIC_MOD
5788 + * or struct se_device passthrough..
5789 + */
5790 + if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
5791 + tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
5792 + tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
5793 + if (tmr_nacl && tmr_tpg) {
5794 + pr_debug("LUN_RESET: TMR caller fabric: %s"
5795 + " initiator port %s\n",
5796 + tmr_tpg->se_tpg_tfo->get_fabric_name(),
5797 + tmr_nacl->initiatorname);
5798 + }
5799 + }
5800 + pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
5801 + (preempt_and_abort_list) ? "Preempt" : "TMR",
5802 + dev->transport->name, tas);
5803 +
5804 + core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
5805 + core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas,
5806 + preempt_and_abort_list);
5807 + core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
5808 + preempt_and_abort_list);
5809 /*
5810 * Clear any legacy SPC-2 reservation when called during
5811 * LOGICAL UNIT RESET
5812 @@ -379,3 +441,4 @@ int core_tmr_lun_reset(
5813 dev->transport->name);
5814 return 0;
5815 }
5816 +
5817 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
5818 index a4b0a8d..013c100 100644
5819 --- a/drivers/target/target_core_transport.c
5820 +++ b/drivers/target/target_core_transport.c
5821 @@ -594,13 +594,14 @@ check_lun:
5822
5823 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
5824 {
5825 - transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
5826 transport_lun_remove_cmd(cmd);
5827
5828 if (transport_cmd_check_stop_to_fabric(cmd))
5829 return;
5830 - if (remove)
5831 + if (remove) {
5832 + transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
5833 transport_generic_remove(cmd, 0);
5834 + }
5835 }
5836
5837 void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
5838 @@ -621,8 +622,6 @@ static void transport_add_cmd_to_queue(
5839 struct se_queue_obj *qobj = &dev->dev_queue_obj;
5840 unsigned long flags;
5841
5842 - INIT_LIST_HEAD(&cmd->se_queue_node);
5843 -
5844 if (t_state) {
5845 spin_lock_irqsave(&cmd->t_state_lock, flags);
5846 cmd->t_state = t_state;
5847 @@ -631,15 +630,21 @@ static void transport_add_cmd_to_queue(
5848 }
5849
5850 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
5851 +
5852 + /* If the cmd is already on the list, remove it before we add it */
5853 + if (!list_empty(&cmd->se_queue_node))
5854 + list_del(&cmd->se_queue_node);
5855 + else
5856 + atomic_inc(&qobj->queue_cnt);
5857 +
5858 if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) {
5859 cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL;
5860 list_add(&cmd->se_queue_node, &qobj->qobj_list);
5861 } else
5862 list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
5863 - atomic_inc(&cmd->t_transport_queue_active);
5864 + atomic_set(&cmd->t_transport_queue_active, 1);
5865 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
5866
5867 - atomic_inc(&qobj->queue_cnt);
5868 wake_up_interruptible(&qobj->thread_wq);
5869 }
5870
5871 @@ -656,9 +661,9 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)
5872 }
5873 cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
5874
5875 - atomic_dec(&cmd->t_transport_queue_active);
5876 + atomic_set(&cmd->t_transport_queue_active, 0);
5877
5878 - list_del(&cmd->se_queue_node);
5879 + list_del_init(&cmd->se_queue_node);
5880 atomic_dec(&qobj->queue_cnt);
5881 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
5882
5883 @@ -668,7 +673,6 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)
5884 static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
5885 struct se_queue_obj *qobj)
5886 {
5887 - struct se_cmd *t;
5888 unsigned long flags;
5889
5890 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
5891 @@ -676,14 +680,9 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
5892 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
5893 return;
5894 }
5895 -
5896 - list_for_each_entry(t, &qobj->qobj_list, se_queue_node)
5897 - if (t == cmd) {
5898 - atomic_dec(&cmd->t_transport_queue_active);
5899 - atomic_dec(&qobj->queue_cnt);
5900 - list_del(&cmd->se_queue_node);
5901 - break;
5902 - }
5903 + atomic_set(&cmd->t_transport_queue_active, 0);
5904 + atomic_dec(&qobj->queue_cnt);
5905 + list_del_init(&cmd->se_queue_node);
5906 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
5907
5908 if (atomic_read(&cmd->t_transport_queue_active)) {
5909 @@ -1067,7 +1066,7 @@ static void transport_release_all_cmds(struct se_device *dev)
5910 list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list,
5911 se_queue_node) {
5912 t_state = cmd->t_state;
5913 - list_del(&cmd->se_queue_node);
5914 + list_del_init(&cmd->se_queue_node);
5915 spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
5916 flags);
5917
5918 @@ -1598,6 +1597,7 @@ void transport_init_se_cmd(
5919 INIT_LIST_HEAD(&cmd->se_delayed_node);
5920 INIT_LIST_HEAD(&cmd->se_ordered_node);
5921 INIT_LIST_HEAD(&cmd->se_qf_node);
5922 + INIT_LIST_HEAD(&cmd->se_queue_node);
5923
5924 INIT_LIST_HEAD(&cmd->t_task_list);
5925 init_completion(&cmd->transport_lun_fe_stop_comp);
5926 @@ -4920,6 +4920,15 @@ EXPORT_SYMBOL(transport_check_aborted_status);
5927
5928 void transport_send_task_abort(struct se_cmd *cmd)
5929 {
5930 + unsigned long flags;
5931 +
5932 + spin_lock_irqsave(&cmd->t_state_lock, flags);
5933 + if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
5934 + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
5935 + return;
5936 + }
5937 + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
5938 +
5939 /*
5940 * If there are still expected incoming fabric WRITEs, we wait
5941 * until until they have completed before sending a TASK_ABORTED
5942 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
5943 index e809e9d..e18604b 100644
5944 --- a/drivers/tty/pty.c
5945 +++ b/drivers/tty/pty.c
5946 @@ -670,12 +670,18 @@ static int ptmx_open(struct inode *inode, struct file *filp)
5947
5948 nonseekable_open(inode, filp);
5949
5950 + retval = tty_alloc_file(filp);
5951 + if (retval)
5952 + return retval;
5953 +
5954 /* find a device that is not in use. */
5955 tty_lock();
5956 index = devpts_new_index(inode);
5957 tty_unlock();
5958 - if (index < 0)
5959 - return index;
5960 + if (index < 0) {
5961 + retval = index;
5962 + goto err_file;
5963 + }
5964
5965 mutex_lock(&tty_mutex);
5966 tty_lock();
5967 @@ -689,27 +695,27 @@ static int ptmx_open(struct inode *inode, struct file *filp)
5968
5969 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
5970
5971 - retval = tty_add_file(tty, filp);
5972 - if (retval)
5973 - goto out;
5974 + tty_add_file(tty, filp);
5975
5976 retval = devpts_pty_new(inode, tty->link);
5977 if (retval)
5978 - goto out1;
5979 + goto err_release;
5980
5981 retval = ptm_driver->ops->open(tty, filp);
5982 if (retval)
5983 - goto out2;
5984 -out1:
5985 + goto err_release;
5986 +
5987 tty_unlock();
5988 - return retval;
5989 -out2:
5990 + return 0;
5991 +err_release:
5992 tty_unlock();
5993 tty_release(inode, filp);
5994 return retval;
5995 out:
5996 devpts_kill_index(inode, index);
5997 tty_unlock();
5998 +err_file:
5999 + tty_free_file(filp);
6000 return retval;
6001 }
6002
6003 diff --git a/drivers/tty/serial/jsm/jsm.h b/drivers/tty/serial/jsm/jsm.h
6004 index b704c8c..5b837e7 100644
6005 --- a/drivers/tty/serial/jsm/jsm.h
6006 +++ b/drivers/tty/serial/jsm/jsm.h
6007 @@ -183,10 +183,8 @@ struct jsm_board
6008 /* Our Read/Error/Write queue sizes */
6009 #define RQUEUEMASK 0x1FFF /* 8 K - 1 */
6010 #define EQUEUEMASK 0x1FFF /* 8 K - 1 */
6011 -#define WQUEUEMASK 0x0FFF /* 4 K - 1 */
6012 #define RQUEUESIZE (RQUEUEMASK + 1)
6013 #define EQUEUESIZE RQUEUESIZE
6014 -#define WQUEUESIZE (WQUEUEMASK + 1)
6015
6016
6017 /************************************************************************
6018 @@ -226,10 +224,6 @@ struct jsm_channel {
6019 u16 ch_e_head; /* Head location of the error queue */
6020 u16 ch_e_tail; /* Tail location of the error queue */
6021
6022 - u8 *ch_wqueue; /* Our write queue buffer - malloc'ed */
6023 - u16 ch_w_head; /* Head location of the write queue */
6024 - u16 ch_w_tail; /* Tail location of the write queue */
6025 -
6026 u64 ch_rxcount; /* total of data received so far */
6027 u64 ch_txcount; /* total of data transmitted so far */
6028
6029 @@ -378,7 +372,6 @@ extern int jsm_debug;
6030 * Prototypes for non-static functions used in more than one module
6031 *
6032 *************************************************************************/
6033 -int jsm_tty_write(struct uart_port *port);
6034 int jsm_tty_init(struct jsm_board *);
6035 int jsm_uart_port_init(struct jsm_board *);
6036 int jsm_remove_uart_port(struct jsm_board *);
6037 diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
6038 index 96da178..2aaafa9 100644
6039 --- a/drivers/tty/serial/jsm/jsm_driver.c
6040 +++ b/drivers/tty/serial/jsm/jsm_driver.c
6041 @@ -211,7 +211,6 @@ static void __devexit jsm_remove_one(struct pci_dev *pdev)
6042 if (brd->channels[i]) {
6043 kfree(brd->channels[i]->ch_rqueue);
6044 kfree(brd->channels[i]->ch_equeue);
6045 - kfree(brd->channels[i]->ch_wqueue);
6046 kfree(brd->channels[i]);
6047 }
6048 }
6049 diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
6050 index 4538c3e..bd6e846 100644
6051 --- a/drivers/tty/serial/jsm/jsm_neo.c
6052 +++ b/drivers/tty/serial/jsm/jsm_neo.c
6053 @@ -496,12 +496,15 @@ static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch)
6054 int s;
6055 int qlen;
6056 u32 len_written = 0;
6057 + struct circ_buf *circ;
6058
6059 if (!ch)
6060 return;
6061
6062 + circ = &ch->uart_port.state->xmit;
6063 +
6064 /* No data to write to the UART */
6065 - if (ch->ch_w_tail == ch->ch_w_head)
6066 + if (uart_circ_empty(circ))
6067 return;
6068
6069 /* If port is "stopped", don't send any data to the UART */
6070 @@ -517,11 +520,10 @@ static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch)
6071 if (ch->ch_cached_lsr & UART_LSR_THRE) {
6072 ch->ch_cached_lsr &= ~(UART_LSR_THRE);
6073
6074 - writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_neo_uart->txrx);
6075 + writeb(circ->buf[circ->tail], &ch->ch_neo_uart->txrx);
6076 jsm_printk(WRITE, INFO, &ch->ch_bd->pci_dev,
6077 - "Tx data: %x\n", ch->ch_wqueue[ch->ch_w_head]);
6078 - ch->ch_w_tail++;
6079 - ch->ch_w_tail &= WQUEUEMASK;
6080 + "Tx data: %x\n", circ->buf[circ->head]);
6081 + circ->tail = (circ->tail + 1) & (UART_XMIT_SIZE - 1);
6082 ch->ch_txcount++;
6083 }
6084 return;
6085 @@ -536,36 +538,36 @@ static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch)
6086 n = UART_17158_TX_FIFOSIZE - ch->ch_t_tlevel;
6087
6088 /* cache head and tail of queue */
6089 - head = ch->ch_w_head & WQUEUEMASK;
6090 - tail = ch->ch_w_tail & WQUEUEMASK;
6091 - qlen = (head - tail) & WQUEUEMASK;
6092 + head = circ->head & (UART_XMIT_SIZE - 1);
6093 + tail = circ->tail & (UART_XMIT_SIZE - 1);
6094 + qlen = uart_circ_chars_pending(circ);
6095
6096 /* Find minimum of the FIFO space, versus queue length */
6097 n = min(n, qlen);
6098
6099 while (n > 0) {
6100
6101 - s = ((head >= tail) ? head : WQUEUESIZE) - tail;
6102 + s = ((head >= tail) ? head : UART_XMIT_SIZE) - tail;
6103 s = min(s, n);
6104
6105 if (s <= 0)
6106 break;
6107
6108 - memcpy_toio(&ch->ch_neo_uart->txrxburst, ch->ch_wqueue + tail, s);
6109 + memcpy_toio(&ch->ch_neo_uart->txrxburst, circ->buf + tail, s);
6110 /* Add and flip queue if needed */
6111 - tail = (tail + s) & WQUEUEMASK;
6112 + tail = (tail + s) & (UART_XMIT_SIZE - 1);
6113 n -= s;
6114 ch->ch_txcount += s;
6115 len_written += s;
6116 }
6117
6118 /* Update the final tail */
6119 - ch->ch_w_tail = tail & WQUEUEMASK;
6120 + circ->tail = tail & (UART_XMIT_SIZE - 1);
6121
6122 if (len_written >= ch->ch_t_tlevel)
6123 ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
6124
6125 - if (!jsm_tty_write(&ch->uart_port))
6126 + if (uart_circ_empty(circ))
6127 uart_write_wakeup(&ch->uart_port);
6128 }
6129
6130 @@ -946,7 +948,6 @@ static void neo_param(struct jsm_channel *ch)
6131 if ((ch->ch_c_cflag & (CBAUD)) == 0) {
6132 ch->ch_r_head = ch->ch_r_tail = 0;
6133 ch->ch_e_head = ch->ch_e_tail = 0;
6134 - ch->ch_w_head = ch->ch_w_tail = 0;
6135
6136 neo_flush_uart_write(ch);
6137 neo_flush_uart_read(ch);
6138 diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
6139 index 7a4a914..434bd88 100644
6140 --- a/drivers/tty/serial/jsm/jsm_tty.c
6141 +++ b/drivers/tty/serial/jsm/jsm_tty.c
6142 @@ -118,6 +118,19 @@ static void jsm_tty_set_mctrl(struct uart_port *port, unsigned int mctrl)
6143 udelay(10);
6144 }
6145
6146 +/*
6147 + * jsm_tty_write()
6148 + *
6149 + * Take data from the user or kernel and send it out to the FEP.
6150 + * In here exists all the Transparent Print magic as well.
6151 + */
6152 +static void jsm_tty_write(struct uart_port *port)
6153 +{
6154 + struct jsm_channel *channel;
6155 + channel = container_of(port, struct jsm_channel, uart_port);
6156 + channel->ch_bd->bd_ops->copy_data_from_queue_to_uart(channel);
6157 +}
6158 +
6159 static void jsm_tty_start_tx(struct uart_port *port)
6160 {
6161 struct jsm_channel *channel = (struct jsm_channel *)port;
6162 @@ -216,14 +229,6 @@ static int jsm_tty_open(struct uart_port *port)
6163 return -ENOMEM;
6164 }
6165 }
6166 - if (!channel->ch_wqueue) {
6167 - channel->ch_wqueue = kzalloc(WQUEUESIZE, GFP_KERNEL);
6168 - if (!channel->ch_wqueue) {
6169 - jsm_printk(INIT, ERR, &channel->ch_bd->pci_dev,
6170 - "unable to allocate write queue buf");
6171 - return -ENOMEM;
6172 - }
6173 - }
6174
6175 channel->ch_flags &= ~(CH_OPENING);
6176 /*
6177 @@ -237,7 +242,6 @@ static int jsm_tty_open(struct uart_port *port)
6178 */
6179 channel->ch_r_head = channel->ch_r_tail = 0;
6180 channel->ch_e_head = channel->ch_e_tail = 0;
6181 - channel->ch_w_head = channel->ch_w_tail = 0;
6182
6183 brd->bd_ops->flush_uart_write(channel);
6184 brd->bd_ops->flush_uart_read(channel);
6185 @@ -836,75 +840,3 @@ void jsm_check_queue_flow_control(struct jsm_channel *ch)
6186 }
6187 }
6188 }
6189 -
6190 -/*
6191 - * jsm_tty_write()
6192 - *
6193 - * Take data from the user or kernel and send it out to the FEP.
6194 - * In here exists all the Transparent Print magic as well.
6195 - */
6196 -int jsm_tty_write(struct uart_port *port)
6197 -{
6198 - int bufcount;
6199 - int data_count = 0,data_count1 =0;
6200 - u16 head;
6201 - u16 tail;
6202 - u16 tmask;
6203 - u32 remain;
6204 - int temp_tail = port->state->xmit.tail;
6205 - struct jsm_channel *channel = (struct jsm_channel *)port;
6206 -
6207 - tmask = WQUEUEMASK;
6208 - head = (channel->ch_w_head) & tmask;
6209 - tail = (channel->ch_w_tail) & tmask;
6210 -
6211 - if ((bufcount = tail - head - 1) < 0)
6212 - bufcount += WQUEUESIZE;
6213 -
6214 - bufcount = min(bufcount, 56);
6215 - remain = WQUEUESIZE - head;
6216 -
6217 - data_count = 0;
6218 - if (bufcount >= remain) {
6219 - bufcount -= remain;
6220 - while ((port->state->xmit.head != temp_tail) &&
6221 - (data_count < remain)) {
6222 - channel->ch_wqueue[head++] =
6223 - port->state->xmit.buf[temp_tail];
6224 -
6225 - temp_tail++;
6226 - temp_tail &= (UART_XMIT_SIZE - 1);
6227 - data_count++;
6228 - }
6229 - if (data_count == remain) head = 0;
6230 - }
6231 -
6232 - data_count1 = 0;
6233 - if (bufcount > 0) {
6234 - remain = bufcount;
6235 - while ((port->state->xmit.head != temp_tail) &&
6236 - (data_count1 < remain)) {
6237 - channel->ch_wqueue[head++] =
6238 - port->state->xmit.buf[temp_tail];
6239 -
6240 - temp_tail++;
6241 - temp_tail &= (UART_XMIT_SIZE - 1);
6242 - data_count1++;
6243 -
6244 - }
6245 - }
6246 -
6247 - port->state->xmit.tail = temp_tail;
6248 -
6249 - data_count += data_count1;
6250 - if (data_count) {
6251 - head &= tmask;
6252 - channel->ch_w_head = head;
6253 - }
6254 -
6255 - if (data_count) {
6256 - channel->ch_bd->bd_ops->copy_data_from_queue_to_uart(channel);
6257 - }
6258 -
6259 - return data_count;
6260 -}
6261 diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
6262 index 531931c..5c8e3bb 100644
6263 --- a/drivers/tty/serial/pxa.c
6264 +++ b/drivers/tty/serial/pxa.c
6265 @@ -100,6 +100,16 @@ static inline void receive_chars(struct uart_pxa_port *up, int *status)
6266 int max_count = 256;
6267
6268 do {
6269 + /* work around Errata #20 according to
6270 + * Intel(R) PXA27x Processor Family
6271 + * Specification Update (May 2005)
6272 + *
6273 + * Step 2:
6274 + * Disable the Receiver Time Out Interrupt via IER[RTOIE]
6275 + */
6276 + up->ier &= ~UART_IER_RTOIE;
6277 + serial_out(up, UART_IER, up->ier);
6278 +
6279 ch = serial_in(up, UART_RX);
6280 flag = TTY_NORMAL;
6281 up->port.icount.rx++;
6282 @@ -156,6 +166,16 @@ static inline void receive_chars(struct uart_pxa_port *up, int *status)
6283 *status = serial_in(up, UART_LSR);
6284 } while ((*status & UART_LSR_DR) && (max_count-- > 0));
6285 tty_flip_buffer_push(tty);
6286 +
6287 + /* work around Errata #20 according to
6288 + * Intel(R) PXA27x Processor Family
6289 + * Specification Update (May 2005)
6290 + *
6291 + * Step 6:
6292 + * No more data in FIFO: Re-enable RTO interrupt via IER[RTOIE]
6293 + */
6294 + up->ier |= UART_IER_RTOIE;
6295 + serial_out(up, UART_IER, up->ier);
6296 }
6297
6298 static void transmit_chars(struct uart_pxa_port *up)
6299 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
6300 index a3efbea..25f3094 100644
6301 --- a/drivers/tty/serial/serial_core.c
6302 +++ b/drivers/tty/serial/serial_core.c
6303 @@ -2008,6 +2008,8 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
6304 if (port->tty && port->tty->termios && termios.c_cflag == 0)
6305 termios = *(port->tty->termios);
6306
6307 + if (console_suspend_enabled)
6308 + uart_change_pm(state, 0);
6309 uport->ops->set_termios(uport, &termios, NULL);
6310 if (console_suspend_enabled)
6311 console_start(uport->cons);
6312 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
6313 index 4f1fc81..1a890e2 100644
6314 --- a/drivers/tty/tty_io.c
6315 +++ b/drivers/tty/tty_io.c
6316 @@ -194,8 +194,7 @@ static inline struct tty_struct *file_tty(struct file *file)
6317 return ((struct tty_file_private *)file->private_data)->tty;
6318 }
6319
6320 -/* Associate a new file with the tty structure */
6321 -int tty_add_file(struct tty_struct *tty, struct file *file)
6322 +int tty_alloc_file(struct file *file)
6323 {
6324 struct tty_file_private *priv;
6325
6326 @@ -203,15 +202,36 @@ int tty_add_file(struct tty_struct *tty, struct file *file)
6327 if (!priv)
6328 return -ENOMEM;
6329
6330 + file->private_data = priv;
6331 +
6332 + return 0;
6333 +}
6334 +
6335 +/* Associate a new file with the tty structure */
6336 +void tty_add_file(struct tty_struct *tty, struct file *file)
6337 +{
6338 + struct tty_file_private *priv = file->private_data;
6339 +
6340 priv->tty = tty;
6341 priv->file = file;
6342 - file->private_data = priv;
6343
6344 spin_lock(&tty_files_lock);
6345 list_add(&priv->list, &tty->tty_files);
6346 spin_unlock(&tty_files_lock);
6347 +}
6348
6349 - return 0;
6350 +/**
6351 + * tty_free_file - free file->private_data
6352 + *
6353 + * This shall be used only for fail path handling when tty_add_file was not
6354 + * called yet.
6355 + */
6356 +void tty_free_file(struct file *file)
6357 +{
6358 + struct tty_file_private *priv = file->private_data;
6359 +
6360 + file->private_data = NULL;
6361 + kfree(priv);
6362 }
6363
6364 /* Delete file from its tty */
6365 @@ -222,8 +242,7 @@ void tty_del_file(struct file *file)
6366 spin_lock(&tty_files_lock);
6367 list_del(&priv->list);
6368 spin_unlock(&tty_files_lock);
6369 - file->private_data = NULL;
6370 - kfree(priv);
6371 + tty_free_file(file);
6372 }
6373
6374
6375 @@ -1811,6 +1830,10 @@ static int tty_open(struct inode *inode, struct file *filp)
6376 nonseekable_open(inode, filp);
6377
6378 retry_open:
6379 + retval = tty_alloc_file(filp);
6380 + if (retval)
6381 + return -ENOMEM;
6382 +
6383 noctty = filp->f_flags & O_NOCTTY;
6384 index = -1;
6385 retval = 0;
6386 @@ -1823,6 +1846,7 @@ retry_open:
6387 if (!tty) {
6388 tty_unlock();
6389 mutex_unlock(&tty_mutex);
6390 + tty_free_file(filp);
6391 return -ENXIO;
6392 }
6393 driver = tty_driver_kref_get(tty->driver);
6394 @@ -1855,6 +1879,7 @@ retry_open:
6395 }
6396 tty_unlock();
6397 mutex_unlock(&tty_mutex);
6398 + tty_free_file(filp);
6399 return -ENODEV;
6400 }
6401
6402 @@ -1862,6 +1887,7 @@ retry_open:
6403 if (!driver) {
6404 tty_unlock();
6405 mutex_unlock(&tty_mutex);
6406 + tty_free_file(filp);
6407 return -ENODEV;
6408 }
6409 got_driver:
6410 @@ -1872,6 +1898,8 @@ got_driver:
6411 if (IS_ERR(tty)) {
6412 tty_unlock();
6413 mutex_unlock(&tty_mutex);
6414 + tty_driver_kref_put(driver);
6415 + tty_free_file(filp);
6416 return PTR_ERR(tty);
6417 }
6418 }
6419 @@ -1887,15 +1915,11 @@ got_driver:
6420 tty_driver_kref_put(driver);
6421 if (IS_ERR(tty)) {
6422 tty_unlock();
6423 + tty_free_file(filp);
6424 return PTR_ERR(tty);
6425 }
6426
6427 - retval = tty_add_file(tty, filp);
6428 - if (retval) {
6429 - tty_unlock();
6430 - tty_release(inode, filp);
6431 - return retval;
6432 - }
6433 + tty_add_file(tty, filp);
6434
6435 check_tty_count(tty, "tty_open");
6436 if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
6437 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
6438 index dac7676..5112f57 100644
6439 --- a/drivers/usb/class/cdc-acm.c
6440 +++ b/drivers/usb/class/cdc-acm.c
6441 @@ -1534,6 +1534,9 @@ static const struct usb_device_id acm_ids[] = {
6442 { NOKIA_PCSUITE_ACM_INFO(0x03cd), }, /* Nokia C7 */
6443 { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
6444
6445 + /* Support for Owen devices */
6446 + { USB_DEVICE(0x03eb, 0x0030), }, /* Owen SI30 */
6447 +
6448 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
6449
6450 /* Support Lego NXT using pbLua firmware */
6451 diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
6452 index 37518df..0ca54e2 100644
6453 --- a/drivers/usb/core/devio.c
6454 +++ b/drivers/usb/core/devio.c
6455 @@ -407,7 +407,7 @@ static void async_completed(struct urb *urb)
6456 sinfo.si_errno = as->status;
6457 sinfo.si_code = SI_ASYNCIO;
6458 sinfo.si_addr = as->userurb;
6459 - pid = as->pid;
6460 + pid = get_pid(as->pid);
6461 uid = as->uid;
6462 euid = as->euid;
6463 secid = as->secid;
6464 @@ -422,9 +422,11 @@ static void async_completed(struct urb *urb)
6465 cancel_bulk_urbs(ps, as->bulk_addr);
6466 spin_unlock(&ps->lock);
6467
6468 - if (signr)
6469 + if (signr) {
6470 kill_pid_info_as_uid(sinfo.si_signo, &sinfo, pid, uid,
6471 euid, secid);
6472 + put_pid(pid);
6473 + }
6474
6475 wake_up(&ps->wait);
6476 }
6477 @@ -607,9 +609,10 @@ static int findintfep(struct usb_device *dev, unsigned int ep)
6478 }
6479
6480 static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
6481 - unsigned int index)
6482 + unsigned int request, unsigned int index)
6483 {
6484 int ret = 0;
6485 + struct usb_host_interface *alt_setting;
6486
6487 if (ps->dev->state != USB_STATE_UNAUTHENTICATED
6488 && ps->dev->state != USB_STATE_ADDRESS
6489 @@ -618,6 +621,19 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
6490 if (USB_TYPE_VENDOR == (USB_TYPE_MASK & requesttype))
6491 return 0;
6492
6493 + /*
6494 + * check for the special corner case 'get_device_id' in the printer
6495 + * class specification, where wIndex is (interface << 8 | altsetting)
6496 + * instead of just interface
6497 + */
6498 + if (requesttype == 0xa1 && request == 0) {
6499 + alt_setting = usb_find_alt_setting(ps->dev->actconfig,
6500 + index >> 8, index & 0xff);
6501 + if (alt_setting
6502 + && alt_setting->desc.bInterfaceClass == USB_CLASS_PRINTER)
6503 + index >>= 8;
6504 + }
6505 +
6506 index &= 0xff;
6507 switch (requesttype & USB_RECIP_MASK) {
6508 case USB_RECIP_ENDPOINT:
6509 @@ -770,7 +786,8 @@ static int proc_control(struct dev_state *ps, void __user *arg)
6510
6511 if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
6512 return -EFAULT;
6513 - ret = check_ctrlrecip(ps, ctrl.bRequestType, ctrl.wIndex);
6514 + ret = check_ctrlrecip(ps, ctrl.bRequestType, ctrl.bRequest,
6515 + ctrl.wIndex);
6516 if (ret)
6517 return ret;
6518 wLength = ctrl.wLength; /* To suppress 64k PAGE_SIZE warning */
6519 @@ -1100,7 +1117,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
6520 kfree(dr);
6521 return -EINVAL;
6522 }
6523 - ret = check_ctrlrecip(ps, dr->bRequestType,
6524 + ret = check_ctrlrecip(ps, dr->bRequestType, dr->bRequest,
6525 le16_to_cpup(&dr->wIndex));
6526 if (ret) {
6527 kfree(dr);
6528 diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
6529 index 34e3da5..75b4bc0 100644
6530 --- a/drivers/usb/core/driver.c
6531 +++ b/drivers/usb/core/driver.c
6532 @@ -1583,7 +1583,7 @@ int usb_autopm_get_interface_async(struct usb_interface *intf)
6533 dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
6534 __func__, atomic_read(&intf->dev.power.usage_count),
6535 status);
6536 - if (status > 0)
6537 + if (status > 0 || status == -EINPROGRESS)
6538 status = 0;
6539 return status;
6540 }
6541 @@ -1668,6 +1668,11 @@ int usb_runtime_suspend(struct device *dev)
6542 return -EAGAIN;
6543
6544 status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
6545 +
6546 + /* Allow a retry if autosuspend failed temporarily */
6547 + if (status == -EAGAIN || status == -EBUSY)
6548 + usb_mark_last_busy(udev);
6549 +
6550 /* The PM core reacts badly unless the return code is 0,
6551 * -EAGAIN, or -EBUSY, so always return -EBUSY on an error.
6552 */
6553 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
6554 index 81ce6a8..d6a8d82 100644
6555 --- a/drivers/usb/core/quirks.c
6556 +++ b/drivers/usb/core/quirks.c
6557 @@ -38,6 +38,27 @@ static const struct usb_device_id usb_quirk_list[] = {
6558 /* Creative SB Audigy 2 NX */
6559 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
6560
6561 + /* Logitech Webcam C200 */
6562 + { USB_DEVICE(0x046d, 0x0802), .driver_info = USB_QUIRK_RESET_RESUME },
6563 +
6564 + /* Logitech Webcam C250 */
6565 + { USB_DEVICE(0x046d, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },
6566 +
6567 + /* Logitech Webcam C300 */
6568 + { USB_DEVICE(0x046d, 0x0805), .driver_info = USB_QUIRK_RESET_RESUME },
6569 +
6570 + /* Logitech Webcam B/C500 */
6571 + { USB_DEVICE(0x046d, 0x0807), .driver_info = USB_QUIRK_RESET_RESUME },
6572 +
6573 + /* Logitech Webcam Pro 9000 */
6574 + { USB_DEVICE(0x046d, 0x0809), .driver_info = USB_QUIRK_RESET_RESUME },
6575 +
6576 + /* Logitech Webcam C310 */
6577 + { USB_DEVICE(0x046d, 0x081b), .driver_info = USB_QUIRK_RESET_RESUME },
6578 +
6579 + /* Logitech Webcam C270 */
6580 + { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
6581 +
6582 /* Logitech Harmony 700-series */
6583 { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT },
6584
6585 @@ -69,6 +90,9 @@ static const struct usb_device_id usb_quirk_list[] = {
6586 { USB_DEVICE(0x06a3, 0x0006), .driver_info =
6587 USB_QUIRK_CONFIG_INTF_STRINGS },
6588
6589 + /* Guillemot Webcam Hercules Dualpix Exchange */
6590 + { USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },
6591 +
6592 /* M-Systems Flash Disk Pioneers */
6593 { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
6594
6595 diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
6596 index a341dde..0c06d80 100644
6597 --- a/drivers/usb/gadget/printer.c
6598 +++ b/drivers/usb/gadget/printer.c
6599 @@ -1611,7 +1611,7 @@ cleanup(void)
6600 if (status)
6601 ERROR(dev, "usb_gadget_unregister_driver %x\n", status);
6602
6603 - unregister_chrdev_region(g_printer_devno, 2);
6604 + unregister_chrdev_region(g_printer_devno, 1);
6605 class_destroy(usb_gadget_class);
6606 mutex_unlock(&usb_printer_gadget.lock_printer_io);
6607 }
6608 diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
6609 index 40a844c..3e2ccb0 100644
6610 --- a/drivers/usb/host/ehci-dbg.c
6611 +++ b/drivers/usb/host/ehci-dbg.c
6612 @@ -808,7 +808,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
6613 next += temp;
6614
6615 temp = scnprintf (next, size, "uframe %04x\n",
6616 - ehci_readl(ehci, &ehci->regs->frame_index));
6617 + ehci_read_frame_index(ehci));
6618 size -= temp;
6619 next += temp;
6620
6621 diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
6622 index f72ae0b..d7318e3 100644
6623 --- a/drivers/usb/host/ehci-hcd.c
6624 +++ b/drivers/usb/host/ehci-hcd.c
6625 @@ -768,6 +768,35 @@ static int ehci_run (struct usb_hcd *hcd)
6626 return 0;
6627 }
6628
6629 +static int __maybe_unused ehci_setup (struct usb_hcd *hcd)
6630 +{
6631 + struct ehci_hcd *ehci = hcd_to_ehci(hcd);
6632 + int retval;
6633 +
6634 + ehci->regs = (void __iomem *)ehci->caps +
6635 + HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
6636 + dbg_hcs_params(ehci, "reset");
6637 + dbg_hcc_params(ehci, "reset");
6638 +
6639 + /* cache this readonly data; minimize chip reads */
6640 + ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
6641 +
6642 + ehci->sbrn = HCD_USB2;
6643 +
6644 + retval = ehci_halt(ehci);
6645 + if (retval)
6646 + return retval;
6647 +
6648 + /* data structure init */
6649 + retval = ehci_init(hcd);
6650 + if (retval)
6651 + return retval;
6652 +
6653 + ehci_reset(ehci);
6654 +
6655 + return 0;
6656 +}
6657 +
6658 /*-------------------------------------------------------------------------*/
6659
6660 static irqreturn_t ehci_irq (struct usb_hcd *hcd)
6661 @@ -1166,8 +1195,7 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
6662 static int ehci_get_frame (struct usb_hcd *hcd)
6663 {
6664 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
6665 - return (ehci_readl(ehci, &ehci->regs->frame_index) >> 3) %
6666 - ehci->periodic_size;
6667 + return (ehci_read_frame_index(ehci) >> 3) % ehci->periodic_size;
6668 }
6669
6670 /*-------------------------------------------------------------------------*/
6671 diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
6672 index 1102ce6..1d1caa6 100644
6673 --- a/drivers/usb/host/ehci-pci.c
6674 +++ b/drivers/usb/host/ehci-pci.c
6675 @@ -224,6 +224,11 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
6676 pci_dev_put(p_smbus);
6677 }
6678 break;
6679 + case PCI_VENDOR_ID_NETMOS:
6680 + /* MosChip frame-index-register bug */
6681 + ehci_info(ehci, "applying MosChip frame-index workaround\n");
6682 + ehci->frame_index_bug = 1;
6683 + break;
6684 }
6685
6686 /* optional debug port, normally in the first BAR */
6687 diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
6688 index 2abf854..29bec34 100644
6689 --- a/drivers/usb/host/ehci-sched.c
6690 +++ b/drivers/usb/host/ehci-sched.c
6691 @@ -36,6 +36,27 @@
6692
6693 static int ehci_get_frame (struct usb_hcd *hcd);
6694
6695 +#ifdef CONFIG_PCI
6696 +
6697 +static unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
6698 +{
6699 + unsigned uf;
6700 +
6701 + /*
6702 + * The MosChip MCS9990 controller updates its microframe counter
6703 + * a little before the frame counter, and occasionally we will read
6704 + * the invalid intermediate value. Avoid problems by checking the
6705 + * microframe number (the low-order 3 bits); if they are 0 then
6706 + * re-read the register to get the correct value.
6707 + */
6708 + uf = ehci_readl(ehci, &ehci->regs->frame_index);
6709 + if (unlikely(ehci->frame_index_bug && ((uf & 7) == 0)))
6710 + uf = ehci_readl(ehci, &ehci->regs->frame_index);
6711 + return uf;
6712 +}
6713 +
6714 +#endif
6715 +
6716 /*-------------------------------------------------------------------------*/
6717
6718 /*
6719 @@ -482,7 +503,7 @@ static int enable_periodic (struct ehci_hcd *ehci)
6720 ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
6721
6722 /* make sure ehci_work scans these */
6723 - ehci->next_uframe = ehci_readl(ehci, &ehci->regs->frame_index)
6724 + ehci->next_uframe = ehci_read_frame_index(ehci)
6725 % (ehci->periodic_size << 3);
6726 if (unlikely(ehci->broken_periodic))
6727 ehci->last_periodic_enable = ktime_get_real();
6728 @@ -1409,7 +1430,7 @@ iso_stream_schedule (
6729 goto fail;
6730 }
6731
6732 - now = ehci_readl(ehci, &ehci->regs->frame_index) & (mod - 1);
6733 + now = ehci_read_frame_index(ehci) & (mod - 1);
6734
6735 /* Typical case: reuse current schedule, stream is still active.
6736 * Hopefully there are no gaps from the host falling behind
6737 @@ -2276,7 +2297,7 @@ scan_periodic (struct ehci_hcd *ehci)
6738 */
6739 now_uframe = ehci->next_uframe;
6740 if (HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
6741 - clock = ehci_readl(ehci, &ehci->regs->frame_index);
6742 + clock = ehci_read_frame_index(ehci);
6743 clock_frame = (clock >> 3) & (ehci->periodic_size - 1);
6744 } else {
6745 clock = now_uframe + mod - 1;
6746 @@ -2455,8 +2476,7 @@ restart:
6747 || ehci->periodic_sched == 0)
6748 break;
6749 ehci->next_uframe = now_uframe;
6750 - now = ehci_readl(ehci, &ehci->regs->frame_index) &
6751 - (mod - 1);
6752 + now = ehci_read_frame_index(ehci) & (mod - 1);
6753 if (now_uframe == now)
6754 break;
6755
6756 diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
6757 index cc7d337..d92ed5c 100644
6758 --- a/drivers/usb/host/ehci.h
6759 +++ b/drivers/usb/host/ehci.h
6760 @@ -139,6 +139,7 @@ struct ehci_hcd { /* one per controller */
6761 unsigned fs_i_thresh:1; /* Intel iso scheduling */
6762 unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/
6763 unsigned has_synopsys_hc_bug:1; /* Synopsys HC */
6764 + unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
6765
6766 /* required for usb32 quirk */
6767 #define OHCI_CTRL_HCFS (3 << 6)
6768 @@ -740,6 +741,22 @@ static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
6769
6770 /*-------------------------------------------------------------------------*/
6771
6772 +#ifdef CONFIG_PCI
6773 +
6774 +/* For working around the MosChip frame-index-register bug */
6775 +static unsigned ehci_read_frame_index(struct ehci_hcd *ehci);
6776 +
6777 +#else
6778 +
6779 +static inline unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
6780 +{
6781 + return ehci_readl(ehci, &ehci->regs->frame_index);
6782 +}
6783 +
6784 +#endif
6785 +
6786 +/*-------------------------------------------------------------------------*/
6787 +
6788 #ifndef DEBUG
6789 #define STUB_DEBUG_FILES
6790 #endif /* DEBUG */
6791 diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
6792 index a42ef38..2df851b 100644
6793 --- a/drivers/usb/host/fhci-sched.c
6794 +++ b/drivers/usb/host/fhci-sched.c
6795 @@ -1,7 +1,7 @@
6796 /*
6797 * Freescale QUICC Engine USB Host Controller Driver
6798 *
6799 - * Copyright (c) Freescale Semicondutor, Inc. 2006.
6800 + * Copyright (c) Freescale Semicondutor, Inc. 2006, 2011.
6801 * Shlomi Gridish <gridish@freescale.com>
6802 * Jerry Huang <Chang-Ming.Huang@freescale.com>
6803 * Copyright (c) Logic Product Development, Inc. 2007
6804 @@ -810,9 +810,11 @@ void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
6805 ed->dev_addr = usb_pipedevice(urb->pipe);
6806 ed->max_pkt_size = usb_maxpacket(urb->dev, urb->pipe,
6807 usb_pipeout(urb->pipe));
6808 + /* setup stage */
6809 td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++, FHCI_TA_SETUP,
6810 USB_TD_TOGGLE_DATA0, urb->setup_packet, 8, 0, 0, true);
6811
6812 + /* data stage */
6813 if (data_len > 0) {
6814 td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
6815 usb_pipeout(urb->pipe) ? FHCI_TA_OUT :
6816 @@ -820,9 +822,18 @@ void fhci_queue_urb(struct fhci_hcd *fhci, struct urb *urb)
6817 USB_TD_TOGGLE_DATA1, data, data_len, 0, 0,
6818 true);
6819 }
6820 - td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
6821 - usb_pipeout(urb->pipe) ? FHCI_TA_IN : FHCI_TA_OUT,
6822 - USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
6823 +
6824 + /* status stage */
6825 + if (data_len > 0)
6826 + td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
6827 + (usb_pipeout(urb->pipe) ? FHCI_TA_IN :
6828 + FHCI_TA_OUT),
6829 + USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
6830 + else
6831 + td = fhci_td_fill(fhci, urb, urb_priv, ed, cnt++,
6832 + FHCI_TA_IN,
6833 + USB_TD_TOGGLE_DATA1, data, 0, 0, 0, true);
6834 +
6835 urb_state = US_CTRL_SETUP;
6836 break;
6837 case FHCI_TF_ISO:
6838 diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
6839 index 9154615..2f00040 100644
6840 --- a/drivers/usb/host/ohci-hub.c
6841 +++ b/drivers/usb/host/ohci-hub.c
6842 @@ -356,10 +356,7 @@ static void ohci_finish_controller_resume(struct usb_hcd *hcd)
6843 msleep(20);
6844 }
6845
6846 - /* Does the root hub have a port wakeup pending? */
6847 - if (ohci_readl(ohci, &ohci->regs->intrstatus) &
6848 - (OHCI_INTR_RD | OHCI_INTR_RHSC))
6849 - usb_hcd_resume_root_hub(hcd);
6850 + usb_hcd_resume_root_hub(hcd);
6851 }
6852
6853 /* Carry out polling-, autostop-, and autoresume-related state changes */
6854 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
6855 index 723f823..ce9f974 100644
6856 --- a/drivers/usb/host/xhci-hub.c
6857 +++ b/drivers/usb/host/xhci-hub.c
6858 @@ -392,6 +392,20 @@ static int xhci_get_ports(struct usb_hcd *hcd, __le32 __iomem ***port_array)
6859 return max_ports;
6860 }
6861
6862 +/* Test and clear port RWC bit */
6863 +void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
6864 + int port_id, u32 port_bit)
6865 +{
6866 + u32 temp;
6867 +
6868 + temp = xhci_readl(xhci, port_array[port_id]);
6869 + if (temp & port_bit) {
6870 + temp = xhci_port_state_to_neutral(temp);
6871 + temp |= port_bit;
6872 + xhci_writel(xhci, temp, port_array[port_id]);
6873 + }
6874 +}
6875 +
6876 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
6877 u16 wIndex, char *buf, u16 wLength)
6878 {
6879 @@ -938,12 +952,8 @@ int xhci_bus_resume(struct usb_hcd *hcd)
6880 spin_lock_irqsave(&xhci->lock, flags);
6881
6882 /* Clear PLC */
6883 - temp = xhci_readl(xhci, port_array[port_index]);
6884 - if (temp & PORT_PLC) {
6885 - temp = xhci_port_state_to_neutral(temp);
6886 - temp |= PORT_PLC;
6887 - xhci_writel(xhci, temp, port_array[port_index]);
6888 - }
6889 + xhci_test_and_clear_bit(xhci, port_array, port_index,
6890 + PORT_PLC);
6891
6892 slot_id = xhci_find_slot_id_by_port(hcd,
6893 xhci, port_index + 1);
6894 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
6895 index d446886..3428528 100644
6896 --- a/drivers/usb/host/xhci-mem.c
6897 +++ b/drivers/usb/host/xhci-mem.c
6898 @@ -81,7 +81,7 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
6899 * related flags, such as End TRB, Toggle Cycle, and no snoop.
6900 */
6901 static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
6902 - struct xhci_segment *next, bool link_trbs)
6903 + struct xhci_segment *next, bool link_trbs, bool isoc)
6904 {
6905 u32 val;
6906
6907 @@ -97,7 +97,9 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
6908 val &= ~TRB_TYPE_BITMASK;
6909 val |= TRB_TYPE(TRB_LINK);
6910 /* Always set the chain bit with 0.95 hardware */
6911 - if (xhci_link_trb_quirk(xhci))
6912 + /* Set chain bit for isoc rings on AMD 0.96 host */
6913 + if (xhci_link_trb_quirk(xhci) ||
6914 + (isoc && (xhci->quirks & XHCI_AMD_0x96_HOST)))
6915 val |= TRB_CHAIN;
6916 prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
6917 }
6918 @@ -112,18 +114,20 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
6919 struct xhci_segment *seg;
6920 struct xhci_segment *first_seg;
6921
6922 - if (!ring || !ring->first_seg)
6923 + if (!ring)
6924 return;
6925 - first_seg = ring->first_seg;
6926 - seg = first_seg->next;
6927 - xhci_dbg(xhci, "Freeing ring at %p\n", ring);
6928 - while (seg != first_seg) {
6929 - struct xhci_segment *next = seg->next;
6930 - xhci_segment_free(xhci, seg);
6931 - seg = next;
6932 + if (ring->first_seg) {
6933 + first_seg = ring->first_seg;
6934 + seg = first_seg->next;
6935 + xhci_dbg(xhci, "Freeing ring at %p\n", ring);
6936 + while (seg != first_seg) {
6937 + struct xhci_segment *next = seg->next;
6938 + xhci_segment_free(xhci, seg);
6939 + seg = next;
6940 + }
6941 + xhci_segment_free(xhci, first_seg);
6942 + ring->first_seg = NULL;
6943 }
6944 - xhci_segment_free(xhci, first_seg);
6945 - ring->first_seg = NULL;
6946 kfree(ring);
6947 }
6948
6949 @@ -152,7 +156,7 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
6950 * See section 4.9.1 and figures 15 and 16.
6951 */
6952 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
6953 - unsigned int num_segs, bool link_trbs, gfp_t flags)
6954 + unsigned int num_segs, bool link_trbs, bool isoc, gfp_t flags)
6955 {
6956 struct xhci_ring *ring;
6957 struct xhci_segment *prev;
6958 @@ -178,12 +182,12 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
6959 next = xhci_segment_alloc(xhci, flags);
6960 if (!next)
6961 goto fail;
6962 - xhci_link_segments(xhci, prev, next, link_trbs);
6963 + xhci_link_segments(xhci, prev, next, link_trbs, isoc);
6964
6965 prev = next;
6966 num_segs--;
6967 }
6968 - xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
6969 + xhci_link_segments(xhci, prev, ring->first_seg, link_trbs, isoc);
6970
6971 if (link_trbs) {
6972 /* See section 4.9.2.1 and 6.4.4.1 */
6973 @@ -229,14 +233,14 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
6974 * pointers to the beginning of the ring.
6975 */
6976 static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
6977 - struct xhci_ring *ring)
6978 + struct xhci_ring *ring, bool isoc)
6979 {
6980 struct xhci_segment *seg = ring->first_seg;
6981 do {
6982 memset(seg->trbs, 0,
6983 sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
6984 /* All endpoint rings have link TRBs */
6985 - xhci_link_segments(xhci, seg, seg->next, 1);
6986 + xhci_link_segments(xhci, seg, seg->next, 1, isoc);
6987 seg = seg->next;
6988 } while (seg != ring->first_seg);
6989 xhci_initialize_ring_info(ring);
6990 @@ -540,7 +544,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
6991 */
6992 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
6993 stream_info->stream_rings[cur_stream] =
6994 - xhci_ring_alloc(xhci, 1, true, mem_flags);
6995 + xhci_ring_alloc(xhci, 1, true, false, mem_flags);
6996 cur_ring = stream_info->stream_rings[cur_stream];
6997 if (!cur_ring)
6998 goto cleanup_rings;
6999 @@ -765,7 +769,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
7000 }
7001
7002 /* Allocate endpoint 0 ring */
7003 - dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
7004 + dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, false, flags);
7005 if (!dev->eps[0].ring)
7006 goto fail;
7007
7008 @@ -1175,10 +1179,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
7009 */
7010 if (usb_endpoint_xfer_isoc(&ep->desc))
7011 virt_dev->eps[ep_index].new_ring =
7012 - xhci_ring_alloc(xhci, 8, true, mem_flags);
7013 + xhci_ring_alloc(xhci, 8, true, true, mem_flags);
7014 else
7015 virt_dev->eps[ep_index].new_ring =
7016 - xhci_ring_alloc(xhci, 1, true, mem_flags);
7017 + xhci_ring_alloc(xhci, 1, true, false, mem_flags);
7018 if (!virt_dev->eps[ep_index].new_ring) {
7019 /* Attempt to use the ring cache */
7020 if (virt_dev->num_rings_cached == 0)
7021 @@ -1187,7 +1191,8 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
7022 virt_dev->ring_cache[virt_dev->num_rings_cached];
7023 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
7024 virt_dev->num_rings_cached--;
7025 - xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
7026 + xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
7027 + usb_endpoint_xfer_isoc(&ep->desc) ? true : false);
7028 }
7029 virt_dev->eps[ep_index].skip = false;
7030 ep_ring = virt_dev->eps[ep_index].new_ring;
7031 @@ -2001,7 +2006,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
7032 goto fail;
7033
7034 /* Set up the command ring to have one segments for now. */
7035 - xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
7036 + xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
7037 if (!xhci->cmd_ring)
7038 goto fail;
7039 xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
7040 @@ -2032,7 +2037,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
7041 * the event ring segment table (ERST). Section 4.9.3.
7042 */
7043 xhci_dbg(xhci, "// Allocating event ring\n");
7044 - xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
7045 + xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, false,
7046 + flags);
7047 if (!xhci->event_ring)
7048 goto fail;
7049 if (xhci_check_trb_in_td_math(xhci, flags) < 0)
7050 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
7051 index cb16de2..50e7156 100644
7052 --- a/drivers/usb/host/xhci-pci.c
7053 +++ b/drivers/usb/host/xhci-pci.c
7054 @@ -128,6 +128,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
7055 if (pdev->vendor == PCI_VENDOR_ID_NEC)
7056 xhci->quirks |= XHCI_NEC_HOST;
7057
7058 + if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96)
7059 + xhci->quirks |= XHCI_AMD_0x96_HOST;
7060 +
7061 /* AMD PLL quirk */
7062 if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
7063 xhci->quirks |= XHCI_AMD_PLL_FIX;
7064 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
7065 index 952e2de..dd3eb6f 100644
7066 --- a/drivers/usb/host/xhci-ring.c
7067 +++ b/drivers/usb/host/xhci-ring.c
7068 @@ -185,7 +185,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
7069 * prepare_transfer()?
7070 */
7071 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
7072 - bool consumer, bool more_trbs_coming)
7073 + bool consumer, bool more_trbs_coming, bool isoc)
7074 {
7075 u32 chain;
7076 union xhci_trb *next;
7077 @@ -212,11 +212,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
7078 if (!chain && !more_trbs_coming)
7079 break;
7080
7081 - /* If we're not dealing with 0.95 hardware,
7082 + /* If we're not dealing with 0.95 hardware or
7083 + * isoc rings on AMD 0.96 host,
7084 * carry over the chain bit of the previous TRB
7085 * (which may mean the chain bit is cleared).
7086 */
7087 - if (!xhci_link_trb_quirk(xhci)) {
7088 + if (!(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST))
7089 + && !xhci_link_trb_quirk(xhci)) {
7090 next->link.control &=
7091 cpu_to_le32(~TRB_CHAIN);
7092 next->link.control |=
7093 @@ -1342,10 +1344,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
7094 xhci_ring_device(xhci, slot_id);
7095 xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
7096 /* Clear PORT_PLC */
7097 - temp = xhci_readl(xhci, port_array[faked_port_index]);
7098 - temp = xhci_port_state_to_neutral(temp);
7099 - temp |= PORT_PLC;
7100 - xhci_writel(xhci, temp, port_array[faked_port_index]);
7101 + xhci_test_and_clear_bit(xhci, port_array,
7102 + faked_port_index, PORT_PLC);
7103 } else {
7104 xhci_dbg(xhci, "resume HS port %d\n", port_id);
7105 bus_state->resume_done[faked_port_index] = jiffies +
7106 @@ -1356,6 +1356,10 @@ static void handle_port_status(struct xhci_hcd *xhci,
7107 }
7108 }
7109
7110 + if (hcd->speed != HCD_USB3)
7111 + xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
7112 + PORT_PLC);
7113 +
7114 cleanup:
7115 /* Update event ring dequeue pointer before dropping the lock */
7116 inc_deq(xhci, xhci->event_ring, true);
7117 @@ -2409,7 +2413,7 @@ irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
7118 * prepare_transfer()?
7119 */
7120 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
7121 - bool consumer, bool more_trbs_coming,
7122 + bool consumer, bool more_trbs_coming, bool isoc,
7123 u32 field1, u32 field2, u32 field3, u32 field4)
7124 {
7125 struct xhci_generic_trb *trb;
7126 @@ -2419,7 +2423,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
7127 trb->field[1] = cpu_to_le32(field2);
7128 trb->field[2] = cpu_to_le32(field3);
7129 trb->field[3] = cpu_to_le32(field4);
7130 - inc_enq(xhci, ring, consumer, more_trbs_coming);
7131 + inc_enq(xhci, ring, consumer, more_trbs_coming, isoc);
7132 }
7133
7134 /*
7135 @@ -2427,7 +2431,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
7136 * FIXME allocate segments if the ring is full.
7137 */
7138 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
7139 - u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
7140 + u32 ep_state, unsigned int num_trbs, bool isoc, gfp_t mem_flags)
7141 {
7142 /* Make sure the endpoint has been added to xHC schedule */
7143 switch (ep_state) {
7144 @@ -2469,10 +2473,11 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
7145 next = ring->enqueue;
7146
7147 while (last_trb(xhci, ring, ring->enq_seg, next)) {
7148 - /* If we're not dealing with 0.95 hardware,
7149 - * clear the chain bit.
7150 + /* If we're not dealing with 0.95 hardware or isoc rings
7151 + * on AMD 0.96 host, clear the chain bit.
7152 */
7153 - if (!xhci_link_trb_quirk(xhci))
7154 + if (!xhci_link_trb_quirk(xhci) && !(isoc &&
7155 + (xhci->quirks & XHCI_AMD_0x96_HOST)))
7156 next->link.control &= cpu_to_le32(~TRB_CHAIN);
7157 else
7158 next->link.control |= cpu_to_le32(TRB_CHAIN);
7159 @@ -2505,6 +2510,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
7160 unsigned int num_trbs,
7161 struct urb *urb,
7162 unsigned int td_index,
7163 + bool isoc,
7164 gfp_t mem_flags)
7165 {
7166 int ret;
7167 @@ -2522,7 +2528,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
7168
7169 ret = prepare_ring(xhci, ep_ring,
7170 le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
7171 - num_trbs, mem_flags);
7172 + num_trbs, isoc, mem_flags);
7173 if (ret)
7174 return ret;
7175
7176 @@ -2745,7 +2751,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
7177
7178 trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
7179 ep_index, urb->stream_id,
7180 - num_trbs, urb, 0, mem_flags);
7181 + num_trbs, urb, 0, false, mem_flags);
7182 if (trb_buff_len < 0)
7183 return trb_buff_len;
7184
7185 @@ -2840,7 +2846,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
7186 more_trbs_coming = true;
7187 else
7188 more_trbs_coming = false;
7189 - queue_trb(xhci, ep_ring, false, more_trbs_coming,
7190 + queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
7191 lower_32_bits(addr),
7192 upper_32_bits(addr),
7193 length_field,
7194 @@ -2931,7 +2937,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
7195
7196 ret = prepare_transfer(xhci, xhci->devs[slot_id],
7197 ep_index, urb->stream_id,
7198 - num_trbs, urb, 0, mem_flags);
7199 + num_trbs, urb, 0, false, mem_flags);
7200 if (ret < 0)
7201 return ret;
7202
7203 @@ -3003,7 +3009,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
7204 more_trbs_coming = true;
7205 else
7206 more_trbs_coming = false;
7207 - queue_trb(xhci, ep_ring, false, more_trbs_coming,
7208 + queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
7209 lower_32_bits(addr),
7210 upper_32_bits(addr),
7211 length_field,
7212 @@ -3063,7 +3069,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
7213 num_trbs++;
7214 ret = prepare_transfer(xhci, xhci->devs[slot_id],
7215 ep_index, urb->stream_id,
7216 - num_trbs, urb, 0, mem_flags);
7217 + num_trbs, urb, 0, false, mem_flags);
7218 if (ret < 0)
7219 return ret;
7220
7221 @@ -3096,7 +3102,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
7222 }
7223 }
7224
7225 - queue_trb(xhci, ep_ring, false, true,
7226 + queue_trb(xhci, ep_ring, false, true, false,
7227 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
7228 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
7229 TRB_LEN(8) | TRB_INTR_TARGET(0),
7230 @@ -3116,7 +3122,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
7231 if (urb->transfer_buffer_length > 0) {
7232 if (setup->bRequestType & USB_DIR_IN)
7233 field |= TRB_DIR_IN;
7234 - queue_trb(xhci, ep_ring, false, true,
7235 + queue_trb(xhci, ep_ring, false, true, false,
7236 lower_32_bits(urb->transfer_dma),
7237 upper_32_bits(urb->transfer_dma),
7238 length_field,
7239 @@ -3132,7 +3138,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
7240 field = 0;
7241 else
7242 field = TRB_DIR_IN;
7243 - queue_trb(xhci, ep_ring, false, false,
7244 + queue_trb(xhci, ep_ring, false, false, false,
7245 0,
7246 0,
7247 TRB_INTR_TARGET(0),
7248 @@ -3281,7 +3287,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
7249 trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
7250
7251 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
7252 - urb->stream_id, trbs_per_td, urb, i, mem_flags);
7253 + urb->stream_id, trbs_per_td, urb, i, true,
7254 + mem_flags);
7255 if (ret < 0) {
7256 if (i == 0)
7257 return ret;
7258 @@ -3351,7 +3358,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
7259 remainder |
7260 TRB_INTR_TARGET(0);
7261
7262 - queue_trb(xhci, ep_ring, false, more_trbs_coming,
7263 + queue_trb(xhci, ep_ring, false, more_trbs_coming, true,
7264 lower_32_bits(addr),
7265 upper_32_bits(addr),
7266 length_field,
7267 @@ -3433,7 +3440,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
7268 * Do not insert any td of the urb to the ring if the check failed.
7269 */
7270 ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
7271 - num_trbs, mem_flags);
7272 + num_trbs, true, mem_flags);
7273 if (ret)
7274 return ret;
7275
7276 @@ -3492,7 +3499,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
7277 reserved_trbs++;
7278
7279 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
7280 - reserved_trbs, GFP_ATOMIC);
7281 + reserved_trbs, false, GFP_ATOMIC);
7282 if (ret < 0) {
7283 xhci_err(xhci, "ERR: No room for command on command ring\n");
7284 if (command_must_succeed)
7285 @@ -3500,8 +3507,8 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
7286 "unfailable commands failed.\n");
7287 return ret;
7288 }
7289 - queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
7290 - field4 | xhci->cmd_ring->cycle_state);
7291 + queue_trb(xhci, xhci->cmd_ring, false, false, false, field1, field2,
7292 + field3, field4 | xhci->cmd_ring->cycle_state);
7293 return 0;
7294 }
7295
7296 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
7297 index 3a0f695..3770004 100644
7298 --- a/drivers/usb/host/xhci.c
7299 +++ b/drivers/usb/host/xhci.c
7300 @@ -1888,6 +1888,12 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
7301 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
7302 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
7303 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
7304 +
7305 + /* Don't issue the command if there's no endpoints to update. */
7306 + if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
7307 + ctrl_ctx->drop_flags == 0)
7308 + return 0;
7309 +
7310 xhci_dbg(xhci, "New Input Control Context:\n");
7311 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
7312 xhci_dbg_ctx(xhci, virt_dev->in_ctx,
7313 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
7314 index cae8e23..1fb0549 100644
7315 --- a/drivers/usb/host/xhci.h
7316 +++ b/drivers/usb/host/xhci.h
7317 @@ -1318,6 +1318,7 @@ struct xhci_hcd {
7318 #define XHCI_EP_LIMIT_QUIRK (1 << 5)
7319 #define XHCI_BROKEN_MSI (1 << 6)
7320 #define XHCI_RESET_ON_RESUME (1 << 7)
7321 +#define XHCI_AMD_0x96_HOST (1 << 9)
7322 unsigned int num_active_eps;
7323 unsigned int limit_active_eps;
7324 /* There are two roothubs to keep track of bus suspend info for */
7325 @@ -1572,6 +1573,8 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
7326 unsigned int ep_index, unsigned int stream_id);
7327
7328 /* xHCI roothub code */
7329 +void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
7330 + int port_id, u32 port_bit);
7331 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
7332 char *buf, u16 wLength);
7333 int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
7334 diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
7335 index a09dbd2..a04b2ff 100644
7336 --- a/drivers/usb/mon/mon_bin.c
7337 +++ b/drivers/usb/mon/mon_bin.c
7338 @@ -1101,7 +1101,7 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg
7339 nevents = mon_bin_queued(rp);
7340
7341 sp = (struct mon_bin_stats __user *)arg;
7342 - if (put_user(rp->cnt_lost, &sp->dropped))
7343 + if (put_user(ndropped, &sp->dropped))
7344 return -EFAULT;
7345 if (put_user(nevents, &sp->queued))
7346 return -EFAULT;
7347 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
7348 index 5fc13e7..f34f6ed 100644
7349 --- a/drivers/usb/serial/ftdi_sio.c
7350 +++ b/drivers/usb/serial/ftdi_sio.c
7351 @@ -207,6 +207,8 @@ static struct usb_device_id id_table_combined [] = {
7352 { USB_DEVICE(FTDI_VID, FTDI_XF_640_PID) },
7353 { USB_DEVICE(FTDI_VID, FTDI_XF_642_PID) },
7354 { USB_DEVICE(FTDI_VID, FTDI_DSS20_PID) },
7355 + { USB_DEVICE(FTDI_VID, FTDI_URBAN_0_PID) },
7356 + { USB_DEVICE(FTDI_VID, FTDI_URBAN_1_PID) },
7357 { USB_DEVICE(FTDI_NF_RIC_VID, FTDI_NF_RIC_PID) },
7358 { USB_DEVICE(FTDI_VID, FTDI_VNHCPCUSB_D_PID) },
7359 { USB_DEVICE(FTDI_VID, FTDI_MTXORB_0_PID) },
7360 @@ -745,6 +747,8 @@ static struct usb_device_id id_table_combined [] = {
7361 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
7362 { USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID),
7363 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
7364 + { USB_DEVICE(FTDI_VID, LMI_LM3S_ICDI_BOARD_PID),
7365 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
7366 { USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID),
7367 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
7368 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
7369 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
7370 index bf5227a..571fa96 100644
7371 --- a/drivers/usb/serial/ftdi_sio_ids.h
7372 +++ b/drivers/usb/serial/ftdi_sio_ids.h
7373 @@ -54,6 +54,7 @@
7374 /* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */
7375 #define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
7376 #define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
7377 +#define LMI_LM3S_ICDI_BOARD_PID 0xbcda
7378
7379 #define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmbH */
7380
7381 @@ -420,9 +421,11 @@
7382 #define PROTEGO_SPECIAL_4 0xFC73 /* special/unknown device */
7383
7384 /*
7385 - * DSS-20 Sync Station for Sony Ericsson P800
7386 + * Sony Ericsson product ids
7387 */
7388 -#define FTDI_DSS20_PID 0xFC82
7389 +#define FTDI_DSS20_PID 0xFC82 /* DSS-20 Sync Station for Sony Ericsson P800 */
7390 +#define FTDI_URBAN_0_PID 0xFC8A /* Sony Ericsson Urban, uart #0 */
7391 +#define FTDI_URBAN_1_PID 0xFC8B /* Sony Ericsson Urban, uart #1 */
7392
7393 /* www.irtrans.de device */
7394 #define FTDI_IRTRANS_PID 0xFC60 /* Product Id */
7395 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
7396 index fe22e90..89ae1f6 100644
7397 --- a/drivers/usb/serial/option.c
7398 +++ b/drivers/usb/serial/option.c
7399 @@ -475,31 +475,54 @@ enum option_blacklist_reason {
7400 OPTION_BLACKLIST_RESERVED_IF = 2
7401 };
7402
7403 +#define MAX_BL_NUM 8
7404 struct option_blacklist_info {
7405 - const u32 infolen; /* number of interface numbers on blacklist */
7406 - const u8 *ifaceinfo; /* pointer to the array holding the numbers */
7407 - enum option_blacklist_reason reason;
7408 + /* bitfield of interface numbers for OPTION_BLACKLIST_SENDSETUP */
7409 + const unsigned long sendsetup;
7410 + /* bitfield of interface numbers for OPTION_BLACKLIST_RESERVED_IF */
7411 + const unsigned long reserved;
7412 };
7413
7414 -static const u8 four_g_w14_no_sendsetup[] = { 0, 1 };
7415 static const struct option_blacklist_info four_g_w14_blacklist = {
7416 - .infolen = ARRAY_SIZE(four_g_w14_no_sendsetup),
7417 - .ifaceinfo = four_g_w14_no_sendsetup,
7418 - .reason = OPTION_BLACKLIST_SENDSETUP
7419 + .sendsetup = BIT(0) | BIT(1),
7420 };
7421
7422 -static const u8 alcatel_x200_no_sendsetup[] = { 0, 1 };
7423 static const struct option_blacklist_info alcatel_x200_blacklist = {
7424 - .infolen = ARRAY_SIZE(alcatel_x200_no_sendsetup),
7425 - .ifaceinfo = alcatel_x200_no_sendsetup,
7426 - .reason = OPTION_BLACKLIST_SENDSETUP
7427 + .sendsetup = BIT(0) | BIT(1),
7428 +};
7429 +
7430 +static const struct option_blacklist_info zte_0037_blacklist = {
7431 + .sendsetup = BIT(0) | BIT(1),
7432 };
7433
7434 -static const u8 zte_k3765_z_no_sendsetup[] = { 0, 1, 2 };
7435 static const struct option_blacklist_info zte_k3765_z_blacklist = {
7436 - .infolen = ARRAY_SIZE(zte_k3765_z_no_sendsetup),
7437 - .ifaceinfo = zte_k3765_z_no_sendsetup,
7438 - .reason = OPTION_BLACKLIST_SENDSETUP
7439 + .sendsetup = BIT(0) | BIT(1) | BIT(2),
7440 + .reserved = BIT(4),
7441 +};
7442 +
7443 +static const struct option_blacklist_info huawei_cdc12_blacklist = {
7444 + .reserved = BIT(1) | BIT(2),
7445 +};
7446 +
7447 +static const struct option_blacklist_info net_intf1_blacklist = {
7448 + .reserved = BIT(1),
7449 +};
7450 +
7451 +static const struct option_blacklist_info net_intf3_blacklist = {
7452 + .reserved = BIT(3),
7453 +};
7454 +
7455 +static const struct option_blacklist_info net_intf4_blacklist = {
7456 + .reserved = BIT(4),
7457 +};
7458 +
7459 +static const struct option_blacklist_info net_intf5_blacklist = {
7460 + .reserved = BIT(5),
7461 +};
7462 +
7463 +static const struct option_blacklist_info zte_mf626_blacklist = {
7464 + .sendsetup = BIT(0) | BIT(1),
7465 + .reserved = BIT(4),
7466 };
7467
7468 static const struct usb_device_id option_ids[] = {
7469 @@ -599,12 +622,15 @@ static const struct usb_device_id option_ids[] = {
7470 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
7471 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
7472 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
7473 - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) },
7474 - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
7475 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
7476 + .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
7477 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
7478 + .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
7479 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
7480 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
7481 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
7482 - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff) },
7483 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
7484 + .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
7485 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
7486 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
7487 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
7488 @@ -705,7 +731,8 @@ static const struct usb_device_id option_ids[] = {
7489 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
7490 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
7491 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
7492 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff) },
7493 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
7494 + .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
7495 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
7496 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
7497 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
7498 @@ -720,51 +747,62 @@ static const struct usb_device_id option_ids[] = {
7499 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000f, 0xff, 0xff, 0xff) },
7500 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) },
7501 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
7502 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) },
7503 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff),
7504 + .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
7505 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
7506 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) },
7507 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
7508 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
7509 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) },
7510 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
7511 + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
7512 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
7513 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) },
7514 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
7515 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff) },
7516 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
7517 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
7518 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) },
7519 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
7520 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
7521 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) },
7522 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff),
7523 + .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
7524 /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, */
7525 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
7526 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
7527 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
7528 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff,
7529 - 0xff, 0xff), .driver_info = (kernel_ulong_t)&four_g_w14_blacklist },
7530 + 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mf626_blacklist },
7531 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
7532 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
7533 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) },
7534 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) },
7535 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff),
7536 + .driver_info = (kernel_ulong_t)&zte_0037_blacklist },
7537 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) },
7538 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
7539 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) },
7540 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) },
7541 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff),
7542 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
7543 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
7544 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) },
7545 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
7546 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) },
7547 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff),
7548 + .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
7549 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) },
7550 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
7551 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) },
7552 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff),
7553 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
7554 /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0053, 0xff, 0xff, 0xff) }, */
7555 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
7556 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) },
7557 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff),
7558 + .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
7559 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) },
7560 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
7561 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) },
7562 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff),
7563 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
7564 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
7565 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
7566 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
7567 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) },
7568 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff),
7569 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
7570 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
7571 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) },
7572 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
7573 @@ -779,11 +817,13 @@ static const struct usb_device_id option_ids[] = {
7574 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0083, 0xff, 0xff, 0xff) },
7575 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
7576 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0087, 0xff, 0xff, 0xff) },
7577 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) },
7578 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff),
7579 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
7580 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
7581 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
7582 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
7583 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) },
7584 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
7585 + .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
7586 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
7587 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) },
7588 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) },
7589 @@ -1214,10 +1254,35 @@ static void __exit option_exit(void)
7590 module_init(option_init);
7591 module_exit(option_exit);
7592
7593 +static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason,
7594 + const struct option_blacklist_info *blacklist)
7595 +{
7596 + unsigned long num;
7597 + const unsigned long *intf_list;
7598 +
7599 + if (blacklist) {
7600 + if (reason == OPTION_BLACKLIST_SENDSETUP)
7601 + intf_list = &blacklist->sendsetup;
7602 + else if (reason == OPTION_BLACKLIST_RESERVED_IF)
7603 + intf_list = &blacklist->reserved;
7604 + else {
7605 + BUG_ON(reason);
7606 + return false;
7607 + }
7608 +
7609 + for_each_set_bit(num, intf_list, MAX_BL_NUM + 1) {
7610 + if (num == ifnum)
7611 + return true;
7612 + }
7613 + }
7614 + return false;
7615 +}
7616 +
7617 static int option_probe(struct usb_serial *serial,
7618 const struct usb_device_id *id)
7619 {
7620 struct usb_wwan_intf_private *data;
7621 +
7622 /* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */
7623 if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID &&
7624 serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 &&
7625 @@ -1230,14 +1295,15 @@ static int option_probe(struct usb_serial *serial,
7626 serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
7627 return -ENODEV;
7628
7629 - /* Don't bind network interfaces on Huawei K3765, K4505 & K4605 */
7630 - if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID &&
7631 - (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 ||
7632 - serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505 ||
7633 - serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4605) &&
7634 - (serial->interface->cur_altsetting->desc.bInterfaceNumber == 1 ||
7635 - serial->interface->cur_altsetting->desc.bInterfaceNumber == 2))
7636 - return -ENODEV;
7637 + /* Don't bind reserved interfaces (like network ones) which often have
7638 + * the same class/subclass/protocol as the serial interfaces. Look at
7639 + * the Windows driver .INF files for reserved interface numbers.
7640 + */
7641 + if (is_blacklisted(
7642 + serial->interface->cur_altsetting->desc.bInterfaceNumber,
7643 + OPTION_BLACKLIST_RESERVED_IF,
7644 + (const struct option_blacklist_info *) id->driver_info))
7645 + return -ENODEV;
7646
7646 /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */
7647 if (serial->dev->descriptor.idVendor == SAMSUNG_VENDOR_ID &&
7648 @@ -1246,7 +1311,6 @@ static int option_probe(struct usb_serial *serial,
7649 return -ENODEV;
7650
7651 data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
7652 -
7653 if (!data)
7654 return -ENOMEM;
7655 data->send_setup = option_send_setup;
7656 @@ -1255,23 +1319,6 @@ static int option_probe(struct usb_serial *serial,
7657 return 0;
7658 }
7659
7660 -static enum option_blacklist_reason is_blacklisted(const u8 ifnum,
7661 - const struct option_blacklist_info *blacklist)
7662 -{
7663 - const u8 *info;
7664 - int i;
7665 -
7666 - if (blacklist) {
7667 - info = blacklist->ifaceinfo;
7668 -
7669 - for (i = 0; i < blacklist->infolen; i++) {
7670 - if (info[i] == ifnum)
7671 - return blacklist->reason;
7672 - }
7673 - }
7674 - return OPTION_BLACKLIST_NONE;
7675 -}
7676 -
7677 static void option_instat_callback(struct urb *urb)
7678 {
7679 int err;
7680 @@ -1343,9 +1390,8 @@ static int option_send_setup(struct usb_serial_port *port)
7681 int val = 0;
7682 dbg("%s", __func__);
7683
7684 - if (is_blacklisted(ifNum,
7685 - (struct option_blacklist_info *) intfdata->private)
7686 - == OPTION_BLACKLIST_SENDSETUP) {
7687 + if (is_blacklisted(ifNum, OPTION_BLACKLIST_SENDSETUP,
7688 + (struct option_blacklist_info *) intfdata->private)) {
7689 dbg("No send_setup on blacklisted interface #%d\n", ifNum);
7690 return -EIO;
7691 }
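The conversion above replaces the old per-device blacklist arrays (infolen/ifaceinfo/reason) with two bitmasks, sendsetup and reserved, so a single option_blacklist_info can say both "skip send_setup on these interfaces" and "do not bind these interfaces at all", and the device-table entries stay one-liners. The following standalone sketch (plain C, names of my own invention, not the driver's) models the lookup that is_blacklisted() performs with for_each_set_bit():

    #include <stdbool.h>
    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    struct blacklist_info {
        unsigned long sendsetup;   /* interfaces that must not get send_setup */
        unsigned long reserved;    /* interfaces the driver must not bind */
    };

    /* Mirrors the MF626 entry: no send_setup on 0 and 1, skip interface 4. */
    static const struct blacklist_info mf626 = {
        .sendsetup = BIT(0) | BIT(1),
        .reserved  = BIT(4),
    };

    static bool ifnum_in_mask(unsigned long mask, unsigned int ifnum)
    {
        return ifnum < sizeof(mask) * 8 && (mask & BIT(ifnum)) != 0;
    }

    int main(void)
    {
        for (unsigned int ifnum = 0; ifnum <= 5; ifnum++)
            printf("if%u: sendsetup-blocked=%d reserved=%d\n", ifnum,
                   ifnum_in_mask(mf626.sendsetup, ifnum),
                   ifnum_in_mask(mf626.reserved, ifnum));
        return 0;
    }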
7692 diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
7693 index 1d33260..614fabc 100644
7694 --- a/drivers/usb/serial/pl2303.c
7695 +++ b/drivers/usb/serial/pl2303.c
7696 @@ -92,6 +92,7 @@ static const struct usb_device_id id_table[] = {
7697 { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
7698 { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
7699 { USB_DEVICE(WINCHIPHEAD_VENDOR_ID, WINCHIPHEAD_USBSER_PRODUCT_ID) },
7700 + { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
7701 { } /* Terminating entry */
7702 };
7703
7704 diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
7705 index ca0d237..3d10d7f 100644
7706 --- a/drivers/usb/serial/pl2303.h
7707 +++ b/drivers/usb/serial/pl2303.h
7708 @@ -148,3 +148,8 @@
7709 /* WinChipHead USB->RS 232 adapter */
7710 #define WINCHIPHEAD_VENDOR_ID 0x4348
7711 #define WINCHIPHEAD_USBSER_PRODUCT_ID 0x5523
7712 +
7713 +/* SMART USB Serial Adapter */
7714 +#define SMART_VENDOR_ID 0x0b8c
7715 +#define SMART_PRODUCT_ID 0x2303
7716 +
7717 diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
7718 index aeccc7f..b9bb247 100644
7719 --- a/drivers/usb/serial/qcserial.c
7720 +++ b/drivers/usb/serial/qcserial.c
7721 @@ -28,6 +28,7 @@ static const struct usb_device_id id_table[] = {
7722 {USB_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
7723 {USB_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
7724 {USB_DEVICE(0x03f0, 0x201d)}, /* HP un2400 Gobi QDL Device */
7725 + {USB_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
7726 {USB_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
7727 {USB_DEVICE(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */
7728 {USB_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */
7729 @@ -84,6 +85,7 @@ static const struct usb_device_id id_table[] = {
7730 {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
7731 {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */
7732 {USB_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
7733 + {USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
7734 { } /* Terminating entry */
7735 };
7736 MODULE_DEVICE_TABLE(usb, id_table);
7737 diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
7738 index bedc4b9..fe2d803 100644
7739 --- a/drivers/usb/storage/Kconfig
7740 +++ b/drivers/usb/storage/Kconfig
7741 @@ -42,7 +42,7 @@ config USB_STORAGE_REALTEK
7742
7743 config REALTEK_AUTOPM
7744 bool "Realtek Card Reader autosuspend support"
7745 - depends on USB_STORAGE_REALTEK && CONFIG_PM_RUNTIME
7746 + depends on USB_STORAGE_REALTEK && PM_RUNTIME
7747 default y
7748
7749 config USB_STORAGE_DATAFAB
7750 diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
7751 index 34adc4b..232167a 100644
7752 --- a/drivers/usb/storage/realtek_cr.c
7753 +++ b/drivers/usb/storage/realtek_cr.c
7754 @@ -320,6 +320,11 @@ static int rts51x_read_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
7755 {
7756 int retval;
7757 u8 cmnd[12] = { 0 };
7758 + u8 *buf;
7759 +
7760 + buf = kmalloc(len, GFP_NOIO);
7761 + if (buf == NULL)
7762 + return USB_STOR_TRANSPORT_ERROR;
7763
7764 US_DEBUGP("%s, addr = 0x%x, len = %d\n", __func__, addr, len);
7765
7766 @@ -331,10 +336,14 @@ static int rts51x_read_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
7767 cmnd[5] = (u8) len;
7768
7769 retval = rts51x_bulk_transport(us, 0, cmnd, 12,
7770 - data, len, DMA_FROM_DEVICE, NULL);
7771 - if (retval != USB_STOR_TRANSPORT_GOOD)
7772 + buf, len, DMA_FROM_DEVICE, NULL);
7773 + if (retval != USB_STOR_TRANSPORT_GOOD) {
7774 + kfree(buf);
7775 return -EIO;
7776 + }
7777
7778 + memcpy(data, buf, len);
7779 + kfree(buf);
7780 return 0;
7781 }
7782
7783 @@ -342,6 +351,12 @@ static int rts51x_write_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
7784 {
7785 int retval;
7786 u8 cmnd[12] = { 0 };
7787 + u8 *buf;
7788 +
7789 + buf = kmalloc(len, GFP_NOIO);
7790 + if (buf == NULL)
7791 + return USB_STOR_TRANSPORT_ERROR;
7792 + memcpy(buf, data, len);
7793
7794 US_DEBUGP("%s, addr = 0x%x, len = %d\n", __func__, addr, len);
7795
7796 @@ -353,7 +368,8 @@ static int rts51x_write_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
7797 cmnd[5] = (u8) len;
7798
7799 retval = rts51x_bulk_transport(us, 0, cmnd, 12,
7800 - data, len, DMA_TO_DEVICE, NULL);
7801 + buf, len, DMA_TO_DEVICE, NULL);
7802 + kfree(buf);
7803 if (retval != USB_STOR_TRANSPORT_GOOD)
7804 return -EIO;
7805
7806 @@ -365,6 +381,11 @@ static int rts51x_read_status(struct us_data *us,
7807 {
7808 int retval;
7809 u8 cmnd[12] = { 0 };
7810 + u8 *buf;
7811 +
7812 + buf = kmalloc(len, GFP_NOIO);
7813 + if (buf == NULL)
7814 + return USB_STOR_TRANSPORT_ERROR;
7815
7816 US_DEBUGP("%s, lun = %d\n", __func__, lun);
7817
7818 @@ -372,10 +393,14 @@ static int rts51x_read_status(struct us_data *us,
7819 cmnd[1] = 0x09;
7820
7821 retval = rts51x_bulk_transport(us, lun, cmnd, 12,
7822 - status, len, DMA_FROM_DEVICE, actlen);
7823 - if (retval != USB_STOR_TRANSPORT_GOOD)
7824 + buf, len, DMA_FROM_DEVICE, actlen);
7825 + if (retval != USB_STOR_TRANSPORT_GOOD) {
7826 + kfree(buf);
7827 return -EIO;
7828 + }
7829
7830 + memcpy(status, buf, len);
7831 + kfree(buf);
7832 return 0;
7833 }
7834
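The three rts51x helpers above used to hand the caller's buffer straight to the bulk transfer; that buffer can sit on the stack or in otherwise non-DMA-safe memory, which is what the switch to a kmalloc'd bounce buffer (GFP_NOIO, since this runs in the storage I/O path) fixes. Roughly, each helper now follows the pattern sketched below; rts51x_bulk_transport() and the USB_STOR_* codes are the driver's own, the wrapper name is made up:

    /* Illustrative only: read into a DMA-safe bounce buffer, then copy out. */
    static int read_via_bounce(struct us_data *us, u8 *cmnd, u8 *dest, u16 len)
    {
        u8 *buf;
        int ret;

        buf = kmalloc(len, GFP_NOIO);      /* no FS/IO recursion during reclaim */
        if (!buf)
            return USB_STOR_TRANSPORT_ERROR;

        ret = rts51x_bulk_transport(us, 0, cmnd, 12,
                                    buf, len, DMA_FROM_DEVICE, NULL);
        if (ret == USB_STOR_TRANSPORT_GOOD)
            memcpy(dest, buf, len);        /* copy out only on success */

        kfree(buf);
        return ret == USB_STOR_TRANSPORT_GOOD ? 0 : -EIO;
    }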
7835 diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
7836 index e8ae21b..ff32390 100644
7837 --- a/drivers/usb/storage/transport.c
7838 +++ b/drivers/usb/storage/transport.c
7839 @@ -691,6 +691,9 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
7840 int temp_result;
7841 struct scsi_eh_save ses;
7842 int sense_size = US_SENSE_SIZE;
7843 + struct scsi_sense_hdr sshdr;
7844 + const u8 *scdd;
7845 + u8 fm_ili;
7846
7847 /* device supports and needs bigger sense buffer */
7848 if (us->fflags & US_FL_SANE_SENSE)
7849 @@ -774,32 +777,30 @@ Retry_Sense:
7850 srb->sense_buffer[7] = (US_SENSE_SIZE - 8);
7851 }
7852
7853 + scsi_normalize_sense(srb->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7854 + &sshdr);
7855 +
7856 US_DEBUGP("-- Result from auto-sense is %d\n", temp_result);
7857 US_DEBUGP("-- code: 0x%x, key: 0x%x, ASC: 0x%x, ASCQ: 0x%x\n",
7858 - srb->sense_buffer[0],
7859 - srb->sense_buffer[2] & 0xf,
7860 - srb->sense_buffer[12],
7861 - srb->sense_buffer[13]);
7862 + sshdr.response_code, sshdr.sense_key,
7863 + sshdr.asc, sshdr.ascq);
7864 #ifdef CONFIG_USB_STORAGE_DEBUG
7865 - usb_stor_show_sense(
7866 - srb->sense_buffer[2] & 0xf,
7867 - srb->sense_buffer[12],
7868 - srb->sense_buffer[13]);
7869 + usb_stor_show_sense(sshdr.sense_key, sshdr.asc, sshdr.ascq);
7870 #endif
7871
7872 /* set the result so the higher layers expect this data */
7873 srb->result = SAM_STAT_CHECK_CONDITION;
7874
7875 + scdd = scsi_sense_desc_find(srb->sense_buffer,
7876 + SCSI_SENSE_BUFFERSIZE, 4);
7877 + fm_ili = (scdd ? scdd[3] : srb->sense_buffer[2]) & 0xA0;
7878 +
7879 /* We often get empty sense data. This could indicate that
7880 * everything worked or that there was an unspecified
7881 * problem. We have to decide which.
7882 */
7883 - if ( /* Filemark 0, ignore EOM, ILI 0, no sense */
7884 - (srb->sense_buffer[2] & 0xaf) == 0 &&
7885 - /* No ASC or ASCQ */
7886 - srb->sense_buffer[12] == 0 &&
7887 - srb->sense_buffer[13] == 0) {
7888 -
7889 + if (sshdr.sense_key == 0 && sshdr.asc == 0 && sshdr.ascq == 0 &&
7890 + fm_ili == 0) {
7891 /* If things are really okay, then let's show that.
7892 * Zero out the sense buffer so the higher layers
7893 * won't realize we did an unsolicited auto-sense.
7894 @@ -814,7 +815,10 @@ Retry_Sense:
7895 */
7896 } else {
7897 srb->result = DID_ERROR << 16;
7898 - srb->sense_buffer[2] = HARDWARE_ERROR;
7899 + if ((sshdr.response_code & 0x72) == 0x72)
7900 + srb->sense_buffer[1] = HARDWARE_ERROR;
7901 + else
7902 + srb->sense_buffer[2] = HARDWARE_ERROR;
7903 }
7904 }
7905 }
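Rather than reading sense bytes at fixed offsets (valid only for fixed-format sense data), the auto-sense path now feeds the buffer through scsi_normalize_sense(), which fills a scsi_sense_hdr for both fixed (0x70/0x71) and descriptor (0x72/0x73) formats, and uses scsi_sense_desc_find() to locate the stream-commands descriptor (type 4) that carries the FILEMARK/ILI bits. A condensed, kernel-context sketch of that check (helper name invented, needs <scsi/scsi_eh.h>):

    /* Does the auto-sense data really report "no error"? */
    static bool sense_is_empty(const u8 *sense, int len)
    {
        struct scsi_sense_hdr sshdr;
        const u8 *desc;
        u8 fm_ili;

        if (!scsi_normalize_sense(sense, len, &sshdr))
            return false;               /* unparsable, treat as an error */

        /* descriptor format: FM/EOM/ILI live in descriptor type 4, byte 3;
         * fixed format: they live in byte 2 of the sense buffer */
        desc = scsi_sense_desc_find(sense, len, 4);
        fm_ili = (desc ? desc[3] : sense[2]) & 0xA0;

        return sshdr.sense_key == 0 && sshdr.asc == 0 &&
               sshdr.ascq == 0 && fm_ili == 0;
    }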
7906 diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
7907 index 0ca0958..c325e69 100644
7908 --- a/drivers/usb/storage/usb.c
7909 +++ b/drivers/usb/storage/usb.c
7910 @@ -831,12 +831,22 @@ static int usb_stor_scan_thread(void * __us)
7911
7912 dev_dbg(dev, "device found\n");
7913
7914 - set_freezable();
7915 - /* Wait for the timeout to expire or for a disconnect */
7916 + set_freezable_with_signal();
7917 + /*
7918 + * Wait for the timeout to expire or for a disconnect
7919 + *
7920 + * We can't freeze in this thread or we risk causing khubd to
7921 + * fail to freeze, but we can't be non-freezable either. Nor can
7922 + * khubd freeze while waiting for scanning to complete as it may
7923 + * hold the device lock, causing a hang when suspending devices.
7924 + * So we request a fake signal when freezing and use
7925 + * interruptible sleep to kick us out of our wait early when
7926 + * freezing happens.
7927 + */
7928 if (delay_use > 0) {
7929 dev_dbg(dev, "waiting for device to settle "
7930 "before scanning\n");
7931 - wait_event_freezable_timeout(us->delay_wait,
7932 + wait_event_interruptible_timeout(us->delay_wait,
7933 test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
7934 delay_use * HZ);
7935 }
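The comment added above explains the constraint: the scan thread must not be a freeze point (khubd may be waiting on it while holding the device lock), yet it still has to stop waiting promptly when a freeze starts. set_freezable_with_signal() plus an interruptible wait gives exactly that: the freezer delivers a fake signal and the sleep returns early. A generic kernel-thread sketch of the same idea (the state struct and names are invented):

    struct example_state {
        wait_queue_head_t delay_wait;
        unsigned long flags;            /* bit 0: stop waiting, start scanning */
    };

    static int example_scan_thread(void *arg)
    {
        struct example_state *st = arg;

        set_freezable_with_signal();    /* freezer kicks us via a fake signal */

        /* returns early on the flag, on timeout, or when the fake signal lands */
        wait_event_interruptible_timeout(st->delay_wait,
                                         test_bit(0, &st->flags),
                                         5 * HZ);

        /* ... perform the actual scan ... */
        return 0;
    }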
7936 diff --git a/drivers/video/carminefb.c b/drivers/video/carminefb.c
7937 index caaa27d..cb09aa1f 100644
7938 --- a/drivers/video/carminefb.c
7939 +++ b/drivers/video/carminefb.c
7940 @@ -32,11 +32,11 @@
7941 #define CARMINEFB_DEFAULT_VIDEO_MODE 1
7942
7943 static unsigned int fb_mode = CARMINEFB_DEFAULT_VIDEO_MODE;
7944 -module_param(fb_mode, uint, 444);
7945 +module_param(fb_mode, uint, 0444);
7946 MODULE_PARM_DESC(fb_mode, "Initial video mode as integer.");
7947
7948 static char *fb_mode_str;
7949 -module_param(fb_mode_str, charp, 444);
7950 +module_param(fb_mode_str, charp, 0444);
7951 MODULE_PARM_DESC(fb_mode_str, "Initial video mode in characters.");
7952
7953 /*
7954 @@ -46,7 +46,7 @@ MODULE_PARM_DESC(fb_mode_str, "Initial video mode in characters.");
7955 * 0b010 Display 1
7956 */
7957 static int fb_displays = CARMINE_USE_DISPLAY0 | CARMINE_USE_DISPLAY1;
7958 -module_param(fb_displays, int, 444);
7959 +module_param(fb_displays, int, 0444);
7960 MODULE_PARM_DESC(fb_displays, "Bit mode, which displays are used");
7961
7962 struct carmine_hw {
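The third argument of module_param() is a sysfs permission mask and is meant to be octal: plain 444 is decimal, i.e. 0674, which would make the attribute group-writable (and set an exec bit), while the intended value is 0444, read-only for everyone. In other words:

    /* 0444 == S_IRUGO: visible in /sys/module/<name>/parameters, not writable. */
    static unsigned int fb_mode = 1;
    module_param(fb_mode, uint, 0444);
    MODULE_PARM_DESC(fb_mode, "Initial video mode as integer.");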
7963 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
7964 index 5aac00e..ad93629 100644
7965 --- a/drivers/video/fbmem.c
7966 +++ b/drivers/video/fbmem.c
7967 @@ -1738,8 +1738,6 @@ void fb_set_suspend(struct fb_info *info, int state)
7968 {
7969 struct fb_event event;
7970
7971 - if (!lock_fb_info(info))
7972 - return;
7973 event.info = info;
7974 if (state) {
7975 fb_notifier_call_chain(FB_EVENT_SUSPEND, &event);
7976 @@ -1748,7 +1746,6 @@ void fb_set_suspend(struct fb_info *info, int state)
7977 info->state = FBINFO_STATE_RUNNING;
7978 fb_notifier_call_chain(FB_EVENT_RESUME, &event);
7979 }
7980 - unlock_fb_info(info);
7981 }
7982
7983 /**
7984 diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
7985 index 04251ce..67afa9c 100644
7986 --- a/drivers/video/fbsysfs.c
7987 +++ b/drivers/video/fbsysfs.c
7988 @@ -399,9 +399,12 @@ static ssize_t store_fbstate(struct device *device,
7989
7990 state = simple_strtoul(buf, &last, 0);
7991
7992 + if (!lock_fb_info(fb_info))
7993 + return -ENODEV;
7994 console_lock();
7995 fb_set_suspend(fb_info, (int)state);
7996 console_unlock();
7997 + unlock_fb_info(fb_info);
7998
7999 return count;
8000 }
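Taken together, the fbmem.c and fbsysfs.c hunks move lock_fb_info() out of fb_set_suspend() and into its callers, so the lock order is always lock_fb_info() first, console_lock() second (the sh_mobile_hdmi hunks below do the same on their side). A caller-side sketch of the resulting pattern:

    /* Suspend a framebuffer with the locking order the patch establishes. */
    static int example_fb_suspend(struct fb_info *info)
    {
        if (!lock_fb_info(info))        /* info may already be unregistered */
            return -ENODEV;

        console_lock();
        fb_set_suspend(info, 1);        /* notifier chain runs under both locks */
        console_unlock();

        unlock_fb_info(info);
        return 0;
    }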
8001 diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
8002 index 7d54e2c..647ba98 100644
8003 --- a/drivers/video/sh_mobile_hdmi.c
8004 +++ b/drivers/video/sh_mobile_hdmi.c
8005 @@ -1111,6 +1111,7 @@ static long sh_hdmi_clk_configure(struct sh_hdmi *hdmi, unsigned long hdmi_rate,
8006 static void sh_hdmi_edid_work_fn(struct work_struct *work)
8007 {
8008 struct sh_hdmi *hdmi = container_of(work, struct sh_hdmi, edid_work.work);
8009 + struct fb_info *info;
8010 struct sh_mobile_hdmi_info *pdata = hdmi->dev->platform_data;
8011 struct sh_mobile_lcdc_chan *ch;
8012 int ret;
8013 @@ -1123,8 +1124,9 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
8014
8015 mutex_lock(&hdmi->mutex);
8016
8017 + info = hdmi->info;
8018 +
8019 if (hdmi->hp_state == HDMI_HOTPLUG_CONNECTED) {
8020 - struct fb_info *info = hdmi->info;
8021 unsigned long parent_rate = 0, hdmi_rate;
8022
8023 ret = sh_hdmi_read_edid(hdmi, &hdmi_rate, &parent_rate);
8024 @@ -1148,42 +1150,45 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
8025
8026 ch = info->par;
8027
8028 - console_lock();
8029 + if (lock_fb_info(info)) {
8030 + console_lock();
8031
8032 - /* HDMI plug in */
8033 - if (!sh_hdmi_must_reconfigure(hdmi) &&
8034 - info->state == FBINFO_STATE_RUNNING) {
8035 - /*
8036 - * First activation with the default monitor - just turn
8037 - * on, if we run a resume here, the logo disappears
8038 - */
8039 - if (lock_fb_info(info)) {
8040 + /* HDMI plug in */
8041 + if (!sh_hdmi_must_reconfigure(hdmi) &&
8042 + info->state == FBINFO_STATE_RUNNING) {
8043 + /*
8044 + * First activation with the default monitor - just turn
8045 + * on, if we run a resume here, the logo disappears
8046 + */
8047 info->var.width = hdmi->var.width;
8048 info->var.height = hdmi->var.height;
8049 sh_hdmi_display_on(hdmi, info);
8050 - unlock_fb_info(info);
8051 + } else {
8052 + /* New monitor or have to wake up */
8053 + fb_set_suspend(info, 0);
8054 }
8055 - } else {
8056 - /* New monitor or have to wake up */
8057 - fb_set_suspend(info, 0);
8058 - }
8059
8060 - console_unlock();
8061 + console_unlock();
8062 + unlock_fb_info(info);
8063 + }
8064 } else {
8065 ret = 0;
8066 - if (!hdmi->info)
8067 + if (!info)
8068 goto out;
8069
8070 hdmi->monspec.modedb_len = 0;
8071 fb_destroy_modedb(hdmi->monspec.modedb);
8072 hdmi->monspec.modedb = NULL;
8073
8074 - console_lock();
8075 + if (lock_fb_info(info)) {
8076 + console_lock();
8077
8078 - /* HDMI disconnect */
8079 - fb_set_suspend(hdmi->info, 1);
8080 + /* HDMI disconnect */
8081 + fb_set_suspend(info, 1);
8082
8083 - console_unlock();
8084 + console_unlock();
8085 + unlock_fb_info(info);
8086 + }
8087 }
8088
8089 out:
8090 diff --git a/drivers/video/via/via_modesetting.h b/drivers/video/via/via_modesetting.h
8091 index ae35cfd..0138845 100644
8092 --- a/drivers/video/via/via_modesetting.h
8093 +++ b/drivers/video/via/via_modesetting.h
8094 @@ -28,6 +28,11 @@
8095
8096 #include <linux/types.h>
8097
8098 +
8099 +#define VIA_PITCH_SIZE (1<<3)
8100 +#define VIA_PITCH_MAX 0x3FF8
8101 +
8102 +
8103 void via_set_primary_address(u32 addr);
8104 void via_set_secondary_address(u32 addr);
8105 void via_set_primary_pitch(u32 pitch);
8106 diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
8107 index 53aa443..09fa57c 100644
8108 --- a/drivers/video/via/viafbdev.c
8109 +++ b/drivers/video/via/viafbdev.c
8110 @@ -151,7 +151,8 @@ static void viafb_update_fix(struct fb_info *info)
8111
8112 info->fix.visual =
8113 bpp == 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
8114 - info->fix.line_length = (info->var.xres_virtual * bpp / 8 + 7) & ~7;
8115 + info->fix.line_length = ALIGN(info->var.xres_virtual * bpp / 8,
8116 + VIA_PITCH_SIZE);
8117 }
8118
8119 static void viafb_setup_fixinfo(struct fb_fix_screeninfo *fix,
8120 @@ -238,8 +239,12 @@ static int viafb_check_var(struct fb_var_screeninfo *var,
8121 depth = 24;
8122
8123 viafb_fill_var_color_info(var, depth);
8124 - line = (var->xres_virtual * var->bits_per_pixel / 8 + 7) & ~7;
8125 - if (line * var->yres_virtual > ppar->memsize)
8126 + if (var->xres_virtual < var->xres)
8127 + var->xres_virtual = var->xres;
8128 +
8129 + line = ALIGN(var->xres_virtual * var->bits_per_pixel / 8,
8130 + VIA_PITCH_SIZE);
8131 + if (line > VIA_PITCH_MAX || line * var->yres_virtual > ppar->memsize)
8132 return -EINVAL;
8133
8134 /* Based on var passed in to calculate the refresh,
8135 @@ -348,8 +353,9 @@ static int viafb_pan_display(struct fb_var_screeninfo *var,
8136 struct fb_info *info)
8137 {
8138 struct viafb_par *viapar = info->par;
8139 - u32 vram_addr = (var->yoffset * var->xres_virtual + var->xoffset)
8140 - * (var->bits_per_pixel / 8) + viapar->vram_addr;
8141 + u32 vram_addr = viapar->vram_addr
8142 + + var->yoffset * info->fix.line_length
8143 + + var->xoffset * info->var.bits_per_pixel / 8;
8144
8145 DEBUG_MSG(KERN_DEBUG "viafb_pan_display, address = %d\n", vram_addr);
8146 if (!viafb_dual_fb) {
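The new constants encode the hardware limits: the scanline pitch must be a multiple of 8 bytes (VIA_PITCH_SIZE) and may not exceed 0x3FF8 (VIA_PITCH_MAX), so the open-coded "(x + 7) & ~7" becomes ALIGN(), check_var() rejects pitches over the cap, and panning computes the start address from the already-aligned fix.line_length instead of re-deriving it from xres_virtual. A standalone arithmetic sketch (values are only examples):

    #include <stdio.h>

    #define VIA_PITCH_SIZE (1 << 3)             /* pitch granularity: 8 bytes */
    #define VIA_PITCH_MAX 0x3FF8
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned int xres_virtual = 1366, bpp = 24;
        unsigned int line = ALIGN_UP(xres_virtual * bpp / 8, VIA_PITCH_SIZE);
        unsigned int xoffset = 8, yoffset = 100, vram_base = 0;

        printf("line_length = %u bytes (cap %u)\n", line, VIA_PITCH_MAX);  /* 4104 */
        printf("pan start   = %u\n",
               vram_base + yoffset * line + xoffset * bpp / 8);            /* 410424 */
        return 0;
    }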
8147 diff --git a/drivers/w1/slaves/w1_ds2780.c b/drivers/w1/slaves/w1_ds2780.c
8148 index 274c8f3..505b17d 100644
8149 --- a/drivers/w1/slaves/w1_ds2780.c
8150 +++ b/drivers/w1/slaves/w1_ds2780.c
8151 @@ -26,20 +26,14 @@
8152 #include "../w1_family.h"
8153 #include "w1_ds2780.h"
8154
8155 -int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
8156 - int io)
8157 +static int w1_ds2780_do_io(struct device *dev, char *buf, int addr,
8158 + size_t count, int io)
8159 {
8160 struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
8161
8162 - if (!dev)
8163 - return -ENODEV;
8164 + if (addr > DS2780_DATA_SIZE || addr < 0)
8165 + return 0;
8166
8167 - mutex_lock(&sl->master->mutex);
8168 -
8169 - if (addr > DS2780_DATA_SIZE || addr < 0) {
8170 - count = 0;
8171 - goto out;
8172 - }
8173 count = min_t(int, count, DS2780_DATA_SIZE - addr);
8174
8175 if (w1_reset_select_slave(sl) == 0) {
8176 @@ -47,7 +41,6 @@ int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
8177 w1_write_8(sl->master, W1_DS2780_WRITE_DATA);
8178 w1_write_8(sl->master, addr);
8179 w1_write_block(sl->master, buf, count);
8180 - /* XXX w1_write_block returns void, not n_written */
8181 } else {
8182 w1_write_8(sl->master, W1_DS2780_READ_DATA);
8183 w1_write_8(sl->master, addr);
8184 @@ -55,13 +48,42 @@ int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
8185 }
8186 }
8187
8188 -out:
8189 + return count;
8190 +}
8191 +
8192 +int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
8193 + int io)
8194 +{
8195 + struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
8196 + int ret;
8197 +
8198 + if (!dev)
8199 + return -ENODEV;
8200 +
8201 + mutex_lock(&sl->master->mutex);
8202 +
8203 + ret = w1_ds2780_do_io(dev, buf, addr, count, io);
8204 +
8205 mutex_unlock(&sl->master->mutex);
8206
8207 - return count;
8208 + return ret;
8209 }
8210 EXPORT_SYMBOL(w1_ds2780_io);
8211
8212 +int w1_ds2780_io_nolock(struct device *dev, char *buf, int addr, size_t count,
8213 + int io)
8214 +{
8215 + int ret;
8216 +
8217 + if (!dev)
8218 + return -ENODEV;
8219 +
8220 + ret = w1_ds2780_do_io(dev, buf, addr, count, io);
8221 +
8222 + return ret;
8223 +}
8224 +EXPORT_SYMBOL(w1_ds2780_io_nolock);
8225 +
8226 int w1_ds2780_eeprom_cmd(struct device *dev, int addr, int cmd)
8227 {
8228 struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
8229 diff --git a/drivers/w1/slaves/w1_ds2780.h b/drivers/w1/slaves/w1_ds2780.h
8230 index a1fba79..7373793 100644
8231 --- a/drivers/w1/slaves/w1_ds2780.h
8232 +++ b/drivers/w1/slaves/w1_ds2780.h
8233 @@ -124,6 +124,8 @@
8234
8235 extern int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
8236 int io);
8237 +extern int w1_ds2780_io_nolock(struct device *dev, char *buf, int addr,
8238 + size_t count, int io);
8239 extern int w1_ds2780_eeprom_cmd(struct device *dev, int addr, int cmd);
8240
8241 #endif /* !_W1_DS2780_H */
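Splitting the transfer body into w1_ds2780_do_io() lets the existing w1_ds2780_io() keep taking the master mutex while the new w1_ds2780_io_nolock() serves callers that already hold it. The general locked/lockless wrapper shape, reduced to a sketch (the transfer body is elided; names other than the w1 types are invented):

    static int do_transfer(struct w1_slave *sl, char *buf, int addr, size_t count)
    {
        /* caller guarantees sl->master->mutex is held */
        /* ... actual 1-Wire traffic ... */
        return (int)count;
    }

    int transfer_locked(struct w1_slave *sl, char *buf, int addr, size_t count)
    {
        int ret;

        mutex_lock(&sl->master->mutex);
        ret = do_transfer(sl, buf, addr, count);
        mutex_unlock(&sl->master->mutex);
        return ret;
    }

    int transfer_nolock(struct w1_slave *sl, char *buf, int addr, size_t count)
    {
        /* only for contexts that already hold sl->master->mutex */
        return do_transfer(sl, buf, addr, count);
    }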
8242 diff --git a/drivers/xen/events.c b/drivers/xen/events.c
8243 index 7523719..44490de 100644
8244 --- a/drivers/xen/events.c
8245 +++ b/drivers/xen/events.c
8246 @@ -1021,7 +1021,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
8247 if (irq < 0)
8248 return irq;
8249
8250 - irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
8251 + irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
8252 retval = request_irq(irq, handler, irqflags, devname, dev_id);
8253 if (retval != 0) {
8254 unbind_from_irq(irq);
8255 diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
8256 index 6e8c15a..84f317e 100644
8257 --- a/drivers/xen/swiotlb-xen.c
8258 +++ b/drivers/xen/swiotlb-xen.c
8259 @@ -278,9 +278,10 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
8260 /*
8261 * Ensure that the address returned is DMA'ble
8262 */
8263 - if (!dma_capable(dev, dev_addr, size))
8264 - panic("map_single: bounce buffer is not DMA'ble");
8265 -
8266 + if (!dma_capable(dev, dev_addr, size)) {
8267 + swiotlb_tbl_unmap_single(dev, map, size, dir);
8268 + dev_addr = 0;
8269 + }
8270 return dev_addr;
8271 }
8272 EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
8273 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
8274 index dd0fdfc..21ac5ee 100644
8275 --- a/fs/binfmt_elf.c
8276 +++ b/fs/binfmt_elf.c
8277 @@ -795,7 +795,16 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
8278 * might try to exec. This is because the brk will
8279 * follow the loader, and is not movable. */
8280 #if defined(CONFIG_X86) || defined(CONFIG_ARM)
8281 - load_bias = 0;
8282 + /* Memory randomization might have been switched off
8283 + * in runtime via sysctl.
8284 + * If that is the case, retain the original non-zero
8285 + * load_bias value in order to establish proper
8286 + * non-randomized mappings.
8287 + */
8288 + if (current->flags & PF_RANDOMIZE)
8289 + load_bias = 0;
8290 + else
8291 + load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
8292 #else
8293 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
8294 #endif
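For ET_DYN binaries on x86/ARM the kernel normally sets load_bias to 0 so mmap() can choose (and randomize) the base and the brk ends up after the loader; when randomization has been disabled at runtime, PF_RANDOMIZE is clear and the code now falls back to the classic fixed base derived from ELF_ET_DYN_BASE. The arithmetic is just page truncation; a standalone illustration with made-up values:

    #include <stdio.h>

    #define ELF_PAGESIZE     4096UL
    #define ELF_PAGESTART(a) ((a) & ~(ELF_PAGESIZE - 1))
    #define ELF_ET_DYN_BASE  0x00400000UL        /* illustrative, not any real arch */

    int main(void)
    {
        unsigned long vaddr = 0x1234;            /* p_vaddr of the first PT_LOAD */
        int randomize = 0;                       /* what PF_RANDOMIZE would say */
        unsigned long load_bias;

        if (randomize)
            load_bias = 0;                       /* let mmap pick a randomized base */
        else
            load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);

        printf("load_bias = %#lx, first segment mapped at %#lx\n",
               load_bias, load_bias + ELF_PAGESTART(vaddr));
        return 0;
    }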
8295 diff --git a/fs/block_dev.c b/fs/block_dev.c
8296 index 95f786e..1c44b8d 100644
8297 --- a/fs/block_dev.c
8298 +++ b/fs/block_dev.c
8299 @@ -1085,6 +1085,7 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
8300 static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
8301 {
8302 struct gendisk *disk;
8303 + struct module *owner;
8304 int ret;
8305 int partno;
8306 int perm = 0;
8307 @@ -1110,6 +1111,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
8308 disk = get_gendisk(bdev->bd_dev, &partno);
8309 if (!disk)
8310 goto out;
8311 + owner = disk->fops->owner;
8312
8313 disk_block_events(disk);
8314 mutex_lock_nested(&bdev->bd_mutex, for_part);
8315 @@ -1137,8 +1139,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
8316 bdev->bd_disk = NULL;
8317 mutex_unlock(&bdev->bd_mutex);
8318 disk_unblock_events(disk);
8319 - module_put(disk->fops->owner);
8320 put_disk(disk);
8321 + module_put(owner);
8322 goto restart;
8323 }
8324 }
8325 @@ -1194,8 +1196,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
8326 goto out_unlock_bdev;
8327 }
8328 /* only one opener holds refs to the module and disk */
8329 - module_put(disk->fops->owner);
8330 put_disk(disk);
8331 + module_put(owner);
8332 }
8333 bdev->bd_openers++;
8334 if (for_part)
8335 @@ -1215,8 +1217,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
8336 out_unlock_bdev:
8337 mutex_unlock(&bdev->bd_mutex);
8338 disk_unblock_events(disk);
8339 - module_put(disk->fops->owner);
8340 put_disk(disk);
8341 + module_put(owner);
8342 out:
8343 bdput(bdev);
8344
8345 @@ -1442,14 +1444,15 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
8346 if (!bdev->bd_openers) {
8347 struct module *owner = disk->fops->owner;
8348
8349 - put_disk(disk);
8350 - module_put(owner);
8351 disk_put_part(bdev->bd_part);
8352 bdev->bd_part = NULL;
8353 bdev->bd_disk = NULL;
8354 if (bdev != bdev->bd_contains)
8355 victim = bdev->bd_contains;
8356 bdev->bd_contains = NULL;
8357 +
8358 + put_disk(disk);
8359 + module_put(owner);
8360 }
8361 mutex_unlock(&bdev->bd_mutex);
8362 bdput(bdev);
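The reordering in this file is about object lifetime: put_disk() can drop the last reference to the gendisk, and disk->fops lives inside it, so dereferencing disk->fops->owner afterwards (or letting the owning module go away before the gendisk is released) risks a use-after-free. Hence the owner pointer is read once up front and module_put() is issued only after the final put_disk(). Schematically:

    /* Release ordering the patch enforces (sketch, not the real function). */
    static void example_release(struct gendisk *disk)
    {
        struct module *owner = disk->fops->owner;   /* read before any put */

        /* ... drop partition/inode references that live inside the disk ... */

        put_disk(disk);     /* may free the gendisk and everything it points to */
        module_put(owner);  /* only now may the driver module be unloaded */
    }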
8363 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
8364 index 71beb02..28bba57 100644
8365 --- a/fs/cifs/connect.c
8366 +++ b/fs/cifs/connect.c
8367 @@ -2807,10 +2807,10 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
8368
8369 /*
8370 * When the server doesn't allow large posix writes, only allow a wsize of
8371 - * 128k minus the size of the WRITE_AND_X header. That allows for a write up
8372 + * 2^17-1 minus the size of the WRITE_AND_X header. That allows for a write up
8373 * to the maximum size described by RFC1002.
8374 */
8375 -#define CIFS_MAX_RFC1002_WSIZE (128 * 1024 - sizeof(WRITE_REQ) + 4)
8376 +#define CIFS_MAX_RFC1002_WSIZE ((1<<17) - 1 - sizeof(WRITE_REQ) + 4)
8377
8378 /*
8379 * The default wsize is 1M. find_get_pages seems to return a maximum of 256
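RFC 1002 frames every SMB in a session message whose length field is 17 bits, so a frame can carry at most 2^17 - 1 = 131071 bytes; the old 128 KiB-based constant allowed one byte more than that. A quick arithmetic check (the real sizeof(WRITE_REQ) comes from cifspdu.h, the value below is only a stand-in):

    #include <stdio.h>

    int main(void)
    {
        unsigned int write_req_size = 63;   /* stand-in for sizeof(WRITE_REQ) */

        unsigned int old_cap = 128 * 1024 - write_req_size + 4;      /* 131013 */
        unsigned int new_cap = (1 << 17) - 1 - write_req_size + 4;   /* 131012 */

        printf("RFC 1002 frame limit: %u\n", (1 << 17) - 1);         /* 131071 */
        printf("old wsize cap %u -> frame %u (over by 1)\n",
               old_cap, old_cap + write_req_size - 4);
        printf("new wsize cap %u -> frame %u\n",
               new_cap, new_cap + write_req_size - 4);
        return 0;
    }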
8380 diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
8381 index a7b2dcd..745e5cd 100644
8382 --- a/fs/cifs/inode.c
8383 +++ b/fs/cifs/inode.c
8384 @@ -562,7 +562,16 @@ int cifs_get_file_info(struct file *filp)
8385
8386 xid = GetXid();
8387 rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data);
8388 - if (rc == -EOPNOTSUPP || rc == -EINVAL) {
8389 + switch (rc) {
8390 + case 0:
8391 + cifs_all_info_to_fattr(&fattr, &find_data, cifs_sb, false);
8392 + break;
8393 + case -EREMOTE:
8394 + cifs_create_dfs_fattr(&fattr, inode->i_sb);
8395 + rc = 0;
8396 + break;
8397 + case -EOPNOTSUPP:
8398 + case -EINVAL:
8399 /*
8400 * FIXME: legacy server -- fall back to path-based call?
8401 * for now, just skip revalidating and mark inode for
8402 @@ -570,18 +579,14 @@ int cifs_get_file_info(struct file *filp)
8403 */
8404 rc = 0;
8405 CIFS_I(inode)->time = 0;
8406 + default:
8407 goto cgfi_exit;
8408 - } else if (rc == -EREMOTE) {
8409 - cifs_create_dfs_fattr(&fattr, inode->i_sb);
8410 - rc = 0;
8411 - } else if (rc)
8412 - goto cgfi_exit;
8413 + }
8414
8415 /*
8416 * don't bother with SFU junk here -- just mark inode as needing
8417 * revalidation.
8418 */
8419 - cifs_all_info_to_fattr(&fattr, &find_data, cifs_sb, false);
8420 fattr.cf_uniqueid = CIFS_I(inode)->uniqueid;
8421 fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
8422 cifs_fattr_to_inode(inode, &fattr);
8423 diff --git a/fs/eventpoll.c b/fs/eventpoll.c
8424 index fe047d96..2d1744a 100644
8425 --- a/fs/eventpoll.c
8426 +++ b/fs/eventpoll.c
8427 @@ -70,6 +70,15 @@
8428 * simultaneous inserts (A into B and B into A) from racing and
8429 * constructing a cycle without either insert observing that it is
8430 * going to.
8431 + * It is necessary to acquire multiple "ep->mtx"es at once in the
8432 + * case when one epoll fd is added to another. In this case, we
8433 + * always acquire the locks in the order of nesting (i.e. after
8434 + * epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired
8435 + * before e2->mtx). Since we disallow cycles of epoll file
8436 + * descriptors, this ensures that the mutexes are well-ordered. In
8437 + * order to communicate this nesting to lockdep, when walking a tree
8438 + * of epoll file descriptors, we use the current recursion depth as
8439 + * the lockdep subkey.
8440 * It is possible to drop the "ep->mtx" and to use the global
8441 * mutex "epmutex" (together with "ep->lock") to have it working,
8442 * but having "ep->mtx" will make the interface more scalable.
8443 @@ -464,13 +473,15 @@ static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
8444 * @ep: Pointer to the epoll private data structure.
8445 * @sproc: Pointer to the scan callback.
8446 * @priv: Private opaque data passed to the @sproc callback.
8447 + * @depth: The current depth of recursive f_op->poll calls.
8448 *
8449 * Returns: The same integer error code returned by the @sproc callback.
8450 */
8451 static int ep_scan_ready_list(struct eventpoll *ep,
8452 int (*sproc)(struct eventpoll *,
8453 struct list_head *, void *),
8454 - void *priv)
8455 + void *priv,
8456 + int depth)
8457 {
8458 int error, pwake = 0;
8459 unsigned long flags;
8460 @@ -481,7 +492,7 @@ static int ep_scan_ready_list(struct eventpoll *ep,
8461 * We need to lock this because we could be hit by
8462 * eventpoll_release_file() and epoll_ctl().
8463 */
8464 - mutex_lock(&ep->mtx);
8465 + mutex_lock_nested(&ep->mtx, depth);
8466
8467 /*
8468 * Steal the ready list, and re-init the original one to the
8469 @@ -670,7 +681,7 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
8470
8471 static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
8472 {
8473 - return ep_scan_ready_list(priv, ep_read_events_proc, NULL);
8474 + return ep_scan_ready_list(priv, ep_read_events_proc, NULL, call_nests + 1);
8475 }
8476
8477 static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
8478 @@ -737,7 +748,7 @@ void eventpoll_release_file(struct file *file)
8479
8480 ep = epi->ep;
8481 list_del_init(&epi->fllink);
8482 - mutex_lock(&ep->mtx);
8483 + mutex_lock_nested(&ep->mtx, 0);
8484 ep_remove(ep, epi);
8485 mutex_unlock(&ep->mtx);
8486 }
8487 @@ -1134,7 +1145,7 @@ static int ep_send_events(struct eventpoll *ep,
8488 esed.maxevents = maxevents;
8489 esed.events = events;
8490
8491 - return ep_scan_ready_list(ep, ep_send_events_proc, &esed);
8492 + return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0);
8493 }
8494
8495 static inline struct timespec ep_set_mstimeout(long ms)
8496 @@ -1267,7 +1278,7 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
8497 struct rb_node *rbp;
8498 struct epitem *epi;
8499
8500 - mutex_lock(&ep->mtx);
8501 + mutex_lock_nested(&ep->mtx, call_nests + 1);
8502 for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
8503 epi = rb_entry(rbp, struct epitem, rbn);
8504 if (unlikely(is_file_epoll(epi->ffd.file))) {
8505 @@ -1409,7 +1420,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
8506 }
8507
8508
8509 - mutex_lock(&ep->mtx);
8510 + mutex_lock_nested(&ep->mtx, 0);
8511
8512 /*
8513 * Try to lookup the file inside our RB tree, Since we grabbed "mtx"
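Since one epoll fd can be added to another, two ep->mtx mutexes of the same lock class may legitimately be nested; without annotation lockdep would report that as a potential deadlock. Using the recursion depth as the lockdep subclass, via mutex_lock_nested(), records the fixed outer-before-inner order the comment block earlier in this file describes. Minimal illustration of the annotation itself:

    /* Two same-class mutexes taken in a fixed outer->inner order. */
    static void walk_nested(struct mutex *outer, struct mutex *inner)
    {
        mutex_lock_nested(outer, 0);        /* depth 0: e.g. e1->mtx */
        mutex_lock_nested(inner, 1);        /* depth 1: e2->mtx nested inside */

        /* ... operate on both eventpoll structures ... */

        mutex_unlock(inner);
        mutex_unlock(outer);
    }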
8514 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
8515 index b7d7bd0..5c38120 100644
8516 --- a/fs/ext4/ext4.h
8517 +++ b/fs/ext4/ext4.h
8518 @@ -358,8 +358,7 @@ struct flex_groups {
8519
8520 /* Flags that should be inherited by new inodes from their parent. */
8521 #define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\
8522 - EXT4_SYNC_FL | EXT4_IMMUTABLE_FL | EXT4_APPEND_FL |\
8523 - EXT4_NODUMP_FL | EXT4_NOATIME_FL |\
8524 + EXT4_SYNC_FL | EXT4_NODUMP_FL | EXT4_NOATIME_FL |\
8525 EXT4_NOCOMPR_FL | EXT4_JOURNAL_DATA_FL |\
8526 EXT4_NOTAIL_FL | EXT4_DIRSYNC_FL)
8527
8528 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
8529 index 986e238..89c47f4 100644
8530 --- a/fs/ext4/inode.c
8531 +++ b/fs/ext4/inode.c
8532 @@ -4416,6 +4416,7 @@ retry_alloc:
8533 PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
8534 unlock_page(page);
8535 ret = VM_FAULT_SIGBUS;
8536 + ext4_journal_stop(handle);
8537 goto out;
8538 }
8539 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
8540 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
8541 index 1c924fa..50c7294 100644
8542 --- a/fs/ext4/namei.c
8543 +++ b/fs/ext4/namei.c
8544 @@ -1586,7 +1586,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
8545 dxtrace(dx_show_index("node", frames[1].entries));
8546 dxtrace(dx_show_index("node",
8547 ((struct dx_node *) bh2->b_data)->entries));
8548 - err = ext4_handle_dirty_metadata(handle, inode, bh2);
8549 + err = ext4_handle_dirty_metadata(handle, dir, bh2);
8550 if (err)
8551 goto journal_error;
8552 brelse (bh2);
8553 @@ -1612,7 +1612,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
8554 if (err)
8555 goto journal_error;
8556 }
8557 - err = ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
8558 + err = ext4_handle_dirty_metadata(handle, dir, frames[0].bh);
8559 if (err) {
8560 ext4_std_error(inode->i_sb, err);
8561 goto cleanup;
8562 @@ -1863,7 +1863,7 @@ retry:
8563 ext4_set_de_type(dir->i_sb, de, S_IFDIR);
8564 inode->i_nlink = 2;
8565 BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
8566 - err = ext4_handle_dirty_metadata(handle, dir, dir_block);
8567 + err = ext4_handle_dirty_metadata(handle, inode, dir_block);
8568 if (err)
8569 goto out_clear_inode;
8570 err = ext4_mark_inode_dirty(handle, inode);
8571 @@ -2530,7 +2530,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
8572 PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
8573 cpu_to_le32(new_dir->i_ino);
8574 BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
8575 - retval = ext4_handle_dirty_metadata(handle, old_dir, dir_bh);
8576 + retval = ext4_handle_dirty_metadata(handle, old_inode, dir_bh);
8577 if (retval) {
8578 ext4_std_error(old_dir->i_sb, retval);
8579 goto end_rename;
8580 diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
8581 index c757adc..19fe4e3 100644
8582 --- a/fs/ext4/xattr.c
8583 +++ b/fs/ext4/xattr.c
8584 @@ -820,8 +820,14 @@ inserted:
8585 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
8586 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
8587
8588 + /*
8589 + * take i_data_sem because we will test
8590 + * i_delalloc_reserved_flag in ext4_mb_new_blocks
8591 + */
8592 + down_read((&EXT4_I(inode)->i_data_sem));
8593 block = ext4_new_meta_blocks(handle, inode, goal, 0,
8594 NULL, &error);
8595 + up_read((&EXT4_I(inode)->i_data_sem));
8596 if (error)
8597 goto cleanup;
8598
8599 diff --git a/fs/namei.c b/fs/namei.c
8600 index 0b3138d..3d15072 100644
8601 --- a/fs/namei.c
8602 +++ b/fs/namei.c
8603 @@ -137,7 +137,7 @@ static int do_getname(const char __user *filename, char *page)
8604 return retval;
8605 }
8606
8607 -static char *getname_flags(const char __user * filename, int flags)
8608 +static char *getname_flags(const char __user *filename, int flags, int *empty)
8609 {
8610 char *tmp, *result;
8611
8612 @@ -148,6 +148,8 @@ static char *getname_flags(const char __user * filename, int flags)
8613
8614 result = tmp;
8615 if (retval < 0) {
8616 + if (retval == -ENOENT && empty)
8617 + *empty = 1;
8618 if (retval != -ENOENT || !(flags & LOOKUP_EMPTY)) {
8619 __putname(tmp);
8620 result = ERR_PTR(retval);
8621 @@ -160,7 +162,7 @@ static char *getname_flags(const char __user * filename, int flags)
8622
8623 char *getname(const char __user * filename)
8624 {
8625 - return getname_flags(filename, 0);
8626 + return getname_flags(filename, 0, 0);
8627 }
8628
8629 #ifdef CONFIG_AUDITSYSCALL
8630 @@ -850,7 +852,7 @@ static int follow_managed(struct path *path, unsigned flags)
8631 mntput(path->mnt);
8632 if (ret == -EISDIR)
8633 ret = 0;
8634 - return ret;
8635 + return ret < 0 ? ret : need_mntput;
8636 }
8637
8638 int follow_down_one(struct path *path)
8639 @@ -898,6 +900,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
8640 break;
8641 path->mnt = mounted;
8642 path->dentry = mounted->mnt_root;
8643 + nd->flags |= LOOKUP_JUMPED;
8644 nd->seq = read_seqcount_begin(&path->dentry->d_seq);
8645 /*
8646 * Update the inode too. We don't need to re-check the
8647 @@ -1211,6 +1214,8 @@ retry:
8648 path_put_conditional(path, nd);
8649 return err;
8650 }
8651 + if (err)
8652 + nd->flags |= LOOKUP_JUMPED;
8653 *inode = path->dentry->d_inode;
8654 return 0;
8655 }
8656 @@ -1798,11 +1803,11 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
8657 return __lookup_hash(&this, base, NULL);
8658 }
8659
8660 -int user_path_at(int dfd, const char __user *name, unsigned flags,
8661 - struct path *path)
8662 +int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
8663 + struct path *path, int *empty)
8664 {
8665 struct nameidata nd;
8666 - char *tmp = getname_flags(name, flags);
8667 + char *tmp = getname_flags(name, flags, empty);
8668 int err = PTR_ERR(tmp);
8669 if (!IS_ERR(tmp)) {
8670
8671 @@ -1816,6 +1821,12 @@ int user_path_at(int dfd, const char __user *name, unsigned flags,
8672 return err;
8673 }
8674
8675 +int user_path_at(int dfd, const char __user *name, unsigned flags,
8676 + struct path *path)
8677 +{
8678 + return user_path_at_empty(dfd, name, flags, path, 0);
8679 +}
8680 +
8681 static int user_path_parent(int dfd, const char __user *path,
8682 struct nameidata *nd, char **name)
8683 {
8684 @@ -2141,6 +2152,10 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
8685 }
8686
8687 /* create side of things */
8688 + /*
8689 + * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED has been
8690 + * cleared when we got to the last component we are about to look up
8691 + */
8692 error = complete_walk(nd);
8693 if (error)
8694 return ERR_PTR(error);
8695 @@ -2209,6 +2224,9 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
8696 if (error < 0)
8697 goto exit_dput;
8698
8699 + if (error)
8700 + nd->flags |= LOOKUP_JUMPED;
8701 +
8702 error = -ENOENT;
8703 if (!path->dentry->d_inode)
8704 goto exit_dput;
8705 @@ -2218,6 +2236,10 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
8706
8707 path_to_nameidata(path, nd);
8708 nd->inode = path->dentry->d_inode;
8709 + /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
8710 + error = complete_walk(nd);
8711 + if (error)
8712 + goto exit;
8713 error = -EISDIR;
8714 if (S_ISDIR(nd->inode->i_mode))
8715 goto exit;
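getname_flags() now reports through an extra out-parameter whether the user-supplied name was the empty string, and user_path_at_empty() forwards that to callers, so syscalls that accept things like AT_EMPTY_PATH can tell "empty path" apart from a genuine lookup failure. A caller-side sketch (function name invented):

    /* How a syscall-level caller might consume the new helper. */
    long example_lookup(int dfd, const char __user *name, unsigned int flags,
                        struct path *path)
    {
        int empty = 0;
        int err = user_path_at_empty(dfd, name, flags, path, &empty);

        if (err == -ENOENT && empty) {
            /* the user passed "": decide here whether that is allowed
             * instead of blindly propagating -ENOENT */
        }
        return err;
    }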
8716 diff --git a/fs/namespace.c b/fs/namespace.c
8717 index b4febb2..e5e1c7d 100644
8718 --- a/fs/namespace.c
8719 +++ b/fs/namespace.c
8720 @@ -1109,6 +1109,7 @@ static int show_vfsstat(struct seq_file *m, void *v)
8721
8722 /* device */
8723 if (mnt->mnt_sb->s_op->show_devname) {
8724 + seq_puts(m, "device ");
8725 err = mnt->mnt_sb->s_op->show_devname(m, mnt);
8726 } else {
8727 if (mnt->mnt_devname) {
8728 diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
8729 index 9561c8f..281ae95 100644
8730 --- a/fs/nfs/blocklayout/blocklayout.c
8731 +++ b/fs/nfs/blocklayout/blocklayout.c
8732 @@ -176,17 +176,6 @@ retry:
8733 return bio;
8734 }
8735
8736 -static void bl_set_lo_fail(struct pnfs_layout_segment *lseg)
8737 -{
8738 - if (lseg->pls_range.iomode == IOMODE_RW) {
8739 - dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
8740 - set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
8741 - } else {
8742 - dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
8743 - set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
8744 - }
8745 -}
8746 -
8747 /* This is basically copied from mpage_end_io_read */
8748 static void bl_end_io_read(struct bio *bio, int err)
8749 {
8750 @@ -206,7 +195,7 @@ static void bl_end_io_read(struct bio *bio, int err)
8751 if (!uptodate) {
8752 if (!rdata->pnfs_error)
8753 rdata->pnfs_error = -EIO;
8754 - bl_set_lo_fail(rdata->lseg);
8755 + pnfs_set_lo_fail(rdata->lseg);
8756 }
8757 bio_put(bio);
8758 put_parallel(par);
8759 @@ -303,6 +292,7 @@ bl_read_pagelist(struct nfs_read_data *rdata)
8760 bl_end_io_read, par);
8761 if (IS_ERR(bio)) {
8762 rdata->pnfs_error = PTR_ERR(bio);
8763 + bio = NULL;
8764 goto out;
8765 }
8766 }
8767 @@ -370,7 +360,7 @@ static void bl_end_io_write_zero(struct bio *bio, int err)
8768 if (!uptodate) {
8769 if (!wdata->pnfs_error)
8770 wdata->pnfs_error = -EIO;
8771 - bl_set_lo_fail(wdata->lseg);
8772 + pnfs_set_lo_fail(wdata->lseg);
8773 }
8774 bio_put(bio);
8775 put_parallel(par);
8776 @@ -386,7 +376,7 @@ static void bl_end_io_write(struct bio *bio, int err)
8777 if (!uptodate) {
8778 if (!wdata->pnfs_error)
8779 wdata->pnfs_error = -EIO;
8780 - bl_set_lo_fail(wdata->lseg);
8781 + pnfs_set_lo_fail(wdata->lseg);
8782 }
8783 bio_put(bio);
8784 put_parallel(par);
8785 @@ -543,6 +533,11 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
8786 fill_invalid_ext:
8787 dprintk("%s need to zero %d pages\n", __func__, npg_zero);
8788 for (;npg_zero > 0; npg_zero--) {
8789 + if (bl_is_sector_init(be->be_inval, isect)) {
8790 + dprintk("isect %llu already init\n",
8791 + (unsigned long long)isect);
8792 + goto next_page;
8793 + }
8794 /* page ref released in bl_end_io_write_zero */
8795 index = isect >> PAGE_CACHE_SECTOR_SHIFT;
8796 dprintk("%s zero %dth page: index %lu isect %llu\n",
8797 @@ -562,8 +557,7 @@ fill_invalid_ext:
8798 * PageUptodate: It was read before
8799 * sector_initialized: already written out
8800 */
8801 - if (PageDirty(page) || PageWriteback(page) ||
8802 - bl_is_sector_init(be->be_inval, isect)) {
8803 + if (PageDirty(page) || PageWriteback(page)) {
8804 print_page(page);
8805 unlock_page(page);
8806 page_cache_release(page);
8807 @@ -592,6 +586,7 @@ fill_invalid_ext:
8808 bl_end_io_write_zero, par);
8809 if (IS_ERR(bio)) {
8810 wdata->pnfs_error = PTR_ERR(bio);
8811 + bio = NULL;
8812 goto out;
8813 }
8814 /* FIXME: This should be done in bi_end_io */
8815 @@ -640,6 +635,7 @@ next_page:
8816 bl_end_io_write, par);
8817 if (IS_ERR(bio)) {
8818 wdata->pnfs_error = PTR_ERR(bio);
8819 + bio = NULL;
8820 goto out;
8821 }
8822 isect += PAGE_CACHE_SECTORS;
8823 @@ -805,7 +801,7 @@ nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
8824 struct nfs4_deviceid *d_id)
8825 {
8826 struct pnfs_device *dev;
8827 - struct pnfs_block_dev *rv = NULL;
8828 + struct pnfs_block_dev *rv;
8829 u32 max_resp_sz;
8830 int max_pages;
8831 struct page **pages = NULL;
8832 @@ -823,18 +819,20 @@ nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
8833 dev = kmalloc(sizeof(*dev), GFP_NOFS);
8834 if (!dev) {
8835 dprintk("%s kmalloc failed\n", __func__);
8836 - return NULL;
8837 + return ERR_PTR(-ENOMEM);
8838 }
8839
8840 pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
8841 if (pages == NULL) {
8842 kfree(dev);
8843 - return NULL;
8844 + return ERR_PTR(-ENOMEM);
8845 }
8846 for (i = 0; i < max_pages; i++) {
8847 pages[i] = alloc_page(GFP_NOFS);
8848 - if (!pages[i])
8849 + if (!pages[i]) {
8850 + rv = ERR_PTR(-ENOMEM);
8851 goto out_free;
8852 + }
8853 }
8854
8855 memcpy(&dev->dev_id, d_id, sizeof(*d_id));
8856 @@ -847,8 +845,10 @@ nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
8857 dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
8858 rc = nfs4_proc_getdeviceinfo(server, dev);
8859 dprintk("%s getdevice info returns %d\n", __func__, rc);
8860 - if (rc)
8861 + if (rc) {
8862 + rv = ERR_PTR(rc);
8863 goto out_free;
8864 + }
8865
8866 rv = nfs4_blk_decode_device(server, dev);
8867 out_free:
8868 @@ -866,7 +866,7 @@ bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
8869 struct pnfs_devicelist *dlist = NULL;
8870 struct pnfs_block_dev *bdev;
8871 LIST_HEAD(block_disklist);
8872 - int status = 0, i;
8873 + int status, i;
8874
8875 dprintk("%s enter\n", __func__);
8876
8877 @@ -898,8 +898,8 @@ bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
8878 for (i = 0; i < dlist->num_devs; i++) {
8879 bdev = nfs4_blk_get_deviceinfo(server, fh,
8880 &dlist->dev_id[i]);
8881 - if (!bdev) {
8882 - status = -ENODEV;
8883 + if (IS_ERR(bdev)) {
8884 + status = PTR_ERR(bdev);
8885 goto out_error;
8886 }
8887 spin_lock(&b_mt_id->bm_lock);
8888 @@ -960,7 +960,7 @@ static struct pnfs_layoutdriver_type blocklayout_type = {
8889 };
8890
8891 static const struct rpc_pipe_ops bl_upcall_ops = {
8892 - .upcall = bl_pipe_upcall,
8893 + .upcall = rpc_pipe_generic_upcall,
8894 .downcall = bl_pipe_downcall,
8895 .destroy_msg = bl_pipe_destroy_msg,
8896 };
8897 @@ -989,17 +989,20 @@ static int __init nfs4blocklayout_init(void)
8898 mnt,
8899 NFS_PIPE_DIRNAME, 0, &path);
8900 if (ret)
8901 - goto out_remove;
8902 + goto out_putrpc;
8903
8904 bl_device_pipe = rpc_mkpipe(path.dentry, "blocklayout", NULL,
8905 &bl_upcall_ops, 0);
8906 + path_put(&path);
8907 if (IS_ERR(bl_device_pipe)) {
8908 ret = PTR_ERR(bl_device_pipe);
8909 - goto out_remove;
8910 + goto out_putrpc;
8911 }
8912 out:
8913 return ret;
8914
8915 +out_putrpc:
8916 + rpc_put_mount();
8917 out_remove:
8918 pnfs_unregister_layoutdriver(&blocklayout_type);
8919 return ret;
8920 @@ -1012,6 +1015,7 @@ static void __exit nfs4blocklayout_exit(void)
8921
8922 pnfs_unregister_layoutdriver(&blocklayout_type);
8923 rpc_unlink(bl_device_pipe);
8924 + rpc_put_mount();
8925 }
8926
8927 MODULE_ALIAS("nfs-layouttype4-3");
8928 diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
8929 index f27d827..42acf7e 100644
8930 --- a/fs/nfs/blocklayout/blocklayout.h
8931 +++ b/fs/nfs/blocklayout/blocklayout.h
8932 @@ -150,7 +150,7 @@ BLK_LSEG2EXT(struct pnfs_layout_segment *lseg)
8933 }
8934
8935 struct bl_dev_msg {
8936 - int status;
8937 + int32_t status;
8938 uint32_t major, minor;
8939 };
8940
8941 @@ -169,8 +169,6 @@ extern wait_queue_head_t bl_wq;
8942 #define BL_DEVICE_REQUEST_ERR 0x2 /* User level process fails */
8943
8944 /* blocklayoutdev.c */
8945 -ssize_t bl_pipe_upcall(struct file *, struct rpc_pipe_msg *,
8946 - char __user *, size_t);
8947 ssize_t bl_pipe_downcall(struct file *, const char __user *, size_t);
8948 void bl_pipe_destroy_msg(struct rpc_pipe_msg *);
8949 struct block_device *nfs4_blkdev_get(dev_t dev);
8950 diff --git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c
8951 index a83b393..d08ba91 100644
8952 --- a/fs/nfs/blocklayout/blocklayoutdev.c
8953 +++ b/fs/nfs/blocklayout/blocklayoutdev.c
8954 @@ -79,28 +79,6 @@ int nfs4_blkdev_put(struct block_device *bdev)
8955 return blkdev_put(bdev, FMODE_READ);
8956 }
8957
8958 -/*
8959 - * Shouldn't there be a rpc_generic_upcall() to do this for us?
8960 - */
8961 -ssize_t bl_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
8962 - char __user *dst, size_t buflen)
8963 -{
8964 - char *data = (char *)msg->data + msg->copied;
8965 - size_t mlen = min(msg->len - msg->copied, buflen);
8966 - unsigned long left;
8967 -
8968 - left = copy_to_user(dst, data, mlen);
8969 - if (left == mlen) {
8970 - msg->errno = -EFAULT;
8971 - return -EFAULT;
8972 - }
8973 -
8974 - mlen -= left;
8975 - msg->copied += mlen;
8976 - msg->errno = 0;
8977 - return mlen;
8978 -}
8979 -
8980 static struct bl_dev_msg bl_mount_reply;
8981
8982 ssize_t bl_pipe_downcall(struct file *filp, const char __user *src,
8983 @@ -131,7 +109,7 @@ struct pnfs_block_dev *
8984 nfs4_blk_decode_device(struct nfs_server *server,
8985 struct pnfs_device *dev)
8986 {
8987 - struct pnfs_block_dev *rv = NULL;
8988 + struct pnfs_block_dev *rv;
8989 struct block_device *bd = NULL;
8990 struct rpc_pipe_msg msg;
8991 struct bl_msg_hdr bl_msg = {
8992 @@ -141,7 +119,7 @@ nfs4_blk_decode_device(struct nfs_server *server,
8993 uint8_t *dataptr;
8994 DECLARE_WAITQUEUE(wq, current);
8995 struct bl_dev_msg *reply = &bl_mount_reply;
8996 - int offset, len, i;
8997 + int offset, len, i, rc;
8998
8999 dprintk("%s CREATING PIPEFS MESSAGE\n", __func__);
9000 dprintk("%s: deviceid: %s, mincount: %d\n", __func__, dev->dev_id.data,
9001 @@ -168,8 +146,10 @@ nfs4_blk_decode_device(struct nfs_server *server,
9002
9003 dprintk("%s CALLING USERSPACE DAEMON\n", __func__);
9004 add_wait_queue(&bl_wq, &wq);
9005 - if (rpc_queue_upcall(bl_device_pipe->d_inode, &msg) < 0) {
9006 + rc = rpc_queue_upcall(bl_device_pipe->d_inode, &msg);
9007 + if (rc < 0) {
9008 remove_wait_queue(&bl_wq, &wq);
9009 + rv = ERR_PTR(rc);
9010 goto out;
9011 }
9012
9013 @@ -187,8 +167,9 @@ nfs4_blk_decode_device(struct nfs_server *server,
9014
9015 bd = nfs4_blkdev_get(MKDEV(reply->major, reply->minor));
9016 if (IS_ERR(bd)) {
9017 - dprintk("%s failed to open device : %ld\n",
9018 - __func__, PTR_ERR(bd));
9019 + rc = PTR_ERR(bd);
9020 + dprintk("%s failed to open device : %d\n", __func__, rc);
9021 + rv = ERR_PTR(rc);
9022 goto out;
9023 }
9024
9025 diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
9026 index f20801a..47d1c6f 100644
9027 --- a/fs/nfs/idmap.c
9028 +++ b/fs/nfs/idmap.c
9029 @@ -336,8 +336,6 @@ struct idmap {
9030 struct idmap_hashtable idmap_group_hash;
9031 };
9032
9033 -static ssize_t idmap_pipe_upcall(struct file *, struct rpc_pipe_msg *,
9034 - char __user *, size_t);
9035 static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
9036 size_t);
9037 static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);
9038 @@ -345,7 +343,7 @@ static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);
9039 static unsigned int fnvhash32(const void *, size_t);
9040
9041 static const struct rpc_pipe_ops idmap_upcall_ops = {
9042 - .upcall = idmap_pipe_upcall,
9043 + .upcall = rpc_pipe_generic_upcall,
9044 .downcall = idmap_pipe_downcall,
9045 .destroy_msg = idmap_pipe_destroy_msg,
9046 };
9047 @@ -595,27 +593,6 @@ nfs_idmap_name(struct idmap *idmap, struct idmap_hashtable *h,
9048 return ret;
9049 }
9050
9051 -/* RPC pipefs upcall/downcall routines */
9052 -static ssize_t
9053 -idmap_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
9054 - char __user *dst, size_t buflen)
9055 -{
9056 - char *data = (char *)msg->data + msg->copied;
9057 - size_t mlen = min(msg->len, buflen);
9058 - unsigned long left;
9059 -
9060 - left = copy_to_user(dst, data, mlen);
9061 - if (left == mlen) {
9062 - msg->errno = -EFAULT;
9063 - return -EFAULT;
9064 - }
9065 -
9066 - mlen -= left;
9067 - msg->copied += mlen;
9068 - msg->errno = 0;
9069 - return mlen;
9070 -}
9071 -
9072 static ssize_t
9073 idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
9074 {
9075 diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
9076 index e8915d4..4c78c62 100644
9077 --- a/fs/nfs/nfs4filelayout.c
9078 +++ b/fs/nfs/nfs4filelayout.c
9079 @@ -77,19 +77,6 @@ filelayout_get_dserver_offset(struct pnfs_layout_segment *lseg, loff_t offset)
9080 BUG();
9081 }
9082
9083 -/* For data server errors we don't recover from */
9084 -static void
9085 -filelayout_set_lo_fail(struct pnfs_layout_segment *lseg)
9086 -{
9087 - if (lseg->pls_range.iomode == IOMODE_RW) {
9088 - dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
9089 - set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
9090 - } else {
9091 - dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
9092 - set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
9093 - }
9094 -}
9095 -
9096 static int filelayout_async_handle_error(struct rpc_task *task,
9097 struct nfs4_state *state,
9098 struct nfs_client *clp,
9099 @@ -145,7 +132,7 @@ static int filelayout_read_done_cb(struct rpc_task *task,
9100 dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
9101 __func__, data->ds_clp, data->ds_clp->cl_session);
9102 if (reset) {
9103 - filelayout_set_lo_fail(data->lseg);
9104 + pnfs_set_lo_fail(data->lseg);
9105 nfs4_reset_read(task, data);
9106 clp = NFS_SERVER(data->inode)->nfs_client;
9107 }
9108 @@ -221,7 +208,7 @@ static int filelayout_write_done_cb(struct rpc_task *task,
9109 dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
9110 __func__, data->ds_clp, data->ds_clp->cl_session);
9111 if (reset) {
9112 - filelayout_set_lo_fail(data->lseg);
9113 + pnfs_set_lo_fail(data->lseg);
9114 nfs4_reset_write(task, data);
9115 clp = NFS_SERVER(data->inode)->nfs_client;
9116 } else
9117 @@ -256,7 +243,7 @@ static int filelayout_commit_done_cb(struct rpc_task *task,
9118 __func__, data->ds_clp, data->ds_clp->cl_session);
9119 if (reset) {
9120 prepare_to_resend_writes(data);
9121 - filelayout_set_lo_fail(data->lseg);
9122 + pnfs_set_lo_fail(data->lseg);
9123 } else
9124 nfs_restart_rpc(task, data->ds_clp);
9125 return -EAGAIN;
9126 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
9127 index e550e88..ee73d9a 100644
9128 --- a/fs/nfs/pnfs.c
9129 +++ b/fs/nfs/pnfs.c
9130 @@ -1168,23 +1168,17 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
9131 /*
9132 * Called by non rpc-based layout drivers
9133 */
9134 -int
9135 -pnfs_ld_write_done(struct nfs_write_data *data)
9136 +void pnfs_ld_write_done(struct nfs_write_data *data)
9137 {
9138 - int status;
9139 -
9140 - if (!data->pnfs_error) {
9141 + if (likely(!data->pnfs_error)) {
9142 pnfs_set_layoutcommit(data);
9143 data->mds_ops->rpc_call_done(&data->task, data);
9144 - data->mds_ops->rpc_release(data);
9145 - return 0;
9146 + } else {
9147 + put_lseg(data->lseg);
9148 + data->lseg = NULL;
9149 + dprintk("pnfs write error = %d\n", data->pnfs_error);
9150 }
9151 -
9152 - dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
9153 - data->pnfs_error);
9154 - status = nfs_initiate_write(data, NFS_CLIENT(data->inode),
9155 - data->mds_ops, NFS_FILE_SYNC);
9156 - return status ? : -EAGAIN;
9157 + data->mds_ops->rpc_release(data);
9158 }
9159 EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
9160
9161 @@ -1268,23 +1262,17 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
9162 /*
9163 * Called by non rpc-based layout drivers
9164 */
9165 -int
9166 -pnfs_ld_read_done(struct nfs_read_data *data)
9167 +void pnfs_ld_read_done(struct nfs_read_data *data)
9168 {
9169 - int status;
9170 -
9171 - if (!data->pnfs_error) {
9172 + if (likely(!data->pnfs_error)) {
9173 __nfs4_read_done_cb(data);
9174 data->mds_ops->rpc_call_done(&data->task, data);
9175 - data->mds_ops->rpc_release(data);
9176 - return 0;
9177 + } else {
9178 + put_lseg(data->lseg);
9179 + data->lseg = NULL;
9180 + dprintk("pnfs write error = %d\n", data->pnfs_error);
9181 }
9182 -
9183 - dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
9184 - data->pnfs_error);
9185 - status = nfs_initiate_read(data, NFS_CLIENT(data->inode),
9186 - data->mds_ops);
9187 - return status ? : -EAGAIN;
9188 + data->mds_ops->rpc_release(data);
9189 }
9190 EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
9191
9192 @@ -1381,6 +1369,18 @@ static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
9193 }
9194 }
9195
9196 +void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
9197 +{
9198 + if (lseg->pls_range.iomode == IOMODE_RW) {
9199 + dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
9200 + set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
9201 + } else {
9202 + dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
9203 + set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
9204 + }
9205 +}
9206 +EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
9207 +
9208 void
9209 pnfs_set_layoutcommit(struct nfs_write_data *wdata)
9210 {
9211 diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
9212 index 01cbfd5..1509530 100644
9213 --- a/fs/nfs/pnfs.h
9214 +++ b/fs/nfs/pnfs.h
9215 @@ -178,6 +178,7 @@ int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc);
9216 void pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *, struct nfs_page *);
9217 int pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc);
9218 bool pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, struct nfs_page *req);
9219 +void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg);
9220 int pnfs_layout_process(struct nfs4_layoutget *lgp);
9221 void pnfs_free_lseg_list(struct list_head *tmp_list);
9222 void pnfs_destroy_layout(struct nfs_inode *);
9223 @@ -200,8 +201,8 @@ void pnfs_set_layoutcommit(struct nfs_write_data *wdata);
9224 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data);
9225 int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
9226 int _pnfs_return_layout(struct inode *);
9227 -int pnfs_ld_write_done(struct nfs_write_data *);
9228 -int pnfs_ld_read_done(struct nfs_read_data *);
9229 +void pnfs_ld_write_done(struct nfs_write_data *);
9230 +void pnfs_ld_read_done(struct nfs_read_data *);
9231 struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
9232 struct nfs_open_context *ctx,
9233 loff_t pos,
9234 diff --git a/fs/nfs/read.c b/fs/nfs/read.c
9235 index 2171c04..bfc20b1 100644
9236 --- a/fs/nfs/read.c
9237 +++ b/fs/nfs/read.c
9238 @@ -541,13 +541,23 @@ static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
9239 static void nfs_readpage_release_full(void *calldata)
9240 {
9241 struct nfs_read_data *data = calldata;
9242 + struct nfs_pageio_descriptor pgio;
9243
9244 + if (data->pnfs_error) {
9245 + nfs_pageio_init_read_mds(&pgio, data->inode);
9246 + pgio.pg_recoalesce = 1;
9247 + }
9248 while (!list_empty(&data->pages)) {
9249 struct nfs_page *req = nfs_list_entry(data->pages.next);
9250
9251 nfs_list_remove_request(req);
9252 - nfs_readpage_release(req);
9253 + if (!data->pnfs_error)
9254 + nfs_readpage_release(req);
9255 + else
9256 + nfs_pageio_add_request(&pgio, req);
9257 }
9258 + if (data->pnfs_error)
9259 + nfs_pageio_complete(&pgio);
9260 nfs_readdata_release(calldata);
9261 }
9262
9263 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
9264 index c9bd2a6..106fd06 100644
9265 --- a/fs/nfs/write.c
9266 +++ b/fs/nfs/write.c
9267 @@ -428,7 +428,6 @@ static void
9268 nfs_mark_request_dirty(struct nfs_page *req)
9269 {
9270 __set_page_dirty_nobuffers(req->wb_page);
9271 - __mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC);
9272 }
9273
9274 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
9275 @@ -762,6 +761,8 @@ int nfs_updatepage(struct file *file, struct page *page,
9276 status = nfs_writepage_setup(ctx, page, offset, count);
9277 if (status < 0)
9278 nfs_set_pageerror(page);
9279 + else
9280 + __set_page_dirty_nobuffers(page);
9281
9282 dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
9283 status, (long long)i_size_read(inode));
9284 @@ -1165,7 +1166,13 @@ static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
9285 static void nfs_writeback_release_full(void *calldata)
9286 {
9287 struct nfs_write_data *data = calldata;
9288 - int status = data->task.tk_status;
9289 + int ret, status = data->task.tk_status;
9290 + struct nfs_pageio_descriptor pgio;
9291 +
9292 + if (data->pnfs_error) {
9293 + nfs_pageio_init_write_mds(&pgio, data->inode, FLUSH_STABLE);
9294 + pgio.pg_recoalesce = 1;
9295 + }
9296
9297 /* Update attributes as result of writeback. */
9298 while (!list_empty(&data->pages)) {
9299 @@ -1181,6 +1188,11 @@ static void nfs_writeback_release_full(void *calldata)
9300 req->wb_bytes,
9301 (long long)req_offset(req));
9302
9303 + if (data->pnfs_error) {
9304 + dprintk(", pnfs error = %d\n", data->pnfs_error);
9305 + goto next;
9306 + }
9307 +
9308 if (status < 0) {
9309 nfs_set_pageerror(page);
9310 nfs_context_set_write_error(req->wb_context, status);
9311 @@ -1200,7 +1212,19 @@ remove_request:
9312 next:
9313 nfs_clear_page_tag_locked(req);
9314 nfs_end_page_writeback(page);
9315 + if (data->pnfs_error) {
9316 + lock_page(page);
9317 + nfs_pageio_cond_complete(&pgio, page->index);
9318 + ret = nfs_page_async_flush(&pgio, page, 0);
9319 + if (ret) {
9320 + nfs_set_pageerror(page);
9321 + dprintk("rewrite to MDS error = %d\n", ret);
9322 + }
9323 + unlock_page(page);
9324 + }
9325 }
9326 + if (data->pnfs_error)
9327 + nfs_pageio_complete(&pgio);
9328 nfs_writedata_release(calldata);
9329 }
9330
9331 @@ -1553,6 +1577,10 @@ static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_contr
9332 int flags = FLUSH_SYNC;
9333 int ret = 0;
9334
9335 + /* no commits means nothing needs to be done */
9336 + if (!nfsi->ncommit)
9337 + return ret;
9338 +
9339 if (wbc->sync_mode == WB_SYNC_NONE) {
9340 /* Don't commit yet if this is a non-blocking flush and there
9341 * are a lot of outstanding writes for this mapping.
9342 @@ -1686,34 +1714,20 @@ out_error:
9343 int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
9344 struct page *page)
9345 {
9346 - struct nfs_page *req;
9347 - int ret;
9348 + /*
9349 + * If PagePrivate is set, then the page is currently associated with
9350 + * an in-progress read or write request. Don't try to migrate it.
9351 + *
9352 + * FIXME: we could do this in principle, but we'll need a way to ensure
9353 + * that we can safely release the inode reference while holding
9354 + * the page lock.
9355 + */
9356 + if (PagePrivate(page))
9357 + return -EBUSY;
9358
9359 nfs_fscache_release_page(page, GFP_KERNEL);
9360
9361 - req = nfs_find_and_lock_request(page, false);
9362 - ret = PTR_ERR(req);
9363 - if (IS_ERR(req))
9364 - goto out;
9365 -
9366 - ret = migrate_page(mapping, newpage, page);
9367 - if (!req)
9368 - goto out;
9369 - if (ret)
9370 - goto out_unlock;
9371 - page_cache_get(newpage);
9372 - spin_lock(&mapping->host->i_lock);
9373 - req->wb_page = newpage;
9374 - SetPagePrivate(newpage);
9375 - set_page_private(newpage, (unsigned long)req);
9376 - ClearPagePrivate(page);
9377 - set_page_private(page, 0);
9378 - spin_unlock(&mapping->host->i_lock);
9379 - page_cache_release(page);
9380 -out_unlock:
9381 - nfs_clear_page_tag_locked(req);
9382 -out:
9383 - return ret;
9384 + return migrate_page(mapping, newpage, page);
9385 }
9386 #endif
9387
9388 diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
9389 index e807776..05b397c 100644
9390 --- a/fs/nfsd/nfs4proc.c
9391 +++ b/fs/nfsd/nfs4proc.c
9392 @@ -156,6 +156,8 @@ do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfs
9393 !(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
9394 return nfserr_inval;
9395
9396 + accmode |= NFSD_MAY_READ_IF_EXEC;
9397 +
9398 if (open->op_share_access & NFS4_SHARE_ACCESS_READ)
9399 accmode |= NFSD_MAY_READ;
9400 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
9401 @@ -691,7 +693,7 @@ nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
9402 readdir->rd_bmval[1] &= nfsd_suppattrs1(cstate->minorversion);
9403 readdir->rd_bmval[2] &= nfsd_suppattrs2(cstate->minorversion);
9404
9405 - if ((cookie > ~(u32)0) || (cookie == 1) || (cookie == 2) ||
9406 + if ((cookie == 1) || (cookie == 2) ||
9407 (cookie == 0 && memcmp(readdir->rd_verf.data, zeroverf.data, NFS4_VERIFIER_SIZE)))
9408 return nfserr_bad_cookie;
9409
9410 @@ -930,7 +932,7 @@ _nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
9411 count = 4 + (verify->ve_attrlen >> 2);
9412 buf = kmalloc(count << 2, GFP_KERNEL);
9413 if (!buf)
9414 - return nfserr_resource;
9415 + return nfserr_jukebox;
9416
9417 status = nfsd4_encode_fattr(&cstate->current_fh,
9418 cstate->current_fh.fh_export,
9419 diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
9420 index 29d77f6..02eb38e 100644
9421 --- a/fs/nfsd/nfs4recover.c
9422 +++ b/fs/nfsd/nfs4recover.c
9423 @@ -88,7 +88,7 @@ nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname)
9424 struct xdr_netobj cksum;
9425 struct hash_desc desc;
9426 struct scatterlist sg;
9427 - __be32 status = nfserr_resource;
9428 + __be32 status = nfserr_jukebox;
9429
9430 dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n",
9431 clname->len, clname->data);
9432 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
9433 index 3787ec1..6f8bcc7 100644
9434 --- a/fs/nfsd/nfs4state.c
9435 +++ b/fs/nfsd/nfs4state.c
9436 @@ -192,8 +192,15 @@ static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
9437 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
9438 {
9439 if (atomic_dec_and_test(&fp->fi_access[oflag])) {
9440 - nfs4_file_put_fd(fp, O_RDWR);
9441 nfs4_file_put_fd(fp, oflag);
9442 + /*
9443 + * It's also safe to get rid of the RDWR open *if*
9444 + * we no longer have need of the other kind of access
9445 + * or if we already have the other kind of open:
9446 + */
9447 + if (fp->fi_fds[1-oflag]
9448 + || atomic_read(&fp->fi_access[1 - oflag]) == 0)
9449 + nfs4_file_put_fd(fp, O_RDWR);
9450 }
9451 }
9452
9453 @@ -1946,7 +1953,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
9454 * of 5 bullet points, labeled as CASE0 - CASE4 below.
9455 */
9456 unconf = find_unconfirmed_client_by_str(dname, strhashval);
9457 - status = nfserr_resource;
9458 + status = nfserr_jukebox;
9459 if (!conf) {
9460 /*
9461 * RFC 3530 14.2.33 CASE 4:
9462 @@ -2483,7 +2490,7 @@ renew:
9463 if (open->op_stateowner == NULL) {
9464 sop = alloc_init_open_stateowner(strhashval, clp, open);
9465 if (sop == NULL)
9466 - return nfserr_resource;
9467 + return nfserr_jukebox;
9468 open->op_stateowner = sop;
9469 }
9470 list_del_init(&sop->so_close_lru);
9471 @@ -2619,7 +2626,7 @@ nfs4_new_open(struct svc_rqst *rqstp, struct nfs4_stateid **stpp,
9472
9473 stp = nfs4_alloc_stateid();
9474 if (stp == NULL)
9475 - return nfserr_resource;
9476 + return nfserr_jukebox;
9477
9478 status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
9479 if (status) {
9480 @@ -2850,7 +2857,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
9481 status = nfserr_bad_stateid;
9482 if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
9483 goto out;
9484 - status = nfserr_resource;
9485 + status = nfserr_jukebox;
9486 fp = alloc_init_file(ino);
9487 if (fp == NULL)
9488 goto out;
9489 @@ -3530,8 +3537,9 @@ static inline void nfs4_file_downgrade(struct nfs4_stateid *stp, unsigned int to
9490 int i;
9491
9492 for (i = 1; i < 4; i++) {
9493 - if (test_bit(i, &stp->st_access_bmap) && !(i & to_access)) {
9494 - nfs4_file_put_access(stp->st_file, i);
9495 + if (test_bit(i, &stp->st_access_bmap)
9496 + && ((i & to_access) != i)) {
9497 + nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(i));
9498 __clear_bit(i, &stp->st_access_bmap);
9499 }
9500 }
9501 @@ -3562,6 +3570,8 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
9502 if (!access_valid(od->od_share_access, cstate->minorversion)
9503 || !deny_valid(od->od_share_deny))
9504 return nfserr_inval;
9505 + /* We don't yet support WANT bits: */
9506 + od->od_share_access &= NFS4_SHARE_ACCESS_MASK;
9507
9508 nfs4_lock_state();
9509 if ((status = nfs4_preprocess_seqid_op(cstate,
9510 @@ -4035,7 +4045,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
9511 /* XXX: Do we need to check for duplicate stateowners on
9512 * the same file, or should they just be allowed (and
9513 * create new stateids)? */
9514 - status = nfserr_resource;
9515 + status = nfserr_jukebox;
9516 lock_sop = alloc_init_lock_stateowner(strhashval,
9517 open_sop->so_client, open_stp, lock);
9518 if (lock_sop == NULL)
9519 @@ -4119,9 +4129,9 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
9520 case (EDEADLK):
9521 status = nfserr_deadlock;
9522 break;
9523 - default:
9524 + default:
9525 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
9526 - status = nfserr_resource;
9527 + status = nfserrno(err);
9528 break;
9529 }
9530 out:
9531 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
9532 index c8bf405..f810996 100644
9533 --- a/fs/nfsd/nfs4xdr.c
9534 +++ b/fs/nfsd/nfs4xdr.c
9535 @@ -1623,6 +1623,18 @@ static void write_cinfo(__be32 **p, struct nfsd4_change_info *c)
9536 \
9537 save = resp->p;
9538
9539 +static bool seqid_mutating_err(__be32 err)
9540 +{
9541 + /* rfc 3530 section 8.1.5: */
9542 + return err != nfserr_stale_clientid &&
9543 + err != nfserr_stale_stateid &&
9544 + err != nfserr_bad_stateid &&
9545 + err != nfserr_bad_seqid &&
9546 + err != nfserr_bad_xdr &&
9547 + err != nfserr_resource &&
9548 + err != nfserr_nofilehandle;
9549 +}
9550 +
9551 /*
9552 * Routine for encoding the result of a "seqid-mutating" NFSv4 operation. This
9553 * is where sequence id's are incremented, and the replay cache is filled.
9554 diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
9555 index 4eefaf1..5cfebe5 100644
9556 --- a/fs/nfsd/state.h
9557 +++ b/fs/nfsd/state.h
9558 @@ -447,12 +447,6 @@ struct nfs4_stateid {
9559 #define WR_STATE 0x00000020
9560 #define CLOSE_STATE 0x00000040
9561
9562 -#define seqid_mutating_err(err) \
9563 - (((err) != nfserr_stale_clientid) && \
9564 - ((err) != nfserr_bad_seqid) && \
9565 - ((err) != nfserr_stale_stateid) && \
9566 - ((err) != nfserr_bad_stateid))
9567 -
9568 struct nfsd4_compound_state;
9569
9570 extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
9571 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
9572 index fd0acca..acf88ae 100644
9573 --- a/fs/nfsd/vfs.c
9574 +++ b/fs/nfsd/vfs.c
9575 @@ -2114,7 +2114,8 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
9576
9577 /* Allow read access to binaries even when mode 111 */
9578 if (err == -EACCES && S_ISREG(inode->i_mode) &&
9579 - acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE))
9580 + (acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE) ||
9581 + acc == (NFSD_MAY_READ | NFSD_MAY_READ_IF_EXEC)))
9582 err = inode_permission(inode, MAY_EXEC);
9583
9584 return err? nfserrno(err) : 0;
9585 diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
9586 index e0bbac0..a22e40e 100644
9587 --- a/fs/nfsd/vfs.h
9588 +++ b/fs/nfsd/vfs.h
9589 @@ -25,6 +25,7 @@
9590 #define NFSD_MAY_BYPASS_GSS_ON_ROOT 256
9591 #define NFSD_MAY_NOT_BREAK_LEASE 512
9592 #define NFSD_MAY_BYPASS_GSS 1024
9593 +#define NFSD_MAY_READ_IF_EXEC 2048
9594
9595 #define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
9596 #define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
9597 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
9598 index 5afaa58..c7d4ee6 100644
9599 --- a/fs/proc/task_mmu.c
9600 +++ b/fs/proc/task_mmu.c
9601 @@ -1039,6 +1039,9 @@ static int show_numa_map(struct seq_file *m, void *v)
9602 seq_printf(m, " stack");
9603 }
9604
9605 + if (is_vm_hugetlb_page(vma))
9606 + seq_printf(m, " huge");
9607 +
9608 walk_page_range(vma->vm_start, vma->vm_end, &walk);
9609
9610 if (!md->pages)
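
The show_numa_map() hunk above adds a " huge" tag for hugetlb VMAs in /proc/<pid>/numa_maps. A quick way to see it from userspace is the sketch below; it assumes a kernel with CONFIG_NUMA, at least one reserved huge page (vm.nr_hugepages > 0), a 2 MB default huge page size and a libc that exposes MAP_HUGETLB - all assumptions about the test machine, not part of the patch.

    /* numa_huge_demo.c - map a hugetlb region and show its numa_maps entry.
     * Build: gcc -o numa_huge_demo numa_huge_demo.c
     * Needs CONFIG_NUMA and at least one reserved huge page (vm.nr_hugepages > 0). */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    #define HUGE_SZ (2UL * 1024 * 1024)     /* assumed default huge page size */

    int main(void)
    {
        char line[512];
        FILE *f;
        void *p = mmap(NULL, HUGE_SZ, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

        if (p == MAP_FAILED) {
            perror("mmap(MAP_HUGETLB)");
            return 1;
        }
        memset(p, 0, HUGE_SZ);              /* fault the huge page in */

        f = fopen("/proc/self/numa_maps", "r");
        if (!f) {
            perror("/proc/self/numa_maps");
            return 1;
        }
        while (fgets(line, sizeof(line), f))
            if (strstr(line, "huge"))       /* the patched kernel appends " huge" */
                fputs(line, stdout);
        fclose(f);
        munmap(p, HUGE_SZ);
        return 0;
    }

On an unpatched kernel the same numa_maps line appears without the marker, so the program simply prints nothing.
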
9611 diff --git a/fs/stat.c b/fs/stat.c
9612 index 78a3aa8..8806b89 100644
9613 --- a/fs/stat.c
9614 +++ b/fs/stat.c
9615 @@ -294,15 +294,16 @@ SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
9616 {
9617 struct path path;
9618 int error;
9619 + int empty = 0;
9620
9621 if (bufsiz <= 0)
9622 return -EINVAL;
9623
9624 - error = user_path_at(dfd, pathname, LOOKUP_EMPTY, &path);
9625 + error = user_path_at_empty(dfd, pathname, LOOKUP_EMPTY, &path, &empty);
9626 if (!error) {
9627 struct inode *inode = path.dentry->d_inode;
9628
9629 - error = -EINVAL;
9630 + error = empty ? -ENOENT : -EINVAL;
9631 if (inode->i_op->readlink) {
9632 error = security_inode_readlink(path.dentry);
9633 if (!error) {
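
The readlinkat() change above makes an empty path report ENOENT instead of EINVAL. A minimal userspace check of that behaviour, using nothing beyond the readlinkat(2) syscall itself:

    /* readlink_empty.c - which errno does an empty path produce?
     * Build: gcc -o readlink_empty readlink_empty.c */
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[256];
        ssize_t n = readlinkat(AT_FDCWD, "", buf, sizeof(buf));

        if (n < 0)      /* patched kernels: ENOENT; older ones: EINVAL */
            printf("readlinkat(\"\") failed: %s\n", strerror(errno));
        else
            printf("unexpected success, %zd bytes\n", n);
        return 0;
    }
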
9634 diff --git a/fs/statfs.c b/fs/statfs.c
9635 index 8244924..9cf04a1 100644
9636 --- a/fs/statfs.c
9637 +++ b/fs/statfs.c
9638 @@ -76,7 +76,7 @@ EXPORT_SYMBOL(vfs_statfs);
9639 int user_statfs(const char __user *pathname, struct kstatfs *st)
9640 {
9641 struct path path;
9642 - int error = user_path(pathname, &path);
9643 + int error = user_path_at(AT_FDCWD, pathname, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
9644 if (!error) {
9645 error = vfs_statfs(&path, st);
9646 path_put(&path);
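
user_statfs() now resolves the path with LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, so statfs(2) on an automount point should trigger the automount and report the mounted filesystem rather than the covering directory. A minimal caller for experimenting with that; the default path is only an example:

    /* statfs_demo.c - print basic statfs(2) results for a path.
     * Build: gcc -o statfs_demo statfs_demo.c */
    #include <stdio.h>
    #include <sys/statfs.h>

    int main(int argc, char **argv)
    {
        const char *path = argc > 1 ? argv[1] : "/";    /* e.g. an automount point */
        struct statfs st;

        if (statfs(path, &st) < 0) {
            perror("statfs");
            return 1;
        }
        printf("%s: type=0x%lx blocks=%lu bfree=%lu\n", path,
               (unsigned long)st.f_type,
               (unsigned long)st.f_blocks, (unsigned long)st.f_bfree);
        return 0;
    }
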
9647 diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
9648 index e49c36d..bb145e4 100644
9649 --- a/include/acpi/acpi_drivers.h
9650 +++ b/include/acpi/acpi_drivers.h
9651 @@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
9652 {
9653 }
9654 static inline int register_hotplug_dock_device(acpi_handle handle,
9655 - struct acpi_dock_ops *ops,
9656 + const struct acpi_dock_ops *ops,
9657 void *context)
9658 {
9659 return -ENODEV;
9660 diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
9661 index 91567bb..03eb1d6 100644
9662 --- a/include/drm/drm_dp_helper.h
9663 +++ b/include/drm/drm_dp_helper.h
9664 @@ -72,6 +72,7 @@
9665
9666 #define DP_MAIN_LINK_CHANNEL_CODING 0x006
9667
9668 +#define DP_EDP_CONFIGURATION_CAP 0x00d
9669 #define DP_TRAINING_AUX_RD_INTERVAL 0x00e
9670
9671 /* link configuration */
9672 @@ -133,6 +134,8 @@
9673 #define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
9674 # define DP_SET_ANSI_8B10B (1 << 0)
9675
9676 +#define DP_EDP_CONFIGURATION_SET 0x10a
9677 +
9678 #define DP_LANE0_1_STATUS 0x202
9679 #define DP_LANE2_3_STATUS 0x203
9680 # define DP_LANE_CR_DONE (1 << 0)
9681 diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
9682 index 53792bf..ce1b719 100644
9683 --- a/include/linux/ext2_fs.h
9684 +++ b/include/linux/ext2_fs.h
9685 @@ -197,8 +197,8 @@ struct ext2_group_desc
9686
9687 /* Flags that should be inherited by new inodes from their parent. */
9688 #define EXT2_FL_INHERITED (EXT2_SECRM_FL | EXT2_UNRM_FL | EXT2_COMPR_FL |\
9689 - EXT2_SYNC_FL | EXT2_IMMUTABLE_FL | EXT2_APPEND_FL |\
9690 - EXT2_NODUMP_FL | EXT2_NOATIME_FL | EXT2_COMPRBLK_FL|\
9691 + EXT2_SYNC_FL | EXT2_NODUMP_FL |\
9692 + EXT2_NOATIME_FL | EXT2_COMPRBLK_FL |\
9693 EXT2_NOCOMP_FL | EXT2_JOURNAL_DATA_FL |\
9694 EXT2_NOTAIL_FL | EXT2_DIRSYNC_FL)
9695
9696 diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
9697 index 67a803a..0244611 100644
9698 --- a/include/linux/ext3_fs.h
9699 +++ b/include/linux/ext3_fs.h
9700 @@ -180,8 +180,8 @@ struct ext3_group_desc
9701
9702 /* Flags that should be inherited by new inodes from their parent. */
9703 #define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\
9704 - EXT3_SYNC_FL | EXT3_IMMUTABLE_FL | EXT3_APPEND_FL |\
9705 - EXT3_NODUMP_FL | EXT3_NOATIME_FL | EXT3_COMPRBLK_FL|\
9706 + EXT3_SYNC_FL | EXT3_NODUMP_FL |\
9707 + EXT3_NOATIME_FL | EXT3_COMPRBLK_FL |\
9708 EXT3_NOCOMPR_FL | EXT3_JOURNAL_DATA_FL |\
9709 EXT3_NOTAIL_FL | EXT3_DIRSYNC_FL)
9710
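
The EXT2_FL_INHERITED/EXT3_FL_INHERITED hunks stop new inodes from inheriting the APPEND and IMMUTABLE attributes from their parent directory. The inheritance itself can be observed with the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls; the sketch below sets NODUMP (which is still in the inherited set) on a directory and reads back the flags of a file created inside it. The directory and file names are placeholders, the test must run on an ext2/3/4 filesystem, and a comparison run with APPEND or IMMUTABLE would additionally need CAP_LINUX_IMMUTABLE.

    /* inherit_flags.c - which ext* attributes does a new file inherit?
     * Build: gcc -o inherit_flags inherit_flags.c ; run inside an ext2/3/4 fs. */
    #include <fcntl.h>
    #include <linux/fs.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
        int dfd, fd, dflags = 0, fflags = 0;

        mkdir("flagdir", 0755);                 /* placeholder names throughout */
        dfd = open("flagdir", O_RDONLY | O_DIRECTORY);
        if (dfd < 0 || ioctl(dfd, FS_IOC_GETFLAGS, &dflags) < 0) {
            perror("open/GETFLAGS directory");
            return 1;
        }
        dflags |= FS_NODUMP_FL;                 /* NODUMP is still inherited */
        if (ioctl(dfd, FS_IOC_SETFLAGS, &dflags) < 0) {
            perror("SETFLAGS directory");
            return 1;
        }

        fd = open("flagdir/child", O_CREAT | O_WRONLY, 0644);
        if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &fflags) < 0) {
            perror("create/GETFLAGS child");
            return 1;
        }
        printf("child flags 0x%x, NODUMP %s\n", fflags,
               (fflags & FS_NODUMP_FL) ? "inherited" : "missing");
        close(fd);
        close(dfd);
        return 0;
    }
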
9711 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
9712 index a103732..f51a81b 100644
9713 --- a/include/linux/interrupt.h
9714 +++ b/include/linux/interrupt.h
9715 @@ -59,6 +59,8 @@
9716 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
9717 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
9718 * IRQF_NO_THREAD - Interrupt cannot be threaded
9719 + * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
9720 + * resume time.
9721 */
9722 #define IRQF_DISABLED 0x00000020
9723 #define IRQF_SAMPLE_RANDOM 0x00000040
9724 @@ -72,6 +74,7 @@
9725 #define IRQF_NO_SUSPEND 0x00004000
9726 #define IRQF_FORCE_RESUME 0x00008000
9727 #define IRQF_NO_THREAD 0x00010000
9728 +#define IRQF_EARLY_RESUME 0x00020000
9729
9730 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
9731
9732 diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
9733 index 8cdcc2a1..1feeb52 100644
9734 --- a/include/linux/io-mapping.h
9735 +++ b/include/linux/io-mapping.h
9736 @@ -117,6 +117,8 @@ io_mapping_unmap(void __iomem *vaddr)
9737
9738 #else
9739
9740 +#include <linux/uaccess.h>
9741 +
9742 /* this struct isn't actually defined anywhere */
9743 struct io_mapping;
9744
9745 @@ -138,12 +140,14 @@ static inline void __iomem *
9746 io_mapping_map_atomic_wc(struct io_mapping *mapping,
9747 unsigned long offset)
9748 {
9749 + pagefault_disable();
9750 return ((char __force __iomem *) mapping) + offset;
9751 }
9752
9753 static inline void
9754 io_mapping_unmap_atomic(void __iomem *vaddr)
9755 {
9756 + pagefault_enable();
9757 }
9758
9759 /* Non-atomic map/unmap */
9760 diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
9761 index f97672a..265e2c3 100644
9762 --- a/include/linux/jiffies.h
9763 +++ b/include/linux/jiffies.h
9764 @@ -303,7 +303,7 @@ extern void jiffies_to_timespec(const unsigned long jiffies,
9765 extern unsigned long timeval_to_jiffies(const struct timeval *value);
9766 extern void jiffies_to_timeval(const unsigned long jiffies,
9767 struct timeval *value);
9768 -extern clock_t jiffies_to_clock_t(long x);
9769 +extern clock_t jiffies_to_clock_t(unsigned long x);
9770 extern unsigned long clock_t_to_jiffies(unsigned long x);
9771 extern u64 jiffies_64_to_clock_t(u64 x);
9772 extern u64 nsec_to_clock_t(u64 x);
9773 diff --git a/include/linux/mm.h b/include/linux/mm.h
9774 index 7438071..fedc5f0 100644
9775 --- a/include/linux/mm.h
9776 +++ b/include/linux/mm.h
9777 @@ -356,36 +356,50 @@ static inline struct page *compound_head(struct page *page)
9778 return page;
9779 }
9780
9781 +/*
9782 + * The atomic page->_mapcount starts from -1: so that transitions
9783 + * both from it and to it can be tracked, using atomic_inc_and_test
9784 + * and atomic_add_negative(-1).
9785 + */
9786 +static inline void reset_page_mapcount(struct page *page)
9787 +{
9788 + atomic_set(&(page)->_mapcount, -1);
9789 +}
9790 +
9791 +static inline int page_mapcount(struct page *page)
9792 +{
9793 + return atomic_read(&(page)->_mapcount) + 1;
9794 +}
9795 +
9796 static inline int page_count(struct page *page)
9797 {
9798 return atomic_read(&compound_head(page)->_count);
9799 }
9800
9801 +static inline void get_huge_page_tail(struct page *page)
9802 +{
9803 + /*
9804 + * __split_huge_page_refcount() cannot run
9805 + * from under us.
9806 + */
9807 + VM_BUG_ON(page_mapcount(page) < 0);
9808 + VM_BUG_ON(atomic_read(&page->_count) != 0);
9809 + atomic_inc(&page->_mapcount);
9810 +}
9811 +
9812 +extern bool __get_page_tail(struct page *page);
9813 +
9814 static inline void get_page(struct page *page)
9815 {
9816 + if (unlikely(PageTail(page)))
9817 + if (likely(__get_page_tail(page)))
9818 + return;
9819 /*
9820 * Getting a normal page or the head of a compound page
9821 - * requires to already have an elevated page->_count. Only if
9822 - * we're getting a tail page, the elevated page->_count is
9823 - * required only in the head page, so for tail pages the
9824 - * bugcheck only verifies that the page->_count isn't
9825 - * negative.
9826 + * requires to already have an elevated page->_count.
9827 */
9828 - VM_BUG_ON(atomic_read(&page->_count) < !PageTail(page));
9829 + VM_BUG_ON(atomic_read(&page->_count) <= 0);
9830 atomic_inc(&page->_count);
9831 - /*
9832 - * Getting a tail page will elevate both the head and tail
9833 - * page->_count(s).
9834 - */
9835 - if (unlikely(PageTail(page))) {
9836 - /*
9837 - * This is safe only because
9838 - * __split_huge_page_refcount can't run under
9839 - * get_page().
9840 - */
9841 - VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
9842 - atomic_inc(&page->first_page->_count);
9843 - }
9844 }
9845
9846 static inline struct page *virt_to_head_page(const void *x)
9847 @@ -804,21 +818,6 @@ static inline pgoff_t page_index(struct page *page)
9848 }
9849
9850 /*
9851 - * The atomic page->_mapcount, like _count, starts from -1:
9852 - * so that transitions both from it and to it can be tracked,
9853 - * using atomic_inc_and_test and atomic_add_negative(-1).
9854 - */
9855 -static inline void reset_page_mapcount(struct page *page)
9856 -{
9857 - atomic_set(&(page)->_mapcount, -1);
9858 -}
9859 -
9860 -static inline int page_mapcount(struct page *page)
9861 -{
9862 - return atomic_read(&(page)->_mapcount) + 1;
9863 -}
9864 -
9865 -/*
9866 * Return true if this page is mapped into pagetables.
9867 */
9868 static inline int page_mapped(struct page *page)
9869 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
9870 index 774b895..10a2f62 100644
9871 --- a/include/linux/mm_types.h
9872 +++ b/include/linux/mm_types.h
9873 @@ -62,10 +62,23 @@ struct page {
9874 struct {
9875
9876 union {
9877 - atomic_t _mapcount; /* Count of ptes mapped in mms,
9878 - * to show when page is mapped
9879 - * & limit reverse map searches.
9880 - */
9881 + /*
9882 + * Count of ptes mapped in
9883 + * mms, to show when page is
9884 + * mapped & limit reverse map
9885 + * searches.
9886 + *
9887 + * Used also for tail pages
9888 + * refcounting instead of
9889 + * _count. Tail pages cannot
9890 + * be mapped and keeping the
9891 + * tail page _count zero at
9892 + * all times guarantees
9893 + * get_page_unless_zero() will
9894 + * never succeed on tail
9895 + * pages.
9896 + */
9897 + atomic_t _mapcount;
9898
9899 struct {
9900 unsigned inuse:16;
9901 diff --git a/include/linux/namei.h b/include/linux/namei.h
9902 index 409328d..ffc0213 100644
9903 --- a/include/linux/namei.h
9904 +++ b/include/linux/namei.h
9905 @@ -67,6 +67,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
9906 #define LOOKUP_EMPTY 0x4000
9907
9908 extern int user_path_at(int, const char __user *, unsigned, struct path *);
9909 +extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
9910
9911 #define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path)
9912 #define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path)
9913 diff --git a/include/linux/phy.h b/include/linux/phy.h
9914 index 54fc413..79f337c 100644
9915 --- a/include/linux/phy.h
9916 +++ b/include/linux/phy.h
9917 @@ -420,7 +420,7 @@ struct phy_driver {
9918
9919 /*
9920 * Requests a Tx timestamp for 'skb'. The phy driver promises
9921 - * to deliver it to the socket's error queue as soon as a
9922 + * to deliver it using skb_complete_tx_timestamp() as soon as a
9923 * timestamp becomes available. One of the PTP_CLASS_ values
9924 * is passed in 'type'.
9925 */
9926 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
9927 index 8bd383c..0f96646 100644
9928 --- a/include/linux/skbuff.h
9929 +++ b/include/linux/skbuff.h
9930 @@ -2020,8 +2020,13 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
9931 /**
9932 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
9933 *
9934 + * PHY drivers may accept clones of transmitted packets for
9935 + * timestamping via their phy_driver.txtstamp method. These drivers
9936 + * must call this function to return the skb back to the stack, with
9937 + * or without a timestamp.
9938 + *
9939 * @skb: clone of the the original outgoing packet
9940 - * @hwtstamps: hardware time stamps
9941 + * @hwtstamps: hardware time stamps, may be NULL if not available
9942 *
9943 */
9944 void skb_complete_tx_timestamp(struct sk_buff *skb,
9945 diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
9946 index cf14db9..e4ea430 100644
9947 --- a/include/linux/sunrpc/rpc_pipe_fs.h
9948 +++ b/include/linux/sunrpc/rpc_pipe_fs.h
9949 @@ -44,6 +44,8 @@ RPC_I(struct inode *inode)
9950 return container_of(inode, struct rpc_inode, vfs_inode);
9951 }
9952
9953 +extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *,
9954 + char __user *, size_t);
9955 extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *);
9956
9957 struct rpc_clnt;
9958 diff --git a/include/linux/tty.h b/include/linux/tty.h
9959 index 5f2ede8..d553ea4 100644
9960 --- a/include/linux/tty.h
9961 +++ b/include/linux/tty.h
9962 @@ -473,7 +473,9 @@ extern void proc_clear_tty(struct task_struct *p);
9963 extern struct tty_struct *get_current_tty(void);
9964 extern void tty_default_fops(struct file_operations *fops);
9965 extern struct tty_struct *alloc_tty_struct(void);
9966 -extern int tty_add_file(struct tty_struct *tty, struct file *file);
9967 +extern int tty_alloc_file(struct file *file);
9968 +extern void tty_add_file(struct tty_struct *tty, struct file *file);
9969 +extern void tty_free_file(struct file *file);
9970 extern void free_tty_struct(struct tty_struct *tty);
9971 extern void initialize_tty_struct(struct tty_struct *tty,
9972 struct tty_driver *driver, int idx);
9973 diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
9974 index 0097136..c0ecc5a 100644
9975 --- a/include/linux/usb/hcd.h
9976 +++ b/include/linux/usb/hcd.h
9977 @@ -178,7 +178,7 @@ struct usb_hcd {
9978 * this structure.
9979 */
9980 unsigned long hcd_priv[0]
9981 - __attribute__ ((aligned(sizeof(unsigned long))));
9982 + __attribute__ ((aligned(sizeof(s64))));
9983 };
9984
9985 /* 2.4 does this a bit differently ... */
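
The usb_hcd change pins hcd_priv[] to an 8-byte boundary so the HCD private area stays 64-bit aligned even on 32-bit kernels, where sizeof(unsigned long) is 4. The attribute's effect can be reproduced with an ordinary userspace struct; the struct and member names below are stand-ins, not the kernel's, and the difference only shows up in a 32-bit build:

    /* align_demo.c - tail alignment with aligned(long) vs aligned(s64).
     * Build: gcc -m32 -o align_demo align_demo.c   (the difference shows on 32-bit) */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct hcd_long {                   /* stand-in, not the kernel struct */
        char misc[9];
        unsigned long priv[0] __attribute__((aligned(sizeof(unsigned long))));
    };

    struct hcd_s64 {
        char misc[9];
        unsigned long priv[0] __attribute__((aligned(sizeof(int64_t))));
    };

    int main(void)
    {
        printf("priv offset, aligned(long): %zu\n", offsetof(struct hcd_long, priv));
        printf("priv offset, aligned(s64) : %zu\n", offsetof(struct hcd_s64, priv));
        return 0;
    }

A 32-bit build prints 12 and 16; a 64-bit build prints 16 for both, which is why the old attribute looked harmless there.
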
9986 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
9987 index 9332e52..687fb11 100644
9988 --- a/include/linux/vmalloc.h
9989 +++ b/include/linux/vmalloc.h
9990 @@ -13,6 +13,7 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
9991 #define VM_MAP 0x00000004 /* vmap()ed pages */
9992 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
9993 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
9994 +#define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
9995 /* bits [20..32] reserved for arch specific ioremap internals */
9996
9997 /*
9998 diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
9999 index f76fc00..15e53b1 100644
10000 --- a/kernel/irq/pm.c
10001 +++ b/kernel/irq/pm.c
10002 @@ -9,6 +9,7 @@
10003 #include <linux/irq.h>
10004 #include <linux/module.h>
10005 #include <linux/interrupt.h>
10006 +#include <linux/syscore_ops.h>
10007
10008 #include "internals.h"
10009
10010 @@ -39,25 +40,58 @@ void suspend_device_irqs(void)
10011 }
10012 EXPORT_SYMBOL_GPL(suspend_device_irqs);
10013
10014 -/**
10015 - * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
10016 - *
10017 - * Enable all interrupt lines previously disabled by suspend_device_irqs() that
10018 - * have the IRQS_SUSPENDED flag set.
10019 - */
10020 -void resume_device_irqs(void)
10021 +static void resume_irqs(bool want_early)
10022 {
10023 struct irq_desc *desc;
10024 int irq;
10025
10026 for_each_irq_desc(irq, desc) {
10027 unsigned long flags;
10028 + bool is_early = desc->action &&
10029 + desc->action->flags & IRQF_EARLY_RESUME;
10030 +
10031 + if (is_early != want_early)
10032 + continue;
10033
10034 raw_spin_lock_irqsave(&desc->lock, flags);
10035 __enable_irq(desc, irq, true);
10036 raw_spin_unlock_irqrestore(&desc->lock, flags);
10037 }
10038 }
10039 +
10040 +/**
10041 + * irq_pm_syscore_ops - enable interrupt lines early
10042 + *
10043 + * Enable all interrupt lines with %IRQF_EARLY_RESUME set.
10044 + */
10045 +static void irq_pm_syscore_resume(void)
10046 +{
10047 + resume_irqs(true);
10048 +}
10049 +
10050 +static struct syscore_ops irq_pm_syscore_ops = {
10051 + .resume = irq_pm_syscore_resume,
10052 +};
10053 +
10054 +static int __init irq_pm_init_ops(void)
10055 +{
10056 + register_syscore_ops(&irq_pm_syscore_ops);
10057 + return 0;
10058 +}
10059 +
10060 +device_initcall(irq_pm_init_ops);
10061 +
10062 +/**
10063 + * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
10064 + *
10065 + * Enable all non-%IRQF_EARLY_RESUME interrupt lines previously
10066 + * disabled by suspend_device_irqs() that have the IRQS_SUSPENDED flag
10067 + * set as well as those with %IRQF_FORCE_RESUME.
10068 + */
10069 +void resume_device_irqs(void)
10070 +{
10071 + resume_irqs(false);
10072 +}
10073 EXPORT_SYMBOL_GPL(resume_device_irqs);
10074
10075 /**
10076 diff --git a/kernel/kmod.c b/kernel/kmod.c
10077 index ddc7644..a4bea97 100644
10078 --- a/kernel/kmod.c
10079 +++ b/kernel/kmod.c
10080 @@ -114,10 +114,12 @@ int __request_module(bool wait, const char *fmt, ...)
10081 atomic_inc(&kmod_concurrent);
10082 if (atomic_read(&kmod_concurrent) > max_modprobes) {
10083 /* We may be blaming an innocent here, but unlikely */
10084 - if (kmod_loop_msg++ < 5)
10085 + if (kmod_loop_msg < 5) {
10086 printk(KERN_ERR
10087 "request_module: runaway loop modprobe %s\n",
10088 module_name);
10089 + kmod_loop_msg++;
10090 + }
10091 atomic_dec(&kmod_concurrent);
10092 return -ENOMEM;
10093 }
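
The kmod.c fix turns the "runaway loop modprobe" warning into a genuine print-at-most-five-times limiter: the counter advances only when a message is actually emitted, so it can never creep past the cap and wrap back around. The same pattern in plain C, with fprintf standing in for printk:

    /* loop_msg_demo.c - print a warning at most five times, as in the fixed kmod.c.
     * Build: gcc -o loop_msg_demo loop_msg_demo.c */
    #include <stdio.h>

    static int loop_msg;                /* counts messages actually printed */

    static void warn_runaway(const char *name)
    {
        if (loop_msg < 5) {             /* advance the counter only when printing */
            fprintf(stderr, "request_module: runaway loop modprobe %s\n", name);
            loop_msg++;
        }
    }

    int main(void)
    {
        int i;

        for (i = 0; i < 1000000; i++)
            warn_runaway("demo_mod");
        fprintf(stderr, "printed %d warnings for 1000000 attempts\n", loop_msg);
        return 0;
    }
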
10094 diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
10095 index b6b71ad..d3caa76 100644
10096 --- a/kernel/power/suspend.c
10097 +++ b/kernel/power/suspend.c
10098 @@ -315,7 +315,7 @@ int enter_state(suspend_state_t state)
10099 */
10100 int pm_suspend(suspend_state_t state)
10101 {
10102 - if (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX)
10103 + if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX)
10104 return enter_state(state);
10105 return -EINVAL;
10106 }
10107 diff --git a/kernel/time.c b/kernel/time.c
10108 index 8e8dc6d..d776062 100644
10109 --- a/kernel/time.c
10110 +++ b/kernel/time.c
10111 @@ -575,7 +575,7 @@ EXPORT_SYMBOL(jiffies_to_timeval);
10112 /*
10113 * Convert jiffies/jiffies_64 to clock_t and back.
10114 */
10115 -clock_t jiffies_to_clock_t(long x)
10116 +clock_t jiffies_to_clock_t(unsigned long x)
10117 {
10118 #if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
10119 # if HZ < USER_HZ
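
jiffies_to_clock_t() now takes an unsigned long. With the old signed prototype, a jiffies value with the top bit set (easy to hit once the 32-bit jiffies counter has wrapped) arrives as a negative number and the conversion yields a negative clock_t. The userspace sketch below only illustrates that signed/unsigned difference; HZ and USER_HZ are local stand-ins for the kernel configuration values:

    /* clock_t_sign.c - why jiffies_to_clock_t() wants an unsigned argument.
     * Build: gcc -o clock_t_sign clock_t_sign.c */
    #include <stdio.h>

    #define HZ      1000UL              /* stand-ins for the kernel config values */
    #define USER_HZ 100UL

    static long to_clock_t_signed(long x)                       /* old prototype */
    {
        return x / (long)(HZ / USER_HZ);
    }

    static unsigned long to_clock_t_unsigned(unsigned long x)   /* new prototype */
    {
        return x / (HZ / USER_HZ);
    }

    int main(void)
    {
        unsigned long big = -16UL;      /* a jiffies value with the top bit set */

        printf("signed  : %ld\n", to_clock_t_signed((long)big));
        printf("unsigned: %lu\n", to_clock_t_unsigned(big));
        return 0;
    }
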
10120 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
10121 index e5df02c..17a2d44 100644
10122 --- a/kernel/trace/trace.c
10123 +++ b/kernel/trace/trace.c
10124 @@ -3808,8 +3808,6 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
10125 if (info->read < PAGE_SIZE)
10126 goto read;
10127
10128 - info->read = 0;
10129 -
10130 trace_access_lock(info->cpu);
10131 ret = ring_buffer_read_page(info->tr->buffer,
10132 &info->spare,
10133 @@ -3819,6 +3817,8 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
10134 if (ret < 0)
10135 return 0;
10136
10137 + info->read = 0;
10138 +
10139 read:
10140 size = PAGE_SIZE - info->read;
10141 if (size > count)
10142 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
10143 index 5fb3697..00d527c 100644
10144 --- a/kernel/trace/trace_kprobe.c
10145 +++ b/kernel/trace/trace_kprobe.c
10146 @@ -836,11 +836,17 @@ static void __unregister_trace_probe(struct trace_probe *tp)
10147 }
10148
10149 /* Unregister a trace_probe and probe_event: call with locking probe_lock */
10150 -static void unregister_trace_probe(struct trace_probe *tp)
10151 +static int unregister_trace_probe(struct trace_probe *tp)
10152 {
10153 + /* Enabled event can not be unregistered */
10154 + if (trace_probe_is_enabled(tp))
10155 + return -EBUSY;
10156 +
10157 __unregister_trace_probe(tp);
10158 list_del(&tp->list);
10159 unregister_probe_event(tp);
10160 +
10161 + return 0;
10162 }
10163
10164 /* Register a trace_probe and probe_event */
10165 @@ -854,7 +860,9 @@ static int register_trace_probe(struct trace_probe *tp)
10166 /* Delete old (same name) event if exist */
10167 old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
10168 if (old_tp) {
10169 - unregister_trace_probe(old_tp);
10170 + ret = unregister_trace_probe(old_tp);
10171 + if (ret < 0)
10172 + goto end;
10173 free_trace_probe(old_tp);
10174 }
10175
10176 @@ -892,6 +900,7 @@ static int trace_probe_module_callback(struct notifier_block *nb,
10177 mutex_lock(&probe_lock);
10178 list_for_each_entry(tp, &probe_list, list) {
10179 if (trace_probe_within_module(tp, mod)) {
10180 + /* Don't need to check busy - this should have gone. */
10181 __unregister_trace_probe(tp);
10182 ret = __register_trace_probe(tp);
10183 if (ret)
10184 @@ -1205,10 +1214,11 @@ static int create_trace_probe(int argc, char **argv)
10185 return -ENOENT;
10186 }
10187 /* delete an event */
10188 - unregister_trace_probe(tp);
10189 - free_trace_probe(tp);
10190 + ret = unregister_trace_probe(tp);
10191 + if (ret == 0)
10192 + free_trace_probe(tp);
10193 mutex_unlock(&probe_lock);
10194 - return 0;
10195 + return ret;
10196 }
10197
10198 if (argc < 2) {
10199 @@ -1317,18 +1327,29 @@ error:
10200 return ret;
10201 }
10202
10203 -static void release_all_trace_probes(void)
10204 +static int release_all_trace_probes(void)
10205 {
10206 struct trace_probe *tp;
10207 + int ret = 0;
10208
10209 mutex_lock(&probe_lock);
10210 + /* Ensure no probe is in use. */
10211 + list_for_each_entry(tp, &probe_list, list)
10212 + if (trace_probe_is_enabled(tp)) {
10213 + ret = -EBUSY;
10214 + goto end;
10215 + }
10216 /* TODO: Use batch unregistration */
10217 while (!list_empty(&probe_list)) {
10218 tp = list_entry(probe_list.next, struct trace_probe, list);
10219 unregister_trace_probe(tp);
10220 free_trace_probe(tp);
10221 }
10222 +
10223 +end:
10224 mutex_unlock(&probe_lock);
10225 +
10226 + return ret;
10227 }
10228
10229 /* Probes listing interfaces */
10230 @@ -1380,9 +1401,13 @@ static const struct seq_operations probes_seq_op = {
10231
10232 static int probes_open(struct inode *inode, struct file *file)
10233 {
10234 - if ((file->f_mode & FMODE_WRITE) &&
10235 - (file->f_flags & O_TRUNC))
10236 - release_all_trace_probes();
10237 + int ret;
10238 +
10239 + if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
10240 + ret = release_all_trace_probes();
10241 + if (ret < 0)
10242 + return ret;
10243 + }
10244
10245 return seq_open(file, &probes_seq_op);
10246 }
10247 @@ -2055,6 +2080,21 @@ static __init int kprobe_trace_self_tests_init(void)
10248
10249 ret = target(1, 2, 3, 4, 5, 6);
10250
10251 + /* Disable trace points before removing it */
10252 + tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
10253 + if (WARN_ON_ONCE(tp == NULL)) {
10254 + pr_warning("error on getting test probe.\n");
10255 + warn++;
10256 + } else
10257 + disable_trace_probe(tp, TP_FLAG_TRACE);
10258 +
10259 + tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
10260 + if (WARN_ON_ONCE(tp == NULL)) {
10261 + pr_warning("error on getting 2nd test probe.\n");
10262 + warn++;
10263 + } else
10264 + disable_trace_probe(tp, TP_FLAG_TRACE);
10265 +
10266 ret = command_trace_probe("-:testprobe");
10267 if (WARN_ON_ONCE(ret)) {
10268 pr_warning("error on deleting a probe.\n");
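
With these trace_kprobe changes, deleting a kprobe event (or truncating kprobe_events) fails with EBUSY while the event is still enabled, instead of silently removing an in-use probe. A small exercise of that contract through tracefs; it assumes root, debugfs mounted at /sys/kernel/debug, and the probed symbol (do_sys_open) and event name are only examples.

    /* kprobe_busy_demo.c - an enabled kprobe event must refuse deletion (EBUSY).
     * Build: gcc -o kprobe_busy_demo kprobe_busy_demo.c ; run as root. */
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    #define TRACEFS "/sys/kernel/debug/tracing"

    static int write_str(const char *path, const char *s)
    {
        int fd = open(path, O_WRONLY);
        int ret;

        if (fd < 0)
            return -errno;
        ret = write(fd, s, strlen(s)) < 0 ? -errno : 0;
        close(fd);
        return ret;
    }

    int main(void)
    {
        int ret;

        /* add and enable a probe; symbol and event name are only examples */
        if (write_str(TRACEFS "/kprobe_events", "p:myprobe do_sys_open\n") < 0) {
            perror("add probe");
            return 1;
        }
        write_str(TRACEFS "/events/kprobes/myprobe/enable", "1");

        /* deletion while enabled should now fail with EBUSY */
        ret = write_str(TRACEFS "/kprobe_events", "-:myprobe\n");
        printf("delete while enabled: %s\n",
               ret ? strerror(-ret) : "succeeded (pre-patch behaviour)");

        /* disable it, then the deletion goes through */
        write_str(TRACEFS "/events/kprobes/myprobe/enable", "0");
        if (write_str(TRACEFS "/kprobe_events", "-:myprobe\n") == 0)
            printf("delete after disable: ok\n");
        return 0;
    }
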
10269 diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
10270 index 70af0a7..ad72a03 100644
10271 --- a/lib/kobject_uevent.c
10272 +++ b/lib/kobject_uevent.c
10273 @@ -282,7 +282,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
10274 kobj_bcast_filter,
10275 kobj);
10276 /* ENOBUFS should be handled in userspace */
10277 - if (retval == -ENOBUFS)
10278 + if (retval == -ENOBUFS || retval == -ESRCH)
10279 retval = 0;
10280 } else
10281 retval = -ENOMEM;
10282 diff --git a/lib/nlattr.c b/lib/nlattr.c
10283 index ac09f22..a8408b6 100644
10284 --- a/lib/nlattr.c
10285 +++ b/lib/nlattr.c
10286 @@ -20,6 +20,7 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
10287 [NLA_U16] = sizeof(u16),
10288 [NLA_U32] = sizeof(u32),
10289 [NLA_U64] = sizeof(u64),
10290 + [NLA_MSECS] = sizeof(u64),
10291 [NLA_NESTED] = NLA_HDRLEN,
10292 };
10293
10294 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
10295 index e2d1587..d819d93 100644
10296 --- a/mm/huge_memory.c
10297 +++ b/mm/huge_memory.c
10298 @@ -989,7 +989,7 @@ struct page *follow_trans_huge_pmd(struct mm_struct *mm,
10299 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
10300 VM_BUG_ON(!PageCompound(page));
10301 if (flags & FOLL_GET)
10302 - get_page(page);
10303 + get_page_foll(page);
10304
10305 out:
10306 return page;
10307 @@ -1156,6 +1156,7 @@ static void __split_huge_page_refcount(struct page *page)
10308 unsigned long head_index = page->index;
10309 struct zone *zone = page_zone(page);
10310 int zonestat;
10311 + int tail_count = 0;
10312
10313 /* prevent PageLRU to go away from under us, and freeze lru stats */
10314 spin_lock_irq(&zone->lru_lock);
10315 @@ -1164,11 +1165,27 @@ static void __split_huge_page_refcount(struct page *page)
10316 for (i = 1; i < HPAGE_PMD_NR; i++) {
10317 struct page *page_tail = page + i;
10318
10319 - /* tail_page->_count cannot change */
10320 - atomic_sub(atomic_read(&page_tail->_count), &page->_count);
10321 - BUG_ON(page_count(page) <= 0);
10322 - atomic_add(page_mapcount(page) + 1, &page_tail->_count);
10323 - BUG_ON(atomic_read(&page_tail->_count) <= 0);
10324 + /* tail_page->_mapcount cannot change */
10325 + BUG_ON(page_mapcount(page_tail) < 0);
10326 + tail_count += page_mapcount(page_tail);
10327 + /* check for overflow */
10328 + BUG_ON(tail_count < 0);
10329 + BUG_ON(atomic_read(&page_tail->_count) != 0);
10330 + /*
10331 + * tail_page->_count is zero and not changing from
10332 + * under us. But get_page_unless_zero() may be running
10333 + * from under us on the tail_page. If we used
10334 + * atomic_set() below instead of atomic_add(), we
10335 + * would then run atomic_set() concurrently with
10336 + * get_page_unless_zero(), and atomic_set() is
10337 + * implemented in C not using locked ops. spin_unlock
10338 + * on x86 sometimes uses locked ops because of PPro
10339 + * errata 66, 92, so unless somebody can guarantee
10340 + * atomic_set() here would be safe on all archs (and
10341 + * not only on x86), it's safer to use atomic_add().
10342 + */
10343 + atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
10344 + &page_tail->_count);
10345
10346 /* after clearing PageTail the gup refcount can be released */
10347 smp_mb();
10348 @@ -1186,10 +1203,7 @@ static void __split_huge_page_refcount(struct page *page)
10349 (1L << PG_uptodate)));
10350 page_tail->flags |= (1L << PG_dirty);
10351
10352 - /*
10353 - * 1) clear PageTail before overwriting first_page
10354 - * 2) clear PageTail before clearing PageHead for VM_BUG_ON
10355 - */
10356 + /* clear PageTail before overwriting first_page */
10357 smp_wmb();
10358
10359 /*
10360 @@ -1206,7 +1220,6 @@ static void __split_huge_page_refcount(struct page *page)
10361 * status is achieved setting a reserved bit in the
10362 * pmd, not by clearing the present bit.
10363 */
10364 - BUG_ON(page_mapcount(page_tail));
10365 page_tail->_mapcount = page->_mapcount;
10366
10367 BUG_ON(page_tail->mapping);
10368 @@ -1223,6 +1236,8 @@ static void __split_huge_page_refcount(struct page *page)
10369
10370 lru_add_page_tail(zone, page, page_tail);
10371 }
10372 + atomic_sub(tail_count, &page->_count);
10373 + BUG_ON(atomic_read(&page->_count) <= 0);
10374
10375 __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
10376 __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
10377 diff --git a/mm/internal.h b/mm/internal.h
10378 index d071d38..2189af4 100644
10379 --- a/mm/internal.h
10380 +++ b/mm/internal.h
10381 @@ -37,6 +37,52 @@ static inline void __put_page(struct page *page)
10382 atomic_dec(&page->_count);
10383 }
10384
10385 +static inline void __get_page_tail_foll(struct page *page,
10386 + bool get_page_head)
10387 +{
10388 + /*
10389 + * If we're getting a tail page, the elevated page->_count is
10390 + * required only in the head page and we will elevate the head
10391 + * page->_count and tail page->_mapcount.
10392 + *
10393 + * We elevate page_tail->_mapcount for tail pages to force
10394 + * page_tail->_count to be zero at all times to avoid getting
10395 + * false positives from get_page_unless_zero() with
10396 + * speculative page access (like in
10397 + * page_cache_get_speculative()) on tail pages.
10398 + */
10399 + VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
10400 + VM_BUG_ON(atomic_read(&page->_count) != 0);
10401 + VM_BUG_ON(page_mapcount(page) < 0);
10402 + if (get_page_head)
10403 + atomic_inc(&page->first_page->_count);
10404 + atomic_inc(&page->_mapcount);
10405 +}
10406 +
10407 +/*
10408 + * This is meant to be called as the FOLL_GET operation of
10409 + * follow_page() and it must be called while holding the proper PT
10410 + * lock while the pte (or pmd_trans_huge) is still mapping the page.
10411 + */
10412 +static inline void get_page_foll(struct page *page)
10413 +{
10414 + if (unlikely(PageTail(page)))
10415 + /*
10416 + * This is safe only because
10417 + * __split_huge_page_refcount() can't run under
10418 + * get_page_foll() because we hold the proper PT lock.
10419 + */
10420 + __get_page_tail_foll(page, true);
10421 + else {
10422 + /*
10423 + * Getting a normal page or the head of a compound page
10424 + * requires to already have an elevated page->_count.
10425 + */
10426 + VM_BUG_ON(atomic_read(&page->_count) <= 0);
10427 + atomic_inc(&page->_count);
10428 + }
10429 +}
10430 +
10431 extern unsigned long highest_memmap_pfn;
10432
10433 /*
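
The new __get_page_tail_foll()/get_page_foll() helpers encode one invariant: a tail page's _count stays zero at all times (so get_page_unless_zero() can never pin a tail page by accident), while pins taken through a tail page are accounted in the tail's _mapcount plus the head's _count. Below is a toy userspace model of just that bookkeeping, with C11 atomics standing in for the kernel's atomic_t; it illustrates the invariant and is not kernel code.

    /* tail_refcount_model.c - toy model of compound-page pin accounting.
     * Build: gcc -std=c11 -o tail_refcount_model tail_refcount_model.c */
    #include <assert.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct page {
        atomic_int _count;          /* real references; stays 0 for tail pages */
        atomic_int _mapcount;       /* starts at -1; tail pins are counted here */
        struct page *first_page;    /* tail -> head, NULL for head/normal pages */
    };

    static void model_get_page_foll(struct page *pg)
    {
        if (pg->first_page) {                       /* tail page */
            assert(atomic_load(&pg->_count) == 0);  /* the invariant being enforced */
            atomic_fetch_add(&pg->first_page->_count, 1);
            atomic_fetch_add(&pg->_mapcount, 1);
        } else {                                    /* head or normal page */
            atomic_fetch_add(&pg->_count, 1);
        }
    }

    static void model_put_page(struct page *pg)
    {
        if (pg->first_page) {                       /* simplified release of a tail pin */
            atomic_fetch_sub(&pg->_mapcount, 1);
            atomic_fetch_sub(&pg->first_page->_count, 1);
        } else {
            atomic_fetch_sub(&pg->_count, 1);
        }
    }

    int main(void)
    {
        struct page head = { ATOMIC_VAR_INIT(1), ATOMIC_VAR_INIT(-1), NULL };
        struct page tail = { ATOMIC_VAR_INIT(0), ATOMIC_VAR_INIT(-1), &head };

        model_get_page_foll(&tail);
        model_get_page_foll(&tail);
        printf("after 2 tail pins: head._count=%d tail._count=%d tail pins=%d\n",
               atomic_load(&head._count), atomic_load(&tail._count),
               atomic_load(&tail._mapcount) + 1);

        model_put_page(&tail);
        model_put_page(&tail);
        printf("after release:     head._count=%d tail pins=%d\n",
               atomic_load(&head._count), atomic_load(&tail._mapcount) + 1);
        return 0;
    }
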
10434 diff --git a/mm/memory.c b/mm/memory.c
10435 index a56e3ba..b2b8731 100644
10436 --- a/mm/memory.c
10437 +++ b/mm/memory.c
10438 @@ -1503,7 +1503,7 @@ split_fallthrough:
10439 }
10440
10441 if (flags & FOLL_GET)
10442 - get_page(page);
10443 + get_page_foll(page);
10444 if (flags & FOLL_TOUCH) {
10445 if ((flags & FOLL_WRITE) &&
10446 !pte_dirty(pte) && !PageDirty(page))
10447 diff --git a/mm/swap.c b/mm/swap.c
10448 index 3a442f1..87627f1 100644
10449 --- a/mm/swap.c
10450 +++ b/mm/swap.c
10451 @@ -78,39 +78,22 @@ static void put_compound_page(struct page *page)
10452 {
10453 if (unlikely(PageTail(page))) {
10454 /* __split_huge_page_refcount can run under us */
10455 - struct page *page_head = page->first_page;
10456 - smp_rmb();
10457 - /*
10458 - * If PageTail is still set after smp_rmb() we can be sure
10459 - * that the page->first_page we read wasn't a dangling pointer.
10460 - * See __split_huge_page_refcount() smp_wmb().
10461 - */
10462 - if (likely(PageTail(page) && get_page_unless_zero(page_head))) {
10463 + struct page *page_head = compound_trans_head(page);
10464 +
10465 + if (likely(page != page_head &&
10466 + get_page_unless_zero(page_head))) {
10467 unsigned long flags;
10468 /*
10469 - * Verify that our page_head wasn't converted
10470 - * to a a regular page before we got a
10471 - * reference on it.
10472 + * page_head wasn't a dangling pointer but it
10473 + * may not be a head page anymore by the time
10474 + * we obtain the lock. That is ok as long as it
10475 + * can't be freed from under us.
10476 */
10477 - if (unlikely(!PageHead(page_head))) {
10478 - /* PageHead is cleared after PageTail */
10479 - smp_rmb();
10480 - VM_BUG_ON(PageTail(page));
10481 - goto out_put_head;
10482 - }
10483 - /*
10484 - * Only run compound_lock on a valid PageHead,
10485 - * after having it pinned with
10486 - * get_page_unless_zero() above.
10487 - */
10488 - smp_mb();
10489 - /* page_head wasn't a dangling pointer */
10490 flags = compound_lock_irqsave(page_head);
10491 if (unlikely(!PageTail(page))) {
10492 /* __split_huge_page_refcount run before us */
10493 compound_unlock_irqrestore(page_head, flags);
10494 VM_BUG_ON(PageHead(page_head));
10495 - out_put_head:
10496 if (put_page_testzero(page_head))
10497 __put_single_page(page_head);
10498 out_put_single:
10499 @@ -121,16 +104,17 @@ static void put_compound_page(struct page *page)
10500 VM_BUG_ON(page_head != page->first_page);
10501 /*
10502 * We can release the refcount taken by
10503 - * get_page_unless_zero now that
10504 - * split_huge_page_refcount is blocked on the
10505 - * compound_lock.
10506 + * get_page_unless_zero() now that
10507 + * __split_huge_page_refcount() is blocked on
10508 + * the compound_lock.
10509 */
10510 if (put_page_testzero(page_head))
10511 VM_BUG_ON(1);
10512 /* __split_huge_page_refcount will wait now */
10513 - VM_BUG_ON(atomic_read(&page->_count) <= 0);
10514 - atomic_dec(&page->_count);
10515 + VM_BUG_ON(page_mapcount(page) <= 0);
10516 + atomic_dec(&page->_mapcount);
10517 VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
10518 + VM_BUG_ON(atomic_read(&page->_count) != 0);
10519 compound_unlock_irqrestore(page_head, flags);
10520 if (put_page_testzero(page_head)) {
10521 if (PageHead(page_head))
10522 @@ -160,6 +144,45 @@ void put_page(struct page *page)
10523 }
10524 EXPORT_SYMBOL(put_page);
10525
10526 +/*
10527 + * This function is exported but must not be called by anything other
10528 + * than get_page(). It implements the slow path of get_page().
10529 + */
10530 +bool __get_page_tail(struct page *page)
10531 +{
10532 + /*
10533 + * This takes care of get_page() if run on a tail page
10534 + * returned by one of the get_user_pages/follow_page variants.
10535 + * get_user_pages/follow_page itself doesn't need the compound
10536 + * lock because it runs __get_page_tail_foll() under the
10537 + * proper PT lock that already serializes against
10538 + * split_huge_page().
10539 + */
10540 + unsigned long flags;
10541 + bool got = false;
10542 + struct page *page_head = compound_trans_head(page);
10543 +
10544 + if (likely(page != page_head && get_page_unless_zero(page_head))) {
10545 + /*
10546 + * page_head wasn't a dangling pointer but it
10547 + * may not be a head page anymore by the time
10548 + * we obtain the lock. That is ok as long as it
10549 + * can't be freed from under us.
10550 + */
10551 + flags = compound_lock_irqsave(page_head);
10552 + /* here __split_huge_page_refcount won't run anymore */
10553 + if (likely(PageTail(page))) {
10554 + __get_page_tail_foll(page, false);
10555 + got = true;
10556 + }
10557 + compound_unlock_irqrestore(page_head, flags);
10558 + if (unlikely(!got))
10559 + put_page(page_head);
10560 + }
10561 + return got;
10562 +}
10563 +EXPORT_SYMBOL(__get_page_tail);
10564 +
10565 /**
10566 * put_pages_list() - release a list of pages
10567 * @pages: list of pages threaded on page->lru
10568 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
10569 index 5016f19..56faf31 100644
10570 --- a/mm/vmalloc.c
10571 +++ b/mm/vmalloc.c
10572 @@ -1253,18 +1253,22 @@ EXPORT_SYMBOL_GPL(map_vm_area);
10573 DEFINE_RWLOCK(vmlist_lock);
10574 struct vm_struct *vmlist;
10575
10576 -static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
10577 +static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
10578 unsigned long flags, void *caller)
10579 {
10580 - struct vm_struct *tmp, **p;
10581 -
10582 vm->flags = flags;
10583 vm->addr = (void *)va->va_start;
10584 vm->size = va->va_end - va->va_start;
10585 vm->caller = caller;
10586 va->private = vm;
10587 va->flags |= VM_VM_AREA;
10588 +}
10589 +
10590 +static void insert_vmalloc_vmlist(struct vm_struct *vm)
10591 +{
10592 + struct vm_struct *tmp, **p;
10593
10594 + vm->flags &= ~VM_UNLIST;
10595 write_lock(&vmlist_lock);
10596 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
10597 if (tmp->addr >= vm->addr)
10598 @@ -1275,6 +1279,13 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
10599 write_unlock(&vmlist_lock);
10600 }
10601
10602 +static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
10603 + unsigned long flags, void *caller)
10604 +{
10605 + setup_vmalloc_vm(vm, va, flags, caller);
10606 + insert_vmalloc_vmlist(vm);
10607 +}
10608 +
10609 static struct vm_struct *__get_vm_area_node(unsigned long size,
10610 unsigned long align, unsigned long flags, unsigned long start,
10611 unsigned long end, int node, gfp_t gfp_mask, void *caller)
10612 @@ -1313,7 +1324,18 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
10613 return NULL;
10614 }
10615
10616 - insert_vmalloc_vm(area, va, flags, caller);
10617 + /*
10618 + * When this function is called from __vmalloc_node_range,
10619 + * we do not add vm_struct to vmlist here to avoid
10620 + * accessing uninitialized members of vm_struct such as
10621 + * pages and nr_pages fields. They will be set later.
10622 + * To distinguish it from others, we use a VM_UNLIST flag.
10623 + */
10624 + if (flags & VM_UNLIST)
10625 + setup_vmalloc_vm(area, va, flags, caller);
10626 + else
10627 + insert_vmalloc_vm(area, va, flags, caller);
10628 +
10629 return area;
10630 }
10631
10632 @@ -1381,17 +1403,20 @@ struct vm_struct *remove_vm_area(const void *addr)
10633 va = find_vmap_area((unsigned long)addr);
10634 if (va && va->flags & VM_VM_AREA) {
10635 struct vm_struct *vm = va->private;
10636 - struct vm_struct *tmp, **p;
10637 - /*
10638 - * remove from list and disallow access to this vm_struct
10639 - * before unmap. (address range confliction is maintained by
10640 - * vmap.)
10641 - */
10642 - write_lock(&vmlist_lock);
10643 - for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
10644 - ;
10645 - *p = tmp->next;
10646 - write_unlock(&vmlist_lock);
10647 +
10648 + if (!(vm->flags & VM_UNLIST)) {
10649 + struct vm_struct *tmp, **p;
10650 + /*
10651 + * remove from list and disallow access to
10652 + * this vm_struct before unmap. (address range
10653 + * confliction is maintained by vmap.)
10654 + */
10655 + write_lock(&vmlist_lock);
10656 + for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
10657 + ;
10658 + *p = tmp->next;
10659 + write_unlock(&vmlist_lock);
10660 + }
10661
10662 vmap_debug_free_range(va->va_start, va->va_end);
10663 free_unmap_vmap_area(va);
10664 @@ -1602,8 +1627,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
10665 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
10666 return NULL;
10667
10668 - area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
10669 - gfp_mask, caller);
10670 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
10671 + start, end, node, gfp_mask, caller);
10672
10673 if (!area)
10674 return NULL;
10675 @@ -1611,6 +1636,12 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
10676 addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
10677
10678 /*
10679 + * In this function, newly allocated vm_struct is not added
10680 + * to vmlist at __get_vm_area_node(). so, it is added here.
10681 + */
10682 + insert_vmalloc_vmlist(area);
10683 +
10684 + /*
10685 * A ref_count = 3 is needed because the vm_struct and vmap_area
10686 * structures allocated in the __get_vm_area_node() function contain
10687 * references to the virtual address of the vmalloc'ed block.
10688 diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
10689 index 7f9ac07..47fc8f3 100644
10690 --- a/net/caif/caif_dev.c
10691 +++ b/net/caif/caif_dev.c
10692 @@ -212,8 +212,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
10693 enum cfcnfg_phy_preference pref;
10694 enum cfcnfg_phy_type phy_type;
10695 struct cfcnfg *cfg;
10696 - struct caif_device_entry_list *caifdevs =
10697 - caif_device_list(dev_net(dev));
10698 + struct caif_device_entry_list *caifdevs;
10699
10700 if (dev->type != ARPHRD_CAIF)
10701 return 0;
10702 @@ -222,6 +221,8 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
10703 if (cfg == NULL)
10704 return 0;
10705
10706 + caifdevs = caif_device_list(dev_net(dev));
10707 +
10708 switch (what) {
10709 case NETDEV_REGISTER:
10710 caifd = caif_device_alloc(dev);
10711 diff --git a/net/core/dev.c b/net/core/dev.c
10712 index b10ff0a..ae5cf2d 100644
10713 --- a/net/core/dev.c
10714 +++ b/net/core/dev.c
10715 @@ -6115,6 +6115,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
10716 */
10717 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10718 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
10719 + rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
10720
10721 /*
10722 * Flush the unicast and multicast chains
10723 diff --git a/net/core/sock.c b/net/core/sock.c
10724 index bc745d0..11d67b3 100644
10725 --- a/net/core/sock.c
10726 +++ b/net/core/sock.c
10727 @@ -1260,6 +1260,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
10728 /* It is still raw copy of parent, so invalidate
10729 * destructor and make plain sk_free() */
10730 newsk->sk_destruct = NULL;
10731 + bh_unlock_sock(newsk);
10732 sk_free(newsk);
10733 newsk = NULL;
10734 goto out;
10735 diff --git a/net/core/timestamping.c b/net/core/timestamping.c
10736 index 98a5264..82fb288 100644
10737 --- a/net/core/timestamping.c
10738 +++ b/net/core/timestamping.c
10739 @@ -57,9 +57,13 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
10740 case PTP_CLASS_V2_VLAN:
10741 phydev = skb->dev->phydev;
10742 if (likely(phydev->drv->txtstamp)) {
10743 + if (!atomic_inc_not_zero(&sk->sk_refcnt))
10744 + return;
10745 clone = skb_clone(skb, GFP_ATOMIC);
10746 - if (!clone)
10747 + if (!clone) {
10748 + sock_put(sk);
10749 return;
10750 + }
10751 clone->sk = sk;
10752 phydev->drv->txtstamp(phydev, clone, type);
10753 }
10754 @@ -77,8 +81,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
10755 struct sock_exterr_skb *serr;
10756 int err;
10757
10758 - if (!hwtstamps)
10759 + if (!hwtstamps) {
10760 + sock_put(sk);
10761 + kfree_skb(skb);
10762 return;
10763 + }
10764
10765 *skb_hwtstamps(skb) = *hwtstamps;
10766 serr = SKB_EXT_ERR(skb);
10767 @@ -87,6 +94,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
10768 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
10769 skb->sk = NULL;
10770 err = sock_queue_err_skb(sk, skb);
10771 + sock_put(sk);
10772 if (err)
10773 kfree_skb(skb);
10774 }
10775 diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
10776 index fc5368a..a0b4c5d 100644
10777 --- a/net/ipv4/xfrm4_policy.c
10778 +++ b/net/ipv4/xfrm4_policy.c
10779 @@ -79,13 +79,13 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
10780 struct rtable *rt = (struct rtable *)xdst->route;
10781 const struct flowi4 *fl4 = &fl->u.ip4;
10782
10783 - rt->rt_key_dst = fl4->daddr;
10784 - rt->rt_key_src = fl4->saddr;
10785 - rt->rt_key_tos = fl4->flowi4_tos;
10786 - rt->rt_route_iif = fl4->flowi4_iif;
10787 - rt->rt_iif = fl4->flowi4_iif;
10788 - rt->rt_oif = fl4->flowi4_oif;
10789 - rt->rt_mark = fl4->flowi4_mark;
10790 + xdst->u.rt.rt_key_dst = fl4->daddr;
10791 + xdst->u.rt.rt_key_src = fl4->saddr;
10792 + xdst->u.rt.rt_key_tos = fl4->flowi4_tos;
10793 + xdst->u.rt.rt_route_iif = fl4->flowi4_iif;
10794 + xdst->u.rt.rt_iif = fl4->flowi4_iif;
10795 + xdst->u.rt.rt_oif = fl4->flowi4_oif;
10796 + xdst->u.rt.rt_mark = fl4->flowi4_mark;
10797
10798 xdst->u.dst.dev = dev;
10799 dev_hold(dev);
10800 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
10801 index fb545ed..57b82dc 100644
10802 --- a/net/ipv6/route.c
10803 +++ b/net/ipv6/route.c
10804 @@ -1086,11 +1086,10 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
10805 rt->dst.output = ip6_output;
10806 dst_set_neighbour(&rt->dst, neigh);
10807 atomic_set(&rt->dst.__refcnt, 1);
10808 - dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
10809 -
10810 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
10811 rt->rt6i_dst.plen = 128;
10812 rt->rt6i_idev = idev;
10813 + dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
10814
10815 spin_lock_bh(&icmp6_dst_lock);
10816 rt->dst.next = icmp6_dst_gc_list;
10817 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
10818 index 3d1b091..10c95be 100644
10819 --- a/net/mac80211/cfg.c
10820 +++ b/net/mac80211/cfg.c
10821 @@ -1821,7 +1821,7 @@ ieee80211_offchan_tx_done(struct ieee80211_work *wk, struct sk_buff *skb)
10822 * so in that case userspace will have to deal with it.
10823 */
10824
10825 - if (wk->offchan_tx.wait && wk->offchan_tx.frame)
10826 + if (wk->offchan_tx.wait && !wk->offchan_tx.status)
10827 cfg80211_mgmt_tx_status(wk->sdata->dev,
10828 (unsigned long) wk->offchan_tx.frame,
10829 wk->ie, wk->ie_len, false, GFP_KERNEL);
10830 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
10831 index 400c09b..9fab144 100644
10832 --- a/net/mac80211/ieee80211_i.h
10833 +++ b/net/mac80211/ieee80211_i.h
10834 @@ -345,6 +345,7 @@ struct ieee80211_work {
10835 struct {
10836 struct sk_buff *frame;
10837 u32 wait;
10838 + bool status;
10839 } offchan_tx;
10840 };
10841
10842 @@ -389,6 +390,7 @@ struct ieee80211_if_managed {
10843
10844 unsigned long timers_running; /* used for quiesce/restart */
10845 bool powersave; /* powersave requested for this iface */
10846 + bool broken_ap; /* AP is broken -- turn off powersave */
10847 enum ieee80211_smps_mode req_smps, /* requested smps mode */
10848 ap_smps, /* smps mode AP thinks we're in */
10849 driver_smps_mode; /* smps mode request */
10850 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
10851 index d6470c7..0f48368 100644
10852 --- a/net/mac80211/mlme.c
10853 +++ b/net/mac80211/mlme.c
10854 @@ -613,6 +613,9 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
10855 if (!mgd->powersave)
10856 return false;
10857
10858 + if (mgd->broken_ap)
10859 + return false;
10860 +
10861 if (!mgd->associated)
10862 return false;
10863
10864 @@ -1467,10 +1470,21 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
10865 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
10866
10867 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
10868 - printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not "
10869 - "set\n", sdata->name, aid);
10870 + printk(KERN_DEBUG
10871 + "%s: invalid AID value 0x%x; bits 15:14 not set\n",
10872 + sdata->name, aid);
10873 aid &= ~(BIT(15) | BIT(14));
10874
10875 + ifmgd->broken_ap = false;
10876 +
10877 + if (aid == 0 || aid > IEEE80211_MAX_AID) {
10878 + printk(KERN_DEBUG
10879 + "%s: invalid AID value %d (out of range), turn off PS\n",
10880 + sdata->name, aid);
10881 + aid = 0;
10882 + ifmgd->broken_ap = true;
10883 + }
10884 +
10885 pos = mgmt->u.assoc_resp.variable;
10886 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
10887
10888 diff --git a/net/mac80211/status.c b/net/mac80211/status.c
10889 index 1658efa..04cdbaf 100644
10890 --- a/net/mac80211/status.c
10891 +++ b/net/mac80211/status.c
10892 @@ -336,7 +336,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
10893 continue;
10894 if (wk->offchan_tx.frame != skb)
10895 continue;
10896 - wk->offchan_tx.frame = NULL;
10897 + wk->offchan_tx.status = true;
10898 break;
10899 }
10900 rcu_read_unlock();
10901 diff --git a/net/mac80211/work.c b/net/mac80211/work.c
10902 index 380b9a7..7737f20 100644
10903 --- a/net/mac80211/work.c
10904 +++ b/net/mac80211/work.c
10905 @@ -579,7 +579,7 @@ ieee80211_offchannel_tx(struct ieee80211_work *wk)
10906 /*
10907 * After this, offchan_tx.frame remains but now is no
10908 * longer a valid pointer -- we still need it as the
10909 - * cookie for canceling this work.
10910 + * cookie for canceling this work/status matching.
10911 */
10912 ieee80211_tx_skb(wk->sdata, wk->offchan_tx.frame);
10913
10914 @@ -1086,14 +1086,13 @@ static void ieee80211_work_work(struct work_struct *work)
10915 continue;
10916 if (wk->chan != local->tmp_channel)
10917 continue;
10918 - if (ieee80211_work_ct_coexists(wk->chan_type,
10919 - local->tmp_channel_type))
10920 + if (!ieee80211_work_ct_coexists(wk->chan_type,
10921 + local->tmp_channel_type))
10922 continue;
10923 remain_off_channel = true;
10924 }
10925
10926 if (!remain_off_channel && local->tmp_channel) {
10927 - bool on_oper_chan = ieee80211_cfg_on_oper_channel(local);
10928 local->tmp_channel = NULL;
10929 /* If tmp_channel wasn't operating channel, then
10930 * we need to go back on-channel.
10931 @@ -1103,7 +1102,7 @@ static void ieee80211_work_work(struct work_struct *work)
10932 * we still need to do a hardware config. Currently,
10933 * we cannot be here while scanning, however.
10934 */
10935 - if (ieee80211_cfg_on_oper_channel(local) && !on_oper_chan)
10936 + if (!ieee80211_cfg_on_oper_channel(local))
10937 ieee80211_hw_config(local, 0);
10938
10939 /* At the least, we need to disable offchannel_ps,
10940 diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
10941 index 364eb45..e9b7693 100644
10942 --- a/net/sunrpc/auth_gss/auth_gss.c
10943 +++ b/net/sunrpc/auth_gss/auth_gss.c
10944 @@ -603,26 +603,6 @@ out:
10945 return err;
10946 }
10947
10948 -static ssize_t
10949 -gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
10950 - char __user *dst, size_t buflen)
10951 -{
10952 - char *data = (char *)msg->data + msg->copied;
10953 - size_t mlen = min(msg->len, buflen);
10954 - unsigned long left;
10955 -
10956 - left = copy_to_user(dst, data, mlen);
10957 - if (left == mlen) {
10958 - msg->errno = -EFAULT;
10959 - return -EFAULT;
10960 - }
10961 -
10962 - mlen -= left;
10963 - msg->copied += mlen;
10964 - msg->errno = 0;
10965 - return mlen;
10966 -}
10967 -
10968 #define MSG_BUF_MAXSIZE 1024
10969
10970 static ssize_t
10971 @@ -1590,7 +1570,7 @@ static const struct rpc_credops gss_nullops = {
10972 };
10973
10974 static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
10975 - .upcall = gss_pipe_upcall,
10976 + .upcall = rpc_pipe_generic_upcall,
10977 .downcall = gss_pipe_downcall,
10978 .destroy_msg = gss_pipe_destroy_msg,
10979 .open_pipe = gss_pipe_open_v0,
10980 @@ -1598,7 +1578,7 @@ static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
10981 };
10982
10983 static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
10984 - .upcall = gss_pipe_upcall,
10985 + .upcall = rpc_pipe_generic_upcall,
10986 .downcall = gss_pipe_downcall,
10987 .destroy_msg = gss_pipe_destroy_msg,
10988 .open_pipe = gss_pipe_open_v1,
10989 diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
10990 index 4cb70dc..e50502d 100644
10991 --- a/net/sunrpc/auth_unix.c
10992 +++ b/net/sunrpc/auth_unix.c
10993 @@ -129,6 +129,9 @@ unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags)
10994 for (i = 0; i < groups ; i++)
10995 if (cred->uc_gids[i] != GROUP_AT(acred->group_info, i))
10996 return 0;
10997 + if (groups < NFS_NGROUPS &&
10998 + cred->uc_gids[groups] != NOGROUP)
10999 + return 0;
11000 return 1;
11001 }
11002
11003 diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
11004 index b181e34..67dbc18 100644
11005 --- a/net/sunrpc/rpc_pipe.c
11006 +++ b/net/sunrpc/rpc_pipe.c
11007 @@ -77,6 +77,26 @@ rpc_timeout_upcall_queue(struct work_struct *work)
11008 rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
11009 }
11010
11011 +ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg,
11012 + char __user *dst, size_t buflen)
11013 +{
11014 + char *data = (char *)msg->data + msg->copied;
11015 + size_t mlen = min(msg->len - msg->copied, buflen);
11016 + unsigned long left;
11017 +
11018 + left = copy_to_user(dst, data, mlen);
11019 + if (left == mlen) {
11020 + msg->errno = -EFAULT;
11021 + return -EFAULT;
11022 + }
11023 +
11024 + mlen -= left;
11025 + msg->copied += mlen;
11026 + msg->errno = 0;
11027 + return mlen;
11028 +}
11029 +EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall);
11030 +
11031 /**
11032 * rpc_queue_upcall - queue an upcall message to userspace
11033 * @inode: inode of upcall pipe on which to queue given message
11034 diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
11035 index d660086..beeb92e 100644
11036 --- a/scripts/kconfig/menu.c
11037 +++ b/scripts/kconfig/menu.c
11038 @@ -597,11 +597,10 @@ void menu_get_ext_help(struct menu *menu, struct gstr *help)
11039 struct symbol *sym = menu->sym;
11040
11041 if (menu_has_help(menu)) {
11042 - if (sym->name) {
11043 + if (sym->name)
11044 str_printf(help, "%s%s:\n\n", CONFIG_, sym->name);
11045 - str_append(help, _(menu_get_help(menu)));
11046 - str_append(help, "\n");
11047 - }
11048 + str_append(help, _(menu_get_help(menu)));
11049 + str_append(help, "\n");
11050 } else {
11051 str_append(help, nohelp_text);
11052 }
11053 diff --git a/sound/pci/hda/alc269_quirks.c b/sound/pci/hda/alc269_quirks.c
11054 index 5ac0e21..9aeeb32 100644
11055 --- a/sound/pci/hda/alc269_quirks.c
11056 +++ b/sound/pci/hda/alc269_quirks.c
11057 @@ -577,6 +577,9 @@ static const struct alc_config_preset alc269_presets[] = {
11058 alc269_laptop_amic_init_verbs },
11059 .num_dacs = ARRAY_SIZE(alc269_dac_nids),
11060 .dac_nids = alc269_dac_nids,
11061 + .adc_nids = alc269_adc_nids,
11062 + .capsrc_nids = alc269_capsrc_nids,
11063 + .num_adc_nids = ARRAY_SIZE(alc269_adc_nids),
11064 .hp_nid = 0x03,
11065 .num_channel_mode = ARRAY_SIZE(alc269_modes),
11066 .channel_mode = alc269_modes,
11067 @@ -591,6 +594,9 @@ static const struct alc_config_preset alc269_presets[] = {
11068 alc269_laptop_dmic_init_verbs },
11069 .num_dacs = ARRAY_SIZE(alc269_dac_nids),
11070 .dac_nids = alc269_dac_nids,
11071 + .adc_nids = alc269_adc_nids,
11072 + .capsrc_nids = alc269_capsrc_nids,
11073 + .num_adc_nids = ARRAY_SIZE(alc269_adc_nids),
11074 .hp_nid = 0x03,
11075 .num_channel_mode = ARRAY_SIZE(alc269_modes),
11076 .channel_mode = alc269_modes,
11077 @@ -605,6 +611,9 @@ static const struct alc_config_preset alc269_presets[] = {
11078 alc269vb_laptop_amic_init_verbs },
11079 .num_dacs = ARRAY_SIZE(alc269_dac_nids),
11080 .dac_nids = alc269_dac_nids,
11081 + .adc_nids = alc269vb_adc_nids,
11082 + .capsrc_nids = alc269vb_capsrc_nids,
11083 + .num_adc_nids = ARRAY_SIZE(alc269vb_adc_nids),
11084 .hp_nid = 0x03,
11085 .num_channel_mode = ARRAY_SIZE(alc269_modes),
11086 .channel_mode = alc269_modes,
11087 @@ -619,6 +628,9 @@ static const struct alc_config_preset alc269_presets[] = {
11088 alc269vb_laptop_dmic_init_verbs },
11089 .num_dacs = ARRAY_SIZE(alc269_dac_nids),
11090 .dac_nids = alc269_dac_nids,
11091 + .adc_nids = alc269vb_adc_nids,
11092 + .capsrc_nids = alc269vb_capsrc_nids,
11093 + .num_adc_nids = ARRAY_SIZE(alc269vb_adc_nids),
11094 .hp_nid = 0x03,
11095 .num_channel_mode = ARRAY_SIZE(alc269_modes),
11096 .channel_mode = alc269_modes,
11097 @@ -633,6 +645,8 @@ static const struct alc_config_preset alc269_presets[] = {
11098 alc269_laptop_dmic_init_verbs },
11099 .num_dacs = ARRAY_SIZE(alc269_dac_nids),
11100 .dac_nids = alc269_dac_nids,
11101 + .adc_nids = alc269_adc_nids,
11102 + .capsrc_nids = alc269_capsrc_nids,
11103 .hp_nid = 0x03,
11104 .num_channel_mode = ARRAY_SIZE(alc269_modes),
11105 .channel_mode = alc269_modes,
11106 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
11107 index 191284a..f665975 100644
11108 --- a/sound/pci/hda/hda_intel.c
11109 +++ b/sound/pci/hda/hda_intel.c
11110 @@ -2912,12 +2912,12 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
11111 .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
11112 .class_mask = 0xffffff,
11113 .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
11114 - AZX_DCAPS_RIRB_PRE_DELAY },
11115 + AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
11116 #else
11117 /* this entry seems still valid -- i.e. without emu20kx chip */
11118 { PCI_DEVICE(0x1102, 0x0009),
11119 .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
11120 - AZX_DCAPS_RIRB_PRE_DELAY },
11121 + AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
11122 #endif
11123 /* Vortex86MX */
11124 { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC },
11125 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
11126 index 76752d8e..41fecc1 100644
11127 --- a/sound/pci/hda/patch_conexant.c
11128 +++ b/sound/pci/hda/patch_conexant.c
11129 @@ -136,6 +136,8 @@ struct conexant_spec {
11130 unsigned int thinkpad:1;
11131 unsigned int hp_laptop:1;
11132 unsigned int asus:1;
11133 + unsigned int pin_eapd_ctrls:1;
11134 + unsigned int single_adc_amp:1;
11135
11136 unsigned int adc_switching:1;
11137
11138 @@ -3473,12 +3475,14 @@ static void cx_auto_turn_eapd(struct hda_codec *codec, int num_pins,
11139 static void do_automute(struct hda_codec *codec, int num_pins,
11140 hda_nid_t *pins, bool on)
11141 {
11142 + struct conexant_spec *spec = codec->spec;
11143 int i;
11144 for (i = 0; i < num_pins; i++)
11145 snd_hda_codec_write(codec, pins[i], 0,
11146 AC_VERB_SET_PIN_WIDGET_CONTROL,
11147 on ? PIN_OUT : 0);
11148 - cx_auto_turn_eapd(codec, num_pins, pins, on);
11149 + if (spec->pin_eapd_ctrls)
11150 + cx_auto_turn_eapd(codec, num_pins, pins, on);
11151 }
11152
11153 static int detect_jacks(struct hda_codec *codec, int num_pins, hda_nid_t *pins)
11154 @@ -3503,9 +3507,12 @@ static void cx_auto_update_speakers(struct hda_codec *codec)
11155 int on = 1;
11156
11157 /* turn on HP EAPD when HP jacks are present */
11158 - if (spec->auto_mute)
11159 - on = spec->hp_present;
11160 - cx_auto_turn_eapd(codec, cfg->hp_outs, cfg->hp_pins, on);
11161 + if (spec->pin_eapd_ctrls) {
11162 + if (spec->auto_mute)
11163 + on = spec->hp_present;
11164 + cx_auto_turn_eapd(codec, cfg->hp_outs, cfg->hp_pins, on);
11165 + }
11166 +
11167 /* mute speakers in auto-mode if HP or LO jacks are plugged */
11168 if (spec->auto_mute)
11169 on = !(spec->hp_present ||
11170 @@ -3932,20 +3939,10 @@ static void cx_auto_parse_beep(struct hda_codec *codec)
11171 #define cx_auto_parse_beep(codec)
11172 #endif
11173
11174 -static bool found_in_nid_list(hda_nid_t nid, const hda_nid_t *list, int nums)
11175 -{
11176 - int i;
11177 - for (i = 0; i < nums; i++)
11178 - if (list[i] == nid)
11179 - return true;
11180 - return false;
11181 -}
11182 -
11183 -/* parse extra-EAPD that aren't assigned to any pins */
11184 +/* parse EAPDs */
11185 static void cx_auto_parse_eapd(struct hda_codec *codec)
11186 {
11187 struct conexant_spec *spec = codec->spec;
11188 - struct auto_pin_cfg *cfg = &spec->autocfg;
11189 hda_nid_t nid, end_nid;
11190
11191 end_nid = codec->start_nid + codec->num_nodes;
11192 @@ -3954,14 +3951,18 @@ static void cx_auto_parse_eapd(struct hda_codec *codec)
11193 continue;
11194 if (!(snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_EAPD))
11195 continue;
11196 - if (found_in_nid_list(nid, cfg->line_out_pins, cfg->line_outs) ||
11197 - found_in_nid_list(nid, cfg->hp_pins, cfg->hp_outs) ||
11198 - found_in_nid_list(nid, cfg->speaker_pins, cfg->speaker_outs))
11199 - continue;
11200 spec->eapds[spec->num_eapds++] = nid;
11201 if (spec->num_eapds >= ARRAY_SIZE(spec->eapds))
11202 break;
11203 }
11204 +
11205 + /* NOTE: below is a wild guess; if we have more than two EAPDs,
11206 + * it's a new chip, where EAPDs are supposed to be associated to
11207 + * pins, and we can control EAPD per pin.
11208 + * OTOH, if only one or two EAPDs are found, it's an old chip,
11209 + * thus it might control over all pins.
11210 + */
11211 + spec->pin_eapd_ctrls = spec->num_eapds > 2;
11212 }
11213
11214 static int cx_auto_parse_auto_config(struct hda_codec *codec)
11215 @@ -4067,8 +4068,9 @@ static void cx_auto_init_output(struct hda_codec *codec)
11216 }
11217 }
11218 cx_auto_update_speakers(codec);
11219 - /* turn on/off extra EAPDs, too */
11220 - cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, true);
11221 + /* turn on all EAPDs if no individual EAPD control is available */
11222 + if (!spec->pin_eapd_ctrls)
11223 + cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, true);
11224 }
11225
11226 static void cx_auto_init_input(struct hda_codec *codec)
11227 @@ -4255,6 +4257,8 @@ static int cx_auto_add_capture_volume(struct hda_codec *codec, hda_nid_t nid,
11228 int idx = get_input_connection(codec, adc_nid, nid);
11229 if (idx < 0)
11230 continue;
11231 + if (spec->single_adc_amp)
11232 + idx = 0;
11233 return cx_auto_add_volume_idx(codec, label, pfx,
11234 cidx, adc_nid, HDA_INPUT, idx);
11235 }
11236 @@ -4295,14 +4299,21 @@ static int cx_auto_build_input_controls(struct hda_codec *codec)
11237 struct hda_input_mux *imux = &spec->private_imux;
11238 const char *prev_label;
11239 int input_conn[HDA_MAX_NUM_INPUTS];
11240 - int i, err, cidx;
11241 + int i, j, err, cidx;
11242 int multi_connection;
11243
11244 + if (!imux->num_items)
11245 + return 0;
11246 +
11247 multi_connection = 0;
11248 for (i = 0; i < imux->num_items; i++) {
11249 cidx = get_input_connection(codec, spec->imux_info[i].adc,
11250 spec->imux_info[i].pin);
11251 - input_conn[i] = (spec->imux_info[i].adc << 8) | cidx;
11252 + if (cidx < 0)
11253 + continue;
11254 + input_conn[i] = spec->imux_info[i].adc;
11255 + if (!spec->single_adc_amp)
11256 + input_conn[i] |= cidx << 8;
11257 if (i > 0 && input_conn[i] != input_conn[0])
11258 multi_connection = 1;
11259 }
11260 @@ -4331,6 +4342,15 @@ static int cx_auto_build_input_controls(struct hda_codec *codec)
11261 err = cx_auto_add_capture_volume(codec, nid,
11262 "Capture", "", cidx);
11263 } else {
11264 + bool dup_found = false;
11265 + for (j = 0; j < i; j++) {
11266 + if (input_conn[j] == input_conn[i]) {
11267 + dup_found = true;
11268 + break;
11269 + }
11270 + }
11271 + if (dup_found)
11272 + continue;
11273 err = cx_auto_add_capture_volume(codec, nid,
11274 label, " Capture", cidx);
11275 }
11276 @@ -4407,6 +4427,13 @@ static int patch_conexant_auto(struct hda_codec *codec)
11277 return -ENOMEM;
11278 codec->spec = spec;
11279 codec->pin_amp_workaround = 1;
11280 +
11281 + switch (codec->vendor_id) {
11282 + case 0x14f15045:
11283 + spec->single_adc_amp = 1;
11284 + break;
11285 + }
11286 +
11287 err = cx_auto_search_adcs(codec);
11288 if (err < 0)
11289 return err;
11290 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
11291 index 7a73621..be79c9f 100644
11292 --- a/sound/pci/hda/patch_realtek.c
11293 +++ b/sound/pci/hda/patch_realtek.c
11294 @@ -1566,27 +1566,29 @@ static void alc_auto_init_digital(struct hda_codec *codec)
11295 static void alc_auto_parse_digital(struct hda_codec *codec)
11296 {
11297 struct alc_spec *spec = codec->spec;
11298 - int i, err;
11299 + int i, err, nums;
11300 hda_nid_t dig_nid;
11301
11302 /* support multiple SPDIFs; the secondary is set up as a slave */
11303 + nums = 0;
11304 for (i = 0; i < spec->autocfg.dig_outs; i++) {
11305 hda_nid_t conn[4];
11306 err = snd_hda_get_connections(codec,
11307 spec->autocfg.dig_out_pins[i],
11308 conn, ARRAY_SIZE(conn));
11309 - if (err < 0)
11310 + if (err <= 0)
11311 continue;
11312 dig_nid = conn[0]; /* assume the first element is audio-out */
11313 - if (!i) {
11314 + if (!nums) {
11315 spec->multiout.dig_out_nid = dig_nid;
11316 spec->dig_out_type = spec->autocfg.dig_out_type[0];
11317 } else {
11318 spec->multiout.slave_dig_outs = spec->slave_dig_outs;
11319 - if (i >= ARRAY_SIZE(spec->slave_dig_outs) - 1)
11320 + if (nums >= ARRAY_SIZE(spec->slave_dig_outs) - 1)
11321 break;
11322 - spec->slave_dig_outs[i - 1] = dig_nid;
11323 + spec->slave_dig_outs[nums - 1] = dig_nid;
11324 }
11325 + nums++;
11326 }
11327
11328 if (spec->autocfg.dig_in_pin) {
11329 @@ -2232,6 +2234,7 @@ static int alc_build_pcms(struct hda_codec *codec)
11330 struct alc_spec *spec = codec->spec;
11331 struct hda_pcm *info = spec->pcm_rec;
11332 const struct hda_pcm_stream *p;
11333 + bool have_multi_adcs;
11334 int i;
11335
11336 codec->num_pcms = 1;
11337 @@ -2310,8 +2313,11 @@ static int alc_build_pcms(struct hda_codec *codec)
11338 /* If the use of more than one ADC is requested for the current
11339 * model, configure a second analog capture-only PCM.
11340 */
11341 + have_multi_adcs = (spec->num_adc_nids > 1) &&
11342 + !spec->dyn_adc_switch && !spec->auto_mic &&
11343 + (!spec->input_mux || spec->input_mux->num_items > 1);
11344 /* Additional Analaog capture for index #2 */
11345 - if (spec->alt_dac_nid || spec->num_adc_nids > 1) {
11346 + if (spec->alt_dac_nid || have_multi_adcs) {
11347 codec->num_pcms = 3;
11348 info = spec->pcm_rec + 2;
11349 info->name = spec->stream_name_analog;
11350 @@ -2327,7 +2333,7 @@ static int alc_build_pcms(struct hda_codec *codec)
11351 alc_pcm_null_stream;
11352 info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = 0;
11353 }
11354 - if (spec->num_adc_nids > 1) {
11355 + if (have_multi_adcs) {
11356 p = spec->stream_analog_alt_capture;
11357 if (!p)
11358 p = &alc_pcm_analog_alt_capture;
11359 @@ -3071,6 +3077,12 @@ static void alc_auto_set_output_and_unmute(struct hda_codec *codec,
11360 if (nid)
11361 snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE,
11362 AMP_OUT_ZERO);
11363 +
11364 + /* unmute DAC if it's not assigned to a mixer */
11365 + nid = alc_look_for_out_mute_nid(codec, pin, dac);
11366 + if (nid == mix && nid_has_mute(codec, dac, HDA_OUTPUT))
11367 + snd_hda_codec_write(codec, dac, 0, AC_VERB_SET_AMP_GAIN_MUTE,
11368 + AMP_OUT_ZERO);
11369 }
11370
11371 static void alc_auto_init_multi_out(struct hda_codec *codec)
11372 @@ -5421,6 +5433,8 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
11373 .patch = patch_alc882 },
11374 { .id = 0x10ec0662, .rev = 0x100101, .name = "ALC662 rev1",
11375 .patch = patch_alc662 },
11376 + { .id = 0x10ec0662, .rev = 0x100300, .name = "ALC662 rev3",
11377 + .patch = patch_alc662 },
11378 { .id = 0x10ec0663, .name = "ALC663", .patch = patch_alc662 },
11379 { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 },
11380 { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 },
11381 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
11382 index 987e3cf..d0671a8 100644
11383 --- a/sound/pci/hda/patch_sigmatel.c
11384 +++ b/sound/pci/hda/patch_sigmatel.c
11385 @@ -5585,9 +5585,7 @@ static void stac92hd8x_fill_auto_spec(struct hda_codec *codec)
11386 static int patch_stac92hd83xxx(struct hda_codec *codec)
11387 {
11388 struct sigmatel_spec *spec;
11389 - hda_nid_t conn[STAC92HD83_DAC_COUNT + 1];
11390 int err;
11391 - int num_dacs;
11392
11393 spec = kzalloc(sizeof(*spec), GFP_KERNEL);
11394 if (spec == NULL)
11395 @@ -5627,26 +5625,8 @@ again:
11396 stac92xx_set_config_regs(codec,
11397 stac92hd83xxx_brd_tbl[spec->board_config]);
11398
11399 - switch (codec->vendor_id) {
11400 - case 0x111d76d1:
11401 - case 0x111d76d9:
11402 - case 0x111d76df:
11403 - case 0x111d76e5:
11404 - case 0x111d7666:
11405 - case 0x111d7667:
11406 - case 0x111d7668:
11407 - case 0x111d7669:
11408 - case 0x111d76e3:
11409 - case 0x111d7604:
11410 - case 0x111d76d4:
11411 - case 0x111d7605:
11412 - case 0x111d76d5:
11413 - case 0x111d76e7:
11414 - if (spec->board_config == STAC_92HD83XXX_PWR_REF)
11415 - break;
11416 + if (spec->board_config != STAC_92HD83XXX_PWR_REF)
11417 spec->num_pwrs = 0;
11418 - break;
11419 - }
11420
11421 codec->patch_ops = stac92xx_patch_ops;
11422
11423 @@ -5673,7 +5653,11 @@ again:
11424 }
11425 #endif
11426
11427 - err = stac92xx_parse_auto_config(codec, 0x1d, 0);
11428 + /* 92HD65/66 series has S/PDIF-IN */
11429 + if (codec->vendor_id >= 0x111d76e8 && codec->vendor_id <= 0x111d76f3)
11430 + err = stac92xx_parse_auto_config(codec, 0x1d, 0x22);
11431 + else
11432 + err = stac92xx_parse_auto_config(codec, 0x1d, 0);
11433 if (!err) {
11434 if (spec->board_config < 0) {
11435 printk(KERN_WARNING "hda_codec: No auto-config is "
11436 @@ -5689,22 +5673,6 @@ again:
11437 return err;
11438 }
11439
11440 - /* docking output support */
11441 - num_dacs = snd_hda_get_connections(codec, 0xF,
11442 - conn, STAC92HD83_DAC_COUNT + 1) - 1;
11443 - /* skip non-DAC connections */
11444 - while (num_dacs >= 0 &&
11445 - (get_wcaps_type(get_wcaps(codec, conn[num_dacs]))
11446 - != AC_WID_AUD_OUT))
11447 - num_dacs--;
11448 - /* set port E and F to select the last DAC */
11449 - if (num_dacs >= 0) {
11450 - snd_hda_codec_write_cache(codec, 0xE, 0,
11451 - AC_VERB_SET_CONNECT_SEL, num_dacs);
11452 - snd_hda_codec_write_cache(codec, 0xF, 0,
11453 - AC_VERB_SET_CONNECT_SEL, num_dacs);
11454 - }
11455 -
11456 codec->proc_widget_hook = stac92hd_proc_hook;
11457
11458 return 0;
11459 @@ -6579,6 +6547,18 @@ static const struct hda_codec_preset snd_hda_preset_sigmatel[] = {
11460 { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx},
11461 { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx},
11462 { .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx},
11463 + { .id = 0x111d76e8, .name = "92HD66B1X5", .patch = patch_stac92hd83xxx},
11464 + { .id = 0x111d76e9, .name = "92HD66B2X5", .patch = patch_stac92hd83xxx},
11465 + { .id = 0x111d76ea, .name = "92HD66B3X5", .patch = patch_stac92hd83xxx},
11466 + { .id = 0x111d76eb, .name = "92HD66C1X5", .patch = patch_stac92hd83xxx},
11467 + { .id = 0x111d76ec, .name = "92HD66C2X5", .patch = patch_stac92hd83xxx},
11468 + { .id = 0x111d76ed, .name = "92HD66C3X5", .patch = patch_stac92hd83xxx},
11469 + { .id = 0x111d76ee, .name = "92HD66B1X3", .patch = patch_stac92hd83xxx},
11470 + { .id = 0x111d76ef, .name = "92HD66B2X3", .patch = patch_stac92hd83xxx},
11471 + { .id = 0x111d76f0, .name = "92HD66B3X3", .patch = patch_stac92hd83xxx},
11472 + { .id = 0x111d76f1, .name = "92HD66C1X3", .patch = patch_stac92hd83xxx},
11473 + { .id = 0x111d76f2, .name = "92HD66C2X3", .patch = patch_stac92hd83xxx},
11474 + { .id = 0x111d76f3, .name = "92HD66C3/65", .patch = patch_stac92hd83xxx},
11475 {} /* terminator */
11476 };
11477
11478 diff --git a/sound/soc/codecs/ak4535.c b/sound/soc/codecs/ak4535.c
11479 index e1a214e..65abd09 100644
11480 --- a/sound/soc/codecs/ak4535.c
11481 +++ b/sound/soc/codecs/ak4535.c
11482 @@ -40,11 +40,11 @@ struct ak4535_priv {
11483 /*
11484 * ak4535 register cache
11485 */
11486 -static const u16 ak4535_reg[AK4535_CACHEREGNUM] = {
11487 - 0x0000, 0x0080, 0x0000, 0x0003,
11488 - 0x0002, 0x0000, 0x0011, 0x0001,
11489 - 0x0000, 0x0040, 0x0036, 0x0010,
11490 - 0x0000, 0x0000, 0x0057, 0x0000,
11491 +static const u8 ak4535_reg[AK4535_CACHEREGNUM] = {
11492 + 0x00, 0x80, 0x00, 0x03,
11493 + 0x02, 0x00, 0x11, 0x01,
11494 + 0x00, 0x40, 0x36, 0x10,
11495 + 0x00, 0x00, 0x57, 0x00,
11496 };
11497
11498 /*
11499 diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
11500 index 65f4604..79c1b3d 100644
11501 --- a/sound/soc/codecs/ak4642.c
11502 +++ b/sound/soc/codecs/ak4642.c
11503 @@ -162,17 +162,17 @@ struct ak4642_priv {
11504 /*
11505 * ak4642 register cache
11506 */
11507 -static const u16 ak4642_reg[AK4642_CACHEREGNUM] = {
11508 - 0x0000, 0x0000, 0x0001, 0x0000,
11509 - 0x0002, 0x0000, 0x0000, 0x0000,
11510 - 0x00e1, 0x00e1, 0x0018, 0x0000,
11511 - 0x00e1, 0x0018, 0x0011, 0x0008,
11512 - 0x0000, 0x0000, 0x0000, 0x0000,
11513 - 0x0000, 0x0000, 0x0000, 0x0000,
11514 - 0x0000, 0x0000, 0x0000, 0x0000,
11515 - 0x0000, 0x0000, 0x0000, 0x0000,
11516 - 0x0000, 0x0000, 0x0000, 0x0000,
11517 - 0x0000,
11518 +static const u8 ak4642_reg[AK4642_CACHEREGNUM] = {
11519 + 0x00, 0x00, 0x01, 0x00,
11520 + 0x02, 0x00, 0x00, 0x00,
11521 + 0xe1, 0xe1, 0x18, 0x00,
11522 + 0xe1, 0x18, 0x11, 0x08,
11523 + 0x00, 0x00, 0x00, 0x00,
11524 + 0x00, 0x00, 0x00, 0x00,
11525 + 0x00, 0x00, 0x00, 0x00,
11526 + 0x00, 0x00, 0x00, 0x00,
11527 + 0x00, 0x00, 0x00, 0x00,
11528 + 0x00,
11529 };
11530
11531 /*
11532 diff --git a/sound/soc/codecs/wm8711.c b/sound/soc/codecs/wm8711.c
11533 index a537e4a..1dae5c4 100644
11534 --- a/sound/soc/codecs/wm8711.c
11535 +++ b/sound/soc/codecs/wm8711.c
11536 @@ -150,7 +150,7 @@ static int wm8711_hw_params(struct snd_pcm_substream *substream,
11537 {
11538 struct snd_soc_codec *codec = dai->codec;
11539 struct wm8711_priv *wm8711 = snd_soc_codec_get_drvdata(codec);
11540 - u16 iface = snd_soc_read(codec, WM8711_IFACE) & 0xfffc;
11541 + u16 iface = snd_soc_read(codec, WM8711_IFACE) & 0xfff3;
11542 int i = get_coeff(wm8711->sysclk, params_rate(params));
11543 u16 srate = (coeff_div[i].sr << 2) |
11544 (coeff_div[i].bosr << 1) | coeff_div[i].usb;
11545 @@ -231,7 +231,7 @@ static int wm8711_set_dai_fmt(struct snd_soc_dai *codec_dai,
11546 unsigned int fmt)
11547 {
11548 struct snd_soc_codec *codec = codec_dai->codec;
11549 - u16 iface = 0;
11550 + u16 iface = snd_soc_read(codec, WM8711_IFACE) & 0x000c;
11551
11552 /* set master/slave audio interface */
11553 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
11554 diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
11555 index 25af901..c173aee 100644
11556 --- a/sound/soc/codecs/wm8741.c
11557 +++ b/sound/soc/codecs/wm8741.c
11558 @@ -337,10 +337,10 @@ static int wm8741_set_dai_fmt(struct snd_soc_dai *codec_dai,
11559 iface |= 0x0004;
11560 break;
11561 case SND_SOC_DAIFMT_DSP_A:
11562 - iface |= 0x0003;
11563 + iface |= 0x000C;
11564 break;
11565 case SND_SOC_DAIFMT_DSP_B:
11566 - iface |= 0x0013;
11567 + iface |= 0x001C;
11568 break;
11569 default:
11570 return -EINVAL;
11571 diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
11572 index b085575..cbba0b1 100644
11573 --- a/sound/soc/codecs/wm8904.c
11574 +++ b/sound/soc/codecs/wm8904.c
11575 @@ -868,7 +868,7 @@ SOC_ENUM("Right Capture Mode", rin_mode),
11576 SOC_DOUBLE_R("Capture Volume", WM8904_ANALOGUE_LEFT_INPUT_0,
11577 WM8904_ANALOGUE_RIGHT_INPUT_0, 0, 31, 0),
11578 SOC_DOUBLE_R("Capture Switch", WM8904_ANALOGUE_LEFT_INPUT_0,
11579 - WM8904_ANALOGUE_RIGHT_INPUT_0, 7, 1, 0),
11580 + WM8904_ANALOGUE_RIGHT_INPUT_0, 7, 1, 1),
11581
11582 SOC_SINGLE("High Pass Filter Switch", WM8904_ADC_DIGITAL_0, 4, 1, 0),
11583 SOC_ENUM("High Pass Filter Mode", hpf_mode),
11584 diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
11585 index 056daa0..d40da04 100644
11586 --- a/sound/soc/codecs/wm8940.c
11587 +++ b/sound/soc/codecs/wm8940.c
11588 @@ -470,6 +470,8 @@ static int wm8940_set_bias_level(struct snd_soc_codec *codec,
11589 break;
11590 }
11591
11592 + codec->dapm.bias_level = level;
11593 +
11594 return ret;
11595 }
11596
11597 diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
11598 index d2c315f..c610675 100644
11599 --- a/sound/soc/codecs/wm8962.c
11600 +++ b/sound/soc/codecs/wm8962.c
11601 @@ -1959,7 +1959,13 @@ static int wm8962_readable_register(struct snd_soc_codec *codec, unsigned int re
11602
11603 static int wm8962_reset(struct snd_soc_codec *codec)
11604 {
11605 - return snd_soc_write(codec, WM8962_SOFTWARE_RESET, 0x6243);
11606 + int ret;
11607 +
11608 + ret = snd_soc_write(codec, WM8962_SOFTWARE_RESET, 0x6243);
11609 + if (ret != 0)
11610 + return ret;
11611 +
11612 + return snd_soc_write(codec, WM8962_PLL_SOFTWARE_RESET, 0);
11613 }
11614
11615 static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0);
11616 @@ -2021,7 +2027,6 @@ static int wm8962_put_spk_sw(struct snd_kcontrol *kcontrol,
11617 struct snd_ctl_elem_value *ucontrol)
11618 {
11619 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
11620 - u16 *reg_cache = codec->reg_cache;
11621 int ret;
11622
11623 /* Apply the update (if any) */
11624 @@ -2030,16 +2035,19 @@ static int wm8962_put_spk_sw(struct snd_kcontrol *kcontrol,
11625 return 0;
11626
11627 /* If the left PGA is enabled hit that VU bit... */
11628 - if (reg_cache[WM8962_PWR_MGMT_2] & WM8962_SPKOUTL_PGA_ENA)
11629 - return snd_soc_write(codec, WM8962_SPKOUTL_VOLUME,
11630 - reg_cache[WM8962_SPKOUTL_VOLUME]);
11631 + ret = snd_soc_read(codec, WM8962_PWR_MGMT_2);
11632 + if (ret & WM8962_SPKOUTL_PGA_ENA) {
11633 + snd_soc_write(codec, WM8962_SPKOUTL_VOLUME,
11634 + snd_soc_read(codec, WM8962_SPKOUTL_VOLUME));
11635 + return 1;
11636 + }
11637
11638 /* ...otherwise the right. The VU is stereo. */
11639 - if (reg_cache[WM8962_PWR_MGMT_2] & WM8962_SPKOUTR_PGA_ENA)
11640 - return snd_soc_write(codec, WM8962_SPKOUTR_VOLUME,
11641 - reg_cache[WM8962_SPKOUTR_VOLUME]);
11642 + if (ret & WM8962_SPKOUTR_PGA_ENA)
11643 + snd_soc_write(codec, WM8962_SPKOUTR_VOLUME,
11644 + snd_soc_read(codec, WM8962_SPKOUTR_VOLUME));
11645
11646 - return 0;
11647 + return 1;
11648 }
11649
11650 static const char *cap_hpf_mode_text[] = {
11651 @@ -2225,15 +2233,14 @@ static int sysclk_event(struct snd_soc_dapm_widget *w,
11652
11653 snd_soc_update_bits(codec, WM8962_FLL_CONTROL_1,
11654 WM8962_FLL_ENA, WM8962_FLL_ENA);
11655 - if (wm8962->irq) {
11656 - timeout = msecs_to_jiffies(5);
11657 - timeout = wait_for_completion_timeout(&wm8962->fll_lock,
11658 - timeout);
11659
11660 - if (timeout == 0)
11661 - dev_err(codec->dev,
11662 - "Timed out starting FLL\n");
11663 - }
11664 + timeout = msecs_to_jiffies(5);
11665 + timeout = wait_for_completion_timeout(&wm8962->fll_lock,
11666 + timeout);
11667 +
11668 + if (wm8962->irq && timeout == 0)
11669 + dev_err(codec->dev,
11670 + "Timed out starting FLL\n");
11671 }
11672 break;
11673
11674 @@ -2365,7 +2372,6 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
11675 struct snd_kcontrol *kcontrol, int event)
11676 {
11677 struct snd_soc_codec *codec = w->codec;
11678 - u16 *reg_cache = codec->reg_cache;
11679 int reg;
11680
11681 switch (w->shift) {
11682 @@ -2388,7 +2394,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
11683
11684 switch (event) {
11685 case SND_SOC_DAPM_POST_PMU:
11686 - return snd_soc_write(codec, reg, reg_cache[reg]);
11687 + return snd_soc_write(codec, reg, snd_soc_read(codec, reg));
11688 default:
11689 BUG();
11690 return -EINVAL;
11691 @@ -3058,9 +3064,9 @@ static int wm8962_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
11692 int aif0 = 0;
11693
11694 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
11695 - case SND_SOC_DAIFMT_DSP_A:
11696 - aif0 |= WM8962_LRCLK_INV;
11697 case SND_SOC_DAIFMT_DSP_B:
11698 + aif0 |= WM8962_LRCLK_INV | 3;
11699 + case SND_SOC_DAIFMT_DSP_A:
11700 aif0 |= 3;
11701
11702 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
11703 @@ -3847,6 +3853,11 @@ static int wm8962_probe(struct snd_soc_codec *codec)
11704 snd_soc_update_bits(codec, WM8962_CLOCKING2,
11705 WM8962_CLKREG_OVD, WM8962_CLKREG_OVD);
11706
11707 + /* Ensure that the oscillator and PLLs are disabled */
11708 + snd_soc_update_bits(codec, WM8962_PLL2,
11709 + WM8962_OSC_ENA | WM8962_PLL2_ENA | WM8962_PLL3_ENA,
11710 + 0);
11711 +
11712 regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
11713
11714 if (pdata) {
11715 diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
11716 index b393f9f..8468363 100644
11717 --- a/sound/soc/codecs/wm8994.c
11718 +++ b/sound/soc/codecs/wm8994.c
11719 @@ -1282,7 +1282,7 @@ SND_SOC_DAPM_MUX("AIF2DAC Mux", SND_SOC_NOPM, 0, 0, &aif2dac_mux),
11720 SND_SOC_DAPM_MUX("AIF2ADC Mux", SND_SOC_NOPM, 0, 0, &aif2adc_mux),
11721
11722 SND_SOC_DAPM_AIF_IN("AIF3DACDAT", "AIF3 Playback", 0, SND_SOC_NOPM, 0, 0),
11723 -SND_SOC_DAPM_AIF_IN("AIF3ADCDAT", "AIF3 Capture", 0, SND_SOC_NOPM, 0, 0),
11724 +SND_SOC_DAPM_AIF_OUT("AIF3ADCDAT", "AIF3 Capture", 0, SND_SOC_NOPM, 0, 0),
11725
11726 SND_SOC_DAPM_SUPPLY("TOCLK", WM8994_CLOCKING_1, 4, 0, NULL, 0),
11727
11728 @@ -2311,7 +2311,7 @@ static void wm8994_aif_shutdown(struct snd_pcm_substream *substream,
11729 rate_reg = WM8994_AIF1_RATE;
11730 break;
11731 case 2:
11732 - rate_reg = WM8994_AIF1_RATE;
11733 + rate_reg = WM8994_AIF2_RATE;
11734 break;
11735 default:
11736 break;
11737 diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
11738 index 0cdb9d1..c9c4e5c 100644
11739 --- a/sound/soc/codecs/wm8996.c
11740 +++ b/sound/soc/codecs/wm8996.c
11741 @@ -1847,7 +1847,7 @@ static int wm8996_hw_params(struct snd_pcm_substream *substream,
11742 snd_soc_update_bits(codec, lrclk_reg, WM8996_AIF1RX_RATE_MASK,
11743 lrclk);
11744 snd_soc_update_bits(codec, WM8996_AIF_CLOCKING_2,
11745 - WM8996_DSP1_DIV_SHIFT << dsp_shift, dsp);
11746 + WM8996_DSP1_DIV_MASK << dsp_shift, dsp);
11747
11748 return 0;
11749 }
11750 diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c
11751 index 67bec76..c0609c2 100644
11752 --- a/sound/usb/misc/ua101.c
11753 +++ b/sound/usb/misc/ua101.c
11754 @@ -459,7 +459,8 @@ static void kill_stream_urbs(struct ua101_stream *stream)
11755 unsigned int i;
11756
11757 for (i = 0; i < stream->queue_length; ++i)
11758 - usb_kill_urb(&stream->urbs[i]->urb);
11759 + if (stream->urbs[i])
11760 + usb_kill_urb(&stream->urbs[i]->urb);
11761 }
11762
11763 static int enable_iso_interface(struct ua101 *ua, unsigned int intf_index)
11764 @@ -484,6 +485,9 @@ static void disable_iso_interface(struct ua101 *ua, unsigned int intf_index)
11765 {
11766 struct usb_host_interface *alts;
11767
11768 + if (!ua->intf[intf_index])
11769 + return;
11770 +
11771 alts = ua->intf[intf_index]->cur_altsetting;
11772 if (alts->desc.bAlternateSetting != 0) {
11773 int err = usb_set_interface(ua->dev,
11774 @@ -1144,27 +1148,37 @@ static void free_stream_urbs(struct ua101_stream *stream)
11775 {
11776 unsigned int i;
11777
11778 - for (i = 0; i < stream->queue_length; ++i)
11779 + for (i = 0; i < stream->queue_length; ++i) {
11780 kfree(stream->urbs[i]);
11781 + stream->urbs[i] = NULL;
11782 + }
11783 }
11784
11785 static void free_usb_related_resources(struct ua101 *ua,
11786 struct usb_interface *interface)
11787 {
11788 unsigned int i;
11789 + struct usb_interface *intf;
11790
11791 + mutex_lock(&ua->mutex);
11792 free_stream_urbs(&ua->capture);
11793 free_stream_urbs(&ua->playback);
11794 + mutex_unlock(&ua->mutex);
11795 free_stream_buffers(ua, &ua->capture);
11796 free_stream_buffers(ua, &ua->playback);
11797
11798 - for (i = 0; i < ARRAY_SIZE(ua->intf); ++i)
11799 - if (ua->intf[i]) {
11800 - usb_set_intfdata(ua->intf[i], NULL);
11801 - if (ua->intf[i] != interface)
11802 + for (i = 0; i < ARRAY_SIZE(ua->intf); ++i) {
11803 + mutex_lock(&ua->mutex);
11804 + intf = ua->intf[i];
11805 + ua->intf[i] = NULL;
11806 + mutex_unlock(&ua->mutex);
11807 + if (intf) {
11808 + usb_set_intfdata(intf, NULL);
11809 + if (intf != interface)
11810 usb_driver_release_interface(&ua101_driver,
11811 - ua->intf[i]);
11812 + intf);
11813 }
11814 + }
11815 }
11816
11817 static void ua101_card_free(struct snd_card *card)
11818 diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
11819 index 1c7bfa5..eb25900 100644
11820 --- a/tools/perf/util/probe-event.c
11821 +++ b/tools/perf/util/probe-event.c
11822 @@ -1956,8 +1956,10 @@ static int __del_trace_probe_event(int fd, struct str_node *ent)
11823
11824 pr_debug("Writing event: %s\n", buf);
11825 ret = write(fd, buf, strlen(buf));
11826 - if (ret < 0)
11827 + if (ret < 0) {
11828 + ret = -errno;
11829 goto error;
11830 + }
11831
11832 printf("Remove event: %s\n", ent->s);
11833 return 0;