Magellan Linux

Contents of /trunk/kernel-alx/patches-3.18/0105-3.18.6-all-fixes.patch

Revision 2553
Wed Mar 18 09:24:38 2015 UTC by niro
File size: 141256 bytes
Log message: -import from kernel-magellan
diff --git a/Makefile b/Makefile
index 6276fcaabf21..d2bff2d5ae25 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 18
-SUBLEVEL = 5
+SUBLEVEL = 6
EXTRAVERSION =
NAME = Diseased Newt

diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 98838a05ba6d..9d0ac091a52a 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -156,6 +156,8 @@ retry:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
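The bulk of this update is one mechanical change repeated across every architecture's page-fault handler: handle_mm_fault() can now return VM_FAULT_SIGSEGV, and dispatch code that only knew OOM and SIGBUS would fall through to BUG(). A minimal sketch of the pattern follows; it is a composite, not copied from any single file, and the goto targets stand in for each architecture's local labels.

	/* Sketch of the dispatch each arch hunk below adds. */
	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;	/* invoke the OOM path */
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;		/* deliver SIGSEGV */
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;		/* deliver SIGBUS */
		BUG();				/* unknown error bit */
	}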
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 6f7e3a68803a..563cb27e37f5 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -161,6 +161,8 @@ good_area:

if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;

diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 135c24a5ba26..68c739b3fdf4 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -107,4 +107,8 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
#define xen_unmap(cookie) iounmap((cookie))

+bool xen_arch_need_swiotlb(struct device *dev,
+ unsigned long pfn,
+ unsigned long mfn);
+
#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 1163a3e9accd..2ffccd4eb084 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -342,6 +342,13 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
arch_ioremap_caller = armada_pcie_wa_ioremap_caller;

/*
+ * We should switch the PL310 to I/O coherency mode only if
+ * I/O coherency is actually enabled.
+ */
+ if (!coherency_available())
+ return;
+
+ /*
* Add the PL310 property "arm,io-coherent". This makes sure the
* outer sync operation is not used, which allows to
* workaround the system erratum that causes deadlocks when
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index b0e77de99148..f8a576b1d9bb 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -16,6 +16,13 @@
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

+bool xen_arch_need_swiotlb(struct device *dev,
+ unsigned long pfn,
+ unsigned long mfn)
+{
+ return (pfn != mfn);
+}
+
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle)
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 0eca93327195..d223a8b57c1e 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -142,6 +142,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 1790f22e71a2..2686a7aa8ec8 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -176,6 +176,8 @@ retry:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 9a66372fc7c7..ec4917ddf678 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 7225dad87094..ba5ba7accd0d 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -172,6 +172,8 @@ retry:
*/
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ goto bad_area;
} else if (fault & VM_FAULT_SIGBUS) {
signal = SIGBUS;
goto bad_area;
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index e9c6a8014bd6..e3d4d4890104 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -200,6 +200,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 2bd7487440c4..b2f04aee46ec 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -145,6 +145,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto map_err;
else if (fault & VM_FAULT_SIGBUS)
goto bus_err;
BUG();
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
index 332680e5ebf2..2de5dc695a87 100644
--- a/arch/metag/mm/fault.c
+++ b/arch/metag/mm/fault.c
@@ -141,6 +141,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index fa4cf52aa7a6..d46a5ebb7570 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -224,6 +224,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index becc42bb1849..70ab5d664332 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -158,6 +158,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 3516cbdf1ee9..0c2cc5d39c8e 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -262,6 +262,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 0703acf7d327..230ac20ae794 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -171,6 +171,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 3ca9c1131cfe..e5120e653240 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -256,6 +256,8 @@ good_area:
*/
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto bad_area;
BUG();
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 5a236f082c78..1b5305d4bdab 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -76,7 +76,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
if (*flt & VM_FAULT_OOM) {
ret = -ENOMEM;
goto out_unlock;
- } else if (*flt & VM_FAULT_SIGBUS) {
+ } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
ret = -EFAULT;
goto out_unlock;
}
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 08d659a9fcdb..f06b56baf0b3 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -444,6 +444,8 @@ good_area:
*/
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
+ if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
rc = mm_fault_error(regs, address, fault);
if (rc >= MM_FAULT_RETURN)
goto bail;
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index c8efbb37d6e0..e23f559faa47 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -293,6 +293,7 @@ static inline void disable_surveillance(void)
args.token = rtas_token("set-indicator");
if (args.token == RTAS_UNKNOWN_SERVICE)
return;
+ args.token = cpu_to_be32(args.token);
args.nargs = cpu_to_be32(3);
args.nret = cpu_to_be32(1);
args.rets = &args.args[3];
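The xmon hunk fixes a byte-order bug: rtas_token() returns its token in native endianness, and the RTAS_UNKNOWN_SERVICE comparison must happen before any conversion, but every field of the rtas_args block handed to firmware must be big-endian. nargs and nret already were converted; token was not. A sketch of the corrected ordering, with field names as in the hunk above:

	/* Sketch only: look up in native endianness, convert afterwards. */
	int token = rtas_token("set-indicator");

	if (token == RTAS_UNKNOWN_SERVICE)	/* compare before converting */
		return;

	args.token = cpu_to_be32(token);	/* firmware expects big-endian */
	args.nargs = cpu_to_be32(3);
	args.nret  = cpu_to_be32(1);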
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index a2b81d6ce8a5..fbe8f2cf9245 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -374,6 +374,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
do_no_context(regs);
else
pagefault_out_of_memory();
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ do_no_context(regs);
+ else
+ do_sigsegv(regs, SEGV_MAPERR);
} else if (fault & VM_FAULT_SIGBUS) {
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
index 52238983527d..6860beb2a280 100644
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -114,6 +114,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 541dc6101508..a58fec9b55e0 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
} else {
if (fault & VM_FAULT_SIGBUS)
do_sigbus(regs, error_code, address);
+ else if (fault & VM_FAULT_SIGSEGV)
+ bad_area(regs, error_code, address);
else
BUG();
}
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 908e8c17c902..70d817154fe8 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -249,6 +249,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 18fcd7167095..479823249429 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -446,6 +446,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 6c0571216a9d..c6d2a76d91a8 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -444,6 +444,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 5678c3571e7c..209617302df8 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -80,6 +80,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ goto out;
} else if (fault & VM_FAULT_SIGBUS) {
err = -EACCES;
goto out;
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 45abc363dd3e..6a1a8458c042 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -77,7 +77,7 @@ suffix-$(CONFIG_KERNEL_LZO) := lzo
suffix-$(CONFIG_KERNEL_LZ4) := lz4

RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
- perl $(srctree)/arch/x86/tools/calc_run_size.pl)
+ $(CONFIG_SHELL) $(srctree)/arch/x86/tools/calc_run_size.sh)
quiet_cmd_mkpiggy = MKPIGGY $@
cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )

diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index c949923a5668..f58ef6c0613b 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -236,4 +236,11 @@ void make_lowmem_page_readwrite(void *vaddr);
#define xen_remap(cookie, size) ioremap((cookie), (size));
#define xen_unmap(cookie) iounmap((cookie))

+static inline bool xen_arch_need_swiotlb(struct device *dev,
+ unsigned long pfn,
+ unsigned long mfn)
+{
+ return false;
+}
+
#endif /* _ASM_X86_XEN_PAGE_H */
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 944bf019b74f..498b6d967138 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2431,6 +2431,7 @@ __init int intel_pmu_init(void)
break;

case 55: /* 22nm Atom "Silvermont" */
+ case 76: /* 14nm Atom "Airmont" */
case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index d64f275fe274..8c256749882c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -135,7 +135,7 @@ static inline u64 rapl_scale(u64 v)
* or use ldexp(count, -32).
* Watts = Joules/Time delta
*/
- return v << (32 - __this_cpu_read(rapl_pmu->hw_unit));
+ return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit);
}

static u64 rapl_event_update(struct perf_event *event)
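The rapl_scale() change moves a single parenthesis, which is the whole bug: rapl_pmu is a per-CPU pointer, and the per-cpu accessor must be applied to the pointer itself, not to a field reached through it. A sketch of the difference, with the declaration assumed from the driver:

	/* Sketch only: per-CPU pointer vs. mistakenly per-CPU field. */
	static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu);

	static inline u64 rapl_scale(u64 v)
	{
		/*
		 * Correct: relocate &rapl_pmu by this CPU's per-cpu offset,
		 * load the pointer stored there, then follow it to hw_unit.
		 */
		return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit);
		/*
		 * The old form, __this_cpu_read(rapl_pmu->hw_unit), treats
		 * rapl_pmu->hw_unit as the per-cpu lvalue: it dereferences
		 * the raw, un-relocated pointer first and then offsets the
		 * field's address, reading the wrong location on every CPU.
		 */
	}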
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index d973e61e450d..a8612aafeca1 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -905,6 +905,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
VM_FAULT_HWPOISON_LARGE))
do_sigbus(regs, error_code, address, fault);
+ else if (fault & VM_FAULT_SIGSEGV)
+ bad_area_nosemaphore(regs, error_code, address);
else
BUG();
}
diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl
deleted file mode 100644
index 23210baade2d..000000000000
--- a/arch/x86/tools/calc_run_size.pl
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/perl
-#
-# Calculate the amount of space needed to run the kernel, including room for
-# the .bss and .brk sections.
-#
-# Usage:
-# objdump -h a.out | perl calc_run_size.pl
-use strict;
-
-my $mem_size = 0;
-my $file_offset = 0;
-
-my $sections=" *[0-9]+ \.(?:bss|brk) +";
-while (<>) {
- if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) {
- my $size = hex($1);
- my $offset = hex($2);
- $mem_size += $size;
- if ($file_offset == 0) {
- $file_offset = $offset;
- } elsif ($file_offset != $offset) {
- # BFD linker shows the same file offset in ELF.
- # Gold linker shows them as consecutive.
- next if ($file_offset + $mem_size == $offset + $size);
-
- printf STDERR "file_offset: 0x%lx\n", $file_offset;
- printf STDERR "mem_size: 0x%lx\n", $mem_size;
- printf STDERR "offset: 0x%lx\n", $offset;
- printf STDERR "size: 0x%lx\n", $size;
-
- die ".bss and .brk are non-contiguous\n";
- }
- }
-}
-
-if ($file_offset == 0) {
- die "Never found .bss or .brk file offset\n";
-}
-printf("%d\n", $mem_size + $file_offset);
diff --git a/arch/x86/tools/calc_run_size.sh b/arch/x86/tools/calc_run_size.sh
new file mode 100644
index 000000000000..1a4c17bb3910
--- /dev/null
+++ b/arch/x86/tools/calc_run_size.sh
@@ -0,0 +1,42 @@
+#!/bin/sh
+#
+# Calculate the amount of space needed to run the kernel, including room for
+# the .bss and .brk sections.
+#
+# Usage:
+# objdump -h a.out | sh calc_run_size.sh
+
+NUM='\([0-9a-fA-F]*[ \t]*\)'
+OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"$NUM$NUM$NUM$NUM"'.*/\1\4/p')
+if [ -z "$OUT" ] ; then
+ echo "Never found .bss or .brk file offset" >&2
+ exit 1
+fi
+
+OUT=$(echo ${OUT# })
+sizeA=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+offsetA=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+sizeB=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+offsetB=$(printf "%d" 0x${OUT%% *})
+
+run_size=$(( $offsetA + $sizeA + $sizeB ))
+
+# BFD linker shows the same file offset in ELF.
+if [ "$offsetA" -ne "$offsetB" ] ; then
+ # Gold linker shows them as consecutive.
+ endB=$(( $offsetB + $sizeB ))
+ if [ "$endB" != "$run_size" ] ; then
+ printf "sizeA: 0x%x\n" $sizeA >&2
+ printf "offsetA: 0x%x\n" $offsetA >&2
+ printf "sizeB: 0x%x\n" $sizeB >&2
+ printf "offsetB: 0x%x\n" $offsetB >&2
+ echo ".bss and .brk are non-contiguous" >&2
+ exit 1
+ fi
+fi
+
+printf "%d\n" $run_size
+exit 0
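The replacement script computes the same quantity as the Perl version it deletes: the file offset of .bss plus the memory sizes of .bss and .brk. A small, self-contained C sketch of that arithmetic, with made-up section values for illustration:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical values as objdump -h might report them for
		 * .bss and .brk; with the BFD linker both sections carry
		 * the same file offset. */
		unsigned long size_bss = 0x40000, off_bss = 0x1000000;
		unsigned long size_brk = 0x6000,  off_brk = 0x1000000;

		unsigned long run_size = off_bss + size_bss + size_brk;

		/* Gold instead lays them out consecutively; anything else
		 * means the sections are non-contiguous and the build must
		 * fail, exactly as the script does. */
		if (off_bss != off_brk && off_brk + size_brk != run_size) {
			fprintf(stderr, ".bss and .brk are non-contiguous\n");
			return 1;
		}
		printf("%lu\n", run_size);	/* 17063936 for these values */
		return 0;
	}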
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index b57c4f91f487..9e3571a6535c 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -117,6 +117,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 27b71a0b72d0..76b5be937de6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
* If an image has a non-zero parent overlap, get a reference to its
* parent.
*
- * We must get the reference before checking for the overlap to
- * coordinate properly with zeroing the parent overlap in
- * rbd_dev_v2_parent_info() when an image gets flattened. We
- * drop it again if there is no overlap.
- *
* Returns true if the rbd device has a parent with a non-zero
* overlap and a reference for it was successfully taken, or
* false otherwise.
*/
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
- int counter;
+ int counter = 0;

if (!rbd_dev->parent_spec)
return false;

- counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
- if (counter > 0 && rbd_dev->parent_overlap)
- return true;
-
- /* Image was flattened, but parent is not yet torn down */
+ down_read(&rbd_dev->header_rwsem);
+ if (rbd_dev->parent_overlap)
+ counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+ up_read(&rbd_dev->header_rwsem);

if (counter < 0)
rbd_warn(rbd_dev, "parent reference overflow");

- return false;
+ return counter > 0;
}

/*
@@ -4236,7 +4230,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
*/
if (rbd_dev->parent_overlap) {
rbd_dev->parent_overlap = 0;
- smp_mb();
rbd_dev_parent_put(rbd_dev);
pr_info("%s: clone image has been flattened\n",
rbd_dev->disk->disk_name);
@@ -4282,7 +4275,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
* treat it specially.
*/
rbd_dev->parent_overlap = overlap;
- smp_mb();
if (!overlap) {

/* A null parent_spec indicates it's the initial probe */
@@ -5111,10 +5103,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
struct rbd_image_header *header;

- /* Drop parent reference unless it's already been done (or none) */
-
- if (rbd_dev->parent_overlap)
- rbd_dev_parent_put(rbd_dev);
+ rbd_dev_parent_put(rbd_dev);

/* Free dynamic fields from the header, then zero it out */

diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 1fa2af957b18..84b4c8b7fbd1 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -462,7 +462,7 @@ static void __init arch_counter_register(unsigned type)

/* Register the CP15 based counter if we have one */
if (type & ARCH_CP15_TIMER) {
- if (arch_timer_use_virtual)
+ if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
arch_timer_read_counter = arch_counter_get_cntvct;
else
arch_timer_read_counter = arch_counter_get_cntpct;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index ef757f712a3d..e9a2827ad1c4 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
}
EXPORT_SYMBOL(drm_fb_helper_add_one_connector);

+static void remove_from_modeset(struct drm_mode_set *set,
+ struct drm_connector *connector)
+{
+ int i, j;
+
+ for (i = 0; i < set->num_connectors; i++) {
+ if (set->connectors[i] == connector)
+ break;
+ }
+
+ if (i == set->num_connectors)
+ return;
+
+ for (j = i + 1; j < set->num_connectors; j++) {
+ set->connectors[j - 1] = set->connectors[j];
+ }
+ set->num_connectors--;
+
+ /* because i915 is pissy about this..
+ * TODO maybe need to makes sure we set it back to !=NULL somewhere?
+ */
+ if (set->num_connectors == 0)
+ set->fb = NULL;
+}
+
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
@@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
}
fb_helper->connector_count--;
kfree(fb_helper_connector);
+
+ /* also cleanup dangling references to the connector: */
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
+
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 346aee828dc3..c33327d5c543 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2076,8 +2076,7 @@ struct drm_i915_cmd_table {
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
- ((INTEL_DEVID(dev) & 0xf) == 0x2 || \
- (INTEL_DEVID(dev) & 0xf) == 0x6 || \
+ ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0xe))
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fd76933eed04..d88dbedeaa77 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3050,6 +3050,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
u32 size = i915_gem_obj_ggtt_size(obj);
uint64_t val;

+ /* Adjust fence size to match tiled area */
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ uint32_t row_size = obj->stride *
+ (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+ size = (size / row_size) * row_size;
+ }
+
val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
@@ -4811,25 +4818,18 @@ i915_gem_init_hw(struct drm_device *dev)
for (i = 0; i < NUM_L3_SLICES(dev); i++)
i915_gem_l3_remap(&dev_priv->ring[RCS], i);

- /*
- * XXX: Contexts should only be initialized once. Doing a switch to the
- * default context switch however is something we'd like to do after
- * reset or thaw (the latter may not actually be necessary for HW, but
- * goes with our code better). Context switching requires rings (for
- * the do_switch), but before enabling PPGTT. So don't move this.
- */
- ret = i915_gem_context_enable(dev_priv);
+ ret = i915_ppgtt_init_hw(dev);
if (ret && ret != -EIO) {
- DRM_ERROR("Context enable failed %d\n", ret);
+ DRM_ERROR("PPGTT enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
-
- return ret;
}

- ret = i915_ppgtt_init_hw(dev);
+ ret = i915_gem_context_enable(dev_priv);
if (ret && ret != -EIO) {
- DRM_ERROR("PPGTT enable failed %d\n", ret);
+ DRM_ERROR("Context enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
+
+ return ret;
}

return ret;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 41b3be217493..8bc193f81333 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -947,7 +947,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)

WARN_ON(panel->backlight.max == 0);

- if (panel->backlight.level == 0) {
+ if (panel->backlight.level <= panel->backlight.min) {
panel->backlight.level = panel->backlight.max;
if (panel->backlight.device)
panel->backlight.device->props.brightness =
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index b53b31a7b76f..cdf6e2149539 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
return r;
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
return radeon_gart_table_ram_alloc(rdev);
}
@@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
WREG32(RADEON_AIC_HI_ADDR, 0);
}

+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
+{
+ return addr;
+}
+
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+ uint64_t entry)
{
u32 *gtt = rdev->gart.ptr;
- gtt[i] = cpu_to_le32(lower_32_bits(addr));
+ gtt[i] = cpu_to_le32(lower_32_bits(entry));
}

void r100_pci_gart_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 1bc4704034ce..f3ef6257d669 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE (1 << 3)

-void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
- void __iomem *ptr = rdev->gart.ptr;
-
addr = (lower_32_bits(addr) >> 8) |
((upper_32_bits(addr) & 0xff) << 24);
if (flags & RADEON_GART_PAGE_READ)
@@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
addr |= R300_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
addr |= R300_PTE_UNSNOOPED;
+ return addr;
+}
+
+void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t entry)
+{
+ void __iomem *ptr = rdev->gart.ptr;
+
/* on x86 we want this to be CPU endian, on powerpc
* on powerpc without HW swappers, it'll get swapped on way
* into VRAM - so no need for cpu_to_le32 on VRAM tables */
- writel(addr, ((void __iomem *)ptr) + (i * 4));
+ writel(entry, ((void __iomem *)ptr) + (i * 4));
}

int rv370_pcie_gart_init(struct radeon_device *rdev)
@@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
return radeon_gart_table_vram_alloc(rdev);
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a9717b3fbf1b..dbe51bfe3ef4 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -245,6 +245,7 @@ bool radeon_get_bios(struct radeon_device *rdev);
* Dummy page
*/
struct radeon_dummy_page {
+ uint64_t entry;
struct page *page;
dma_addr_t addr;
};
@@ -626,6 +627,7 @@ struct radeon_gart {
unsigned table_size;
struct page **pages;
dma_addr_t *pages_addr;
+ uint64_t *pages_entry;
bool ready;
};

@@ -1819,8 +1821,9 @@ struct radeon_asic {
/* gart */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
+ uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
void (*set_page)(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
} gart;
struct {
int (*init)(struct radeon_device *rdev);
@@ -2818,7 +2821,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
+#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
+#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 121aff6a3b41..ed0e10eee2dc 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
} else {
DRM_INFO("Forcing AGP to PCI mode\n");
rdev->flags |= RADEON_IS_PCI;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
}
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
@@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = {
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
+ .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = {
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
+ .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@@ -359,6 +363,7 @@ static struct radeon_asic r300_asic = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
+ .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@@ -425,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@@ -491,6 +497,7 @@ static struct radeon_asic r420_asic = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@@ -557,6 +564,7 @@ static struct radeon_asic rs400_asic = {
.mc_wait_for_idle = &rs400_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
+ .get_page_entry = &rs400_gart_get_page_entry,
.set_page = &rs400_gart_set_page,
},
.ring = {
@@ -623,6 +631,7 @@ static struct radeon_asic rs600_asic = {
.mc_wait_for_idle = &rs600_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs600_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -691,6 +700,7 @@ static struct radeon_asic rs690_asic = {
.mc_wait_for_idle = &rs690_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
+ .get_page_entry = &rs400_gart_get_page_entry,
.set_page = &rs400_gart_set_page,
},
.ring = {
@@ -759,6 +769,7 @@ static struct radeon_asic rv515_asic = {
.mc_wait_for_idle = &rv515_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@@ -825,6 +836,7 @@ static struct radeon_asic r520_asic = {
.mc_wait_for_idle = &r520_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@@ -919,6 +931,7 @@ static struct radeon_asic r600_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1004,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1095,6 +1109,7 @@ static struct radeon_asic rs780_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1199,6 +1214,7 @@ static struct radeon_asic rv770_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1317,6 +1333,7 @@ static struct radeon_asic evergreen_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1409,6 +1426,7 @@ static struct radeon_asic sumo_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1500,6 +1518,7 @@ static struct radeon_asic btc_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1635,6 +1654,7 @@ static struct radeon_asic cayman_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -1738,6 +1758,7 @@ static struct radeon_asic trinity_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -1871,6 +1892,7 @@ static struct radeon_asic si_asic = {
.get_gpu_clock_counter = &si_get_gpu_clock_counter,
.gart = {
.tlb_flush = &si_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -2032,6 +2054,7 @@ static struct radeon_asic ci_asic = {
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -2139,6 +2162,7 @@ static struct radeon_asic kv_asic = {
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index d8ace5b28a5b..0c1da2bf1fb4 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
@@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev);
extern int rs400_suspend(struct radeon_device *rdev);
extern int rs400_resume(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int rs400_gart_init(struct radeon_device *rdev);
@@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev);
void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 995a8b1770dd..bdf263a4a67c 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -743,6 +743,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
rdev->dummy_page.page = NULL;
return -ENOMEM;
}
+ rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
+ RADEON_GART_PAGE_DUMMY);
return 0;
}

diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 84146d5901aa..c7be612b60c9 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
radeon_bo_unpin(rdev->gart.robj);
radeon_bo_unreserve(rdev->gart.robj);
rdev->gart.table_addr = gpu_addr;
+
+ if (!r) {
+ int i;
+
+ /* We might have dropped some GART table updates while it wasn't
+ * mapped, restore all entries
+ */
+ for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+ radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
+ mb();
+ radeon_gart_tlb_flush(rdev);
+ }
+
return r;
}

@@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
unsigned t;
unsigned p;
int i, j;
- u64 page_base;

if (!rdev->gart.ready) {
WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -240,13 +252,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
if (rdev->gart.pages[p]) {
rdev->gart.pages[p] = NULL;
rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
- page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
if (rdev->gart.ptr) {
- radeon_gart_set_page(rdev, t, page_base,
- RADEON_GART_PAGE_DUMMY);
+ radeon_gart_set_page(rdev, t,
+ rdev->dummy_page.entry);
}
- page_base += RADEON_GPU_PAGE_SIZE;
}
}
}
@@ -274,7 +285,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
{
unsigned t;
unsigned p;
- uint64_t page_base;
+ uint64_t page_base, page_entry;
int i, j;

if (!rdev->gart.ready) {
@@ -287,12 +298,14 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
for (i = 0; i < pages; i++, p++) {
rdev->gart.pages_addr[p] = dma_addr[i];
rdev->gart.pages[p] = pagelist[i];
- if (rdev->gart.ptr) {
- page_base = rdev->gart.pages_addr[p];
- for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, page_base, flags);
- page_base += RADEON_GPU_PAGE_SIZE;
+ page_base = dma_addr[i];
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ page_entry = radeon_gart_get_page_entry(page_base, flags);
+ rdev->gart.pages_entry[t] = page_entry;
+ if (rdev->gart.ptr) {
+ radeon_gart_set_page(rdev, t, page_entry);
}
+ page_base += RADEON_GPU_PAGE_SIZE;
}
}
mb();
@@ -340,10 +353,17 @@ int radeon_gart_init(struct radeon_device *rdev)
radeon_gart_fini(rdev);
return -ENOMEM;
}
+ rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
+ rdev->gart.num_gpu_pages);
+ if (rdev->gart.pages_entry == NULL) {
+ radeon_gart_fini(rdev);
+ return -ENOMEM;
+ }
/* set GART entry to point to the dummy page by default */
- for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
+ for (i = 0; i < rdev->gart.num_cpu_pages; i++)
rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
- }
+ for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+ rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
return 0;
}

@@ -356,15 +376,17 @@ int radeon_gart_init(struct radeon_device *rdev)
*/
void radeon_gart_fini(struct radeon_device *rdev)
{
- if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+ if (rdev->gart.ready) {
/* unbind pages */
radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
}
rdev->gart.ready = false;
vfree(rdev->gart.pages);
vfree(rdev->gart.pages_addr);
+ vfree(rdev->gart.pages_entry);
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
+ rdev->gart.pages_entry = NULL;

radeon_dummy_page_fini(rdev);
}
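All of the radeon hunks serve one refactor: PTE encoding is split out of the write path. get_page_entry() turns (addr, flags) into the ASIC's entry format with no side effects, set_page() only stores an already-encoded entry, and radeon_gart.c caches every entry in pages_entry[] so the whole table can be replayed after it is re-pinned. A sketch of the resulting flow, with rdev, t, i, page_base and flags assumed from the hunks above:

	/* Bind: encode once, cache, write through if the table is mapped. */
	uint64_t entry = radeon_gart_get_page_entry(page_base, flags);

	rdev->gart.pages_entry[t] = entry;
	if (rdev->gart.ptr)
		radeon_gart_set_page(rdev, t, entry);

	/* Re-pin: replay the cached entries instead of re-deriving them. */
	for (i = 0; i < rdev->gart.num_gpu_pages; i++)
		radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
	mb();
	radeon_gart_tlb_flush(rdev);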
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index c5799f16aa4b..34e3235f41d2 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE (1 << 3)

-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
uint32_t entry;
- u32 *gtt = rdev->gart.ptr;

entry = (lower_32_bits(addr) & PAGE_MASK) |
((upper_32_bits(addr) & 0xff) << 4);
@@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
entry |= RS400_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
entry |= RS400_PTE_UNSNOOPED;
- entry = cpu_to_le32(entry);
- gtt[i] = entry;
+ return entry;
+}
+
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t entry)
+{
+ u32 *gtt = rdev->gart.ptr;
+ gtt[i] = cpu_to_le32(lower_32_bits(entry));
}

int rs400_mc_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 9acb1c3c005b..74bce91aecc1 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeon_device *rdev)
radeon_gart_table_vram_free(rdev);
}

-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
- void __iomem *ptr = (void *)rdev->gart.ptr;
-
addr = addr & 0xFFFFFFFFFFFFF000ULL;
addr |= R600_PTE_SYSTEM;
if (flags & RADEON_GART_PAGE_VALID)
@@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
addr |= R600_PTE_WRITEABLE;
if (flags & RADEON_GART_PAGE_SNOOP)
addr |= R600_PTE_SNOOPED;
- writeq(addr, ptr + (i * 8));
+ return addr;
+}
+
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t entry)
+{
+ void __iomem *ptr = (void *)rdev->gart.ptr;
+ writeq(entry, ptr + (i * 8));
}

int rs600_irq_set(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index daeca571b42f..810dac80179c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
--dev_priv->num_3d_resources;
} else if (unhide_svga) {
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) &
~SVGA_REG_ENABLE_HIDE);
- mutex_unlock(&dev_priv->hw_mutex);
}

mutex_unlock(&dev_priv->release_mutex);
@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
mutex_lock(&dev_priv->release_mutex);
if (unlikely(--dev_priv->num_3d_resources == 0))
vmw_release_device(dev_priv);
- else if (hide_svga) {
- mutex_lock(&dev_priv->hw_mutex);
+ else if (hide_svga)
vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) |
SVGA_REG_ENABLE_HIDE);
- mutex_unlock(&dev_priv->hw_mutex);
- }

n3d = (int32_t) dev_priv->num_3d_resources;
mutex_unlock(&dev_priv->release_mutex);
@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->dev = dev;
dev_priv->vmw_chipset = chipset;
dev_priv->last_read_seqno = (uint32_t) -100;
- mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem);
+ spin_lock_init(&dev_priv->hw_lock);
+ spin_lock_init(&dev_priv->waiter_lock);
+ spin_lock_init(&dev_priv->cap_lock);

for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]);
@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)

dev_priv->enable_fb = enable_fbdev;

- mutex_lock(&dev_priv->hw_mutex);
-
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
svga_id = vmw_read(dev_priv, SVGA_REG_ID);
if (svga_id != SVGA_ID_2) {
ret = -ENOSYS;
DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
- mutex_unlock(&dev_priv->hw_mutex);
goto out_err0;
}

@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->prim_bb_mem = dev_priv->vram_size;

ret = vmw_dma_masks(dev_priv);
- if (unlikely(ret != 0)) {
- mutex_unlock(&dev_priv->hw_mutex);
+ if (unlikely(ret != 0))
goto out_err0;
- }

/*
* Limit back buffer size to VRAM size. Remove this once
@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv->prim_bb_mem > dev_priv->vram_size)
dev_priv->prim_bb_mem = dev_priv->vram_size;

- mutex_unlock(&dev_priv->hw_mutex);
-
vmw_print_capabilities(dev_priv->capabilities);

if (dev_priv->capabilities & SVGA_CAP_GMR2) {
@@ -1161,9 +1151,7 @@ static int vmw_master_set(struct drm_device *dev,
if (unlikely(ret != 0))
return ret;
vmw_kms_save_vga(dev_priv);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 0);
- mutex_unlock(&dev_priv->hw_mutex);
}

if (active) {
@@ -1197,9 +1185,7 @@ out_no_active_lock:
if (!dev_priv->enable_fb) {
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
- mutex_unlock(&dev_priv->hw_mutex);
}
return ret;
}
@@ -1234,9 +1220,7 @@ static void vmw_master_drop(struct drm_device *dev,
DRM_ERROR("Unable to clean VRAM on master drop.\n");
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
- mutex_unlock(&dev_priv->hw_mutex);
}

dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1368,10 +1352,8 @@ static void vmw_pm_complete(struct device *kdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);

- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
(void) vmw_read(dev_priv, SVGA_REG_ID);
- mutex_unlock(&dev_priv->hw_mutex);

/**
* Reclaim 3d reference held by fbdev and potentially
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4ee799b43d5d..d26a6daa9719 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -399,7 +399,8 @@ struct vmw_private {
uint32_t memory_size;
bool has_gmr;
bool has_mob;
- struct mutex hw_mutex;
+ spinlock_t hw_lock;
+ spinlock_t cap_lock;

/*
* VGA registers.
@@ -449,8 +450,9 @@ struct vmw_private {
atomic_t marker_seq;
wait_queue_head_t fence_queue;
wait_queue_head_t fifo_queue;
- int fence_queue_waiters; /* Protected by hw_mutex */
- int goal_queue_waiters; /* Protected by hw_mutex */
+ spinlock_t waiter_lock;
+ int fence_queue_waiters; /* Protected by waiter_lock */
+ int goal_queue_waiters; /* Protected by waiter_lock */
atomic_t fifo_queue_waiters;
uint32_t last_read_seqno;
spinlock_t irq_lock;
@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
return (struct vmw_master *) master->driver_priv;
}

+/*
+ * The locking here is fine-grained, so that it is performed once
+ * for every read- and write operation. This is of course costly, but we
+ * don't perform much register access in the timing critical paths anyway.
+ * Instead we have the extra benefit of being sure that we don't forget
+ * the hw lock around register accesses.
+ */
static inline void vmw_write(struct vmw_private *dev_priv,
unsigned int offset, uint32_t value)
{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+ spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
unsigned int offset)
{
- uint32_t val;
+ unsigned long irq_flags;
+ u32 val;

+ spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+ spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+
return val;
}

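The vmwgfx conversion replaces the broad hw_mutex with the small hw_lock spinlock because the SVGA register file is an index/value port pair: the outl() that selects a register and the outl()/inl() that accesses it must not be interleaved with another CPU's access, and vmw_read() can now be reached from atomic context (see the fence code below), where a mutex may not sleep. The locked window is just the two port operations, as sketched here with the names declared above:

	/* Sketch of one guarded access under the new scheme. */
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&dev_priv->hw_lock, flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);	/* select register */
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);	/* read its value  */
	spin_unlock_irqrestore(&dev_priv->hw_lock, flags);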
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index b7594cb758af..945f1e0dad92 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -35,7 +35,7 @@ struct vmw_fence_manager {
struct vmw_private *dev_priv;
spinlock_t lock;
struct list_head fence_list;
- struct work_struct work, ping_work;
+ struct work_struct work;
u32 user_fence_size;
u32 fence_size;
u32 event_fence_action_size;
@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f)
return "svga";
}

-static void vmw_fence_ping_func(struct work_struct *work)
-{
- struct vmw_fence_manager *fman =
- container_of(work, struct vmw_fence_manager, ping_work);
-
- vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
-}
-
static bool vmw_fence_enable_signaling(struct fence *f)
{
struct vmw_fence_obj *fence =
@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
return false;

- if (mutex_trylock(&dev_priv->hw_mutex)) {
- vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
- mutex_unlock(&dev_priv->hw_mutex);
- } else
- schedule_work(&fman->ping_work);
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

return true;
}
@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&fman->fence_list);
INIT_LIST_HEAD(&fman->cleanup_list);
INIT_WORK(&fman->work, &vmw_fence_work_func);
- INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
fman->fifo_down = true;
fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
bool lists_empty;

(void) cancel_work_sync(&fman->work);
- (void) cancel_work_sync(&fman->ping_work);

spin_lock_irqsave(&fman->lock, irq_flags);
lists_empty = list_empty(&fman->fence_list) &&
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 09e10aefcd8e..39f2b03888e7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (!dev_priv->has_mob)
return false;

- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->cap_lock);
vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->cap_lock);

return (result != 0);
}
@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

- mutex_lock(&dev_priv->hw_mutex);
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
@@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
mb();

vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
- mutex_unlock(&dev_priv->hw_mutex);

max = ioread32(fifo_mem + SVGA_FIFO_MAX);
min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
return vmw_fifo_send_fence(dev_priv, &dummy);
1593 return vmw_fifo_send_fence(dev_priv, &dummy);
1594 }
1595
1596 -void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason)
1597 +void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
1598 {
1599 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
1600 + static DEFINE_SPINLOCK(ping_lock);
1601 + unsigned long irq_flags;
1602
1603 + /*
1604 + * The ping_lock is needed because we don't have an atomic
1605 + * test-and-set of the SVGA_FIFO_BUSY register.
1606 + */
1607 + spin_lock_irqsave(&ping_lock, irq_flags);
1608 if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
1609 iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
1610 vmw_write(dev_priv, SVGA_REG_SYNC, reason);
1611 }
1612 -}
1613 -
1614 -void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
1615 -{
1616 - mutex_lock(&dev_priv->hw_mutex);
1617 -
1618 - vmw_fifo_ping_host_locked(dev_priv, reason);
1619 -
1620 - mutex_unlock(&dev_priv->hw_mutex);
1621 + spin_unlock_irqrestore(&ping_lock, irq_flags);
1622 }
1623
1624 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
1625 {
1626 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
1627
1628 - mutex_lock(&dev_priv->hw_mutex);
1629 -
1630 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
1631 while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
1632 ;
1633 @@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
1634 vmw_write(dev_priv, SVGA_REG_TRACES,
1635 dev_priv->traces_state);
1636
1637 - mutex_unlock(&dev_priv->hw_mutex);
1638 vmw_marker_queue_takedown(&fifo->marker_queue);
1639
1640 if (likely(fifo->static_buffer != NULL)) {
1641 @@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
1642 return vmw_fifo_wait_noirq(dev_priv, bytes,
1643 interruptible, timeout);
1644
1645 - mutex_lock(&dev_priv->hw_mutex);
1646 + spin_lock(&dev_priv->waiter_lock);
1647 if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
1648 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
1649 outl(SVGA_IRQFLAG_FIFO_PROGRESS,
1650 @@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
1651 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
1652 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
1653 }
1654 - mutex_unlock(&dev_priv->hw_mutex);
1655 + spin_unlock(&dev_priv->waiter_lock);
1656
1657 if (interruptible)
1658 ret = wait_event_interruptible_timeout
1659 @@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
1660 else if (likely(ret > 0))
1661 ret = 0;
1662
1663 - mutex_lock(&dev_priv->hw_mutex);
1664 + spin_lock(&dev_priv->waiter_lock);
1665 if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
1666 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
1667 dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
1668 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
1669 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
1670 }
1671 - mutex_unlock(&dev_priv->hw_mutex);
1672 + spin_unlock(&dev_priv->waiter_lock);
1673
1674 return ret;
1675 }
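Note: since vmw_write() now takes hw_lock itself, vmw_fifo_ping_host() no longer needs a big lock; it only has to make its read-check-write of SVGA_FIFO_BUSY atomic, which the function-local ping_lock provides. A hedged sketch of that shape; busy_reg stands in for SVGA_FIFO_BUSY and kick() for the SVGA_REG_SYNC write.

    #include <linux/io.h>
    #include <linux/spinlock.h>

    static void demo_ping(void __iomem *busy_reg, void (*kick)(void))
    {
    	static DEFINE_SPINLOCK(ping_lock);
    	unsigned long flags;

    	/* No atomic test-and-set exists for the BUSY word, so a small
    	 * dedicated lock makes the sequence atomic instead. */
    	spin_lock_irqsave(&ping_lock, flags);
    	if (ioread32(busy_reg) == 0) {
    		iowrite32(1, busy_reg);
    		kick();		/* ring the device only on the 0 -> 1 edge */
    	}
    	spin_unlock_irqrestore(&ping_lock, flags);
    }
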
1676 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
1677 index 37881ecf5d7a..69c8ce23123c 100644
1678 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
1679 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
1680 @@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
1681 (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
1682 compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
1683
1684 - mutex_lock(&dev_priv->hw_mutex);
1685 + spin_lock(&dev_priv->cap_lock);
1686 for (i = 0; i < max_size; ++i) {
1687 vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
1688 compat_cap->pairs[i][0] = i;
1689 compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
1690 }
1691 - mutex_unlock(&dev_priv->hw_mutex);
1692 + spin_unlock(&dev_priv->cap_lock);
1693
1694 return 0;
1695 }
1696 @@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
1697 if (num > SVGA3D_DEVCAP_MAX)
1698 num = SVGA3D_DEVCAP_MAX;
1699
1700 - mutex_lock(&dev_priv->hw_mutex);
1701 + spin_lock(&dev_priv->cap_lock);
1702 for (i = 0; i < num; ++i) {
1703 vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
1704 *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
1705 }
1706 - mutex_unlock(&dev_priv->hw_mutex);
1707 + spin_unlock(&dev_priv->cap_lock);
1708 } else if (gb_objects) {
1709 ret = vmw_fill_compat_cap(dev_priv, bounce, size);
1710 if (unlikely(ret != 0))
1711 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
1712 index 0c423766c441..9fe9827ee499 100644
1713 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
1714 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
1715 @@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
1716
1717 static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
1718 {
1719 - uint32_t busy;
1720
1721 - mutex_lock(&dev_priv->hw_mutex);
1722 - busy = vmw_read(dev_priv, SVGA_REG_BUSY);
1723 - mutex_unlock(&dev_priv->hw_mutex);
1724 -
1725 - return (busy == 0);
1726 + return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
1727 }
1728
1729 void vmw_update_seqno(struct vmw_private *dev_priv,
1730 @@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
1731
1732 void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
1733 {
1734 - mutex_lock(&dev_priv->hw_mutex);
1735 + spin_lock(&dev_priv->waiter_lock);
1736 if (dev_priv->fence_queue_waiters++ == 0) {
1737 unsigned long irq_flags;
1738
1739 @@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
1740 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
1741 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
1742 }
1743 - mutex_unlock(&dev_priv->hw_mutex);
1744 + spin_unlock(&dev_priv->waiter_lock);
1745 }
1746
1747 void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
1748 {
1749 - mutex_lock(&dev_priv->hw_mutex);
1750 + spin_lock(&dev_priv->waiter_lock);
1751 if (--dev_priv->fence_queue_waiters == 0) {
1752 unsigned long irq_flags;
1753
1754 @@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
1755 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
1756 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
1757 }
1758 - mutex_unlock(&dev_priv->hw_mutex);
1759 + spin_unlock(&dev_priv->waiter_lock);
1760 }
1761
1762
1763 void vmw_goal_waiter_add(struct vmw_private *dev_priv)
1764 {
1765 - mutex_lock(&dev_priv->hw_mutex);
1766 + spin_lock(&dev_priv->waiter_lock);
1767 if (dev_priv->goal_queue_waiters++ == 0) {
1768 unsigned long irq_flags;
1769
1770 @@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv)
1771 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
1772 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
1773 }
1774 - mutex_unlock(&dev_priv->hw_mutex);
1775 + spin_unlock(&dev_priv->waiter_lock);
1776 }
1777
1778 void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
1779 {
1780 - mutex_lock(&dev_priv->hw_mutex);
1781 + spin_lock(&dev_priv->waiter_lock);
1782 if (--dev_priv->goal_queue_waiters == 0) {
1783 unsigned long irq_flags;
1784
1785 @@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
1786 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
1787 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
1788 }
1789 - mutex_unlock(&dev_priv->hw_mutex);
1790 + spin_unlock(&dev_priv->waiter_lock);
1791 }
1792
1793 int vmw_wait_seqno(struct vmw_private *dev_priv,
1794 @@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev)
1795 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
1796 return;
1797
1798 - mutex_lock(&dev_priv->hw_mutex);
1799 vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
1800 - mutex_unlock(&dev_priv->hw_mutex);
1801
1802 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
1803 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
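Note: the waiter helpers above all follow one pattern: the first waiter unmasks an interrupt source and the last one masks it again, with waiter_lock keeping the count and the mask update atomic. A small sketch with illustrative names; the real driver additionally pushes irq_mask to the device under its separate irq_lock.

    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define DEMO_IRQFLAG_FENCE (1 << 0)

    struct demo_dev {
    	spinlock_t waiter_lock;
    	int waiters;
    	u32 irq_mask;	/* the real driver also writes this to hardware */
    };

    static void demo_waiter_add(struct demo_dev *d)
    {
    	spin_lock(&d->waiter_lock);
    	if (d->waiters++ == 0)
    		d->irq_mask |= DEMO_IRQFLAG_FENCE;	/* unmask on 0 -> 1 */
    	spin_unlock(&d->waiter_lock);
    }

    static void demo_waiter_remove(struct demo_dev *d)
    {
    	spin_lock(&d->waiter_lock);
    	if (--d->waiters == 0)
    		d->irq_mask &= ~DEMO_IRQFLAG_FENCE;	/* mask on 1 -> 0 */
    	spin_unlock(&d->waiter_lock);
    }
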
1804 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1805 index 941a7bc0b791..fddd53335237 100644
1806 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1807 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1808 @@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
1809 struct vmw_private *dev_priv = vmw_priv(dev);
1810 struct vmw_display_unit *du = vmw_connector_to_du(connector);
1811
1812 - mutex_lock(&dev_priv->hw_mutex);
1813 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
1814 - mutex_unlock(&dev_priv->hw_mutex);
1815
1816 return ((vmw_connector_to_du(connector)->unit < num_displays &&
1817 du->pref_active) ?
1818 diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
1819 index 3cccff73b9b9..a994477bd25a 100644
1820 --- a/drivers/hid/hid-rmi.c
1821 +++ b/drivers/hid/hid-rmi.c
1822 @@ -584,11 +584,15 @@ static int rmi_populate_f11(struct hid_device *hdev)
1823 bool has_query10 = false;
1824 bool has_query11;
1825 bool has_query12;
1826 + bool has_query27;
1827 + bool has_query28;
1828 + bool has_query36 = false;
1829 bool has_physical_props;
1830 bool has_gestures;
1831 bool has_rel;
1832 + bool has_data40 = false;
1833 unsigned x_size, y_size;
1834 - u16 query12_offset;
1835 + u16 query_offset;
1836
1837 if (!data->f11.query_base_addr) {
1838 hid_err(hdev, "No 2D sensor found, giving up.\n");
1839 @@ -604,6 +608,8 @@ static int rmi_populate_f11(struct hid_device *hdev)
1840 has_query9 = !!(buf[0] & BIT(3));
1841 has_query11 = !!(buf[0] & BIT(4));
1842 has_query12 = !!(buf[0] & BIT(5));
1843 + has_query27 = !!(buf[0] & BIT(6));
1844 + has_query28 = !!(buf[0] & BIT(7));
1845
1846 /* query 1 to get the max number of fingers */
1847 ret = rmi_read(hdev, data->f11.query_base_addr + 1, buf);
1848 @@ -642,27 +648,27 @@ static int rmi_populate_f11(struct hid_device *hdev)
1849 * +1 for query 5 which is present since absolute events are
1850 * reported and +1 for query 12.
1851 */
1852 - query12_offset = 6;
1853 + query_offset = 6;
1854
1855 if (has_rel)
1856 - ++query12_offset; /* query 6 is present */
1857 + ++query_offset; /* query 6 is present */
1858
1859 if (has_gestures)
1860 - query12_offset += 2; /* query 7 and 8 are present */
1861 + query_offset += 2; /* query 7 and 8 are present */
1862
1863 if (has_query9)
1864 - ++query12_offset;
1865 + ++query_offset;
1866
1867 if (has_query10)
1868 - ++query12_offset;
1869 + ++query_offset;
1870
1871 if (has_query11)
1872 - ++query12_offset;
1873 + ++query_offset;
1874
1875 /* query 12 to know if the physical properties are reported */
1876 if (has_query12) {
1877 ret = rmi_read(hdev, data->f11.query_base_addr
1878 - + query12_offset, buf);
1879 + + query_offset, buf);
1880 if (ret) {
1881 hid_err(hdev, "can not get query 12: %d.\n", ret);
1882 return ret;
1883 @@ -670,9 +676,10 @@ static int rmi_populate_f11(struct hid_device *hdev)
1884 has_physical_props = !!(buf[0] & BIT(5));
1885
1886 if (has_physical_props) {
1887 + query_offset += 1;
1888 ret = rmi_read_block(hdev,
1889 data->f11.query_base_addr
1890 - + query12_offset + 1, buf, 4);
1891 + + query_offset, buf, 4);
1892 if (ret) {
1893 hid_err(hdev, "can not read query 15-18: %d.\n",
1894 ret);
1895 @@ -687,9 +694,45 @@ static int rmi_populate_f11(struct hid_device *hdev)
1896
1897 hid_info(hdev, "%s: size in mm: %d x %d\n",
1898 __func__, data->x_size_mm, data->y_size_mm);
1899 +
1900 + /*
1901 + * query 15 - 18 contain the size of the sensor
1902 + * and query 19 - 26 contain bezel dimensions
1903 + */
1904 + query_offset += 12;
1905 + }
1906 + }
1907 +
1908 + if (has_query27)
1909 + ++query_offset;
1910 +
1911 + if (has_query28) {
1912 + ret = rmi_read(hdev, data->f11.query_base_addr
1913 + + query_offset, buf);
1914 + if (ret) {
1915 + hid_err(hdev, "can not get query 28: %d.\n", ret);
1916 + return ret;
1917 + }
1918 +
1919 + has_query36 = !!(buf[0] & BIT(6));
1920 + }
1921 +
1922 + if (has_query36) {
1923 + query_offset += 2;
1924 + ret = rmi_read(hdev, data->f11.query_base_addr
1925 + + query_offset, buf);
1926 + if (ret) {
1927 + hid_err(hdev, "can not get query 36: %d.\n", ret);
1928 + return ret;
1929 }
1930 +
1931 + has_data40 = !!(buf[0] & BIT(5));
1932 }
1933
1934 +
1935 + if (has_data40)
1936 + data->f11.report_size += data->max_fingers * 2;
1937 +
1938 /*
1939 * retrieve the ctrl registers
1940 * the ctrl register has a size of 20 but a fw bug split it into 16 + 4,
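Note: the rename from query12_offset to query_offset reflects what the new code does: the offset keeps accumulating past query 12 toward queries 27, 28 and 36, with each optional register shifting every later address. A self-contained sketch of the bookkeeping for the always-documented part; flag meanings follow the hunk above, the function name is illustrative.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t f11_query_offset(bool has_rel, bool has_gestures,
    				 bool has_query9, bool has_query10,
    				 bool has_query11)
    {
    	uint16_t off = 6;	/* queries 0-5 are always present */

    	if (has_rel)
    		off += 1;	/* query 6 */
    	if (has_gestures)
    		off += 2;	/* queries 7 and 8 */
    	if (has_query9)
    		off += 1;
    	if (has_query10)
    		off += 1;
    	if (has_query11)
    		off += 1;
    	return off;		/* address offset of query 12, if present */
    }

    int main(void)
    {
    	printf("query 12 at offset %u\n",
    	       (unsigned)f11_query_offset(true, true, false, false, true));
    	return 0;
    }
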
1941 diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
1942 index 65244774bfa3..c127af99a0e0 100644
1943 --- a/drivers/i2c/busses/i2c-s3c2410.c
1944 +++ b/drivers/i2c/busses/i2c-s3c2410.c
1945 @@ -778,14 +778,16 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
1946 int ret;
1947
1948 pm_runtime_get_sync(&adap->dev);
1949 - clk_prepare_enable(i2c->clk);
1950 + ret = clk_enable(i2c->clk);
1951 + if (ret)
1952 + return ret;
1953
1954 for (retry = 0; retry < adap->retries; retry++) {
1955
1956 ret = s3c24xx_i2c_doxfer(i2c, msgs, num);
1957
1958 if (ret != -EAGAIN) {
1959 - clk_disable_unprepare(i2c->clk);
1960 + clk_disable(i2c->clk);
1961 pm_runtime_put(&adap->dev);
1962 return ret;
1963 }
1964 @@ -795,7 +797,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
1965 udelay(100);
1966 }
1967
1968 - clk_disable_unprepare(i2c->clk);
1969 + clk_disable(i2c->clk);
1970 pm_runtime_put(&adap->dev);
1971 return -EREMOTEIO;
1972 }
1973 @@ -1174,7 +1176,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
1974
1975 clk_prepare_enable(i2c->clk);
1976 ret = s3c24xx_i2c_init(i2c);
1977 - clk_disable_unprepare(i2c->clk);
1978 + clk_disable(i2c->clk);
1979 if (ret != 0) {
1980 dev_err(&pdev->dev, "I2C controller init failed\n");
1981 return ret;
1982 @@ -1187,6 +1189,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
1983 i2c->irq = ret = platform_get_irq(pdev, 0);
1984 if (ret <= 0) {
1985 dev_err(&pdev->dev, "cannot find IRQ\n");
1986 + clk_unprepare(i2c->clk);
1987 return ret;
1988 }
1989
1990 @@ -1195,6 +1198,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
1991
1992 if (ret != 0) {
1993 dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
1994 + clk_unprepare(i2c->clk);
1995 return ret;
1996 }
1997 }
1998 @@ -1202,6 +1206,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
1999 ret = s3c24xx_i2c_register_cpufreq(i2c);
2000 if (ret < 0) {
2001 dev_err(&pdev->dev, "failed to register cpufreq notifier\n");
2002 + clk_unprepare(i2c->clk);
2003 return ret;
2004 }
2005
2006 @@ -1218,6 +1223,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
2007 if (ret < 0) {
2008 dev_err(&pdev->dev, "failed to add bus to i2c core\n");
2009 s3c24xx_i2c_deregister_cpufreq(i2c);
2010 + clk_unprepare(i2c->clk);
2011 return ret;
2012 }
2013
2014 @@ -1239,6 +1245,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
2015 {
2016 struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
2017
2018 + clk_unprepare(i2c->clk);
2019 +
2020 pm_runtime_disable(&i2c->adap.dev);
2021 pm_runtime_disable(&pdev->dev);
2022
2023 @@ -1267,10 +1275,13 @@ static int s3c24xx_i2c_resume_noirq(struct device *dev)
2024 {
2025 struct platform_device *pdev = to_platform_device(dev);
2026 struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
2027 + int ret;
2028
2029 - clk_prepare_enable(i2c->clk);
2030 + ret = clk_enable(i2c->clk);
2031 + if (ret)
2032 + return ret;
2033 s3c24xx_i2c_init(i2c);
2034 - clk_disable_unprepare(i2c->clk);
2035 + clk_disable(i2c->clk);
2036 i2c->suspended = 0;
2037
2038 return 0;
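Note: the i2c-s3c2410 change splits clk_prepare_enable() so that the sleeping clk_prepare() happens once at probe while the cheap clk_enable()/clk_disable() pair brackets each transfer; the added clk_unprepare() calls keep the probe error paths and remove balanced. A hedged sketch of the split, with illustrative demo_* wrappers around the stock clk API.

    #include <linux/clk.h>

    static int demo_probe(struct clk *clk)
    {
    	int ret = clk_prepare(clk);	/* slow part, process context only */
    	if (ret)
    		return ret;
    	/* ... register the adapter ... */
    	return 0;
    }

    static int demo_xfer(struct clk *clk)
    {
    	int ret = clk_enable(clk);	/* fast part, called per transfer */
    	if (ret)
    		return ret;
    	/* ... do the I/O ... */
    	clk_disable(clk);
    	return 0;
    }

    static void demo_remove(struct clk *clk)
    {
    	clk_unprepare(clk);		/* undo the probe-time prepare */
    }
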
2039 diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
2040 index 77ecf6d32237..6e22682c8255 100644
2041 --- a/drivers/input/mouse/elantech.c
2042 +++ b/drivers/input/mouse/elantech.c
2043 @@ -1097,6 +1097,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
2044 * Asus UX31 0x361f00 20, 15, 0e clickpad
2045 * Asus UX32VD 0x361f02 00, 15, 0e clickpad
2046 * Avatar AVIU-145A2 0x361f00 ? clickpad
2047 + * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
2048 + * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
2049 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
2050 * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
2051 * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
2052 @@ -1475,6 +1477,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
2053 DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
2054 },
2055 },
2056 + {
2057 + /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
2058 + .matches = {
2059 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
2060 + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
2061 + },
2062 + },
2063 + {
2064 + /* Fujitsu LIFEBOOK E544 does not work with crc_enabled == 0 */
2065 + .matches = {
2066 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
2067 + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
2068 + },
2069 + },
2070 #endif
2071 { }
2072 };
2073 diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
2074 index f9472920d986..23e26e0768b5 100644
2075 --- a/drivers/input/mouse/synaptics.c
2076 +++ b/drivers/input/mouse/synaptics.c
2077 @@ -135,8 +135,9 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
2078 1232, 5710, 1156, 4696
2079 },
2080 {
2081 - (const char * const []){"LEN0034", "LEN0036", "LEN0039",
2082 - "LEN2002", "LEN2004", NULL},
2083 + (const char * const []){"LEN0034", "LEN0036", "LEN0037",
2084 + "LEN0039", "LEN2002", "LEN2004",
2085 + NULL},
2086 1024, 5112, 2024, 4832
2087 },
2088 {
2089 @@ -165,7 +166,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
2090 "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
2091 "LEN0035", /* X240 */
2092 "LEN0036", /* T440 */
2093 - "LEN0037",
2094 + "LEN0037", /* X1 Carbon 2nd */
2095 "LEN0038",
2096 "LEN0039", /* T440s */
2097 "LEN0041",
2098 diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
2099 index 1a858c86a72b..39bec4715f2c 100644
2100 --- a/drivers/input/serio/i8042-x86ia64io.h
2101 +++ b/drivers/input/serio/i8042-x86ia64io.h
2102 @@ -152,6 +152,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
2103 },
2104 },
2105 {
2106 + /* Medion Akoya E7225 */
2107 + .matches = {
2108 + DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
2109 + DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
2110 + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
2111 + },
2112 + },
2113 + {
2114 /* Blue FB5601 */
2115 .matches = {
2116 DMI_MATCH(DMI_SYS_VENDOR, "blue"),
2117 diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
2118 index 97e3a6c07e31..1e64e9c50d85 100644
2119 --- a/drivers/md/dm-cache-metadata.c
2120 +++ b/drivers/md/dm-cache-metadata.c
2121 @@ -683,7 +683,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
2122 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2123 if (!cmd) {
2124 DMERR("could not allocate metadata struct");
2125 - return NULL;
2126 + return ERR_PTR(-ENOMEM);
2127 }
2128
2129 atomic_set(&cmd->ref_count, 1);
2130 @@ -745,7 +745,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
2131 return cmd;
2132
2133 cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
2134 - if (cmd) {
2135 + if (!IS_ERR(cmd)) {
2136 mutex_lock(&table_lock);
2137 cmd2 = lookup(bdev);
2138 if (cmd2) {
2139 @@ -780,9 +780,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
2140 {
2141 struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
2142 may_format_device, policy_hint_size);
2143 - if (cmd && !same_params(cmd, data_block_size)) {
2144 +
2145 + if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
2146 dm_cache_metadata_close(cmd);
2147 - return NULL;
2148 + return ERR_PTR(-EINVAL);
2149 }
2150
2151 return cmd;
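Note: metadata_open() now returns ERR_PTR(-ENOMEM) or ERR_PTR(-EINVAL) instead of NULL, so callers can tell failures apart and propagate a real errno. A minimal sketch of the convention; demo_* names are illustrative, while ERR_PTR/IS_ERR/PTR_ERR are the stock <linux/err.h> helpers.

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_md { int refs; };	/* stand-in for dm_cache_metadata */

    static struct demo_md *demo_open(bool params_ok)
    {
    	struct demo_md *md = kzalloc(sizeof(*md), GFP_KERNEL);

    	if (!md)
    		return ERR_PTR(-ENOMEM);
    	if (!params_ok) {
    		kfree(md);
    		return ERR_PTR(-EINVAL);
    	}
    	return md;
    }

    static int demo_user(void)
    {
    	struct demo_md *md = demo_open(true);

    	if (IS_ERR(md))
    		return PTR_ERR(md);	/* propagate the real errno */
    	/* ... use md ... */
    	kfree(md);
    	return 0;
    }
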
2152 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
2153 index aae19133cfac..ac6b0ff161ea 100644
2154 --- a/drivers/md/dm-thin.c
2155 +++ b/drivers/md/dm-thin.c
2156 @@ -2978,6 +2978,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2157 struct pool_c *pt = ti->private;
2158 struct pool *pool = pt->pool;
2159
2160 + if (get_pool_mode(pool) >= PM_READ_ONLY) {
2161 + DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
2162 + dm_device_name(pool->pool_md));
2163 + return -EINVAL;
2164 + }
2165 +
2166 if (!strcasecmp(argv[0], "create_thin"))
2167 r = process_create_thin_mesg(argc, argv, pool);
2168
2169 diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
2170 index 8e78bb48f5a4..60285820f7b4 100644
2171 --- a/drivers/net/can/c_can/c_can.c
2172 +++ b/drivers/net/can/c_can/c_can.c
2173 @@ -611,6 +611,10 @@ static void c_can_stop(struct net_device *dev)
2174 struct c_can_priv *priv = netdev_priv(dev);
2175
2176 c_can_irq_control(priv, false);
2177 +
2178 + /* put the controller into init mode on stop to end ongoing transmission */
2179 + priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT);
2180 +
2181 priv->can.state = CAN_STATE_STOPPED;
2182 }
2183
2184 diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
2185 index cc7bfc0c0a71..8b255e777cc7 100644
2186 --- a/drivers/net/can/usb/kvaser_usb.c
2187 +++ b/drivers/net/can/usb/kvaser_usb.c
2188 @@ -587,7 +587,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
2189 usb_sndbulkpipe(dev->udev,
2190 dev->bulk_out->bEndpointAddress),
2191 buf, msg->len,
2192 - kvaser_usb_simple_msg_callback, priv);
2193 + kvaser_usb_simple_msg_callback, netdev);
2194 usb_anchor_urb(urb, &priv->tx_submitted);
2195
2196 err = usb_submit_urb(urb, GFP_ATOMIC);
2197 @@ -662,11 +662,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
2198 priv = dev->nets[channel];
2199 stats = &priv->netdev->stats;
2200
2201 - if (status & M16C_STATE_BUS_RESET) {
2202 - kvaser_usb_unlink_tx_urbs(priv);
2203 - return;
2204 - }
2205 -
2206 skb = alloc_can_err_skb(priv->netdev, &cf);
2207 if (!skb) {
2208 stats->rx_dropped++;
2209 @@ -677,7 +672,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
2210
2211 netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
2212
2213 - if (status & M16C_STATE_BUS_OFF) {
2214 + if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
2215 cf->can_id |= CAN_ERR_BUSOFF;
2216
2217 priv->can.can_stats.bus_off++;
2218 @@ -703,9 +698,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
2219 }
2220
2221 new_state = CAN_STATE_ERROR_PASSIVE;
2222 - }
2223 -
2224 - if (status == M16C_STATE_BUS_ERROR) {
2225 + } else if (status & M16C_STATE_BUS_ERROR) {
2226 if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
2227 ((txerr >= 96) || (rxerr >= 96))) {
2228 cf->can_id |= CAN_ERR_CRTL;
2229 @@ -715,7 +708,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
2230
2231 priv->can.can_stats.error_warning++;
2232 new_state = CAN_STATE_ERROR_WARNING;
2233 - } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
2234 + } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) &&
2235 + ((txerr < 96) && (rxerr < 96))) {
2236 cf->can_id |= CAN_ERR_PROT;
2237 cf->data[2] = CAN_ERR_PROT_ACTIVE;
2238
2239 @@ -1593,7 +1587,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
2240 {
2241 struct kvaser_usb *dev;
2242 int err = -ENOMEM;
2243 - int i;
2244 + int i, retry = 3;
2245
2246 dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
2247 if (!dev)
2248 @@ -1611,7 +1605,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
2249
2250 usb_set_intfdata(intf, dev);
2251
2252 - err = kvaser_usb_get_software_info(dev);
2253 + /* On some x86 laptops, plugging a Kvaser device again after
2254 + * an unplug makes the firmware always ignore the very first
2255 + * command. For such a case, provide some room for retries
2256 + * instead of completely exiting the driver.
2257 + */
2258 + do {
2259 + err = kvaser_usb_get_software_info(dev);
2260 + } while (--retry && err == -ETIMEDOUT);
2261 +
2262 if (err) {
2263 dev_err(&intf->dev,
2264 "Cannot get software infos, error %d\n", err);
2265 diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
2266 index 64d1cef4cda1..48645504106e 100644
2267 --- a/drivers/net/ethernet/ti/cpsw.c
2268 +++ b/drivers/net/ethernet/ti/cpsw.c
2269 @@ -1676,6 +1676,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
2270 if (vid == priv->data.default_vlan)
2271 return 0;
2272
2273 + if (priv->data.dual_emac) {
2274 + /* In dual EMAC, reserved VLAN id should not be used for
2275 + * creating VLAN interfaces as this can break the dual
2276 + * EMAC port separation
2277 + */
2278 + int i;
2279 +
2280 + for (i = 0; i < priv->data.slaves; i++) {
2281 + if (vid == priv->slaves[i].port_vlan)
2282 + return -EINVAL;
2283 + }
2284 + }
2285 +
2286 dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
2287 return cpsw_add_vlan_ale_entry(priv, vid);
2288 }
2289 @@ -1689,6 +1702,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
2290 if (vid == priv->data.default_vlan)
2291 return 0;
2292
2293 + if (priv->data.dual_emac) {
2294 + int i;
2295 +
2296 + for (i = 0; i < priv->data.slaves; i++) {
2297 + if (vid == priv->slaves[i].port_vlan)
2298 + return -EINVAL;
2299 + }
2300 + }
2301 +
2302 dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
2303 ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
2304 if (ret != 0)
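Note: in dual-EMAC mode each slave port owns a reserved VLAN id, and the hunks above reject attempts to add or remove those ids through the VLAN filter. The check itself is a plain linear scan; a sketch with illustrative types.

    #include <stdbool.h>

    struct demo_slave { int port_vlan; };

    /* A VLAN id already reserved as a slave's port VLAN must not be
     * added to (or removed from) the filter. */
    static bool demo_vid_is_reserved(const struct demo_slave *slaves,
    				 int nslaves, int vid)
    {
    	int i;

    	for (i = 0; i < nslaves; i++)
    		if (vid == slaves[i].port_vlan)
    			return true;
    	return false;
    }
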
2305 diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
2306 index 354a81d40925..d6380c187db6 100644
2307 --- a/drivers/pinctrl/pinctrl-at91.c
2308 +++ b/drivers/pinctrl/pinctrl-at91.c
2309 @@ -179,7 +179,7 @@ struct at91_pinctrl {
2310 struct device *dev;
2311 struct pinctrl_dev *pctl;
2312
2313 - int nbanks;
2314 + int nactive_banks;
2315
2316 uint32_t *mux_mask;
2317 int nmux;
2318 @@ -655,12 +655,18 @@ static int pin_check_config(struct at91_pinctrl *info, const char *name,
2319 int mux;
2320
2321 /* check if it's a valid config */
2322 - if (pin->bank >= info->nbanks) {
2323 + if (pin->bank >= gpio_banks) {
2324 dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n",
2325 - name, index, pin->bank, info->nbanks);
2326 + name, index, pin->bank, gpio_banks);
2327 return -EINVAL;
2328 }
2329
2330 + if (!gpio_chips[pin->bank]) {
2331 + dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n",
2332 + name, index, pin->bank);
2333 + return -ENXIO;
2334 + }
2335 +
2336 if (pin->pin >= MAX_NB_GPIO_PER_BANK) {
2337 dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n",
2338 name, index, pin->pin, MAX_NB_GPIO_PER_BANK);
2339 @@ -983,7 +989,8 @@ static void at91_pinctrl_child_count(struct at91_pinctrl *info,
2340
2341 for_each_child_of_node(np, child) {
2342 if (of_device_is_compatible(child, gpio_compat)) {
2343 - info->nbanks++;
2344 + if (of_device_is_available(child))
2345 + info->nactive_banks++;
2346 } else {
2347 info->nfunctions++;
2348 info->ngroups += of_get_child_count(child);
2349 @@ -1005,11 +1012,11 @@ static int at91_pinctrl_mux_mask(struct at91_pinctrl *info,
2350 }
2351
2352 size /= sizeof(*list);
2353 - if (!size || size % info->nbanks) {
2354 - dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks);
2355 + if (!size || size % gpio_banks) {
2356 + dev_err(info->dev, "wrong mux mask array should be by %d\n", gpio_banks);
2357 return -EINVAL;
2358 }
2359 - info->nmux = size / info->nbanks;
2360 + info->nmux = size / gpio_banks;
2361
2362 info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL);
2363 if (!info->mux_mask) {
2364 @@ -1133,7 +1140,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
2365 of_match_device(at91_pinctrl_of_match, &pdev->dev)->data;
2366 at91_pinctrl_child_count(info, np);
2367
2368 - if (info->nbanks < 1) {
2369 + if (gpio_banks < 1) {
2370 dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n");
2371 return -EINVAL;
2372 }
2373 @@ -1146,7 +1153,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
2374
2375 dev_dbg(&pdev->dev, "mux-mask\n");
2376 tmp = info->mux_mask;
2377 - for (i = 0; i < info->nbanks; i++) {
2378 + for (i = 0; i < gpio_banks; i++) {
2379 for (j = 0; j < info->nmux; j++, tmp++) {
2380 dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]);
2381 }
2382 @@ -1164,7 +1171,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
2383 if (!info->groups)
2384 return -ENOMEM;
2385
2386 - dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks);
2387 + dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks);
2388 dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
2389 dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups);
2390
2391 @@ -1187,7 +1194,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
2392 {
2393 struct at91_pinctrl *info;
2394 struct pinctrl_pin_desc *pdesc;
2395 - int ret, i, j, k;
2396 + int ret, i, j, k, ngpio_chips_enabled = 0;
2397
2398 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
2399 if (!info)
2400 @@ -1202,23 +1209,27 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
2401 * to obtain references to the struct gpio_chip * for them, and we
2402 * need this to proceed.
2403 */
2404 - for (i = 0; i < info->nbanks; i++) {
2405 - if (!gpio_chips[i]) {
2406 - dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
2407 - devm_kfree(&pdev->dev, info);
2408 - return -EPROBE_DEFER;
2409 - }
2410 + for (i = 0; i < gpio_banks; i++)
2411 + if (gpio_chips[i])
2412 + ngpio_chips_enabled++;
2413 +
2414 + if (ngpio_chips_enabled < info->nactive_banks) {
2415 + dev_warn(&pdev->dev,
2416 + "All GPIO chips are not registered yet (%d/%d)\n",
2417 + ngpio_chips_enabled, info->nactive_banks);
2418 + devm_kfree(&pdev->dev, info);
2419 + return -EPROBE_DEFER;
2420 }
2421
2422 at91_pinctrl_desc.name = dev_name(&pdev->dev);
2423 - at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK;
2424 + at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK;
2425 at91_pinctrl_desc.pins = pdesc =
2426 devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL);
2427
2428 if (!at91_pinctrl_desc.pins)
2429 return -ENOMEM;
2430
2431 - for (i = 0 , k = 0; i < info->nbanks; i++) {
2432 + for (i = 0, k = 0; i < gpio_banks; i++) {
2433 for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) {
2434 pdesc->number = k;
2435 pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j);
2436 @@ -1236,8 +1247,9 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
2437 }
2438
2439 /* We will handle a range of GPIO pins */
2440 - for (i = 0; i < info->nbanks; i++)
2441 - pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
2442 + for (i = 0; i < gpio_banks; i++)
2443 + if (gpio_chips[i])
2444 + pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
2445
2446 dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n");
2447
2448 @@ -1614,9 +1626,10 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
2449 static int at91_gpio_of_irq_setup(struct platform_device *pdev,
2450 struct at91_gpio_chip *at91_gpio)
2451 {
2452 + struct gpio_chip *gpiochip_prev = NULL;
2453 struct at91_gpio_chip *prev = NULL;
2454 struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq);
2455 - int ret;
2456 + int ret, i;
2457
2458 at91_gpio->pioc_hwirq = irqd_to_hwirq(d);
2459
2460 @@ -1642,24 +1655,33 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
2461 return ret;
2462 }
2463
2464 - /* Setup chained handler */
2465 - if (at91_gpio->pioc_idx)
2466 - prev = gpio_chips[at91_gpio->pioc_idx - 1];
2467 -
2468 /* The top level handler handles one bank of GPIOs, except
2469 * on some SoC it can handle up to three...
2470 * We only set up the handler for the first of the list.
2471 */
2472 - if (prev && prev->next == at91_gpio)
2473 + gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq);
2474 + if (!gpiochip_prev) {
2475 + /* Then register the chain on the parent IRQ */
2476 + gpiochip_set_chained_irqchip(&at91_gpio->chip,
2477 + &gpio_irqchip,
2478 + at91_gpio->pioc_virq,
2479 + gpio_irq_handler);
2480 return 0;
2481 + }
2482
2483 - /* Then register the chain on the parent IRQ */
2484 - gpiochip_set_chained_irqchip(&at91_gpio->chip,
2485 - &gpio_irqchip,
2486 - at91_gpio->pioc_virq,
2487 - gpio_irq_handler);
2488 + prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip);
2489
2490 - return 0;
2491 + /* we can only have 2 banks before */
2492 + for (i = 0; i < 2; i++) {
2493 + if (prev->next) {
2494 + prev = prev->next;
2495 + } else {
2496 + prev->next = at91_gpio;
2497 + return 0;
2498 + }
2499 + }
2500 +
2501 + return -EINVAL;
2502 }
2503
2504 /* This structure is replicated for each GPIO block allocated at probe time */
2505 @@ -1676,24 +1698,6 @@ static struct gpio_chip at91_gpio_template = {
2506 .ngpio = MAX_NB_GPIO_PER_BANK,
2507 };
2508
2509 -static void at91_gpio_probe_fixup(void)
2510 -{
2511 - unsigned i;
2512 - struct at91_gpio_chip *at91_gpio, *last = NULL;
2513 -
2514 - for (i = 0; i < gpio_banks; i++) {
2515 - at91_gpio = gpio_chips[i];
2516 -
2517 - /*
2518 - * GPIO controller are grouped on some SoC:
2519 - * PIOC, PIOD and PIOE can share the same IRQ line
2520 - */
2521 - if (last && last->pioc_virq == at91_gpio->pioc_virq)
2522 - last->next = at91_gpio;
2523 - last = at91_gpio;
2524 - }
2525 -}
2526 -
2527 static struct of_device_id at91_gpio_of_match[] = {
2528 { .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, },
2529 { .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops },
2530 @@ -1806,8 +1810,6 @@ static int at91_gpio_probe(struct platform_device *pdev)
2531 gpio_chips[alias_idx] = at91_chip;
2532 gpio_banks = max(gpio_banks, alias_idx + 1);
2533
2534 - at91_gpio_probe_fixup();
2535 -
2536 ret = at91_gpio_of_irq_setup(pdev, at91_chip);
2537 if (ret)
2538 goto irq_setup_err;
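Note: the pinctrl-at91 rework stops requiring every bank index to be populated and instead defers probing until the number of registered GPIO chips matches the number of banks enabled in the device tree. A sketch of that deferral test, with illustrative names.

    #include <linux/errno.h>

    static int demo_check_banks(void *chips[], int nbanks, int nactive_banks)
    {
    	int i, registered = 0;

    	for (i = 0; i < nbanks; i++)
    		if (chips[i])		/* disabled banks stay NULL */
    			registered++;

    	return (registered < nactive_banks) ? -EPROBE_DEFER : 0;
    }
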
2539 diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
2540 index cd87c0c37034..fc6fb5422b6f 100644
2541 --- a/drivers/regulator/core.c
2542 +++ b/drivers/regulator/core.c
2543 @@ -1488,7 +1488,7 @@ struct regulator *regulator_get_optional(struct device *dev, const char *id)
2544 }
2545 EXPORT_SYMBOL_GPL(regulator_get_optional);
2546
2547 -/* Locks held by regulator_put() */
2548 +/* regulator_list_mutex lock held by regulator_put() */
2549 static void _regulator_put(struct regulator *regulator)
2550 {
2551 struct regulator_dev *rdev;
2552 @@ -1503,12 +1503,14 @@ static void _regulator_put(struct regulator *regulator)
2553 /* remove any sysfs entries */
2554 if (regulator->dev)
2555 sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
2556 + mutex_lock(&rdev->mutex);
2557 kfree(regulator->supply_name);
2558 list_del(&regulator->list);
2559 kfree(regulator);
2560
2561 rdev->open_count--;
2562 rdev->exclusive = 0;
2563 + mutex_unlock(&rdev->mutex);
2564
2565 module_put(rdev->owner);
2566 }
2567 diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
2568 index 8754c33361e8..28799d39db8e 100644
2569 --- a/drivers/rtc/rtc-s5m.c
2570 +++ b/drivers/rtc/rtc-s5m.c
2571 @@ -832,6 +832,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
2572 static const struct platform_device_id s5m_rtc_id[] = {
2573 { "s5m-rtc", S5M8767X },
2574 { "s2mps14-rtc", S2MPS14X },
2575 + { },
2576 };
2577
2578 static struct platform_driver s5m_rtc_driver = {
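Note: the one-line rtc-s5m fix adds the empty sentinel that platform_device_id tables must end with; the id-matching code walks entries until it hits an all-zero one, so an unterminated table reads past its end. A sketch of a correctly terminated table; the demo names are illustrative.

    #include <linux/mod_devicetable.h>
    #include <linux/module.h>

    static const struct platform_device_id demo_id[] = {
    	{ "demo-a", 0 },
    	{ "demo-b", 1 },
    	{ },	/* sentinel: terminates the table walk */
    };
    MODULE_DEVICE_TABLE(platform, demo_id);
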
2579 diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
2580 index 46c6d58e1fda..efff55537d8a 100644
2581 --- a/drivers/spi/spi-dw-mid.c
2582 +++ b/drivers/spi/spi-dw-mid.c
2583 @@ -219,7 +219,6 @@ int dw_spi_mid_init(struct dw_spi *dws)
2584 iounmap(clk_reg);
2585
2586 dws->num_cs = 16;
2587 - dws->fifo_len = 40; /* FIFO has 40 words buffer */
2588
2589 #ifdef CONFIG_SPI_DW_MID_DMA
2590 dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
2591 diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
2592 index d0d5542efc06..1a0f266c4268 100644
2593 --- a/drivers/spi/spi-dw.c
2594 +++ b/drivers/spi/spi-dw.c
2595 @@ -621,13 +621,13 @@ static void spi_hw_init(struct dw_spi *dws)
2596 if (!dws->fifo_len) {
2597 u32 fifo;
2598
2599 - for (fifo = 2; fifo <= 257; fifo++) {
2600 + for (fifo = 2; fifo <= 256; fifo++) {
2601 dw_writew(dws, DW_SPI_TXFLTR, fifo);
2602 if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
2603 break;
2604 }
2605
2606 - dws->fifo_len = (fifo == 257) ? 0 : fifo;
2607 + dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
2608 dw_writew(dws, DW_SPI_TXFLTR, 0);
2609 }
2610 }
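Note: the spi-dw hunk fixes the FIFO-depth probe: thresholds are written to DW_SPI_TXFLTR until one fails to read back, and the corrected formula reports the last value that stuck (fifo - 1), with fifo == 2 meaning no FIFO at all. A userspace simulation of the loop, assuming (as a modeling choice, not a datasheet fact) a threshold register that accepts values up to the FIFO depth.

    #include <stdio.h>

    static unsigned txfltr;			/* models DW_SPI_TXFLTR */
    static const unsigned depth = 40;	/* modeled hardware FIFO depth */

    static void reg_write(unsigned v) { if (v <= depth) txfltr = v; }
    static unsigned reg_read(void) { return txfltr; }

    int main(void)
    {
    	unsigned fifo;

    	for (fifo = 2; fifo <= 256; fifo++) {
    		reg_write(fifo);
    		if (fifo != reg_read())	/* write didn't stick: out of range */
    			break;
    	}
    	/* fifo == 2 means even the smallest threshold failed: no FIFO. */
    	printf("fifo_len = %u\n", fifo == 2 ? 0 : fifo - 1);
    	reg_write(0);			/* restore the reset value */
    	return 0;
    }
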
2611 diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
2612 index 9e9e0f971e6c..d95656d05eb6 100644
2613 --- a/drivers/spi/spi-pxa2xx.c
2614 +++ b/drivers/spi/spi-pxa2xx.c
2615 @@ -402,8 +402,8 @@ static void giveback(struct driver_data *drv_data)
2616 cs_deassert(drv_data);
2617 }
2618
2619 - spi_finalize_current_message(drv_data->master);
2620 drv_data->cur_chip = NULL;
2621 + spi_finalize_current_message(drv_data->master);
2622 }
2623
2624 static void reset_sccr1(struct driver_data *drv_data)
2625 diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
2626 index d3f967a78138..1f453b275dbc 100644
2627 --- a/drivers/staging/lustre/lustre/llite/vvp_io.c
2628 +++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
2629 @@ -632,7 +632,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
2630 return 0;
2631 }
2632
2633 - if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
2634 + if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
2635 CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
2636 return -EFAULT;
2637 }
2638 diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
2639 index de0c9c9d7091..a6315abe7b7c 100644
2640 --- a/drivers/usb/core/otg_whitelist.h
2641 +++ b/drivers/usb/core/otg_whitelist.h
2642 @@ -55,6 +55,11 @@ static int is_targeted(struct usb_device *dev)
2643 le16_to_cpu(dev->descriptor.idProduct) == 0xbadd))
2644 return 0;
2645
2646 + /* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */
2647 + if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a &&
2648 + le16_to_cpu(dev->descriptor.idProduct) == 0x0200))
2649 + return 1;
2650 +
2651 /* NOTE: can't use usb_match_id() since interface caches
2652 * aren't set up yet. this is cut/paste from that code.
2653 */
2654 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
2655 index 0ffb4ed0a945..41e510ae8c83 100644
2656 --- a/drivers/usb/core/quirks.c
2657 +++ b/drivers/usb/core/quirks.c
2658 @@ -179,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = {
2659 { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
2660 USB_QUIRK_IGNORE_REMOTE_WAKEUP },
2661
2662 + /* Protocol and OTG Electrical Test Device */
2663 + { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
2664 + USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
2665 +
2666 { } /* terminating entry must be last */
2667 };
2668
2669 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
2670 index 11c7a9676441..8adb53044079 100644
2671 --- a/drivers/usb/storage/unusual_devs.h
2672 +++ b/drivers/usb/storage/unusual_devs.h
2673 @@ -1995,6 +1995,13 @@ UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100,
2674 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2675 US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
2676
2677 +/* Reported by Dmitry Nezhevenko <dion@dion.org.ua> */
2678 +UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114,
2679 + "JMicron",
2680 + "USB to ATA/ATAPI Bridge",
2681 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2682 + US_FL_BROKEN_FUA ),
2683 +
2684 /* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
2685 * and Mac USB Dock USB-SCSI */
2686 UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133,
2687 diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
2688 index 1f430bb02ca1..2706a434fdbb 100644
2689 --- a/drivers/usb/storage/unusual_uas.h
2690 +++ b/drivers/usb/storage/unusual_uas.h
2691 @@ -138,3 +138,10 @@ UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
2692 "External HDD",
2693 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2694 US_FL_IGNORE_UAS),
2695 +
2696 +/* Reported-by: Richard Henderson <rth@redhat.com> */
2697 +UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999,
2698 + "SimpleTech",
2699 + "External HDD",
2700 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2701 + US_FL_NO_REPORT_OPCODES),
2702 diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
2703 index f3a9d831d0f9..c9d0d5a0e662 100644
2704 --- a/drivers/xen/swiotlb-xen.c
2705 +++ b/drivers/xen/swiotlb-xen.c
2706 @@ -397,7 +397,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
2707 * buffering it.
2708 */
2709 if (dma_capable(dev, dev_addr, size) &&
2710 - !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
2711 + !range_straddles_page_boundary(phys, size) &&
2712 + !xen_arch_need_swiotlb(dev, PFN_DOWN(phys), PFN_DOWN(dev_addr)) &&
2713 + !swiotlb_force) {
2714 /* we are not interested in the dma_addr returned by
2715 * xen_dma_map_page, only in the potential cache flushes executed
2716 * by the function. */
2717 @@ -555,6 +557,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
2718 dma_addr_t dev_addr = xen_phys_to_bus(paddr);
2719
2720 if (swiotlb_force ||
2721 + xen_arch_need_swiotlb(hwdev, PFN_DOWN(paddr), PFN_DOWN(dev_addr)) ||
2722 !dma_capable(hwdev, dev_addr, sg->length) ||
2723 range_straddles_page_boundary(paddr, sg->length)) {
2724 phys_addr_t map = swiotlb_tbl_map_single(hwdev,
2725 diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
2726 index 64b29f7f6b4c..dc482ffff659 100644
2727 --- a/fs/gfs2/quota.c
2728 +++ b/fs/gfs2/quota.c
2729 @@ -667,7 +667,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
2730
2731 static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
2732 s64 change, struct gfs2_quota_data *qd,
2733 - struct fs_disk_quota *fdq)
2734 + struct qc_dqblk *fdq)
2735 {
2736 struct inode *inode = &ip->i_inode;
2737 struct gfs2_sbd *sdp = GFS2_SB(inode);
2738 @@ -697,16 +697,16 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
2739 be64_add_cpu(&q.qu_value, change);
2740 qd->qd_qb.qb_value = q.qu_value;
2741 if (fdq) {
2742 - if (fdq->d_fieldmask & FS_DQ_BSOFT) {
2743 - q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
2744 + if (fdq->d_fieldmask & QC_SPC_SOFT) {
2745 + q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
2746 qd->qd_qb.qb_warn = q.qu_warn;
2747 }
2748 - if (fdq->d_fieldmask & FS_DQ_BHARD) {
2749 - q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
2750 + if (fdq->d_fieldmask & QC_SPC_HARD) {
2751 + q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
2752 qd->qd_qb.qb_limit = q.qu_limit;
2753 }
2754 - if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
2755 - q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
2756 + if (fdq->d_fieldmask & QC_SPACE) {
2757 + q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
2758 qd->qd_qb.qb_value = q.qu_value;
2759 }
2760 }
2761 @@ -1502,7 +1502,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
2762 }
2763
2764 static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
2765 - struct fs_disk_quota *fdq)
2766 + struct qc_dqblk *fdq)
2767 {
2768 struct gfs2_sbd *sdp = sb->s_fs_info;
2769 struct gfs2_quota_lvb *qlvb;
2770 @@ -1510,7 +1510,7 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
2771 struct gfs2_holder q_gh;
2772 int error;
2773
2774 - memset(fdq, 0, sizeof(struct fs_disk_quota));
2775 + memset(fdq, 0, sizeof(*fdq));
2776
2777 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
2778 return -ESRCH; /* Crazy XFS error code */
2779 @@ -1527,12 +1527,9 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
2780 goto out;
2781
2782 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
2783 - fdq->d_version = FS_DQUOT_VERSION;
2784 - fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
2785 - fdq->d_id = from_kqid_munged(current_user_ns(), qid);
2786 - fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
2787 - fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
2788 - fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
2789 + fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
2790 + fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
2791 + fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
2792
2793 gfs2_glock_dq_uninit(&q_gh);
2794 out:
2795 @@ -1541,10 +1538,10 @@ out:
2796 }
2797
2798 /* GFS2 only supports a subset of the XFS fields */
2799 -#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
2800 +#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
2801
2802 static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
2803 - struct fs_disk_quota *fdq)
2804 + struct qc_dqblk *fdq)
2805 {
2806 struct gfs2_sbd *sdp = sb->s_fs_info;
2807 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
2808 @@ -1588,17 +1585,17 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
2809 goto out_i;
2810
2811 /* If nothing has changed, this is a no-op */
2812 - if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
2813 - ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
2814 - fdq->d_fieldmask ^= FS_DQ_BSOFT;
2815 + if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
2816 + ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
2817 + fdq->d_fieldmask ^= QC_SPC_SOFT;
2818
2819 - if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
2820 - ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
2821 - fdq->d_fieldmask ^= FS_DQ_BHARD;
2822 + if ((fdq->d_fieldmask & QC_SPC_HARD) &&
2823 + ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
2824 + fdq->d_fieldmask ^= QC_SPC_HARD;
2825
2826 - if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
2827 - ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
2828 - fdq->d_fieldmask ^= FS_DQ_BCOUNT;
2829 + if ((fdq->d_fieldmask & QC_SPACE) &&
2830 + ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
2831 + fdq->d_fieldmask ^= QC_SPACE;
2832
2833 if (fdq->d_fieldmask == 0)
2834 goto out_i;
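Note: throughout the gfs2 conversion, fs_disk_quota fields counted in 512-byte basic blocks (sd_fsb2bb_shift) give way to qc_dqblk fields counted in bytes, hence the switch to the filesystem block-size shift. A tiny worked example of the conversion; the values are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	unsigned bsize_shift = 12;		/* 4 KiB filesystem blocks */
    	uint64_t ondisk_fs_blocks = 25600;	/* limit as stored on disk */

    	/* on-disk blocks -> bytes for qc_dqblk, and back again */
    	uint64_t limit_bytes = ondisk_fs_blocks << bsize_shift;
    	uint64_t back = limit_bytes >> bsize_shift;

    	printf("%llu bytes, %llu blocks\n",
    	       (unsigned long long)limit_bytes, (unsigned long long)back);
    	return 0;
    }
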
2835 diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
2836 index 10bf07280f4a..294692ff83b1 100644
2837 --- a/fs/nfs/direct.c
2838 +++ b/fs/nfs/direct.c
2839 @@ -212,6 +212,12 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
2840 */
2841 ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
2842 {
2843 + struct inode *inode = iocb->ki_filp->f_mapping->host;
2844 +
2845 + /* we only support swap file calling nfs_direct_IO */
2846 + if (!IS_SWAPFILE(inode))
2847 + return 0;
2848 +
2849 #ifndef CONFIG_NFS_SWAP
2850 dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
2851 iocb->ki_filp, (long long) pos, iter->nr_segs);
2852 diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
2853 index 9588873d4c46..368a6b72290c 100644
2854 --- a/fs/nfs/nfs4client.c
2855 +++ b/fs/nfs/nfs4client.c
2856 @@ -640,7 +640,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
2857 prev = pos;
2858
2859 status = nfs_wait_client_init_complete(pos);
2860 - if (status == 0) {
2861 + if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
2862 nfs4_schedule_lease_recovery(pos);
2863 status = nfs4_wait_clnt_recover(pos);
2864 }
2865 diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
2866 index 6b4527216a7f..9340228aff6e 100644
2867 --- a/fs/quota/dquot.c
2868 +++ b/fs/quota/dquot.c
2869 @@ -2391,30 +2391,25 @@ static inline qsize_t stoqb(qsize_t space)
2870 }
2871
2872 /* Generic routine for getting common part of quota structure */
2873 -static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
2874 +static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
2875 {
2876 struct mem_dqblk *dm = &dquot->dq_dqb;
2877
2878 memset(di, 0, sizeof(*di));
2879 - di->d_version = FS_DQUOT_VERSION;
2880 - di->d_flags = dquot->dq_id.type == USRQUOTA ?
2881 - FS_USER_QUOTA : FS_GROUP_QUOTA;
2882 - di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id);
2883 -
2884 spin_lock(&dq_data_lock);
2885 - di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
2886 - di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
2887 + di->d_spc_hardlimit = dm->dqb_bhardlimit;
2888 + di->d_spc_softlimit = dm->dqb_bsoftlimit;
2889 di->d_ino_hardlimit = dm->dqb_ihardlimit;
2890 di->d_ino_softlimit = dm->dqb_isoftlimit;
2891 - di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
2892 - di->d_icount = dm->dqb_curinodes;
2893 - di->d_btimer = dm->dqb_btime;
2894 - di->d_itimer = dm->dqb_itime;
2895 + di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
2896 + di->d_ino_count = dm->dqb_curinodes;
2897 + di->d_spc_timer = dm->dqb_btime;
2898 + di->d_ino_timer = dm->dqb_itime;
2899 spin_unlock(&dq_data_lock);
2900 }
2901
2902 int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
2903 - struct fs_disk_quota *di)
2904 + struct qc_dqblk *di)
2905 {
2906 struct dquot *dquot;
2907
2908 @@ -2428,70 +2423,70 @@ int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
2909 }
2910 EXPORT_SYMBOL(dquot_get_dqblk);
2911
2912 -#define VFS_FS_DQ_MASK \
2913 - (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
2914 - FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
2915 - FS_DQ_BTIMER | FS_DQ_ITIMER)
2916 +#define VFS_QC_MASK \
2917 + (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
2918 + QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
2919 + QC_SPC_TIMER | QC_INO_TIMER)
2920
2921 /* Generic routine for setting common part of quota structure */
2922 -static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
2923 +static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
2924 {
2925 struct mem_dqblk *dm = &dquot->dq_dqb;
2926 int check_blim = 0, check_ilim = 0;
2927 struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
2928
2929 - if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
2930 + if (di->d_fieldmask & ~VFS_QC_MASK)
2931 return -EINVAL;
2932
2933 - if (((di->d_fieldmask & FS_DQ_BSOFT) &&
2934 - (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
2935 - ((di->d_fieldmask & FS_DQ_BHARD) &&
2936 - (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
2937 - ((di->d_fieldmask & FS_DQ_ISOFT) &&
2938 + if (((di->d_fieldmask & QC_SPC_SOFT) &&
2939 + stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) ||
2940 + ((di->d_fieldmask & QC_SPC_HARD) &&
2941 + stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) ||
2942 + ((di->d_fieldmask & QC_INO_SOFT) &&
2943 (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
2944 - ((di->d_fieldmask & FS_DQ_IHARD) &&
2945 + ((di->d_fieldmask & QC_INO_HARD) &&
2946 (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
2947 return -ERANGE;
2948
2949 spin_lock(&dq_data_lock);
2950 - if (di->d_fieldmask & FS_DQ_BCOUNT) {
2951 - dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
2952 + if (di->d_fieldmask & QC_SPACE) {
2953 + dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
2954 check_blim = 1;
2955 set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
2956 }
2957
2958 - if (di->d_fieldmask & FS_DQ_BSOFT)
2959 - dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
2960 - if (di->d_fieldmask & FS_DQ_BHARD)
2961 - dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
2962 - if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
2963 + if (di->d_fieldmask & QC_SPC_SOFT)
2964 + dm->dqb_bsoftlimit = di->d_spc_softlimit;
2965 + if (di->d_fieldmask & QC_SPC_HARD)
2966 + dm->dqb_bhardlimit = di->d_spc_hardlimit;
2967 + if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
2968 check_blim = 1;
2969 set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
2970 }
2971
2972 - if (di->d_fieldmask & FS_DQ_ICOUNT) {
2973 - dm->dqb_curinodes = di->d_icount;
2974 + if (di->d_fieldmask & QC_INO_COUNT) {
2975 + dm->dqb_curinodes = di->d_ino_count;
2976 check_ilim = 1;
2977 set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
2978 }
2979
2980 - if (di->d_fieldmask & FS_DQ_ISOFT)
2981 + if (di->d_fieldmask & QC_INO_SOFT)
2982 dm->dqb_isoftlimit = di->d_ino_softlimit;
2983 - if (di->d_fieldmask & FS_DQ_IHARD)
2984 + if (di->d_fieldmask & QC_INO_HARD)
2985 dm->dqb_ihardlimit = di->d_ino_hardlimit;
2986 - if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
2987 + if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
2988 check_ilim = 1;
2989 set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
2990 }
2991
2992 - if (di->d_fieldmask & FS_DQ_BTIMER) {
2993 - dm->dqb_btime = di->d_btimer;
2994 + if (di->d_fieldmask & QC_SPC_TIMER) {
2995 + dm->dqb_btime = di->d_spc_timer;
2996 check_blim = 1;
2997 set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
2998 }
2999
3000 - if (di->d_fieldmask & FS_DQ_ITIMER) {
3001 - dm->dqb_itime = di->d_itimer;
3002 + if (di->d_fieldmask & QC_INO_TIMER) {
3003 + dm->dqb_itime = di->d_ino_timer;
3004 check_ilim = 1;
3005 set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
3006 }
3007 @@ -2501,7 +2496,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
3008 dm->dqb_curspace < dm->dqb_bsoftlimit) {
3009 dm->dqb_btime = 0;
3010 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
3011 - } else if (!(di->d_fieldmask & FS_DQ_BTIMER))
3012 + } else if (!(di->d_fieldmask & QC_SPC_TIMER))
3013 /* Set grace only if user hasn't provided his own... */
3014 dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
3015 }
3016 @@ -2510,7 +2505,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
3017 dm->dqb_curinodes < dm->dqb_isoftlimit) {
3018 dm->dqb_itime = 0;
3019 clear_bit(DQ_INODES_B, &dquot->dq_flags);
3020 - } else if (!(di->d_fieldmask & FS_DQ_ITIMER))
3021 + } else if (!(di->d_fieldmask & QC_INO_TIMER))
3022 /* Set grace only if user hasn't provided his own... */
3023 dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
3024 }
3025 @@ -2526,7 +2521,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
3026 }
3027
3028 int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
3029 - struct fs_disk_quota *di)
3030 + struct qc_dqblk *di)
3031 {
3032 struct dquot *dquot;
3033 int rc;
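
The hunks above rework fs/quota/dquot.c from the block-based fs_disk_quota interface to the byte-based struct qc_dqblk: space limits are now stored directly in bytes (no qbtos() at set time), and only the range check against dqi_maxblimit converts back with stoqb(). A minimal userspace sketch of those two helpers and the round-up that motivates the check (constants taken from the hunks; not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define QIF_DQBLKSIZE_BITS 10                    /* VFS quota blocks are 1 KiB */
#define QIF_DQBLKSIZE (1ULL << QIF_DQBLKSIZE_BITS)

static uint64_t qbtos(uint64_t blocks)           /* quota blocks -> bytes */
{
        return blocks << QIF_DQBLKSIZE_BITS;
}

static uint64_t stoqb(uint64_t space)            /* bytes -> quota blocks, rounded up */
{
        return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}

int main(void)
{
        /* whole blocks survive the round trip... */
        printf("%llu\n", (unsigned long long)stoqb(qbtos(1024)));  /* 1024 */
        /* ...while a partial block rounds up, which is why do_set_dqblk()
         * compares stoqb(d_spc_softlimit) against dqi_maxblimit rather
         * than the raw byte value */
        printf("%llu\n", (unsigned long long)stoqb(1025));         /* 2 */
        return 0;
}
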
3034 diff --git a/fs/quota/quota.c b/fs/quota/quota.c
3035 index 75621649dbd7..2ce66201c366 100644
3036 --- a/fs/quota/quota.c
3037 +++ b/fs/quota/quota.c
3038 @@ -115,17 +115,27 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
3039 return sb->s_qcop->set_info(sb, type, &info);
3040 }
3041
3042 -static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
3043 +static inline qsize_t qbtos(qsize_t blocks)
3044 +{
3045 + return blocks << QIF_DQBLKSIZE_BITS;
3046 +}
3047 +
3048 +static inline qsize_t stoqb(qsize_t space)
3049 +{
3050 + return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
3051 +}
3052 +
3053 +static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
3054 {
3055 memset(dst, 0, sizeof(*dst));
3056 - dst->dqb_bhardlimit = src->d_blk_hardlimit;
3057 - dst->dqb_bsoftlimit = src->d_blk_softlimit;
3058 - dst->dqb_curspace = src->d_bcount;
3059 + dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
3060 + dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
3061 + dst->dqb_curspace = src->d_space;
3062 dst->dqb_ihardlimit = src->d_ino_hardlimit;
3063 dst->dqb_isoftlimit = src->d_ino_softlimit;
3064 - dst->dqb_curinodes = src->d_icount;
3065 - dst->dqb_btime = src->d_btimer;
3066 - dst->dqb_itime = src->d_itimer;
3067 + dst->dqb_curinodes = src->d_ino_count;
3068 + dst->dqb_btime = src->d_spc_timer;
3069 + dst->dqb_itime = src->d_ino_timer;
3070 dst->dqb_valid = QIF_ALL;
3071 }
3072
3073 @@ -133,7 +143,7 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
3074 void __user *addr)
3075 {
3076 struct kqid qid;
3077 - struct fs_disk_quota fdq;
3078 + struct qc_dqblk fdq;
3079 struct if_dqblk idq;
3080 int ret;
3081
3082 @@ -151,36 +161,36 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
3083 return 0;
3084 }
3085
3086 -static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
3087 +static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
3088 {
3089 - dst->d_blk_hardlimit = src->dqb_bhardlimit;
3090 - dst->d_blk_softlimit = src->dqb_bsoftlimit;
3091 - dst->d_bcount = src->dqb_curspace;
3092 + dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
3093 + dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
3094 + dst->d_space = src->dqb_curspace;
3095 dst->d_ino_hardlimit = src->dqb_ihardlimit;
3096 dst->d_ino_softlimit = src->dqb_isoftlimit;
3097 - dst->d_icount = src->dqb_curinodes;
3098 - dst->d_btimer = src->dqb_btime;
3099 - dst->d_itimer = src->dqb_itime;
3100 + dst->d_ino_count = src->dqb_curinodes;
3101 + dst->d_spc_timer = src->dqb_btime;
3102 + dst->d_ino_timer = src->dqb_itime;
3103
3104 dst->d_fieldmask = 0;
3105 if (src->dqb_valid & QIF_BLIMITS)
3106 - dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
3107 + dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
3108 if (src->dqb_valid & QIF_SPACE)
3109 - dst->d_fieldmask |= FS_DQ_BCOUNT;
3110 + dst->d_fieldmask |= QC_SPACE;
3111 if (src->dqb_valid & QIF_ILIMITS)
3112 - dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
3113 + dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
3114 if (src->dqb_valid & QIF_INODES)
3115 - dst->d_fieldmask |= FS_DQ_ICOUNT;
3116 + dst->d_fieldmask |= QC_INO_COUNT;
3117 if (src->dqb_valid & QIF_BTIME)
3118 - dst->d_fieldmask |= FS_DQ_BTIMER;
3119 + dst->d_fieldmask |= QC_SPC_TIMER;
3120 if (src->dqb_valid & QIF_ITIME)
3121 - dst->d_fieldmask |= FS_DQ_ITIMER;
3122 + dst->d_fieldmask |= QC_INO_TIMER;
3123 }
3124
3125 static int quota_setquota(struct super_block *sb, int type, qid_t id,
3126 void __user *addr)
3127 {
3128 - struct fs_disk_quota fdq;
3129 + struct qc_dqblk fdq;
3130 struct if_dqblk idq;
3131 struct kqid qid;
3132
3133 @@ -244,10 +254,78 @@ static int quota_getxstatev(struct super_block *sb, void __user *addr)
3134 return ret;
3135 }
3136
3137 +/*
3138 + * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
3139 + * out of there as xfsprogs rely on definitions being in that header file. So
3140 + * just define same functions here for quota purposes.
3141 + */
3142 +#define XFS_BB_SHIFT 9
3143 +
3144 +static inline u64 quota_bbtob(u64 blocks)
3145 +{
3146 + return blocks << XFS_BB_SHIFT;
3147 +}
3148 +
3149 +static inline u64 quota_btobb(u64 bytes)
3150 +{
3151 + return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
3152 +}
3153 +
3154 +static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
3155 +{
3156 + dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
3157 + dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
3158 + dst->d_ino_hardlimit = src->d_ino_hardlimit;
3159 + dst->d_ino_softlimit = src->d_ino_softlimit;
3160 + dst->d_space = quota_bbtob(src->d_bcount);
3161 + dst->d_ino_count = src->d_icount;
3162 + dst->d_ino_timer = src->d_itimer;
3163 + dst->d_spc_timer = src->d_btimer;
3164 + dst->d_ino_warns = src->d_iwarns;
3165 + dst->d_spc_warns = src->d_bwarns;
3166 + dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
3167 + dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
3168 + dst->d_rt_space = quota_bbtob(src->d_rtbcount);
3169 + dst->d_rt_spc_timer = src->d_rtbtimer;
3170 + dst->d_rt_spc_warns = src->d_rtbwarns;
3171 + dst->d_fieldmask = 0;
3172 + if (src->d_fieldmask & FS_DQ_ISOFT)
3173 + dst->d_fieldmask |= QC_INO_SOFT;
3174 + if (src->d_fieldmask & FS_DQ_IHARD)
3175 + dst->d_fieldmask |= QC_INO_HARD;
3176 + if (src->d_fieldmask & FS_DQ_BSOFT)
3177 + dst->d_fieldmask |= QC_SPC_SOFT;
3178 + if (src->d_fieldmask & FS_DQ_BHARD)
3179 + dst->d_fieldmask |= QC_SPC_HARD;
3180 + if (src->d_fieldmask & FS_DQ_RTBSOFT)
3181 + dst->d_fieldmask |= QC_RT_SPC_SOFT;
3182 + if (src->d_fieldmask & FS_DQ_RTBHARD)
3183 + dst->d_fieldmask |= QC_RT_SPC_HARD;
3184 + if (src->d_fieldmask & FS_DQ_BTIMER)
3185 + dst->d_fieldmask |= QC_SPC_TIMER;
3186 + if (src->d_fieldmask & FS_DQ_ITIMER)
3187 + dst->d_fieldmask |= QC_INO_TIMER;
3188 + if (src->d_fieldmask & FS_DQ_RTBTIMER)
3189 + dst->d_fieldmask |= QC_RT_SPC_TIMER;
3190 + if (src->d_fieldmask & FS_DQ_BWARNS)
3191 + dst->d_fieldmask |= QC_SPC_WARNS;
3192 + if (src->d_fieldmask & FS_DQ_IWARNS)
3193 + dst->d_fieldmask |= QC_INO_WARNS;
3194 + if (src->d_fieldmask & FS_DQ_RTBWARNS)
3195 + dst->d_fieldmask |= QC_RT_SPC_WARNS;
3196 + if (src->d_fieldmask & FS_DQ_BCOUNT)
3197 + dst->d_fieldmask |= QC_SPACE;
3198 + if (src->d_fieldmask & FS_DQ_ICOUNT)
3199 + dst->d_fieldmask |= QC_INO_COUNT;
3200 + if (src->d_fieldmask & FS_DQ_RTBCOUNT)
3201 + dst->d_fieldmask |= QC_RT_SPACE;
3202 +}
3203 +
3204 static int quota_setxquota(struct super_block *sb, int type, qid_t id,
3205 void __user *addr)
3206 {
3207 struct fs_disk_quota fdq;
3208 + struct qc_dqblk qdq;
3209 struct kqid qid;
3210
3211 if (copy_from_user(&fdq, addr, sizeof(fdq)))
3212 @@ -257,13 +335,44 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
3213 qid = make_kqid(current_user_ns(), type, id);
3214 if (!qid_valid(qid))
3215 return -EINVAL;
3216 - return sb->s_qcop->set_dqblk(sb, qid, &fdq);
3217 + copy_from_xfs_dqblk(&qdq, &fdq);
3218 + return sb->s_qcop->set_dqblk(sb, qid, &qdq);
3219 +}
3220 +
3221 +static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
3222 + int type, qid_t id)
3223 +{
3224 + memset(dst, 0, sizeof(*dst));
3225 + dst->d_version = FS_DQUOT_VERSION;
3226 + dst->d_id = id;
3227 + if (type == USRQUOTA)
3228 + dst->d_flags = FS_USER_QUOTA;
3229 + else if (type == PRJQUOTA)
3230 + dst->d_flags = FS_PROJ_QUOTA;
3231 + else
3232 + dst->d_flags = FS_GROUP_QUOTA;
3233 + dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
3234 + dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
3235 + dst->d_ino_hardlimit = src->d_ino_hardlimit;
3236 + dst->d_ino_softlimit = src->d_ino_softlimit;
3237 + dst->d_bcount = quota_btobb(src->d_space);
3238 + dst->d_icount = src->d_ino_count;
3239 + dst->d_itimer = src->d_ino_timer;
3240 + dst->d_btimer = src->d_spc_timer;
3241 + dst->d_iwarns = src->d_ino_warns;
3242 + dst->d_bwarns = src->d_spc_warns;
3243 + dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
3244 + dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
3245 + dst->d_rtbcount = quota_btobb(src->d_rt_space);
3246 + dst->d_rtbtimer = src->d_rt_spc_timer;
3247 + dst->d_rtbwarns = src->d_rt_spc_warns;
3248 }
3249
3250 static int quota_getxquota(struct super_block *sb, int type, qid_t id,
3251 void __user *addr)
3252 {
3253 struct fs_disk_quota fdq;
3254 + struct qc_dqblk qdq;
3255 struct kqid qid;
3256 int ret;
3257
3258 @@ -272,8 +381,11 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
3259 qid = make_kqid(current_user_ns(), type, id);
3260 if (!qid_valid(qid))
3261 return -EINVAL;
3262 - ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
3263 - if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
3264 + ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
3265 + if (ret)
3266 + return ret;
3267 + copy_to_xfs_dqblk(&fdq, &qdq, type, id);
3268 + if (copy_to_user(addr, &fdq, sizeof(fdq)))
3269 return -EFAULT;
3270 return ret;
3271 }
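
With the helpers above, fs/quota/quota.c becomes the single place where user-ABI units are translated: Q_GETQUOTA/Q_SETQUOTA keep speaking 1 KiB quota blocks, Q_XGETQUOTA/Q_XSETQUOTA keep speaking 512-byte "basic blocks", and everything below ->get_dqblk()/->set_dqblk() is bytes. A small standalone check of the basic-block helpers (XFS_BB_SHIFT copied from the hunk; not part of the patch):

#include <assert.h>
#include <stdint.h>

#define XFS_BB_SHIFT 9                           /* 512-byte basic blocks */

static uint64_t quota_bbtob(uint64_t blocks)
{
        return blocks << XFS_BB_SHIFT;
}

static uint64_t quota_btobb(uint64_t bytes)
{
        return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
}

int main(void)
{
        assert(quota_bbtob(8) == 4096);           /* 8 BBs == 4 KiB */
        assert(quota_btobb(quota_bbtob(8)) == 8); /* exact round trip */
        assert(quota_btobb(1) == 1);              /* partial BBs round up */
        return 0;
}
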
3272 diff --git a/fs/udf/file.c b/fs/udf/file.c
3273 index bb15771b92ae..08f3555fbeac 100644
3274 --- a/fs/udf/file.c
3275 +++ b/fs/udf/file.c
3276 @@ -224,7 +224,7 @@ out:
3277 static int udf_release_file(struct inode *inode, struct file *filp)
3278 {
3279 if (filp->f_mode & FMODE_WRITE &&
3280 - atomic_read(&inode->i_writecount) > 1) {
3281 + atomic_read(&inode->i_writecount) == 1) {
3282 /*
3283 * Grab i_mutex to avoid races with writes changing i_size
3284 * while we are running.
3285 diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
3286 index 3a07a937e232..41f6c0b9d51c 100644
3287 --- a/fs/xfs/xfs_qm.h
3288 +++ b/fs/xfs/xfs_qm.h
3289 @@ -166,9 +166,9 @@ extern void xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint);
3290 /* quota ops */
3291 extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint);
3292 extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t,
3293 - uint, struct fs_disk_quota *);
3294 + uint, struct qc_dqblk *);
3295 extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint,
3296 - struct fs_disk_quota *);
3297 + struct qc_dqblk *);
3298 extern int xfs_qm_scall_getqstat(struct xfs_mount *,
3299 struct fs_quota_stat *);
3300 extern int xfs_qm_scall_getqstatv(struct xfs_mount *,
3301 diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
3302 index 80f2d77d929a..327f85abbea8 100644
3303 --- a/fs/xfs/xfs_qm_syscalls.c
3304 +++ b/fs/xfs/xfs_qm_syscalls.c
3305 @@ -40,7 +40,6 @@ STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
3306 STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
3307 uint);
3308 STATIC uint xfs_qm_export_flags(uint);
3309 -STATIC uint xfs_qm_export_qtype_flags(uint);
3310
3311 /*
3312 * Turn off quota accounting and/or enforcement for all udquots and/or
3313 @@ -574,8 +573,8 @@ xfs_qm_scall_getqstatv(
3314 return 0;
3315 }
3316
3317 -#define XFS_DQ_MASK \
3318 - (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
3319 +#define XFS_QC_MASK \
3320 + (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)
3321
3322 /*
3323 * Adjust quota limits, and start/stop timers accordingly.
3324 @@ -585,7 +584,7 @@ xfs_qm_scall_setqlim(
3325 struct xfs_mount *mp,
3326 xfs_dqid_t id,
3327 uint type,
3328 - fs_disk_quota_t *newlim)
3329 + struct qc_dqblk *newlim)
3330 {
3331 struct xfs_quotainfo *q = mp->m_quotainfo;
3332 struct xfs_disk_dquot *ddq;
3333 @@ -594,9 +593,9 @@ xfs_qm_scall_setqlim(
3334 int error;
3335 xfs_qcnt_t hard, soft;
3336
3337 - if (newlim->d_fieldmask & ~XFS_DQ_MASK)
3338 + if (newlim->d_fieldmask & ~XFS_QC_MASK)
3339 return -EINVAL;
3340 - if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
3341 + if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
3342 return 0;
3343
3344 /*
3345 @@ -634,11 +633,11 @@ xfs_qm_scall_setqlim(
3346 /*
3347 * Make sure that hardlimits are >= soft limits before changing.
3348 */
3349 - hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
3350 - (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
3351 + hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
3352 + (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
3353 be64_to_cpu(ddq->d_blk_hardlimit);
3354 - soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
3355 - (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
3356 + soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
3357 + (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
3358 be64_to_cpu(ddq->d_blk_softlimit);
3359 if (hard == 0 || hard >= soft) {
3360 ddq->d_blk_hardlimit = cpu_to_be64(hard);
3361 @@ -651,11 +650,11 @@ xfs_qm_scall_setqlim(
3362 } else {
3363 xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
3364 }
3365 - hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
3366 - (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
3367 + hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
3368 + (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
3369 be64_to_cpu(ddq->d_rtb_hardlimit);
3370 - soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
3371 - (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
3372 + soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
3373 + (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
3374 be64_to_cpu(ddq->d_rtb_softlimit);
3375 if (hard == 0 || hard >= soft) {
3376 ddq->d_rtb_hardlimit = cpu_to_be64(hard);
3377 @@ -668,10 +667,10 @@ xfs_qm_scall_setqlim(
3378 xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
3379 }
3380
3381 - hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
3382 + hard = (newlim->d_fieldmask & QC_INO_HARD) ?
3383 (xfs_qcnt_t) newlim->d_ino_hardlimit :
3384 be64_to_cpu(ddq->d_ino_hardlimit);
3385 - soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
3386 + soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
3387 (xfs_qcnt_t) newlim->d_ino_softlimit :
3388 be64_to_cpu(ddq->d_ino_softlimit);
3389 if (hard == 0 || hard >= soft) {
3390 @@ -688,12 +687,12 @@ xfs_qm_scall_setqlim(
3391 /*
3392 * Update warnings counter(s) if requested
3393 */
3394 - if (newlim->d_fieldmask & FS_DQ_BWARNS)
3395 - ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
3396 - if (newlim->d_fieldmask & FS_DQ_IWARNS)
3397 - ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
3398 - if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
3399 - ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);
3400 + if (newlim->d_fieldmask & QC_SPC_WARNS)
3401 + ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
3402 + if (newlim->d_fieldmask & QC_INO_WARNS)
3403 + ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
3404 + if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
3405 + ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);
3406
3407 if (id == 0) {
3408 /*
3409 @@ -703,24 +702,24 @@ xfs_qm_scall_setqlim(
3410 * soft and hard limit values (already done, above), and
3411 * for warnings.
3412 */
3413 - if (newlim->d_fieldmask & FS_DQ_BTIMER) {
3414 - q->qi_btimelimit = newlim->d_btimer;
3415 - ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
3416 + if (newlim->d_fieldmask & QC_SPC_TIMER) {
3417 + q->qi_btimelimit = newlim->d_spc_timer;
3418 + ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
3419 }
3420 - if (newlim->d_fieldmask & FS_DQ_ITIMER) {
3421 - q->qi_itimelimit = newlim->d_itimer;
3422 - ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
3423 + if (newlim->d_fieldmask & QC_INO_TIMER) {
3424 + q->qi_itimelimit = newlim->d_ino_timer;
3425 + ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
3426 }
3427 - if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
3428 - q->qi_rtbtimelimit = newlim->d_rtbtimer;
3429 - ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
3430 + if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
3431 + q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
3432 + ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
3433 }
3434 - if (newlim->d_fieldmask & FS_DQ_BWARNS)
3435 - q->qi_bwarnlimit = newlim->d_bwarns;
3436 - if (newlim->d_fieldmask & FS_DQ_IWARNS)
3437 - q->qi_iwarnlimit = newlim->d_iwarns;
3438 - if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
3439 - q->qi_rtbwarnlimit = newlim->d_rtbwarns;
3440 + if (newlim->d_fieldmask & QC_SPC_WARNS)
3441 + q->qi_bwarnlimit = newlim->d_spc_warns;
3442 + if (newlim->d_fieldmask & QC_INO_WARNS)
3443 + q->qi_iwarnlimit = newlim->d_ino_warns;
3444 + if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
3445 + q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
3446 } else {
3447 /*
3448 * If the user is now over quota, start the timelimit.
3449 @@ -831,7 +830,7 @@ xfs_qm_scall_getquota(
3450 struct xfs_mount *mp,
3451 xfs_dqid_t id,
3452 uint type,
3453 - struct fs_disk_quota *dst)
3454 + struct qc_dqblk *dst)
3455 {
3456 struct xfs_dquot *dqp;
3457 int error;
3458 @@ -855,28 +854,25 @@ xfs_qm_scall_getquota(
3459 }
3460
3461 memset(dst, 0, sizeof(*dst));
3462 - dst->d_version = FS_DQUOT_VERSION;
3463 - dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags);
3464 - dst->d_id = be32_to_cpu(dqp->q_core.d_id);
3465 - dst->d_blk_hardlimit =
3466 - XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
3467 - dst->d_blk_softlimit =
3468 - XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
3469 + dst->d_spc_hardlimit =
3470 + XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
3471 + dst->d_spc_softlimit =
3472 + XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
3473 dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
3474 dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
3475 - dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount);
3476 - dst->d_icount = dqp->q_res_icount;
3477 - dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
3478 - dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
3479 - dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
3480 - dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns);
3481 - dst->d_rtb_hardlimit =
3482 - XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
3483 - dst->d_rtb_softlimit =
3484 - XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
3485 - dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount);
3486 - dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
3487 - dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);
3488 + dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
3489 + dst->d_ino_count = dqp->q_res_icount;
3490 + dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
3491 + dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
3492 + dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
3493 + dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
3494 + dst->d_rt_spc_hardlimit =
3495 + XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
3496 + dst->d_rt_spc_softlimit =
3497 + XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
3498 + dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
3499 + dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
3500 + dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
3501
3502 /*
3503 * Internally, we don't reset all the timers when quota enforcement
3504 @@ -889,23 +885,23 @@ xfs_qm_scall_getquota(
3505 dqp->q_core.d_flags == XFS_DQ_GROUP) ||
3506 (!XFS_IS_PQUOTA_ENFORCED(mp) &&
3507 dqp->q_core.d_flags == XFS_DQ_PROJ)) {
3508 - dst->d_btimer = 0;
3509 - dst->d_itimer = 0;
3510 - dst->d_rtbtimer = 0;
3511 + dst->d_spc_timer = 0;
3512 + dst->d_ino_timer = 0;
3513 + dst->d_rt_spc_timer = 0;
3514 }
3515
3516 #ifdef DEBUG
3517 - if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
3518 - (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) ||
3519 - (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) &&
3520 - dst->d_id != 0) {
3521 - if ((dst->d_bcount > dst->d_blk_softlimit) &&
3522 - (dst->d_blk_softlimit > 0)) {
3523 - ASSERT(dst->d_btimer != 0);
3524 + if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
3525 + (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
3526 + (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
3527 + id != 0) {
3528 + if ((dst->d_space > dst->d_spc_softlimit) &&
3529 + (dst->d_spc_softlimit > 0)) {
3530 + ASSERT(dst->d_spc_timer != 0);
3531 }
3532 - if ((dst->d_icount > dst->d_ino_softlimit) &&
3533 + if ((dst->d_ino_count > dst->d_ino_softlimit) &&
3534 (dst->d_ino_softlimit > 0)) {
3535 - ASSERT(dst->d_itimer != 0);
3536 + ASSERT(dst->d_ino_timer != 0);
3537 }
3538 }
3539 #endif
3540 @@ -915,26 +911,6 @@ out_put:
3541 }
3542
3543 STATIC uint
3544 -xfs_qm_export_qtype_flags(
3545 - uint flags)
3546 -{
3547 - /*
3548 - * Can't be more than one, or none.
3549 - */
3550 - ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
3551 - (FS_PROJ_QUOTA | FS_USER_QUOTA));
3552 - ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
3553 - (FS_PROJ_QUOTA | FS_GROUP_QUOTA));
3554 - ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
3555 - (FS_USER_QUOTA | FS_GROUP_QUOTA));
3556 - ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
3557 -
3558 - return (flags & XFS_DQ_USER) ?
3559 - FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
3560 - FS_PROJ_QUOTA : FS_GROUP_QUOTA;
3561 -}
3562 -
3563 -STATIC uint
3564 xfs_qm_export_flags(
3565 uint flags)
3566 {
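
In xfs_qm_scall_setqlim() the incoming limits change meaning as well: they used to be 512-byte basic blocks converted with XFS_BB_TO_FSB(), and now arrive as bytes converted with XFS_B_TO_FSB(). A rough illustration of what that conversion does, as a simplified stand-in assuming 4 KiB filesystem blocks (the real macro uses mp->m_sb.sb_blocklog and rounds up the same way):

#include <stdint.h>

#define SB_BLOCKLOG 12                           /* assumed: 4 KiB fs blocks */

/* simplified stand-in for XFS_B_TO_FSB(mp, b): bytes -> fs blocks, rounded up */
static uint64_t b_to_fsb(uint64_t bytes)
{
        return (bytes + (1ULL << SB_BLOCKLOG) - 1) >> SB_BLOCKLOG;
}

int main(void)
{
        /* a 1 GiB hard limit arriving in d_spc_hardlimit becomes 262144 blocks */
        return b_to_fsb(1ULL << 30) == 262144 ? 0 : 1;
}
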
3567 diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
3568 index b238027df987..320c814bb9a5 100644
3569 --- a/fs/xfs/xfs_quotaops.c
3570 +++ b/fs/xfs/xfs_quotaops.c
3571 @@ -133,7 +133,7 @@ STATIC int
3572 xfs_fs_get_dqblk(
3573 struct super_block *sb,
3574 struct kqid qid,
3575 - struct fs_disk_quota *fdq)
3576 + struct qc_dqblk *qdq)
3577 {
3578 struct xfs_mount *mp = XFS_M(sb);
3579
3580 @@ -143,14 +143,14 @@ xfs_fs_get_dqblk(
3581 return -ESRCH;
3582
3583 return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid),
3584 - xfs_quota_type(qid.type), fdq);
3585 + xfs_quota_type(qid.type), qdq);
3586 }
3587
3588 STATIC int
3589 xfs_fs_set_dqblk(
3590 struct super_block *sb,
3591 struct kqid qid,
3592 - struct fs_disk_quota *fdq)
3593 + struct qc_dqblk *qdq)
3594 {
3595 struct xfs_mount *mp = XFS_M(sb);
3596
3597 @@ -162,7 +162,7 @@ xfs_fs_set_dqblk(
3598 return -ESRCH;
3599
3600 return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid),
3601 - xfs_quota_type(qid.type), fdq);
3602 + xfs_quota_type(qid.type), qdq);
3603 }
3604
3605 const struct quotactl_ops xfs_quotactl_operations = {
3606 diff --git a/include/linux/mm.h b/include/linux/mm.h
3607 index 5ab2da9811c1..86a977bf4f79 100644
3608 --- a/include/linux/mm.h
3609 +++ b/include/linux/mm.h
3610 @@ -1054,6 +1054,7 @@ static inline int page_mapped(struct page *page)
3611 #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
3612 #define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
3613 #define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
3614 +#define VM_FAULT_SIGSEGV 0x0040
3615
3616 #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
3617 #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
3618 @@ -1062,8 +1063,9 @@ static inline int page_mapped(struct page *page)
3619
3620 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
3621
3622 -#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
3623 - VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
3624 +#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
3625 + VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
3626 + VM_FAULT_FALLBACK)
3627
3628 /* Encode hstate index for a hwpoisoned large page */
3629 #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
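
Because the new VM_FAULT_SIGSEGV bit is folded into VM_FAULT_ERROR above, every generic "if (fault & VM_FAULT_ERROR)" check already routes it into the per-architecture error paths; only code that decides which signal or errno to produce needs explicit knowledge of the bit. A compile-time sanity sketch of those two facts (hypothetical helper, kernel context):

#include <linux/bug.h>
#include <linux/mm.h>

static inline void vm_fault_sigsegv_sanity(void)
{
        /* the new bit participates in the generic error mask... */
        BUILD_BUG_ON(!(VM_FAULT_ERROR & VM_FAULT_SIGSEGV));
        /* ...and does not collide with the hwpoison hpage-index bits */
        BUILD_BUG_ON(VM_FAULT_SIGSEGV & VM_FAULT_HWPOISON_LARGE_MASK);
}
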
3630 diff --git a/include/linux/quota.h b/include/linux/quota.h
3631 index 80d345a3524c..224fb8154f8f 100644
3632 --- a/include/linux/quota.h
3633 +++ b/include/linux/quota.h
3634 @@ -316,6 +316,49 @@ struct dquot_operations {
3635
3636 struct path;
3637
3638 +/* Structure for communicating via ->get_dqblk() & ->set_dqblk() */
3639 +struct qc_dqblk {
3640 + int d_fieldmask; /* mask of fields to change in ->set_dqblk() */
3641 + u64 d_spc_hardlimit; /* absolute limit on used space */
3642 + u64 d_spc_softlimit; /* preferred limit on used space */
3643 + u64 d_ino_hardlimit; /* maximum # allocated inodes */
3644 + u64 d_ino_softlimit; /* preferred inode limit */
3645 + u64 d_space; /* Space owned by the user */
3646 + u64 d_ino_count; /* # inodes owned by the user */
3647 + s64 d_ino_timer; /* zero if within inode limits */
3648 + /* if not, we refuse service */
3649 + s64 d_spc_timer; /* similar to above; for space */
3650 + int d_ino_warns; /* # warnings issued wrt num inodes */
3651 + int d_spc_warns; /* # warnings issued wrt used space */
3652 + u64 d_rt_spc_hardlimit; /* absolute limit on realtime space */
3653 + u64 d_rt_spc_softlimit; /* preferred limit on RT space */
3654 + u64 d_rt_space; /* realtime space owned */
3655 + s64 d_rt_spc_timer; /* similar to above; for RT space */
3656 + int d_rt_spc_warns; /* # warnings issued wrt RT space */
3657 +};
3658 +
3659 +/* Field specifiers for ->set_dqblk() in struct qc_dqblk */
3660 +#define QC_INO_SOFT (1<<0)
3661 +#define QC_INO_HARD (1<<1)
3662 +#define QC_SPC_SOFT (1<<2)
3663 +#define QC_SPC_HARD (1<<3)
3664 +#define QC_RT_SPC_SOFT (1<<4)
3665 +#define QC_RT_SPC_HARD (1<<5)
3666 +#define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \
3667 + QC_RT_SPC_SOFT | QC_RT_SPC_HARD)
3668 +#define QC_SPC_TIMER (1<<6)
3669 +#define QC_INO_TIMER (1<<7)
3670 +#define QC_RT_SPC_TIMER (1<<8)
3671 +#define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER)
3672 +#define QC_SPC_WARNS (1<<9)
3673 +#define QC_INO_WARNS (1<<10)
3674 +#define QC_RT_SPC_WARNS (1<<11)
3675 +#define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS)
3676 +#define QC_SPACE (1<<12)
3677 +#define QC_INO_COUNT (1<<13)
3678 +#define QC_RT_SPACE (1<<14)
3679 +#define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE)
3680 +
3681 /* Operations handling requests from userspace */
3682 struct quotactl_ops {
3683 int (*quota_on)(struct super_block *, int, int, struct path *);
3684 @@ -324,8 +367,8 @@ struct quotactl_ops {
3685 int (*quota_sync)(struct super_block *, int);
3686 int (*get_info)(struct super_block *, int, struct if_dqinfo *);
3687 int (*set_info)(struct super_block *, int, struct if_dqinfo *);
3688 - int (*get_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
3689 - int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
3690 + int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
3691 + int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
3692 int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
3693 int (*set_xstate)(struct super_block *, unsigned int, int);
3694 int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
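
The new struct qc_dqblk above deliberately has no unit ambiguity: space fields are bytes, inode fields are raw counts, and d_fieldmask uses the QC_* specifiers. A hypothetical filesystem's ->get_dqblk() therefore reduces to filling the structure directly; the foofs_* names below are invented for illustration, this is a sketch rather than code from the patch:

#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/string.h>

static int foofs_get_dqblk(struct super_block *sb, struct kqid qid,
                           struct qc_dqblk *di)
{
        memset(di, 0, sizeof(*di));
        /* bytes and plain counts only; fs/quota/quota.c converts to the user ABI */
        di->d_spc_hardlimit = 64ULL << 20;      /* 64 MiB */
        di->d_spc_softlimit = 32ULL << 20;      /* 32 MiB */
        di->d_ino_hardlimit = 10000;
        di->d_ino_softlimit = 8000;
        di->d_space         = 12ULL << 20;      /* 12 MiB currently used */
        di->d_ino_count     = 4096;
        return 0;
}

static const struct quotactl_ops foofs_quotactl_ops = {
        .get_dqblk = foofs_get_dqblk,
};
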
3695 diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
3696 index 1d3eee594cd6..bfaf7138d5ee 100644
3697 --- a/include/linux/quotaops.h
3698 +++ b/include/linux/quotaops.h
3699 @@ -98,9 +98,9 @@ int dquot_quota_sync(struct super_block *sb, int type);
3700 int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
3701 int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
3702 int dquot_get_dqblk(struct super_block *sb, struct kqid id,
3703 - struct fs_disk_quota *di);
3704 + struct qc_dqblk *di);
3705 int dquot_set_dqblk(struct super_block *sb, struct kqid id,
3706 - struct fs_disk_quota *di);
3707 + struct qc_dqblk *di);
3708
3709 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
3710 int dquot_transfer(struct inode *inode, struct iattr *iattr);
3711 diff --git a/mm/gup.c b/mm/gup.c
3712 index cd62c8c90d4a..a0d57ec05510 100644
3713 --- a/mm/gup.c
3714 +++ b/mm/gup.c
3715 @@ -296,7 +296,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
3716 return -ENOMEM;
3717 if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
3718 return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
3719 - if (ret & VM_FAULT_SIGBUS)
3720 + if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
3721 return -EFAULT;
3722 BUG();
3723 }
3724 @@ -571,7 +571,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
3725 return -ENOMEM;
3726 if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
3727 return -EHWPOISON;
3728 - if (ret & VM_FAULT_SIGBUS)
3729 + if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
3730 return -EFAULT;
3731 BUG();
3732 }
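
The two mm/gup.c hunks above apply the same rule in both fault paths; condensed, the VM_FAULT_* to errno translation they settle on looks like this (simplified sketch: the hwpoison case in faultin_page() additionally honours FOLL_HWPOISON):

#include <linux/errno.h>
#include <linux/mm.h>

static int fault_to_errno(unsigned int ret)
{
        if (ret & VM_FAULT_OOM)
                return -ENOMEM;
        if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
                return -EHWPOISON;
        if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
                return -EFAULT;
        return 0;
}
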
3733 diff --git a/mm/ksm.c b/mm/ksm.c
3734 index 6b2e337bc03c..a0ed043a1096 100644
3735 --- a/mm/ksm.c
3736 +++ b/mm/ksm.c
3737 @@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
3738 else
3739 ret = VM_FAULT_WRITE;
3740 put_page(page);
3741 - } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
3742 + } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
3743 /*
3744 * We must loop because handle_mm_fault() may back out if there's
3745 * any difficulty e.g. if pte accessed bit gets updated concurrently.
3746 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
3747 index d6ac0e33e150..4918b6eefae2 100644
3748 --- a/mm/memcontrol.c
3749 +++ b/mm/memcontrol.c
3750 @@ -1638,9 +1638,9 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
3751
3752 pr_info("Task in ");
3753 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
3754 - pr_info(" killed as a result of limit of ");
3755 + pr_cont(" killed as a result of limit of ");
3756 pr_cont_cgroup_path(memcg->css.cgroup);
3757 - pr_info("\n");
3758 + pr_cont("\n");
3759
3760 rcu_read_unlock();
3761
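
The memcontrol.c fix is cosmetic but worth spelling out: pr_info() always opens a new, prefixed log line, so the OOM message above was being split across three lines, while pr_cont() appends to the line currently open. A sketch of the corrected pattern (cgroup_path and limit_path are hypothetical placeholders):

#include <linux/printk.h>

static void print_oom_context(const char *cgroup_path, const char *limit_path)
{
        pr_info("Task in ");                    /* opens a new, prefixed log line */
        pr_cont("%s", cgroup_path);             /* pr_cont() appends to that line */
        pr_cont(" killed as a result of limit of ");
        pr_cont("%s\n", limit_path);            /* the newline ends it */
}
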
3762 diff --git a/mm/memory.c b/mm/memory.c
3763 index 7f86cf6252bd..d442584fd281 100644
3764 --- a/mm/memory.c
3765 +++ b/mm/memory.c
3766 @@ -2645,7 +2645,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
3767
3768 /* Check if we need to add a guard page to the stack */
3769 if (check_stack_guard_page(vma, address) < 0)
3770 - return VM_FAULT_SIGBUS;
3771 + return VM_FAULT_SIGSEGV;
3772
3773 /* Use the zero-page for reads */
3774 if (!(flags & FAULT_FLAG_WRITE)) {
3775 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
3776 index 4c5192e0d66c..4a95fe3cffbc 100644
3777 --- a/net/mac80211/pm.c
3778 +++ b/net/mac80211/pm.c
3779 @@ -86,20 +86,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
3780 }
3781 }
3782
3783 - /* tear down aggregation sessions and remove STAs */
3784 - mutex_lock(&local->sta_mtx);
3785 - list_for_each_entry(sta, &local->sta_list, list) {
3786 - if (sta->uploaded) {
3787 - enum ieee80211_sta_state state;
3788 -
3789 - state = sta->sta_state;
3790 - for (; state > IEEE80211_STA_NOTEXIST; state--)
3791 - WARN_ON(drv_sta_state(local, sta->sdata, sta,
3792 - state, state - 1));
3793 - }
3794 - }
3795 - mutex_unlock(&local->sta_mtx);
3796 -
3797 /* remove all interfaces that were created in the driver */
3798 list_for_each_entry(sdata, &local->interfaces, list) {
3799 if (!ieee80211_sdata_running(sdata))
3800 @@ -111,6 +97,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
3801 case NL80211_IFTYPE_STATION:
3802 ieee80211_mgd_quiesce(sdata);
3803 break;
3804 + case NL80211_IFTYPE_WDS:
3805 + /* tear down aggregation sessions and remove STAs */
3806 + mutex_lock(&local->sta_mtx);
3807 + sta = sdata->u.wds.sta;
3808 + if (sta && sta->uploaded) {
3809 + enum ieee80211_sta_state state;
3810 +
3811 + state = sta->sta_state;
3812 + for (; state > IEEE80211_STA_NOTEXIST; state--)
3813 + WARN_ON(drv_sta_state(local, sta->sdata,
3814 + sta, state,
3815 + state - 1));
3816 + }
3817 + mutex_unlock(&local->sta_mtx);
3818 + break;
3819 default:
3820 break;
3821 }
3822 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
3823 index e60da9a062c2..7d6379bd2cb8 100644
3824 --- a/net/mac80211/rx.c
3825 +++ b/net/mac80211/rx.c
3826 @@ -235,7 +235,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
3827 else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
3828 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
3829 else if (rate)
3830 - channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
3831 + channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
3832 else
3833 channel_flags |= IEEE80211_CHAN_2GHZ;
3834 put_unaligned_le16(channel_flags, pos);
3835 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
3836 index ea558e07981f..213048ad31c7 100644
3837 --- a/net/wireless/nl80211.c
3838 +++ b/net/wireless/nl80211.c
3839 @@ -2805,6 +2805,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
3840 if (!rdev->ops->get_key)
3841 return -EOPNOTSUPP;
3842
3843 + if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
3844 + return -ENOENT;
3845 +
3846 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3847 if (!msg)
3848 return -ENOMEM;
3849 @@ -2824,10 +2827,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
3850 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
3851 goto nla_put_failure;
3852
3853 - if (pairwise && mac_addr &&
3854 - !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
3855 - return -ENOENT;
3856 -
3857 err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
3858 get_key_callback);
3859
3860 @@ -2998,7 +2997,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
3861 wdev_lock(dev->ieee80211_ptr);
3862 err = nl80211_key_allowed(dev->ieee80211_ptr);
3863
3864 - if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr &&
3865 + if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&
3866 !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
3867 err = -ENOENT;
3868
3869 diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c
3870 index ec667f158f19..5d905d90d504 100644
3871 --- a/sound/core/seq/seq_dummy.c
3872 +++ b/sound/core/seq/seq_dummy.c
3873 @@ -82,36 +82,6 @@ struct snd_seq_dummy_port {
3874 static int my_client = -1;
3875
3876 /*
3877 - * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events
3878 - * to subscribers.
3879 - * Note: this callback is called only after all subscribers are removed.
3880 - */
3881 -static int
3882 -dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info)
3883 -{
3884 - struct snd_seq_dummy_port *p;
3885 - int i;
3886 - struct snd_seq_event ev;
3887 -
3888 - p = private_data;
3889 - memset(&ev, 0, sizeof(ev));
3890 - if (p->duplex)
3891 - ev.source.port = p->connect;
3892 - else
3893 - ev.source.port = p->port;
3894 - ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
3895 - ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
3896 - for (i = 0; i < 16; i++) {
3897 - ev.data.control.channel = i;
3898 - ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF;
3899 - snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
3900 - ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS;
3901 - snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
3902 - }
3903 - return 0;
3904 -}
3905 -
3906 -/*
3907 * event input callback - just redirect events to subscribers
3908 */
3909 static int
3910 @@ -175,7 +145,6 @@ create_port(int idx, int type)
3911 | SNDRV_SEQ_PORT_TYPE_PORT;
3912 memset(&pcb, 0, sizeof(pcb));
3913 pcb.owner = THIS_MODULE;
3914 - pcb.unuse = dummy_unuse;
3915 pcb.event_input = dummy_input;
3916 pcb.private_free = dummy_free;
3917 pcb.private_data = rec;
3918 diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
3919 index 0c8aefab404c..640c99198cda 100644
3920 --- a/sound/soc/codecs/pcm512x.c
3921 +++ b/sound/soc/codecs/pcm512x.c
3922 @@ -188,8 +188,8 @@ static const DECLARE_TLV_DB_SCALE(boost_tlv, 0, 80, 0);
3923 static const char * const pcm512x_dsp_program_texts[] = {
3924 "FIR interpolation with de-emphasis",
3925 "Low latency IIR with de-emphasis",
3926 - "Fixed process flow",
3927 "High attenuation with de-emphasis",
3928 + "Fixed process flow",
3929 "Ringing-less low latency FIR",
3930 };
3931
3932 diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
3933 index 4dc4e85116cd..641f940c138d 100644
3934 --- a/sound/soc/codecs/wm8960.c
3935 +++ b/sound/soc/codecs/wm8960.c
3936 @@ -555,7 +555,7 @@ static struct {
3937 { 22050, 2 },
3938 { 24000, 2 },
3939 { 16000, 3 },
3940 - { 11250, 4 },
3941 + { 11025, 4 },
3942 { 12000, 4 },
3943 { 8000, 5 },
3944 };
3945 diff --git a/sound/soc/fsl/fsl_esai.h b/sound/soc/fsl/fsl_esai.h
3946 index 91a550f4a10d..5e793bbb6b02 100644
3947 --- a/sound/soc/fsl/fsl_esai.h
3948 +++ b/sound/soc/fsl/fsl_esai.h
3949 @@ -302,7 +302,7 @@
3950 #define ESAI_xCCR_xFP_MASK (((1 << ESAI_xCCR_xFP_WIDTH) - 1) << ESAI_xCCR_xFP_SHIFT)
3951 #define ESAI_xCCR_xFP(v) ((((v) - 1) << ESAI_xCCR_xFP_SHIFT) & ESAI_xCCR_xFP_MASK)
3952 #define ESAI_xCCR_xDC_SHIFT 9
3953 -#define ESAI_xCCR_xDC_WIDTH 4
3954 +#define ESAI_xCCR_xDC_WIDTH 5
3955 #define ESAI_xCCR_xDC_MASK (((1 << ESAI_xCCR_xDC_WIDTH) - 1) << ESAI_xCCR_xDC_SHIFT)
3956 #define ESAI_xCCR_xDC(v) ((((v) - 1) << ESAI_xCCR_xDC_SHIFT) & ESAI_xCCR_xDC_MASK)
3957 #define ESAI_xCCR_xPSR_SHIFT 8
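
The ESAI fix is a classic bitfield-width bug: xDC stores the frame-rate divider as (v - 1), and the width bump implies v can legitimately reach 32, so the stored value reaches 31 and needs five bits; with the old width of 4 the mask silently truncated it. A quick standalone check, using a width-parameterised variant of the header macro for illustration (shift taken from the hunk):

#include <assert.h>

#define ESAI_xCCR_xDC_SHIFT     9
#define ESAI_xCCR_xDC_MASK(w)   (((1u << (w)) - 1) << ESAI_xCCR_xDC_SHIFT)
#define ESAI_xCCR_xDC(v, w)     ((((v) - 1) << ESAI_xCCR_xDC_SHIFT) & ESAI_xCCR_xDC_MASK(w))

int main(void)
{
        assert(ESAI_xCCR_xDC(32, 4) == (15u << 9)); /* width 4: value corrupted */
        assert(ESAI_xCCR_xDC(32, 5) == (31u << 9)); /* width 5: value intact */
        return 0;
}
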
3958 diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
3959 index d1b7293c133e..c6a6693bbfc9 100644
3960 --- a/sound/soc/generic/simple-card.c
3961 +++ b/sound/soc/generic/simple-card.c
3962 @@ -453,9 +453,8 @@ static int asoc_simple_card_parse_of(struct device_node *node,
3963 }
3964
3965 /* Decrease the reference count of the device nodes */
3966 -static int asoc_simple_card_unref(struct platform_device *pdev)
3967 +static int asoc_simple_card_unref(struct snd_soc_card *card)
3968 {
3969 - struct snd_soc_card *card = platform_get_drvdata(pdev);
3970 struct snd_soc_dai_link *dai_link;
3971 struct device_node *np;
3972 int num_links;
3973 @@ -562,7 +561,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
3974 return ret;
3975
3976 err:
3977 - asoc_simple_card_unref(pdev);
3978 + asoc_simple_card_unref(&priv->snd_card);
3979 return ret;
3980 }
3981
3982 @@ -578,7 +577,7 @@ static int asoc_simple_card_remove(struct platform_device *pdev)
3983 snd_soc_jack_free_gpios(&simple_card_mic_jack, 1,
3984 &simple_card_mic_jack_gpio);
3985
3986 - return asoc_simple_card_unref(pdev);
3987 + return asoc_simple_card_unref(card);
3988 }
3989
3990 static const struct of_device_id asoc_simple_of_match[] = {
3991 diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
3992 index bd3ef2a88be0..aafc0686ab22 100644
3993 --- a/sound/soc/omap/omap-mcbsp.c
3994 +++ b/sound/soc/omap/omap-mcbsp.c
3995 @@ -434,7 +434,7 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
3996 case SND_SOC_DAIFMT_CBM_CFS:
3997 /* McBSP slave. FS clock as output */
3998 regs->srgr2 |= FSGM;
3999 - regs->pcr0 |= FSXM;
4000 + regs->pcr0 |= FSXM | FSRM;
4001 break;
4002 case SND_SOC_DAIFMT_CBM_CFM:
4003 /* McBSP slave */
4004 diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
4005 index cecfab3cc948..08e430d664cd 100644
4006 --- a/sound/soc/soc-compress.c
4007 +++ b/sound/soc/soc-compress.c
4008 @@ -666,7 +666,8 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
4009 rtd->dai_link->stream_name);
4010
4011 ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num,
4012 - 1, 0, &be_pcm);
4013 + rtd->dai_link->dpcm_playback,
4014 + rtd->dai_link->dpcm_capture, &be_pcm);
4015 if (ret < 0) {
4016 dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n",
4017 rtd->dai_link->name);
4018 @@ -675,8 +676,10 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
4019
4020 rtd->pcm = be_pcm;
4021 rtd->fe_compr = 1;
4022 - be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
4023 - be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
4024 + if (rtd->dai_link->dpcm_playback)
4025 + be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
4026 + else if (rtd->dai_link->dpcm_capture)
4027 + be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
4028 memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
4029 } else
4030 memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));