Contents of /trunk/kernel-alx/patches-3.14/0103-3.14.4-all-fixes.patch
Parent Directory | Revision Log
Revision 2506 -
(show annotations)
(download)
Fri Oct 17 07:55:45 2014 UTC (10 years ago) by niro
File size: 120899 byte(s)
-patches for 3.14
1 | diff --git a/Makefile b/Makefile |
2 | index eed07f3f9308..d7c07fd8c944 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 3 |
7 | PATCHLEVEL = 14 |
8 | -SUBLEVEL = 3 |
9 | +SUBLEVEL = 4 |
10 | EXTRAVERSION = |
11 | NAME = Shuffling Zombie Juror |
12 | |
13 | diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S |
14 | index 47d09d07f093..6e8f83a32522 100644 |
15 | --- a/arch/arc/kernel/entry.S |
16 | +++ b/arch/arc/kernel/entry.S |
17 | @@ -614,11 +614,13 @@ resume_user_mode_begin: |
18 | |
19 | resume_kernel_mode: |
20 | |
21 | -#ifdef CONFIG_PREEMPT |
22 | - |
23 | - ; This is a must for preempt_schedule_irq() |
24 | + ; Disable Interrupts from this point on |
25 | + ; CONFIG_PREEMPT: This is a must for preempt_schedule_irq() |
26 | + ; !CONFIG_PREEMPT: To ensure restore_regs is intr safe |
27 | IRQ_DISABLE r9 |
28 | |
29 | +#ifdef CONFIG_PREEMPT |
30 | + |
31 | ; Can't preempt if preemption disabled |
32 | GET_CURR_THR_INFO_FROM_SP r10 |
33 | ld r8, [r10, THREAD_INFO_PREEMPT_COUNT] |
34 | diff --git a/arch/arc/plat-arcfpga/Kconfig b/arch/arc/plat-arcfpga/Kconfig |
35 | index 295cefeb25d3..33058aa40e77 100644 |
36 | --- a/arch/arc/plat-arcfpga/Kconfig |
37 | +++ b/arch/arc/plat-arcfpga/Kconfig |
38 | @@ -33,7 +33,6 @@ config ISS_SMP_EXTN |
39 | bool "ARC SMP Extensions (ISS Models only)" |
40 | default n |
41 | depends on SMP |
42 | - select ARC_HAS_COH_RTSC |
43 | help |
44 | SMP Extensions to ARC700, in a "simulation only" Model, supported in |
45 | ARC ISS (Instruction Set Simulator). |
46 | diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c |
47 | index 7789857d1470..575d7904305b 100644 |
48 | --- a/arch/arm/kvm/mmu.c |
49 | +++ b/arch/arm/kvm/mmu.c |
50 | @@ -42,6 +42,8 @@ static unsigned long hyp_idmap_start; |
51 | static unsigned long hyp_idmap_end; |
52 | static phys_addr_t hyp_idmap_vector; |
53 | |
54 | +#define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t)) |
55 | + |
56 | #define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x)) |
57 | |
58 | static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) |
59 | @@ -199,14 +201,14 @@ void free_boot_hyp_pgd(void) |
60 | if (boot_hyp_pgd) { |
61 | unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); |
62 | unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); |
63 | - kfree(boot_hyp_pgd); |
64 | + free_pages((unsigned long)boot_hyp_pgd, pgd_order); |
65 | boot_hyp_pgd = NULL; |
66 | } |
67 | |
68 | if (hyp_pgd) |
69 | unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); |
70 | |
71 | - kfree(init_bounce_page); |
72 | + free_page((unsigned long)init_bounce_page); |
73 | init_bounce_page = NULL; |
74 | |
75 | mutex_unlock(&kvm_hyp_pgd_mutex); |
76 | @@ -236,7 +238,7 @@ void free_hyp_pgds(void) |
77 | for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) |
78 | unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); |
79 | |
80 | - kfree(hyp_pgd); |
81 | + free_pages((unsigned long)hyp_pgd, pgd_order); |
82 | hyp_pgd = NULL; |
83 | } |
84 | |
85 | @@ -930,7 +932,7 @@ int kvm_mmu_init(void) |
86 | size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start; |
87 | phys_addr_t phys_base; |
88 | |
89 | - init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL); |
90 | + init_bounce_page = (void *)__get_free_page(GFP_KERNEL); |
91 | if (!init_bounce_page) { |
92 | kvm_err("Couldn't allocate HYP init bounce page\n"); |
93 | err = -ENOMEM; |
94 | @@ -956,8 +958,9 @@ int kvm_mmu_init(void) |
95 | (unsigned long)phys_base); |
96 | } |
97 | |
98 | - hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); |
99 | - boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); |
100 | + hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order); |
101 | + boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order); |
102 | + |
103 | if (!hyp_pgd || !boot_hyp_pgd) { |
104 | kvm_err("Hyp mode PGD not allocated\n"); |
105 | err = -ENOMEM; |
106 | diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c |
107 | index 4b6274b47f33..e75ef8219caf 100644 |
108 | --- a/arch/mips/kvm/kvm_mips_emul.c |
109 | +++ b/arch/mips/kvm/kvm_mips_emul.c |
110 | @@ -1571,17 +1571,17 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, |
111 | arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); |
112 | #else |
113 | /* UserLocal not implemented */ |
114 | - er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); |
115 | + er = EMULATE_FAIL; |
116 | #endif |
117 | break; |
118 | |
119 | default: |
120 | - printk("RDHWR not supported\n"); |
121 | + kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc); |
122 | er = EMULATE_FAIL; |
123 | break; |
124 | } |
125 | } else { |
126 | - printk("Emulate RI not supported @ %p: %#x\n", opc, inst); |
127 | + kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst); |
128 | er = EMULATE_FAIL; |
129 | } |
130 | |
131 | @@ -1590,6 +1590,7 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, |
132 | */ |
133 | if (er == EMULATE_FAIL) { |
134 | vcpu->arch.pc = curr_pc; |
135 | + er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); |
136 | } |
137 | return er; |
138 | } |
139 | diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S |
140 | index 7e0277a1048f..32a7c828f073 100644 |
141 | --- a/arch/mips/power/hibernate.S |
142 | +++ b/arch/mips/power/hibernate.S |
143 | @@ -43,6 +43,7 @@ LEAF(swsusp_arch_resume) |
144 | bne t1, t3, 1b |
145 | PTR_L t0, PBE_NEXT(t0) |
146 | bnez t0, 0b |
147 | + jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */ |
148 | PTR_LA t0, saved_regs |
149 | PTR_L ra, PT_R31(t0) |
150 | PTR_L sp, PT_R29(t0) |
151 | diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h |
152 | index a613d2c82fd9..b142b8e0ed9e 100644 |
153 | --- a/arch/powerpc/include/asm/compat.h |
154 | +++ b/arch/powerpc/include/asm/compat.h |
155 | @@ -8,7 +8,11 @@ |
156 | #include <linux/sched.h> |
157 | |
158 | #define COMPAT_USER_HZ 100 |
159 | +#ifdef __BIG_ENDIAN__ |
160 | #define COMPAT_UTS_MACHINE "ppc\0\0" |
161 | +#else |
162 | +#define COMPAT_UTS_MACHINE "ppcle\0\0" |
163 | +#endif |
164 | |
165 | typedef u32 compat_size_t; |
166 | typedef s32 compat_ssize_t; |
167 | diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h |
168 | index 90c06ec6eff5..ce17815b8b55 100644 |
169 | --- a/arch/powerpc/include/asm/reg.h |
170 | +++ b/arch/powerpc/include/asm/reg.h |
171 | @@ -213,6 +213,7 @@ |
172 | #define SPRN_ACOP 0x1F /* Available Coprocessor Register */ |
173 | #define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */ |
174 | #define SPRN_TEXASR 0x82 /* Transaction EXception & Summary */ |
175 | +#define TEXASR_FS __MASK(63-36) /* Transaction Failure Summary */ |
176 | #define SPRN_TEXASRU 0x83 /* '' '' '' Upper 32 */ |
177 | #define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */ |
178 | #define SPRN_CTRLF 0x088 |
179 | diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c |
180 | index af064d28b365..31d021506d21 100644 |
181 | --- a/arch/powerpc/kernel/process.c |
182 | +++ b/arch/powerpc/kernel/process.c |
183 | @@ -610,6 +610,31 @@ out_and_saveregs: |
184 | tm_save_sprs(thr); |
185 | } |
186 | |
187 | +extern void __tm_recheckpoint(struct thread_struct *thread, |
188 | + unsigned long orig_msr); |
189 | + |
190 | +void tm_recheckpoint(struct thread_struct *thread, |
191 | + unsigned long orig_msr) |
192 | +{ |
193 | + unsigned long flags; |
194 | + |
195 | + /* We really can't be interrupted here as the TEXASR registers can't |
196 | + * change and later in the trecheckpoint code, we have a userspace R1. |
197 | + * So let's hard disable over this region. |
198 | + */ |
199 | + local_irq_save(flags); |
200 | + hard_irq_disable(); |
201 | + |
202 | + /* The TM SPRs are restored here, so that TEXASR.FS can be set |
203 | + * before the trecheckpoint and no explosion occurs. |
204 | + */ |
205 | + tm_restore_sprs(thread); |
206 | + |
207 | + __tm_recheckpoint(thread, orig_msr); |
208 | + |
209 | + local_irq_restore(flags); |
210 | +} |
211 | + |
212 | static inline void tm_recheckpoint_new_task(struct task_struct *new) |
213 | { |
214 | unsigned long msr; |
215 | @@ -628,13 +653,10 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new) |
216 | if (!new->thread.regs) |
217 | return; |
218 | |
219 | - /* The TM SPRs are restored here, so that TEXASR.FS can be set |
220 | - * before the trecheckpoint and no explosion occurs. |
221 | - */ |
222 | - tm_restore_sprs(&new->thread); |
223 | - |
224 | - if (!MSR_TM_ACTIVE(new->thread.regs->msr)) |
225 | + if (!MSR_TM_ACTIVE(new->thread.regs->msr)){ |
226 | + tm_restore_sprs(&new->thread); |
227 | return; |
228 | + } |
229 | msr = new->thread.tm_orig_msr; |
230 | /* Recheckpoint to restore original checkpointed register state. */ |
231 | TM_DEBUG("*** tm_recheckpoint of pid %d " |
232 | diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c |
233 | index a67e00aa3caa..4e47db686b5d 100644 |
234 | --- a/arch/powerpc/kernel/signal_32.c |
235 | +++ b/arch/powerpc/kernel/signal_32.c |
236 | @@ -881,6 +881,8 @@ static long restore_tm_user_regs(struct pt_regs *regs, |
237 | * transactional versions should be loaded. |
238 | */ |
239 | tm_enable(); |
240 | + /* Make sure the transaction is marked as failed */ |
241 | + current->thread.tm_texasr |= TEXASR_FS; |
242 | /* This loads the checkpointed FP/VEC state, if used */ |
243 | tm_recheckpoint(¤t->thread, msr); |
244 | /* Get the top half of the MSR */ |
245 | diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c |
246 | index 8d253c29649b..d501dc4dc3e6 100644 |
247 | --- a/arch/powerpc/kernel/signal_64.c |
248 | +++ b/arch/powerpc/kernel/signal_64.c |
249 | @@ -527,6 +527,8 @@ static long restore_tm_sigcontexts(struct pt_regs *regs, |
250 | } |
251 | #endif |
252 | tm_enable(); |
253 | + /* Make sure the transaction is marked as failed */ |
254 | + current->thread.tm_texasr |= TEXASR_FS; |
255 | /* This loads the checkpointed FP/VEC state, if used */ |
256 | tm_recheckpoint(¤t->thread, msr); |
257 | |
258 | diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S |
259 | index ef47bcbd4352..03567c05950a 100644 |
260 | --- a/arch/powerpc/kernel/tm.S |
261 | +++ b/arch/powerpc/kernel/tm.S |
262 | @@ -307,7 +307,7 @@ dont_backup_fp: |
263 | * Call with IRQs off, stacks get all out of sync for |
264 | * some periods in here! |
265 | */ |
266 | -_GLOBAL(tm_recheckpoint) |
267 | +_GLOBAL(__tm_recheckpoint) |
268 | mfcr r5 |
269 | mflr r0 |
270 | stw r5, 8(r1) |
271 | diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c |
272 | index 17fc9496b6ac..7a25d9218a05 100644 |
273 | --- a/arch/powerpc/kvm/book3s_hv.c |
274 | +++ b/arch/powerpc/kvm/book3s_hv.c |
275 | @@ -86,7 +86,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) |
276 | |
277 | /* CPU points to the first thread of the core */ |
278 | if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) { |
279 | -#ifdef CONFIG_KVM_XICS |
280 | +#ifdef CONFIG_PPC_ICP_NATIVE |
281 | int real_cpu = cpu + vcpu->arch.ptid; |
282 | if (paca[real_cpu].kvm_hstate.xics_phys) |
283 | xics_wake_cpu(real_cpu); |
284 | @@ -1360,9 +1360,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu) |
285 | smp_wmb(); |
286 | #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) |
287 | if (cpu != smp_processor_id()) { |
288 | -#ifdef CONFIG_KVM_XICS |
289 | xics_wake_cpu(cpu); |
290 | -#endif |
291 | if (vcpu->arch.ptid) |
292 | ++vc->n_woken; |
293 | } |
294 | diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c |
295 | index 0ea99e3d4815..2d6fe89ff89d 100644 |
296 | --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c |
297 | +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c |
298 | @@ -88,13 +88,14 @@ void set_default_offline_state(int cpu) |
299 | |
300 | static void rtas_stop_self(void) |
301 | { |
302 | - struct rtas_args args = { |
303 | - .token = cpu_to_be32(rtas_stop_self_token), |
304 | + static struct rtas_args args = { |
305 | .nargs = 0, |
306 | .nret = 1, |
307 | .rets = &args.args[0], |
308 | }; |
309 | |
310 | + args.token = cpu_to_be32(rtas_stop_self_token); |
311 | + |
312 | local_irq_disable(); |
313 | |
314 | BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE); |
315 | diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c |
316 | index 708d60e40066..a778ee27518a 100644 |
317 | --- a/arch/s390/net/bpf_jit_comp.c |
318 | +++ b/arch/s390/net/bpf_jit_comp.c |
319 | @@ -276,7 +276,6 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter) |
320 | case BPF_S_LD_W_IND: |
321 | case BPF_S_LD_H_IND: |
322 | case BPF_S_LD_B_IND: |
323 | - case BPF_S_LDX_B_MSH: |
324 | case BPF_S_LD_IMM: |
325 | case BPF_S_LD_MEM: |
326 | case BPF_S_MISC_TXA: |
327 | diff --git a/arch/x86/Makefile b/arch/x86/Makefile |
328 | index f8842c444560..0dd99ea1caf1 100644 |
329 | --- a/arch/x86/Makefile |
330 | +++ b/arch/x86/Makefile |
331 | @@ -79,6 +79,7 @@ else |
332 | UTS_MACHINE := x86_64 |
333 | CHECKFLAGS += -D__x86_64__ -m64 |
334 | |
335 | + biarch := -m64 |
336 | KBUILD_AFLAGS += -m64 |
337 | KBUILD_CFLAGS += -m64 |
338 | |
339 | diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c |
340 | index c29c2c3ec0ad..b06f5f55ada9 100644 |
341 | --- a/drivers/acpi/acpi_processor.c |
342 | +++ b/drivers/acpi/acpi_processor.c |
343 | @@ -170,6 +170,9 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr) |
344 | acpi_status status; |
345 | int ret; |
346 | |
347 | + if (pr->apic_id == -1) |
348 | + return -ENODEV; |
349 | + |
350 | status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); |
351 | if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT)) |
352 | return -ENODEV; |
353 | @@ -260,10 +263,8 @@ static int acpi_processor_get_info(struct acpi_device *device) |
354 | } |
355 | |
356 | apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id); |
357 | - if (apic_id < 0) { |
358 | + if (apic_id < 0) |
359 | acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n"); |
360 | - return -ENODEV; |
361 | - } |
362 | pr->apic_id = apic_id; |
363 | |
364 | cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id); |
365 | diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c |
366 | index c81d809c111b..8f18342540d8 100644 |
367 | --- a/drivers/ata/ahci.c |
368 | +++ b/drivers/ata/ahci.c |
369 | @@ -1163,7 +1163,7 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host) |
370 | #endif |
371 | |
372 | static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports, |
373 | - struct ahci_host_priv *hpriv) |
374 | + struct ahci_host_priv *hpriv) |
375 | { |
376 | int rc, nvec; |
377 | |
378 | @@ -1189,6 +1189,13 @@ static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports, |
379 | else if (rc > 0) |
380 | goto single_msi; |
381 | |
382 | + /* fallback to single MSI mode if the controller enforced MRSM mode */ |
383 | + if (readl(hpriv->mmio + HOST_CTL) & HOST_MRSM) { |
384 | + pci_disable_msi(pdev); |
385 | + printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n"); |
386 | + goto single_msi; |
387 | + } |
388 | + |
389 | return nvec; |
390 | |
391 | single_msi: |
392 | @@ -1233,18 +1240,18 @@ int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis) |
393 | return rc; |
394 | |
395 | for (i = 0; i < host->n_ports; i++) { |
396 | - const char* desc; |
397 | struct ahci_port_priv *pp = host->ports[i]->private_data; |
398 | |
399 | - /* pp is NULL for dummy ports */ |
400 | - if (pp) |
401 | - desc = pp->irq_desc; |
402 | - else |
403 | - desc = dev_driver_string(host->dev); |
404 | + /* Do not receive interrupts sent by dummy ports */ |
405 | + if (!pp) { |
406 | + disable_irq(irq + i); |
407 | + continue; |
408 | + } |
409 | |
410 | - rc = devm_request_threaded_irq(host->dev, |
411 | - irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED, |
412 | - desc, host->ports[i]); |
413 | + rc = devm_request_threaded_irq(host->dev, irq + i, |
414 | + ahci_hw_interrupt, |
415 | + ahci_thread_fn, IRQF_SHARED, |
416 | + pp->irq_desc, host->ports[i]); |
417 | if (rc) |
418 | goto out_free_irqs; |
419 | } |
420 | diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h |
421 | index 2289efdf8203..ad36faf31dbd 100644 |
422 | --- a/drivers/ata/ahci.h |
423 | +++ b/drivers/ata/ahci.h |
424 | @@ -91,6 +91,7 @@ enum { |
425 | /* HOST_CTL bits */ |
426 | HOST_RESET = (1 << 0), /* reset controller; self-clear */ |
427 | HOST_IRQ_EN = (1 << 1), /* global IRQ enable */ |
428 | + HOST_MRSM = (1 << 2), /* MSI Revert to Single Message */ |
429 | HOST_AHCI_EN = (1 << 31), /* AHCI enabled */ |
430 | |
431 | /* HOST_CAP bits */ |
432 | diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c |
433 | index 8cb2522d592a..0a79c540169c 100644 |
434 | --- a/drivers/ata/libata-core.c |
435 | +++ b/drivers/ata/libata-core.c |
436 | @@ -4224,8 +4224,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { |
437 | { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, |
438 | |
439 | /* devices that don't properly handle queued TRIM commands */ |
440 | - { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, |
441 | - { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, |
442 | + { "Micron_M500*", "MU0[1-4]*", ATA_HORKAGE_NO_NCQ_TRIM, }, |
443 | + { "Crucial_CT???M500SSD*", "MU0[1-4]*", ATA_HORKAGE_NO_NCQ_TRIM, }, |
444 | + { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, |
445 | + { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, |
446 | |
447 | /* |
448 | * Some WD SATA-I drives spin up and down erratically when the link |
449 | @@ -4792,21 +4794,26 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words) |
450 | static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) |
451 | { |
452 | struct ata_queued_cmd *qc = NULL; |
453 | - unsigned int i; |
454 | + unsigned int i, tag; |
455 | |
456 | /* no command while frozen */ |
457 | if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) |
458 | return NULL; |
459 | |
460 | - /* the last tag is reserved for internal command. */ |
461 | - for (i = 0; i < ATA_MAX_QUEUE - 1; i++) |
462 | - if (!test_and_set_bit(i, &ap->qc_allocated)) { |
463 | - qc = __ata_qc_from_tag(ap, i); |
464 | + for (i = 0; i < ATA_MAX_QUEUE; i++) { |
465 | + tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE; |
466 | + |
467 | + /* the last tag is reserved for internal command. */ |
468 | + if (tag == ATA_TAG_INTERNAL) |
469 | + continue; |
470 | + |
471 | + if (!test_and_set_bit(tag, &ap->qc_allocated)) { |
472 | + qc = __ata_qc_from_tag(ap, tag); |
473 | + qc->tag = tag; |
474 | + ap->last_tag = tag; |
475 | break; |
476 | } |
477 | - |
478 | - if (qc) |
479 | - qc->tag = i; |
480 | + } |
481 | |
482 | return qc; |
483 | } |
484 | diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c |
485 | index 2023043ce7c0..dab515cd77bf 100644 |
486 | --- a/drivers/block/floppy.c |
487 | +++ b/drivers/block/floppy.c |
488 | @@ -3053,7 +3053,10 @@ static int raw_cmd_copyout(int cmd, void __user *param, |
489 | int ret; |
490 | |
491 | while (ptr) { |
492 | - ret = copy_to_user(param, ptr, sizeof(*ptr)); |
493 | + struct floppy_raw_cmd cmd = *ptr; |
494 | + cmd.next = NULL; |
495 | + cmd.kernel_data = NULL; |
496 | + ret = copy_to_user(param, &cmd, sizeof(cmd)); |
497 | if (ret) |
498 | return -EFAULT; |
499 | param += sizeof(struct floppy_raw_cmd); |
500 | @@ -3107,10 +3110,11 @@ loop: |
501 | return -ENOMEM; |
502 | *rcmd = ptr; |
503 | ret = copy_from_user(ptr, param, sizeof(*ptr)); |
504 | - if (ret) |
505 | - return -EFAULT; |
506 | ptr->next = NULL; |
507 | ptr->buffer_length = 0; |
508 | + ptr->kernel_data = NULL; |
509 | + if (ret) |
510 | + return -EFAULT; |
511 | param += sizeof(struct floppy_raw_cmd); |
512 | if (ptr->cmd_count > 33) |
513 | /* the command may now also take up the space |
514 | @@ -3126,7 +3130,6 @@ loop: |
515 | for (i = 0; i < 16; i++) |
516 | ptr->reply[i] = 0; |
517 | ptr->resultcode = 0; |
518 | - ptr->kernel_data = NULL; |
519 | |
520 | if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) { |
521 | if (ptr->length <= 0) |
522 | diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c |
523 | index a1c79f549edb..7b612c8bb09e 100644 |
524 | --- a/drivers/cpufreq/at32ap-cpufreq.c |
525 | +++ b/drivers/cpufreq/at32ap-cpufreq.c |
526 | @@ -52,7 +52,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index) |
527 | static int at32_cpufreq_driver_init(struct cpufreq_policy *policy) |
528 | { |
529 | unsigned int frequency, rate, min_freq; |
530 | - static struct clk *cpuclk; |
531 | + struct clk *cpuclk; |
532 | int retval, steps, i; |
533 | |
534 | if (policy->cpu != 0) |
535 | diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c |
536 | index b6581abc9207..8dead6fb28e8 100644 |
537 | --- a/drivers/cpufreq/loongson2_cpufreq.c |
538 | +++ b/drivers/cpufreq/loongson2_cpufreq.c |
539 | @@ -69,7 +69,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy, |
540 | |
541 | static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) |
542 | { |
543 | - static struct clk *cpuclk; |
544 | + struct clk *cpuclk; |
545 | int i; |
546 | unsigned long rate; |
547 | int ret; |
548 | diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c |
549 | index 36cc330b8747..99d280d6f370 100644 |
550 | --- a/drivers/cpufreq/unicore2-cpufreq.c |
551 | +++ b/drivers/cpufreq/unicore2-cpufreq.c |
552 | @@ -45,7 +45,7 @@ static int ucv2_target(struct cpufreq_policy *policy, |
553 | freqs.new = target_freq; |
554 | |
555 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
556 | - ret = clk_set_rate(policy->mclk, target_freq * 1000); |
557 | + ret = clk_set_rate(policy->clk, target_freq * 1000); |
558 | cpufreq_notify_post_transition(policy, &freqs, ret); |
559 | |
560 | return ret; |
561 | diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c |
562 | index cd8da451d199..bf5ad0f5a77a 100644 |
563 | --- a/drivers/dma/edma.c |
564 | +++ b/drivers/dma/edma.c |
565 | @@ -182,11 +182,13 @@ static void edma_execute(struct edma_chan *echan) |
566 | echan->ecc->dummy_slot); |
567 | } |
568 | |
569 | - edma_resume(echan->ch_num); |
570 | - |
571 | if (edesc->processed <= MAX_NR_SG) { |
572 | dev_dbg(dev, "first transfer starting %d\n", echan->ch_num); |
573 | edma_start(echan->ch_num); |
574 | + } else { |
575 | + dev_dbg(dev, "chan: %d: completed %d elements, resuming\n", |
576 | + echan->ch_num, edesc->processed); |
577 | + edma_resume(echan->ch_num); |
578 | } |
579 | |
580 | /* |
581 | diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c |
582 | index 532bcb336eff..8ffdd7d2bade 100644 |
583 | --- a/drivers/gpio/gpio-mxs.c |
584 | +++ b/drivers/gpio/gpio-mxs.c |
585 | @@ -214,7 +214,8 @@ static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base) |
586 | ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR; |
587 | ct->regs.mask = PINCTRL_IRQEN(port); |
588 | |
589 | - irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0); |
590 | + irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK, |
591 | + IRQ_NOREQUEST, 0); |
592 | } |
593 | |
594 | static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset) |
595 | diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h |
596 | index 741965c001a6..460820551b09 100644 |
597 | --- a/drivers/gpu/drm/bochs/bochs.h |
598 | +++ b/drivers/gpu/drm/bochs/bochs.h |
599 | @@ -1,5 +1,6 @@ |
600 | #include <linux/io.h> |
601 | #include <linux/fb.h> |
602 | +#include <linux/console.h> |
603 | |
604 | #include <drm/drmP.h> |
605 | #include <drm/drm_crtc.h> |
606 | diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c |
607 | index 395bba261c9a..9c13df29fd20 100644 |
608 | --- a/drivers/gpu/drm/bochs/bochs_drv.c |
609 | +++ b/drivers/gpu/drm/bochs/bochs_drv.c |
610 | @@ -95,6 +95,49 @@ static struct drm_driver bochs_driver = { |
611 | }; |
612 | |
613 | /* ---------------------------------------------------------------------- */ |
614 | +/* pm interface */ |
615 | + |
616 | +static int bochs_pm_suspend(struct device *dev) |
617 | +{ |
618 | + struct pci_dev *pdev = to_pci_dev(dev); |
619 | + struct drm_device *drm_dev = pci_get_drvdata(pdev); |
620 | + struct bochs_device *bochs = drm_dev->dev_private; |
621 | + |
622 | + drm_kms_helper_poll_disable(drm_dev); |
623 | + |
624 | + if (bochs->fb.initialized) { |
625 | + console_lock(); |
626 | + fb_set_suspend(bochs->fb.helper.fbdev, 1); |
627 | + console_unlock(); |
628 | + } |
629 | + |
630 | + return 0; |
631 | +} |
632 | + |
633 | +static int bochs_pm_resume(struct device *dev) |
634 | +{ |
635 | + struct pci_dev *pdev = to_pci_dev(dev); |
636 | + struct drm_device *drm_dev = pci_get_drvdata(pdev); |
637 | + struct bochs_device *bochs = drm_dev->dev_private; |
638 | + |
639 | + drm_helper_resume_force_mode(drm_dev); |
640 | + |
641 | + if (bochs->fb.initialized) { |
642 | + console_lock(); |
643 | + fb_set_suspend(bochs->fb.helper.fbdev, 0); |
644 | + console_unlock(); |
645 | + } |
646 | + |
647 | + drm_kms_helper_poll_enable(drm_dev); |
648 | + return 0; |
649 | +} |
650 | + |
651 | +static const struct dev_pm_ops bochs_pm_ops = { |
652 | + SET_SYSTEM_SLEEP_PM_OPS(bochs_pm_suspend, |
653 | + bochs_pm_resume) |
654 | +}; |
655 | + |
656 | +/* ---------------------------------------------------------------------- */ |
657 | /* pci interface */ |
658 | |
659 | static int bochs_kick_out_firmware_fb(struct pci_dev *pdev) |
660 | @@ -155,6 +198,7 @@ static struct pci_driver bochs_pci_driver = { |
661 | .id_table = bochs_pci_tbl, |
662 | .probe = bochs_pci_probe, |
663 | .remove = bochs_pci_remove, |
664 | + .driver.pm = &bochs_pm_ops, |
665 | }; |
666 | |
667 | /* ---------------------------------------------------------------------- */ |
668 | diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c |
669 | index 953fc8aea69c..08ce520f61a5 100644 |
670 | --- a/drivers/gpu/drm/cirrus/cirrus_drv.c |
671 | +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c |
672 | @@ -11,6 +11,7 @@ |
673 | #include <linux/module.h> |
674 | #include <linux/console.h> |
675 | #include <drm/drmP.h> |
676 | +#include <drm/drm_crtc_helper.h> |
677 | |
678 | #include "cirrus_drv.h" |
679 | |
680 | @@ -75,6 +76,41 @@ static void cirrus_pci_remove(struct pci_dev *pdev) |
681 | drm_put_dev(dev); |
682 | } |
683 | |
684 | +static int cirrus_pm_suspend(struct device *dev) |
685 | +{ |
686 | + struct pci_dev *pdev = to_pci_dev(dev); |
687 | + struct drm_device *drm_dev = pci_get_drvdata(pdev); |
688 | + struct cirrus_device *cdev = drm_dev->dev_private; |
689 | + |
690 | + drm_kms_helper_poll_disable(drm_dev); |
691 | + |
692 | + if (cdev->mode_info.gfbdev) { |
693 | + console_lock(); |
694 | + fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 1); |
695 | + console_unlock(); |
696 | + } |
697 | + |
698 | + return 0; |
699 | +} |
700 | + |
701 | +static int cirrus_pm_resume(struct device *dev) |
702 | +{ |
703 | + struct pci_dev *pdev = to_pci_dev(dev); |
704 | + struct drm_device *drm_dev = pci_get_drvdata(pdev); |
705 | + struct cirrus_device *cdev = drm_dev->dev_private; |
706 | + |
707 | + drm_helper_resume_force_mode(drm_dev); |
708 | + |
709 | + if (cdev->mode_info.gfbdev) { |
710 | + console_lock(); |
711 | + fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 0); |
712 | + console_unlock(); |
713 | + } |
714 | + |
715 | + drm_kms_helper_poll_enable(drm_dev); |
716 | + return 0; |
717 | +} |
718 | + |
719 | static const struct file_operations cirrus_driver_fops = { |
720 | .owner = THIS_MODULE, |
721 | .open = drm_open, |
722 | @@ -103,11 +139,17 @@ static struct drm_driver driver = { |
723 | .dumb_destroy = drm_gem_dumb_destroy, |
724 | }; |
725 | |
726 | +static const struct dev_pm_ops cirrus_pm_ops = { |
727 | + SET_SYSTEM_SLEEP_PM_OPS(cirrus_pm_suspend, |
728 | + cirrus_pm_resume) |
729 | +}; |
730 | + |
731 | static struct pci_driver cirrus_pci_driver = { |
732 | .name = DRIVER_NAME, |
733 | .id_table = pciidlist, |
734 | .probe = cirrus_pci_probe, |
735 | .remove = cirrus_pci_remove, |
736 | + .driver.pm = &cirrus_pm_ops, |
737 | }; |
738 | |
739 | static int __init cirrus_init(void) |
740 | diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c |
741 | index 530f78f84dee..e9c89df482b5 100644 |
742 | --- a/drivers/gpu/drm/cirrus/cirrus_mode.c |
743 | +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c |
744 | @@ -308,6 +308,9 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc, |
745 | |
746 | WREG_HDR(hdr); |
747 | cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0); |
748 | + |
749 | + /* Unblank (needed on S3 resume, vgabios doesn't do it then) */ |
750 | + outb(0x20, 0x3c0); |
751 | return 0; |
752 | } |
753 | |
754 | diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c |
755 | index e2e39e65f109..a60a755f9977 100644 |
756 | --- a/drivers/gpu/drm/i915/intel_crt.c |
757 | +++ b/drivers/gpu/drm/i915/intel_crt.c |
758 | @@ -765,6 +765,14 @@ static const struct dmi_system_id intel_no_crt[] = { |
759 | DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"), |
760 | }, |
761 | }, |
762 | + { |
763 | + .callback = intel_no_crt_dmi_callback, |
764 | + .ident = "DELL XPS 8700", |
765 | + .matches = { |
766 | + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
767 | + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"), |
768 | + }, |
769 | + }, |
770 | { } |
771 | }; |
772 | |
773 | diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c |
774 | index d8d49d10f9bb..3c511c4adaca 100644 |
775 | --- a/drivers/input/mouse/synaptics.c |
776 | +++ b/drivers/input/mouse/synaptics.c |
777 | @@ -1515,6 +1515,22 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = { |
778 | .driver_data = (int []){1232, 5710, 1156, 4696}, |
779 | }, |
780 | { |
781 | + /* Lenovo ThinkPad Edge E431 */ |
782 | + .matches = { |
783 | + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
784 | + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"), |
785 | + }, |
786 | + .driver_data = (int []){1024, 5022, 2508, 4832}, |
787 | + }, |
788 | + { |
789 | + /* Lenovo ThinkPad T431s */ |
790 | + .matches = { |
791 | + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
792 | + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"), |
793 | + }, |
794 | + .driver_data = (int []){1024, 5112, 2024, 4832}, |
795 | + }, |
796 | + { |
797 | /* Lenovo ThinkPad T440s */ |
798 | .matches = { |
799 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
800 | @@ -1523,6 +1539,14 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = { |
801 | .driver_data = (int []){1024, 5112, 2024, 4832}, |
802 | }, |
803 | { |
804 | + /* Lenovo ThinkPad L440 */ |
805 | + .matches = { |
806 | + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
807 | + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"), |
808 | + }, |
809 | + .driver_data = (int []){1024, 5112, 2024, 4832}, |
810 | + }, |
811 | + { |
812 | /* Lenovo ThinkPad T540p */ |
813 | .matches = { |
814 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
815 | @@ -1530,6 +1554,32 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = { |
816 | }, |
817 | .driver_data = (int []){1024, 5056, 2058, 4832}, |
818 | }, |
819 | + { |
820 | + /* Lenovo ThinkPad L540 */ |
821 | + .matches = { |
822 | + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
823 | + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"), |
824 | + }, |
825 | + .driver_data = (int []){1024, 5112, 2024, 4832}, |
826 | + }, |
827 | + { |
828 | + /* Lenovo Yoga S1 */ |
829 | + .matches = { |
830 | + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
831 | + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, |
832 | + "ThinkPad S1 Yoga"), |
833 | + }, |
834 | + .driver_data = (int []){1232, 5710, 1156, 4696}, |
835 | + }, |
836 | + { |
837 | + /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */ |
838 | + .matches = { |
839 | + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
840 | + DMI_MATCH(DMI_PRODUCT_VERSION, |
841 | + "ThinkPad X1 Carbon 2nd"), |
842 | + }, |
843 | + .driver_data = (int []){1024, 5112, 2024, 4832}, |
844 | + }, |
845 | #endif |
846 | { } |
847 | }; |
848 | diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c |
849 | index 9ef0752e8a08..5320332390b7 100644 |
850 | --- a/drivers/md/dm-cache-metadata.c |
851 | +++ b/drivers/md/dm-cache-metadata.c |
852 | @@ -120,6 +120,12 @@ struct dm_cache_metadata { |
853 | unsigned policy_version[CACHE_POLICY_VERSION_SIZE]; |
854 | size_t policy_hint_size; |
855 | struct dm_cache_statistics stats; |
856 | + |
857 | + /* |
858 | + * Reading the space map root can fail, so we read it into this |
859 | + * buffer before the superblock is locked and updated. |
860 | + */ |
861 | + __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE]; |
862 | }; |
863 | |
864 | /*------------------------------------------------------------------- |
865 | @@ -260,11 +266,31 @@ static void __setup_mapping_info(struct dm_cache_metadata *cmd) |
866 | } |
867 | } |
868 | |
869 | +static int __save_sm_root(struct dm_cache_metadata *cmd) |
870 | +{ |
871 | + int r; |
872 | + size_t metadata_len; |
873 | + |
874 | + r = dm_sm_root_size(cmd->metadata_sm, &metadata_len); |
875 | + if (r < 0) |
876 | + return r; |
877 | + |
878 | + return dm_sm_copy_root(cmd->metadata_sm, &cmd->metadata_space_map_root, |
879 | + metadata_len); |
880 | +} |
881 | + |
882 | +static void __copy_sm_root(struct dm_cache_metadata *cmd, |
883 | + struct cache_disk_superblock *disk_super) |
884 | +{ |
885 | + memcpy(&disk_super->metadata_space_map_root, |
886 | + &cmd->metadata_space_map_root, |
887 | + sizeof(cmd->metadata_space_map_root)); |
888 | +} |
889 | + |
890 | static int __write_initial_superblock(struct dm_cache_metadata *cmd) |
891 | { |
892 | int r; |
893 | struct dm_block *sblock; |
894 | - size_t metadata_len; |
895 | struct cache_disk_superblock *disk_super; |
896 | sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT; |
897 | |
898 | @@ -272,12 +298,16 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) |
899 | if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS) |
900 | bdev_size = DM_CACHE_METADATA_MAX_SECTORS; |
901 | |
902 | - r = dm_sm_root_size(cmd->metadata_sm, &metadata_len); |
903 | + r = dm_tm_pre_commit(cmd->tm); |
904 | if (r < 0) |
905 | return r; |
906 | |
907 | - r = dm_tm_pre_commit(cmd->tm); |
908 | - if (r < 0) |
909 | + /* |
910 | + * dm_sm_copy_root() can fail. So we need to do it before we start |
911 | + * updating the superblock. |
912 | + */ |
913 | + r = __save_sm_root(cmd); |
914 | + if (r) |
915 | return r; |
916 | |
917 | r = superblock_lock_zero(cmd, &sblock); |
918 | @@ -293,10 +323,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) |
919 | memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version)); |
920 | disk_super->policy_hint_size = 0; |
921 | |
922 | - r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root, |
923 | - metadata_len); |
924 | - if (r < 0) |
925 | - goto bad_locked; |
926 | + __copy_sm_root(cmd, disk_super); |
927 | |
928 | disk_super->mapping_root = cpu_to_le64(cmd->root); |
929 | disk_super->hint_root = cpu_to_le64(cmd->hint_root); |
930 | @@ -313,10 +340,6 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) |
931 | disk_super->write_misses = cpu_to_le32(0); |
932 | |
933 | return dm_tm_commit(cmd->tm, sblock); |
934 | - |
935 | -bad_locked: |
936 | - dm_bm_unlock(sblock); |
937 | - return r; |
938 | } |
939 | |
940 | static int __format_metadata(struct dm_cache_metadata *cmd) |
941 | @@ -530,8 +553,9 @@ static int __begin_transaction_flags(struct dm_cache_metadata *cmd, |
942 | disk_super = dm_block_data(sblock); |
943 | update_flags(disk_super, mutator); |
944 | read_superblock_fields(cmd, disk_super); |
945 | + dm_bm_unlock(sblock); |
946 | |
947 | - return dm_bm_flush_and_unlock(cmd->bm, sblock); |
948 | + return dm_bm_flush(cmd->bm); |
949 | } |
950 | |
951 | static int __begin_transaction(struct dm_cache_metadata *cmd) |
952 | @@ -559,7 +583,6 @@ static int __commit_transaction(struct dm_cache_metadata *cmd, |
953 | flags_mutator mutator) |
954 | { |
955 | int r; |
956 | - size_t metadata_len; |
957 | struct cache_disk_superblock *disk_super; |
958 | struct dm_block *sblock; |
959 | |
960 | @@ -577,8 +600,8 @@ static int __commit_transaction(struct dm_cache_metadata *cmd, |
961 | if (r < 0) |
962 | return r; |
963 | |
964 | - r = dm_sm_root_size(cmd->metadata_sm, &metadata_len); |
965 | - if (r < 0) |
966 | + r = __save_sm_root(cmd); |
967 | + if (r) |
968 | return r; |
969 | |
970 | r = superblock_lock(cmd, &sblock); |
971 | @@ -605,13 +628,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd, |
972 | disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses); |
973 | disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits); |
974 | disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses); |
975 | - |
976 | - r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root, |
977 | - metadata_len); |
978 | - if (r < 0) { |
979 | - dm_bm_unlock(sblock); |
980 | - return r; |
981 | - } |
982 | + __copy_sm_root(cmd, disk_super); |
983 | |
984 | return dm_tm_commit(cmd->tm, sblock); |
985 | } |
986 | @@ -1228,22 +1245,12 @@ static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po |
987 | return 0; |
988 | } |
989 | |
990 | -int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) |
991 | +static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, uint32_t hint) |
992 | { |
993 | + struct dm_cache_metadata *cmd = context; |
994 | + __le32 value = cpu_to_le32(hint); |
995 | int r; |
996 | |
997 | - down_write(&cmd->root_lock); |
998 | - r = begin_hints(cmd, policy); |
999 | - up_write(&cmd->root_lock); |
1000 | - |
1001 | - return r; |
1002 | -} |
1003 | - |
1004 | -static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock, |
1005 | - uint32_t hint) |
1006 | -{ |
1007 | - int r; |
1008 | - __le32 value = cpu_to_le32(hint); |
1009 | __dm_bless_for_disk(&value); |
1010 | |
1011 | r = dm_array_set_value(&cmd->hint_info, cmd->hint_root, |
1012 | @@ -1253,16 +1260,25 @@ static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock, |
1013 | return r; |
1014 | } |
1015 | |
1016 | -int dm_cache_save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock, |
1017 | - uint32_t hint) |
1018 | +static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) |
1019 | { |
1020 | int r; |
1021 | |
1022 | - if (!hints_array_initialized(cmd)) |
1023 | - return 0; |
1024 | + r = begin_hints(cmd, policy); |
1025 | + if (r) { |
1026 | + DMERR("begin_hints failed"); |
1027 | + return r; |
1028 | + } |
1029 | + |
1030 | + return policy_walk_mappings(policy, save_hint, cmd); |
1031 | +} |
1032 | + |
1033 | +int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) |
1034 | +{ |
1035 | + int r; |
1036 | |
1037 | down_write(&cmd->root_lock); |
1038 | - r = save_hint(cmd, cblock, hint); |
1039 | + r = write_hints(cmd, policy); |
1040 | up_write(&cmd->root_lock); |
1041 | |
1042 | return r; |
1043 | diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h |
1044 | index cd906f14f98d..f0fb1dd26524 100644 |
1045 | --- a/drivers/md/dm-cache-metadata.h |
1046 | +++ b/drivers/md/dm-cache-metadata.h |
1047 | @@ -128,14 +128,7 @@ void dm_cache_dump(struct dm_cache_metadata *cmd); |
1048 | * rather than querying the policy for each cblock, we let it walk its data |
1049 | * structures and fill in the hints in whatever order it wishes. |
1050 | */ |
1051 | - |
1052 | -int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p); |
1053 | - |
1054 | -/* |
1055 | - * requests hints for every cblock and stores in the metadata device. |
1056 | - */ |
1057 | -int dm_cache_save_hint(struct dm_cache_metadata *cmd, |
1058 | - dm_cblock_t cblock, uint32_t hint); |
1059 | +int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p); |
1060 | |
1061 | /* |
1062 | * Query method. Are all the blocks in the cache clean? |
1063 | diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c |
1064 | index 074b9c8e4cf0..b82b58f0bb85 100644 |
1065 | --- a/drivers/md/dm-cache-target.c |
1066 | +++ b/drivers/md/dm-cache-target.c |
1067 | @@ -239,7 +239,7 @@ struct cache { |
1068 | */ |
1069 | dm_dblock_t discard_nr_blocks; |
1070 | unsigned long *discard_bitset; |
1071 | - uint32_t discard_block_size; /* a power of 2 times sectors per block */ |
1072 | + uint32_t discard_block_size; |
1073 | |
1074 | /* |
1075 | * Rather than reconstructing the table line for the status we just |
1076 | @@ -2171,35 +2171,6 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca, |
1077 | return 0; |
1078 | } |
1079 | |
1080 | -/* |
1081 | - * We want the discard block size to be a power of two, at least the size |
1082 | - * of the cache block size, and have no more than 2^14 discard blocks |
1083 | - * across the origin. |
1084 | - */ |
1085 | -#define MAX_DISCARD_BLOCKS (1 << 14) |
1086 | - |
1087 | -static bool too_many_discard_blocks(sector_t discard_block_size, |
1088 | - sector_t origin_size) |
1089 | -{ |
1090 | - (void) sector_div(origin_size, discard_block_size); |
1091 | - |
1092 | - return origin_size > MAX_DISCARD_BLOCKS; |
1093 | -} |
1094 | - |
1095 | -static sector_t calculate_discard_block_size(sector_t cache_block_size, |
1096 | - sector_t origin_size) |
1097 | -{ |
1098 | - sector_t discard_block_size; |
1099 | - |
1100 | - discard_block_size = roundup_pow_of_two(cache_block_size); |
1101 | - |
1102 | - if (origin_size) |
1103 | - while (too_many_discard_blocks(discard_block_size, origin_size)) |
1104 | - discard_block_size *= 2; |
1105 | - |
1106 | - return discard_block_size; |
1107 | -} |
1108 | - |
1109 | #define DEFAULT_MIGRATION_THRESHOLD 2048 |
1110 | |
1111 | static int cache_create(struct cache_args *ca, struct cache **result) |
1112 | @@ -2321,9 +2292,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) |
1113 | } |
1114 | clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); |
1115 | |
1116 | - cache->discard_block_size = |
1117 | - calculate_discard_block_size(cache->sectors_per_block, |
1118 | - cache->origin_sectors); |
1119 | + cache->discard_block_size = cache->sectors_per_block; |
1120 | cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks); |
1121 | cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); |
1122 | if (!cache->discard_bitset) { |
1123 | @@ -2631,30 +2600,6 @@ static int write_discard_bitset(struct cache *cache) |
1124 | return 0; |
1125 | } |
1126 | |
1127 | -static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, |
1128 | - uint32_t hint) |
1129 | -{ |
1130 | - struct cache *cache = context; |
1131 | - return dm_cache_save_hint(cache->cmd, cblock, hint); |
1132 | -} |
1133 | - |
1134 | -static int write_hints(struct cache *cache) |
1135 | -{ |
1136 | - int r; |
1137 | - |
1138 | - r = dm_cache_begin_hints(cache->cmd, cache->policy); |
1139 | - if (r) { |
1140 | - DMERR("dm_cache_begin_hints failed"); |
1141 | - return r; |
1142 | - } |
1143 | - |
1144 | - r = policy_walk_mappings(cache->policy, save_hint, cache); |
1145 | - if (r) |
1146 | - DMERR("policy_walk_mappings failed"); |
1147 | - |
1148 | - return r; |
1149 | -} |
1150 | - |
1151 | /* |
1152 | * returns true on success |
1153 | */ |
1154 | @@ -2672,7 +2617,7 @@ static bool sync_metadata(struct cache *cache) |
1155 | |
1156 | save_stats(cache); |
1157 | |
1158 | - r3 = write_hints(cache); |
1159 | + r3 = dm_cache_write_hints(cache->cmd, cache->policy); |
1160 | if (r3) |
1161 | DMERR("could not write hints"); |
1162 | |
1163 | @@ -3120,7 +3065,7 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits) |
1164 | /* |
1165 | * FIXME: these limits may be incompatible with the cache device |
1166 | */ |
1167 | - limits->max_discard_sectors = cache->discard_block_size * 1024; |
1168 | + limits->max_discard_sectors = cache->discard_block_size; |
1169 | limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; |
1170 | } |
1171 | |
1172 | @@ -3145,7 +3090,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) |
1173 | |
1174 | static struct target_type cache_target = { |
1175 | .name = "cache", |
1176 | - .version = {1, 3, 0}, |
1177 | + .version = {1, 4, 0}, |
1178 | .module = THIS_MODULE, |
1179 | .ctr = cache_ctr, |
1180 | .dtr = cache_dtr, |
1181 | diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c |
1182 | index fb9efc829182..b086a945edcb 100644 |
1183 | --- a/drivers/md/dm-thin-metadata.c |
1184 | +++ b/drivers/md/dm-thin-metadata.c |
1185 | @@ -192,6 +192,13 @@ struct dm_pool_metadata { |
1186 | * operation possible in this state is the closing of the device. |
1187 | */ |
1188 | bool fail_io:1; |
1189 | + |
1190 | + /* |
1191 | + * Reading the space map roots can fail, so we read it into these |
1192 | + * buffers before the superblock is locked and updated. |
1193 | + */ |
1194 | + __u8 data_space_map_root[SPACE_MAP_ROOT_SIZE]; |
1195 | + __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE]; |
1196 | }; |
1197 | |
1198 | struct dm_thin_device { |
1199 | @@ -431,26 +438,53 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd) |
1200 | pmd->details_info.value_type.equal = NULL; |
1201 | } |
1202 | |
1203 | +static int save_sm_roots(struct dm_pool_metadata *pmd) |
1204 | +{ |
1205 | + int r; |
1206 | + size_t len; |
1207 | + |
1208 | + r = dm_sm_root_size(pmd->metadata_sm, &len); |
1209 | + if (r < 0) |
1210 | + return r; |
1211 | + |
1212 | + r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len); |
1213 | + if (r < 0) |
1214 | + return r; |
1215 | + |
1216 | + r = dm_sm_root_size(pmd->data_sm, &len); |
1217 | + if (r < 0) |
1218 | + return r; |
1219 | + |
1220 | + return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len); |
1221 | +} |
1222 | + |
1223 | +static void copy_sm_roots(struct dm_pool_metadata *pmd, |
1224 | + struct thin_disk_superblock *disk) |
1225 | +{ |
1226 | + memcpy(&disk->metadata_space_map_root, |
1227 | + &pmd->metadata_space_map_root, |
1228 | + sizeof(pmd->metadata_space_map_root)); |
1229 | + |
1230 | + memcpy(&disk->data_space_map_root, |
1231 | + &pmd->data_space_map_root, |
1232 | + sizeof(pmd->data_space_map_root)); |
1233 | +} |
1234 | + |
1235 | static int __write_initial_superblock(struct dm_pool_metadata *pmd) |
1236 | { |
1237 | int r; |
1238 | struct dm_block *sblock; |
1239 | - size_t metadata_len, data_len; |
1240 | struct thin_disk_superblock *disk_super; |
1241 | sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT; |
1242 | |
1243 | if (bdev_size > THIN_METADATA_MAX_SECTORS) |
1244 | bdev_size = THIN_METADATA_MAX_SECTORS; |
1245 | |
1246 | - r = dm_sm_root_size(pmd->metadata_sm, &metadata_len); |
1247 | - if (r < 0) |
1248 | - return r; |
1249 | - |
1250 | - r = dm_sm_root_size(pmd->data_sm, &data_len); |
1251 | + r = dm_sm_commit(pmd->data_sm); |
1252 | if (r < 0) |
1253 | return r; |
1254 | |
1255 | - r = dm_sm_commit(pmd->data_sm); |
1256 | + r = save_sm_roots(pmd); |
1257 | if (r < 0) |
1258 | return r; |
1259 | |
1260 | @@ -471,15 +505,7 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd) |
1261 | disk_super->trans_id = 0; |
1262 | disk_super->held_root = 0; |
1263 | |
1264 | - r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root, |
1265 | - metadata_len); |
1266 | - if (r < 0) |
1267 | - goto bad_locked; |
1268 | - |
1269 | - r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root, |
1270 | - data_len); |
1271 | - if (r < 0) |
1272 | - goto bad_locked; |
1273 | + copy_sm_roots(pmd, disk_super); |
1274 | |
1275 | disk_super->data_mapping_root = cpu_to_le64(pmd->root); |
1276 | disk_super->device_details_root = cpu_to_le64(pmd->details_root); |
1277 | @@ -488,10 +514,6 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd) |
1278 | disk_super->data_block_size = cpu_to_le32(pmd->data_block_size); |
1279 | |
1280 | return dm_tm_commit(pmd->tm, sblock); |
1281 | - |
1282 | -bad_locked: |
1283 | - dm_bm_unlock(sblock); |
1284 | - return r; |
1285 | } |
1286 | |
1287 | static int __format_metadata(struct dm_pool_metadata *pmd) |
1288 | @@ -769,6 +791,10 @@ static int __commit_transaction(struct dm_pool_metadata *pmd) |
1289 | if (r < 0) |
1290 | return r; |
1291 | |
1292 | + r = save_sm_roots(pmd); |
1293 | + if (r < 0) |
1294 | + return r; |
1295 | + |
1296 | r = superblock_lock(pmd, &sblock); |
1297 | if (r) |
1298 | return r; |
1299 | @@ -780,21 +806,9 @@ static int __commit_transaction(struct dm_pool_metadata *pmd) |
1300 | disk_super->trans_id = cpu_to_le64(pmd->trans_id); |
1301 | disk_super->flags = cpu_to_le32(pmd->flags); |
1302 | |
1303 | - r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root, |
1304 | - metadata_len); |
1305 | - if (r < 0) |
1306 | - goto out_locked; |
1307 | - |
1308 | - r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root, |
1309 | - data_len); |
1310 | - if (r < 0) |
1311 | - goto out_locked; |
1312 | + copy_sm_roots(pmd, disk_super); |
1313 | |
1314 | return dm_tm_commit(pmd->tm, sblock); |
1315 | - |
1316 | -out_locked: |
1317 | - dm_bm_unlock(sblock); |
1318 | - return r; |
1319 | } |
1320 | |
1321 | struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, |
1322 | diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c |
1323 | index be70d38745f7..60cc50685c14 100644 |
1324 | --- a/drivers/md/dm-thin.c |
1325 | +++ b/drivers/md/dm-thin.c |
1326 | @@ -1392,9 +1392,9 @@ static void process_deferred_bios(struct pool *pool) |
1327 | */ |
1328 | if (ensure_next_mapping(pool)) { |
1329 | spin_lock_irqsave(&pool->lock, flags); |
1330 | + bio_list_add(&pool->deferred_bios, bio); |
1331 | bio_list_merge(&pool->deferred_bios, &bios); |
1332 | spin_unlock_irqrestore(&pool->lock, flags); |
1333 | - |
1334 | break; |
1335 | } |
1336 | |
1337 | diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c |
1338 | index 455f79279a16..087411c95ffc 100644 |
1339 | --- a/drivers/md/persistent-data/dm-block-manager.c |
1340 | +++ b/drivers/md/persistent-data/dm-block-manager.c |
1341 | @@ -595,25 +595,14 @@ int dm_bm_unlock(struct dm_block *b) |
1342 | } |
1343 | EXPORT_SYMBOL_GPL(dm_bm_unlock); |
1344 | |
1345 | -int dm_bm_flush_and_unlock(struct dm_block_manager *bm, |
1346 | - struct dm_block *superblock) |
1347 | +int dm_bm_flush(struct dm_block_manager *bm) |
1348 | { |
1349 | - int r; |
1350 | - |
1351 | if (bm->read_only) |
1352 | return -EPERM; |
1353 | |
1354 | - r = dm_bufio_write_dirty_buffers(bm->bufio); |
1355 | - if (unlikely(r)) { |
1356 | - dm_bm_unlock(superblock); |
1357 | - return r; |
1358 | - } |
1359 | - |
1360 | - dm_bm_unlock(superblock); |
1361 | - |
1362 | return dm_bufio_write_dirty_buffers(bm->bufio); |
1363 | } |
1364 | -EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock); |
1365 | +EXPORT_SYMBOL_GPL(dm_bm_flush); |
1366 | |
1367 | void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b) |
1368 | { |
1369 | diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h |
1370 | index 13cd58e1fe69..1b95dfc17786 100644 |
1371 | --- a/drivers/md/persistent-data/dm-block-manager.h |
1372 | +++ b/drivers/md/persistent-data/dm-block-manager.h |
1373 | @@ -105,8 +105,7 @@ int dm_bm_unlock(struct dm_block *b); |
1374 | * |
1375 | * This method always blocks. |
1376 | */ |
1377 | -int dm_bm_flush_and_unlock(struct dm_block_manager *bm, |
1378 | - struct dm_block *superblock); |
1379 | +int dm_bm_flush(struct dm_block_manager *bm); |
1380 | |
1381 | /* |
1382 | * Request data is prefetched into the cache. |
1383 | diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c |
1384 | index 81da1a26042e..3bc30a0ae3d6 100644 |
1385 | --- a/drivers/md/persistent-data/dm-transaction-manager.c |
1386 | +++ b/drivers/md/persistent-data/dm-transaction-manager.c |
1387 | @@ -154,7 +154,7 @@ int dm_tm_pre_commit(struct dm_transaction_manager *tm) |
1388 | if (r < 0) |
1389 | return r; |
1390 | |
1391 | - return 0; |
1392 | + return dm_bm_flush(tm->bm); |
1393 | } |
1394 | EXPORT_SYMBOL_GPL(dm_tm_pre_commit); |
1395 | |
1396 | @@ -164,8 +164,9 @@ int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root) |
1397 | return -EWOULDBLOCK; |
1398 | |
1399 | wipe_shadow_table(tm); |
1400 | + dm_bm_unlock(root); |
1401 | |
1402 | - return dm_bm_flush_and_unlock(tm->bm, root); |
1403 | + return dm_bm_flush(tm->bm); |
1404 | } |
1405 | EXPORT_SYMBOL_GPL(dm_tm_commit); |
1406 | |
1407 | diff --git a/drivers/md/persistent-data/dm-transaction-manager.h b/drivers/md/persistent-data/dm-transaction-manager.h |
1408 | index b5b139076ca5..2772ed2a781a 100644 |
1409 | --- a/drivers/md/persistent-data/dm-transaction-manager.h |
1410 | +++ b/drivers/md/persistent-data/dm-transaction-manager.h |
1411 | @@ -38,18 +38,17 @@ struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transac |
1412 | /* |
1413 | * We use a 2-phase commit here. |
1414 | * |
1415 | - * i) In the first phase the block manager is told to start flushing, and |
1416 | - * the changes to the space map are written to disk. You should interrogate |
1417 | - * your particular space map to get detail of its root node etc. to be |
1418 | - * included in your superblock. |
1419 | + * i) Make all changes for the transaction *except* for the superblock. |
1420 | + * Then call dm_tm_pre_commit() to flush them to disk. |
1421 | * |
1422 | - * ii) @root will be committed last. You shouldn't use more than the |
1423 | - * first 512 bytes of @root if you wish the transaction to survive a power |
1424 | - * failure. You *must* have a write lock held on @root for both stage (i) |
1425 | - * and (ii). The commit will drop the write lock. |
1426 | + * ii) Lock your superblock. Update. Then call dm_tm_commit() which will |
1427 | + * unlock the superblock and flush it. No other blocks should be updated |
1428 | + * during this period. Care should be taken to never unlock a partially |
1429 | + * updated superblock; perform any operations that could fail *before* you |
1430 | + * take the superblock lock. |
1431 | */ |
1432 | int dm_tm_pre_commit(struct dm_transaction_manager *tm); |
1433 | -int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root); |
1434 | +int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *superblock); |
1435 | |
1436 | /* |
1437 | * These methods are the only way to get hold of a writeable block. |
1438 | diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c |
1439 | index 7a190fe4dff1..e5565aa5ed36 100644 |
1440 | --- a/drivers/mmc/host/sdhci-bcm-kona.c |
1441 | +++ b/drivers/mmc/host/sdhci-bcm-kona.c |
1442 | @@ -314,7 +314,7 @@ err_pltfm_free: |
1443 | return ret; |
1444 | } |
1445 | |
1446 | -static int __exit sdhci_bcm_kona_remove(struct platform_device *pdev) |
1447 | +static int sdhci_bcm_kona_remove(struct platform_device *pdev) |
1448 | { |
1449 | return sdhci_pltfm_unregister(pdev); |
1450 | } |
1451 | diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c |
1452 | index c36e9b84487c..2c059890a394 100644 |
1453 | --- a/drivers/mtd/nand/atmel_nand.c |
1454 | +++ b/drivers/mtd/nand/atmel_nand.c |
1455 | @@ -1220,6 +1220,7 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev, |
1456 | goto err; |
1457 | } |
1458 | |
1459 | + nand_chip->options |= NAND_NO_SUBPAGE_WRITE; |
1460 | nand_chip->ecc.read_page = atmel_nand_pmecc_read_page; |
1461 | nand_chip->ecc.write_page = atmel_nand_pmecc_write_page; |
1462 | |
1463 | diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c |
1464 | index fec31d71b84e..fe8a020ecf25 100644 |
1465 | --- a/drivers/mtd/nand/diskonchip.c |
1466 | +++ b/drivers/mtd/nand/diskonchip.c |
1467 | @@ -1438,7 +1438,7 @@ static int __init doc_probe(unsigned long physadr) |
1468 | int reg, len, numchips; |
1469 | int ret = 0; |
1470 | |
1471 | - if (!request_mem_region(physadr, DOC_IOREMAP_LEN, NULL)) |
1472 | + if (!request_mem_region(physadr, DOC_IOREMAP_LEN, "DiskOnChip")) |
1473 | return -EBUSY; |
1474 | virtadr = ioremap(physadr, DOC_IOREMAP_LEN); |
1475 | if (!virtadr) { |
1476 | diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c |
1477 | index 9ee09a8177c6..7360f7872d9a 100644 |
1478 | --- a/drivers/mtd/nand/nuc900_nand.c |
1479 | +++ b/drivers/mtd/nand/nuc900_nand.c |
1480 | @@ -225,7 +225,7 @@ static void nuc900_nand_enable(struct nuc900_nand *nand) |
1481 | val = __raw_readl(nand->reg + REG_FMICSR); |
1482 | |
1483 | if (!(val & NAND_EN)) |
1484 | - __raw_writel(val | NAND_EN, REG_FMICSR); |
1485 | + __raw_writel(val | NAND_EN, nand->reg + REG_FMICSR); |
1486 | |
1487 | val = __raw_readl(nand->reg + REG_SMCSR); |
1488 | |
1489 | diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c |
1490 | index 4b8e89583f2a..cf49c22673b9 100644 |
1491 | --- a/drivers/mtd/sm_ftl.c |
1492 | +++ b/drivers/mtd/sm_ftl.c |
1493 | @@ -59,15 +59,12 @@ static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl) |
1494 | struct attribute_group *attr_group; |
1495 | struct attribute **attributes; |
1496 | struct sm_sysfs_attribute *vendor_attribute; |
1497 | + char *vendor; |
1498 | |
1499 | - int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, |
1500 | - SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET); |
1501 | - |
1502 | - char *vendor = kmalloc(vendor_len, GFP_KERNEL); |
1503 | + vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, |
1504 | + SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL); |
1505 | if (!vendor) |
1506 | goto error1; |
1507 | - memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len); |
1508 | - vendor[vendor_len] = 0; |
1509 | |
1510 | /* Initialize sysfs attributes */ |
1511 | vendor_attribute = |
1512 | @@ -78,7 +75,7 @@ static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl) |
1513 | sysfs_attr_init(&vendor_attribute->dev_attr.attr); |
1514 | |
1515 | vendor_attribute->data = vendor; |
1516 | - vendor_attribute->len = vendor_len; |
1517 | + vendor_attribute->len = strlen(vendor); |
1518 | vendor_attribute->dev_attr.attr.name = "vendor"; |
1519 | vendor_attribute->dev_attr.attr.mode = S_IRUGO; |
1520 | vendor_attribute->dev_attr.show = sm_attr_show; |
1521 | diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c |
1522 | index 8d76fca7fde7..ca2dfbe01598 100644 |
1523 | --- a/drivers/net/ethernet/marvell/mvneta.c |
1524 | +++ b/drivers/net/ethernet/marvell/mvneta.c |
1525 | @@ -89,9 +89,8 @@ |
1526 | #define MVNETA_TX_IN_PRGRS BIT(1) |
1527 | #define MVNETA_TX_FIFO_EMPTY BIT(8) |
1528 | #define MVNETA_RX_MIN_FRAME_SIZE 0x247c |
1529 | -#define MVNETA_SERDES_CFG 0x24A0 |
1530 | +#define MVNETA_SGMII_SERDES_CFG 0x24A0 |
1531 | #define MVNETA_SGMII_SERDES_PROTO 0x0cc7 |
1532 | -#define MVNETA_RGMII_SERDES_PROTO 0x0667 |
1533 | #define MVNETA_TYPE_PRIO 0x24bc |
1534 | #define MVNETA_FORCE_UNI BIT(21) |
1535 | #define MVNETA_TXQ_CMD_1 0x24e4 |
1536 | @@ -712,6 +711,35 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp, |
1537 | mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); |
1538 | } |
1539 | |
1540 | + |
1541 | + |
1542 | +/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */ |
1543 | +static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable) |
1544 | +{ |
1545 | + u32 val; |
1546 | + |
1547 | + val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); |
1548 | + |
1549 | + if (enable) |
1550 | + val |= MVNETA_GMAC2_PORT_RGMII; |
1551 | + else |
1552 | + val &= ~MVNETA_GMAC2_PORT_RGMII; |
1553 | + |
1554 | + mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); |
1555 | +} |
1556 | + |
1557 | +/* Config SGMII port */ |
1558 | +static void mvneta_port_sgmii_config(struct mvneta_port *pp) |
1559 | +{ |
1560 | + u32 val; |
1561 | + |
1562 | + val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); |
1563 | + val |= MVNETA_GMAC2_PCS_ENABLE; |
1564 | + mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); |
1565 | + |
1566 | + mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); |
1567 | +} |
1568 | + |
1569 | /* Start the Ethernet port RX and TX activity */ |
1570 | static void mvneta_port_up(struct mvneta_port *pp) |
1571 | { |
1572 | @@ -2729,15 +2757,12 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) |
1573 | mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); |
1574 | |
1575 | if (phy_mode == PHY_INTERFACE_MODE_SGMII) |
1576 | - mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); |
1577 | - else |
1578 | - mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO); |
1579 | + mvneta_port_sgmii_config(pp); |
1580 | |
1581 | - val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); |
1582 | - |
1583 | - val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; |
1584 | + mvneta_gmac_rgmii_set(pp, 1); |
1585 | |
1586 | /* Cancel Port Reset */ |
1587 | + val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); |
1588 | val &= ~MVNETA_GMAC2_PORT_RESET; |
1589 | mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); |
1590 | |
1591 | diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c |
1592 | index 55897d508a76..0526ddff977d 100644 |
1593 | --- a/drivers/net/wireless/ath/ath9k/xmit.c |
1594 | +++ b/drivers/net/wireless/ath/ath9k/xmit.c |
1595 | @@ -1698,7 +1698,7 @@ int ath_cabq_update(struct ath_softc *sc) |
1596 | |
1597 | ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi); |
1598 | |
1599 | - qi.tqi_readyTime = (cur_conf->beacon_interval * |
1600 | + qi.tqi_readyTime = (TU_TO_USEC(cur_conf->beacon_interval) * |
1601 | ATH_CABQ_READY_TIME) / 100; |
1602 | ath_txq_update(sc, qnum, &qi); |
1603 | |
1604 | diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c |
1605 | index 05ee7f10cc8f..24ccbe96e0c8 100644 |
1606 | --- a/drivers/net/wireless/b43/phy_n.c |
1607 | +++ b/drivers/net/wireless/b43/phy_n.c |
1608 | @@ -5176,22 +5176,22 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev, |
1609 | int ch = new_channel->hw_value; |
1610 | |
1611 | u16 old_band_5ghz; |
1612 | - u32 tmp32; |
1613 | + u16 tmp16; |
1614 | |
1615 | old_band_5ghz = |
1616 | b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ; |
1617 | if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) { |
1618 | - tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR); |
1619 | - b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4); |
1620 | + tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR); |
1621 | + b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4); |
1622 | b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000); |
1623 | - b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32); |
1624 | + b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16); |
1625 | b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ); |
1626 | } else if (new_channel->band == IEEE80211_BAND_2GHZ && old_band_5ghz) { |
1627 | b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ); |
1628 | - tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR); |
1629 | - b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4); |
1630 | + tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR); |
1631 | + b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4); |
1632 | b43_phy_mask(dev, B43_PHY_B_BBCFG, 0x3FFF); |
1633 | - b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32); |
1634 | + b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16); |
1635 | } |
1636 | |
1637 | b43_chantab_phy_upload(dev, e); |
1638 | diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c |
1639 | index ba1b1ea54252..ea7e70cb34f0 100644 |
1640 | --- a/drivers/net/wireless/iwlwifi/dvm/main.c |
1641 | +++ b/drivers/net/wireless/iwlwifi/dvm/main.c |
1642 | @@ -252,13 +252,17 @@ static void iwl_bg_bt_runtime_config(struct work_struct *work) |
1643 | struct iwl_priv *priv = |
1644 | container_of(work, struct iwl_priv, bt_runtime_config); |
1645 | |
1646 | + mutex_lock(&priv->mutex); |
1647 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) |
1648 | - return; |
1649 | + goto out; |
1650 | |
1651 | /* dont send host command if rf-kill is on */ |
1652 | if (!iwl_is_ready_rf(priv)) |
1653 | - return; |
1654 | + goto out; |
1655 | + |
1656 | iwlagn_send_advance_bt_config(priv); |
1657 | +out: |
1658 | + mutex_unlock(&priv->mutex); |
1659 | } |
1660 | |
1661 | static void iwl_bg_bt_full_concurrency(struct work_struct *work) |
1662 | diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c |
1663 | index c35b8661b395..9a856e5031f1 100644 |
1664 | --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c |
1665 | +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c |
1666 | @@ -179,7 +179,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) |
1667 | !iwlwifi_mod_params.sw_crypto) |
1668 | hw->flags |= IEEE80211_HW_MFP_CAPABLE; |
1669 | |
1670 | - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) { |
1671 | + if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) { |
1672 | hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD; |
1673 | hw->uapsd_queues = IWL_UAPSD_AC_INFO; |
1674 | hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; |
1675 | diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c |
1676 | index e06971be7df7..f923d8c9a296 100644 |
1677 | --- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c |
1678 | +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c |
1679 | @@ -1025,9 +1025,20 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw) |
1680 | bool rtstatus = true; |
1681 | int err = 0; |
1682 | u8 tmp_u1b, u1byte; |
1683 | + unsigned long flags; |
1684 | |
1685 | RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Rtl8188EE hw init\n"); |
1686 | rtlpriv->rtlhal.being_init_adapter = true; |
1687 | + /* As this function can take a very long time (up to 350 ms) |
1688 | + * and can be called with irqs disabled, reenable the irqs |
1689 | + * to let the other devices continue being serviced. |
1690 | + * |
1691 | + * It is safe doing so since our own interrupts will only be enabled |
1692 | + * in a subsequent step. |
1693 | + */ |
1694 | + local_save_flags(flags); |
1695 | + local_irq_enable(); |
1696 | + |
1697 | rtlpriv->intf_ops->disable_aspm(hw); |
1698 | |
1699 | tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CLKR+1); |
1700 | @@ -1043,7 +1054,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw) |
1701 | if (rtstatus != true) { |
1702 | RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); |
1703 | err = 1; |
1704 | - return err; |
1705 | + goto exit; |
1706 | } |
1707 | |
1708 | err = rtl88e_download_fw(hw, false); |
1709 | @@ -1051,8 +1062,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw) |
1710 | RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, |
1711 | "Failed to download FW. Init HW without FW now..\n"); |
1712 | err = 1; |
1713 | - rtlhal->fw_ready = false; |
1714 | - return err; |
1715 | + goto exit; |
1716 | } else { |
1717 | rtlhal->fw_ready = true; |
1718 | } |
1719 | @@ -1135,10 +1145,12 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw) |
1720 | } |
1721 | rtl_write_byte(rtlpriv, REG_NAV_CTRL+2, ((30000+127)/128)); |
1722 | rtl88e_dm_init(hw); |
1723 | +exit: |
1724 | + local_irq_restore(flags); |
1725 | rtlpriv->rtlhal.being_init_adapter = false; |
1726 | RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "end of Rtl8188EE hw init %x\n", |
1727 | err); |
1728 | - return 0; |
1729 | + return err; |
1730 | } |
1731 | |
1732 | static enum version_8188e _rtl88ee_read_chip_version(struct ieee80211_hw *hw) |
1733 | diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c |
1734 | index aece6c9cccf1..7622e44567cc 100644 |
1735 | --- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c |
1736 | +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c |
1737 | @@ -293,7 +293,7 @@ static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw, |
1738 | u8 *psaddr; |
1739 | __le16 fc; |
1740 | u16 type, ufc; |
1741 | - bool match_bssid, packet_toself, packet_beacon, addr; |
1742 | + bool match_bssid, packet_toself, packet_beacon = false, addr; |
1743 | |
1744 | tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift; |
1745 | |
1746 | diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c |
1747 | index 468bf73cc883..cd17c642e746 100644 |
1748 | --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c |
1749 | +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c |
1750 | @@ -985,6 +985,17 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw) |
1751 | struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); |
1752 | int err = 0; |
1753 | static bool iqk_initialized; |
1754 | + unsigned long flags; |
1755 | + |
1756 | + /* As this function can take a very long time (up to 350 ms) |
1757 | + * and can be called with irqs disabled, reenable the irqs |
1758 | + * to let the other devices continue being serviced. |
1759 | + * |
1760 | + * It is safe doing so since our own interrupts will only be enabled |
1761 | + * in a subsequent step. |
1762 | + */ |
1763 | + local_save_flags(flags); |
1764 | + local_irq_enable(); |
1765 | |
1766 | rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU; |
1767 | err = _rtl92cu_init_mac(hw); |
1768 | @@ -997,7 +1008,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw) |
1769 | RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, |
1770 | "Failed to download FW. Init HW without FW now..\n"); |
1771 | err = 1; |
1772 | - return err; |
1773 | + goto exit; |
1774 | } |
1775 | rtlhal->last_hmeboxnum = 0; /* h2c */ |
1776 | _rtl92cu_phy_param_tab_init(hw); |
1777 | @@ -1034,6 +1045,8 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw) |
1778 | _InitPABias(hw); |
1779 | _update_mac_setting(hw); |
1780 | rtl92c_dm_init(hw); |
1781 | +exit: |
1782 | + local_irq_restore(flags); |
1783 | return err; |
1784 | } |
1785 | |
1786 | diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c |
1787 | index 4f461786a7eb..c471400fe8f0 100644 |
1788 | --- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c |
1789 | +++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c |
1790 | @@ -955,7 +955,7 @@ int rtl92se_hw_init(struct ieee80211_hw *hw) |
1791 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); |
1792 | struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); |
1793 | u8 tmp_byte = 0; |
1794 | - |
1795 | + unsigned long flags; |
1796 | bool rtstatus = true; |
1797 | u8 tmp_u1b; |
1798 | int err = false; |
1799 | @@ -967,6 +967,16 @@ int rtl92se_hw_init(struct ieee80211_hw *hw) |
1800 | |
1801 | rtlpci->being_init_adapter = true; |
1802 | |
1803 | + /* As this function can take a very long time (up to 350 ms) |
1804 | + * and can be called with irqs disabled, reenable the irqs |
1805 | + * to let the other devices continue being serviced. |
1806 | + * |
1807 | + * It is safe doing so since our own interrupts will only be enabled |
1808 | + * in a subsequent step. |
1809 | + */ |
1810 | + local_save_flags(flags); |
1811 | + local_irq_enable(); |
1812 | + |
1813 | rtlpriv->intf_ops->disable_aspm(hw); |
1814 | |
1815 | /* 1. MAC Initialize */ |
1816 | @@ -984,7 +994,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw) |
1817 | RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, |
1818 | "Failed to download FW. Init HW without FW now... " |
1819 | "Please copy FW into /lib/firmware/rtlwifi\n"); |
1820 | - return 1; |
1821 | + err = 1; |
1822 | + goto exit; |
1823 | } |
1824 | |
1825 | /* After FW download, we have to reset MAC register */ |
1826 | @@ -997,7 +1008,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw) |
1827 | /* 3. Initialize MAC/PHY Config by MACPHY_reg.txt */ |
1828 | if (!rtl92s_phy_mac_config(hw)) { |
1829 | RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "MAC Config failed\n"); |
1830 | - return rtstatus; |
1831 | + err = rtstatus; |
1832 | + goto exit; |
1833 | } |
1834 | |
1835 | /* because last function modify RCR, so we update |
1836 | @@ -1016,7 +1028,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw) |
1837 | /* 4. Initialize BB After MAC Config PHY_reg.txt, AGC_Tab.txt */ |
1838 | if (!rtl92s_phy_bb_config(hw)) { |
1839 | RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "BB Config failed\n"); |
1840 | - return rtstatus; |
1841 | + err = rtstatus; |
1842 | + goto exit; |
1843 | } |
1844 | |
1845 | /* 5. Initiailze RF RAIO_A.txt RF RAIO_B.txt */ |
1846 | @@ -1033,7 +1046,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw) |
1847 | |
1848 | if (!rtl92s_phy_rf_config(hw)) { |
1849 | RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "RF Config failed\n"); |
1850 | - return rtstatus; |
1851 | + err = rtstatus; |
1852 | + goto exit; |
1853 | } |
1854 | |
1855 | /* After read predefined TXT, we must set BB/MAC/RF |
1856 | @@ -1122,8 +1136,9 @@ int rtl92se_hw_init(struct ieee80211_hw *hw) |
1857 | |
1858 | rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_ON); |
1859 | rtl92s_dm_init(hw); |
1860 | +exit: |
1861 | + local_irq_restore(flags); |
1862 | rtlpci->being_init_adapter = false; |
1863 | - |
1864 | return err; |
1865 | } |
1866 | |
1867 | diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c |
1868 | index 27efbcdac6a9..d27abef140f0 100644 |
1869 | --- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c |
1870 | +++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c |
1871 | @@ -49,6 +49,12 @@ static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 skb_queue) |
1872 | if (ieee80211_is_nullfunc(fc)) |
1873 | return QSLT_HIGH; |
1874 | |
1875 | + /* Kernel commit 1bf4bbb4024dcdab changed EAPOL packets to use |
1876 | + * queue V0 at priority 7; however, the RTL8192SE appears to have |
1877 | + * that queue at priority 6 |
1878 | + */ |
1879 | + if (skb->priority == 7) |
1880 | + return QSLT_VO; |
1881 | return skb->priority; |
1882 | } |
1883 | |
1884 | diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c |
1885 | index c333dfd116b8..99f6bc5fa986 100644 |
1886 | --- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c |
1887 | +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c |
1888 | @@ -880,14 +880,25 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw) |
1889 | bool rtstatus = true; |
1890 | int err; |
1891 | u8 tmp_u1b; |
1892 | + unsigned long flags; |
1893 | |
1894 | rtlpriv->rtlhal.being_init_adapter = true; |
1895 | + /* As this function can take a very long time (up to 350 ms) |
1896 | + * and can be called with irqs disabled, reenable the irqs |
1897 | + * to let the other devices continue being serviced. |
1898 | + * |
1899 | + * It is safe doing so since our own interrupts will only be enabled |
1900 | + * in a subsequent step. |
1901 | + */ |
1902 | + local_save_flags(flags); |
1903 | + local_irq_enable(); |
1904 | + |
1905 | rtlpriv->intf_ops->disable_aspm(hw); |
1906 | rtstatus = _rtl8712e_init_mac(hw); |
1907 | if (rtstatus != true) { |
1908 | RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); |
1909 | err = 1; |
1910 | - return err; |
1911 | + goto exit; |
1912 | } |
1913 | |
1914 | err = rtl8723ae_download_fw(hw); |
1915 | @@ -895,8 +906,7 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw) |
1916 | RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, |
1917 | "Failed to download FW. Init HW without FW now..\n"); |
1918 | err = 1; |
1919 | - rtlhal->fw_ready = false; |
1920 | - return err; |
1921 | + goto exit; |
1922 | } else { |
1923 | rtlhal->fw_ready = true; |
1924 | } |
1925 | @@ -971,6 +981,8 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw) |
1926 | RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n"); |
1927 | } |
1928 | rtl8723ae_dm_init(hw); |
1929 | +exit: |
1930 | + local_irq_restore(flags); |
1931 | rtlpriv->rtlhal.being_init_adapter = false; |
1932 | return err; |
1933 | } |
1934 | diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c |
1935 | index 92ed4b2e3c07..c862f9c0e9ce 100644 |
1936 | --- a/drivers/pinctrl/pinctrl-as3722.c |
1937 | +++ b/drivers/pinctrl/pinctrl-as3722.c |
1938 | @@ -64,7 +64,6 @@ struct as3722_pin_function { |
1939 | }; |
1940 | |
1941 | struct as3722_gpio_pin_control { |
1942 | - bool enable_gpio_invert; |
1943 | unsigned mode_prop; |
1944 | int io_function; |
1945 | }; |
1946 | @@ -320,10 +319,8 @@ static int as3722_pinctrl_gpio_set_direction(struct pinctrl_dev *pctldev, |
1947 | return mode; |
1948 | } |
1949 | |
1950 | - if (as_pci->gpio_control[offset].enable_gpio_invert) |
1951 | - mode |= AS3722_GPIO_INV; |
1952 | - |
1953 | - return as3722_write(as3722, AS3722_GPIOn_CONTROL_REG(offset), mode); |
1954 | + return as3722_update_bits(as3722, AS3722_GPIOn_CONTROL_REG(offset), |
1955 | + AS3722_GPIO_MODE_MASK, mode); |
1956 | } |
1957 | |
1958 | static const struct pinmux_ops as3722_pinmux_ops = { |
1959 | @@ -496,10 +493,18 @@ static void as3722_gpio_set(struct gpio_chip *chip, unsigned offset, |
1960 | { |
1961 | struct as3722_pctrl_info *as_pci = to_as_pci(chip); |
1962 | struct as3722 *as3722 = as_pci->as3722; |
1963 | - int en_invert = as_pci->gpio_control[offset].enable_gpio_invert; |
1964 | + int en_invert; |
1965 | u32 val; |
1966 | int ret; |
1967 | |
1968 | + ret = as3722_read(as3722, AS3722_GPIOn_CONTROL_REG(offset), &val); |
1969 | + if (ret < 0) { |
1970 | + dev_err(as_pci->dev, |
1971 | + "GPIO_CONTROL%d_REG read failed: %d\n", offset, ret); |
1972 | + return; |
1973 | + } |
1974 | + en_invert = !!(val & AS3722_GPIO_INV); |
1975 | + |
1976 | if (value) |
1977 | val = (en_invert) ? 0 : AS3722_GPIOn_SIGNAL(offset); |
1978 | else |
1979 | diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c |
1980 | index defb6afc1409..e2a91c845ac9 100644 |
1981 | --- a/drivers/platform/x86/thinkpad_acpi.c |
1982 | +++ b/drivers/platform/x86/thinkpad_acpi.c |
1983 | @@ -8447,9 +8447,21 @@ static void mute_led_exit(void) |
1984 | tpacpi_led_set(i, false); |
1985 | } |
1986 | |
1987 | +static void mute_led_resume(void) |
1988 | +{ |
1989 | + int i; |
1990 | + |
1991 | + for (i = 0; i < TPACPI_LED_MAX; i++) { |
1992 | + struct tp_led_table *t = &led_tables[i]; |
1993 | + if (t->state >= 0) |
1994 | + mute_led_on_off(t, t->state); |
1995 | + } |
1996 | +} |
1997 | + |
1998 | static struct ibm_struct mute_led_driver_data = { |
1999 | .name = "mute_led", |
2000 | .exit = mute_led_exit, |
2001 | + .resume = mute_led_resume, |
2002 | }; |
2003 | |
2004 | /**************************************************************************** |
2005 | diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c |
2006 | index 9f0ea6cb6922..e3bf885f4a6c 100644 |
2007 | --- a/drivers/s390/cio/chsc.c |
2008 | +++ b/drivers/s390/cio/chsc.c |
2009 | @@ -541,18 +541,27 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) |
2010 | |
2011 | static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) |
2012 | { |
2013 | - do { |
2014 | + static int ntsm_unsupported; |
2015 | + |
2016 | + while (true) { |
2017 | memset(sei, 0, sizeof(*sei)); |
2018 | sei->request.length = 0x0010; |
2019 | sei->request.code = 0x000e; |
2020 | - sei->ntsm = ntsm; |
2021 | + if (!ntsm_unsupported) |
2022 | + sei->ntsm = ntsm; |
2023 | |
2024 | if (chsc(sei)) |
2025 | break; |
2026 | |
2027 | if (sei->response.code != 0x0001) { |
2028 | - CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", |
2029 | - sei->response.code); |
2030 | + CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n", |
2031 | + sei->response.code, sei->ntsm); |
2032 | + |
2033 | + if (sei->response.code == 3 && sei->ntsm) { |
2034 | + /* Fallback for old firmware. */ |
2035 | + ntsm_unsupported = 1; |
2036 | + continue; |
2037 | + } |
2038 | break; |
2039 | } |
2040 | |
2041 | @@ -568,7 +577,10 @@ static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) |
2042 | CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt); |
2043 | break; |
2044 | } |
2045 | - } while (sei->u.nt0_area.flags & 0x80); |
2046 | + |
2047 | + if (!(sei->u.nt0_area.flags & 0x80)) |
2048 | + break; |
2049 | + } |
2050 | } |
2051 | |
2052 | /* |
2053 | diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c |
2054 | index 7f0af4fcc001..6fd7d40b2c4d 100644 |
2055 | --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c |
2056 | +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c |
2057 | @@ -8293,7 +8293,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state) |
2058 | |
2059 | mpt2sas_base_free_resources(ioc); |
2060 | pci_save_state(pdev); |
2061 | - pci_disable_device(pdev); |
2062 | pci_set_power_state(pdev, device_state); |
2063 | return 0; |
2064 | } |
2065 | diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c |
2066 | index 16bfd50cd3fe..db3b494e5926 100644 |
2067 | --- a/drivers/scsi/virtio_scsi.c |
2068 | +++ b/drivers/scsi/virtio_scsi.c |
2069 | @@ -750,8 +750,12 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) |
2070 | |
2071 | vscsi->affinity_hint_set = true; |
2072 | } else { |
2073 | - for (i = 0; i < vscsi->num_queues; i++) |
2074 | + for (i = 0; i < vscsi->num_queues; i++) { |
2075 | + if (!vscsi->req_vqs[i].vq) |
2076 | + continue; |
2077 | + |
2078 | virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); |
2079 | + } |
2080 | |
2081 | vscsi->affinity_hint_set = false; |
2082 | } |
2083 | diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c |
2084 | index 94f9e3a38412..0ff7fda0742f 100644 |
2085 | --- a/drivers/tty/hvc/hvc_console.c |
2086 | +++ b/drivers/tty/hvc/hvc_console.c |
2087 | @@ -190,7 +190,7 @@ static struct tty_driver *hvc_console_device(struct console *c, int *index) |
2088 | return hvc_driver; |
2089 | } |
2090 | |
2091 | -static int __init hvc_console_setup(struct console *co, char *options) |
2092 | +static int hvc_console_setup(struct console *co, char *options) |
2093 | { |
2094 | if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES) |
2095 | return -ENODEV; |
2096 | diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c |
2097 | index d15624c1b751..e36d1f5ca191 100644 |
2098 | --- a/drivers/tty/n_tty.c |
2099 | +++ b/drivers/tty/n_tty.c |
2100 | @@ -2356,8 +2356,12 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file, |
2101 | if (tty->ops->flush_chars) |
2102 | tty->ops->flush_chars(tty); |
2103 | } else { |
2104 | + struct n_tty_data *ldata = tty->disc_data; |
2105 | + |
2106 | while (nr > 0) { |
2107 | + mutex_lock(&ldata->output_lock); |
2108 | c = tty->ops->write(tty, b, nr); |
2109 | + mutex_unlock(&ldata->output_lock); |
2110 | if (c < 0) { |
2111 | retval = c; |
2112 | goto break_out; |
2113 | diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c |
2114 | index 69932b7556cf..2798a23a0834 100644 |
2115 | --- a/drivers/tty/serial/8250/8250_core.c |
2116 | +++ b/drivers/tty/serial/8250/8250_core.c |
2117 | @@ -555,7 +555,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep) |
2118 | */ |
2119 | if ((p->port.type == PORT_XR17V35X) || |
2120 | (p->port.type == PORT_XR17D15X)) { |
2121 | - serial_out(p, UART_EXAR_SLEEP, 0xff); |
2122 | + serial_out(p, UART_EXAR_SLEEP, sleep ? 0xff : 0); |
2123 | return; |
2124 | } |
2125 | |
2126 | diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c |
2127 | index 8ebd9f88a6f6..cf78d1985cd8 100644 |
2128 | --- a/drivers/tty/tty_buffer.c |
2129 | +++ b/drivers/tty/tty_buffer.c |
2130 | @@ -258,7 +258,11 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size, |
2131 | n->flags = flags; |
2132 | buf->tail = n; |
2133 | b->commit = b->used; |
2134 | - smp_mb(); |
2135 | + /* paired w/ barrier in flush_to_ldisc(); ensures the |
2136 | + * latest commit value can be read before the head is |
2137 | + * advanced to the next buffer |
2138 | + */ |
2139 | + smp_wmb(); |
2140 | b->next = n; |
2141 | } else if (change) |
2142 | size = 0; |
2143 | @@ -444,17 +448,24 @@ static void flush_to_ldisc(struct work_struct *work) |
2144 | |
2145 | while (1) { |
2146 | struct tty_buffer *head = buf->head; |
2147 | + struct tty_buffer *next; |
2148 | int count; |
2149 | |
2150 | /* Ldisc or user is trying to gain exclusive access */ |
2151 | if (atomic_read(&buf->priority)) |
2152 | break; |
2153 | |
2154 | + next = head->next; |
2155 | + /* paired w/ barrier in __tty_buffer_request_room(); |
2156 | + * ensures commit value read is not stale if the head |
2157 | + * is advancing to the next buffer |
2158 | + */ |
2159 | + smp_rmb(); |
2160 | count = head->commit - head->read; |
2161 | if (!count) { |
2162 | - if (head->next == NULL) |
2163 | + if (next == NULL) |
2164 | break; |
2165 | - buf->head = head->next; |
2166 | + buf->head = next; |
2167 | tty_buffer_free(port, head); |
2168 | continue; |
2169 | } |
2170 | diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/aty/mach64_accel.c |
2171 | index e45833ce975b..182bd680141f 100644 |
2172 | --- a/drivers/video/aty/mach64_accel.c |
2173 | +++ b/drivers/video/aty/mach64_accel.c |
2174 | @@ -4,6 +4,7 @@ |
2175 | */ |
2176 | |
2177 | #include <linux/delay.h> |
2178 | +#include <asm/unaligned.h> |
2179 | #include <linux/fb.h> |
2180 | #include <video/mach64.h> |
2181 | #include "atyfb.h" |
2182 | @@ -419,7 +420,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image) |
2183 | u32 *pbitmap, dwords = (src_bytes + 3) / 4; |
2184 | for (pbitmap = (u32*)(image->data); dwords; dwords--, pbitmap++) { |
2185 | wait_for_fifo(1, par); |
2186 | - aty_st_le32(HOST_DATA0, le32_to_cpup(pbitmap), par); |
2187 | + aty_st_le32(HOST_DATA0, get_unaligned_le32(pbitmap), par); |
2188 | } |
2189 | } |
2190 | |
2191 | diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c |
2192 | index 95ec042ddbf8..0fe02e22d9a4 100644 |
2193 | --- a/drivers/video/aty/mach64_cursor.c |
2194 | +++ b/drivers/video/aty/mach64_cursor.c |
2195 | @@ -5,6 +5,7 @@ |
2196 | #include <linux/fb.h> |
2197 | #include <linux/init.h> |
2198 | #include <linux/string.h> |
2199 | +#include "../fb_draw.h" |
2200 | |
2201 | #include <asm/io.h> |
2202 | |
2203 | @@ -157,24 +158,33 @@ static int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor) |
2204 | |
2205 | for (i = 0; i < height; i++) { |
2206 | for (j = 0; j < width; j++) { |
2207 | + u16 l = 0xaaaa; |
2208 | b = *src++; |
2209 | m = *msk++; |
2210 | switch (cursor->rop) { |
2211 | case ROP_XOR: |
2212 | // Upper 4 bits of mask data |
2213 | - fb_writeb(cursor_bits_lookup[(b ^ m) >> 4], dst++); |
2214 | + l = cursor_bits_lookup[(b ^ m) >> 4] | |
2215 | // Lower 4 bits of mask |
2216 | - fb_writeb(cursor_bits_lookup[(b ^ m) & 0x0f], |
2217 | - dst++); |
2218 | + (cursor_bits_lookup[(b ^ m) & 0x0f] << 8); |
2219 | break; |
2220 | case ROP_COPY: |
2221 | // Upper 4 bits of mask data |
2222 | - fb_writeb(cursor_bits_lookup[(b & m) >> 4], dst++); |
2223 | + l = cursor_bits_lookup[(b & m) >> 4] | |
2224 | // Lower 4 bits of mask |
2225 | - fb_writeb(cursor_bits_lookup[(b & m) & 0x0f], |
2226 | - dst++); |
2227 | + (cursor_bits_lookup[(b & m) & 0x0f] << 8); |
2228 | break; |
2229 | } |
2230 | + /* |
2231 | + * If cursor size is not a multiple of 8 characters |
2232 | + * we must pad it with transparent pattern (0xaaaa). |
2233 | + */ |
2234 | + if ((j + 1) * 8 > cursor->image.width) { |
2235 | + l = comp(l, 0xaaaa, |
2236 | + (1 << ((cursor->image.width & 7) * 2)) - 1); |
2237 | + } |
2238 | + fb_writeb(l & 0xff, dst++); |
2239 | + fb_writeb(l >> 8, dst++); |
2240 | } |
2241 | dst += offset; |
2242 | } |
2243 | diff --git a/drivers/video/cfbcopyarea.c b/drivers/video/cfbcopyarea.c |
2244 | index bb5a96b1645d..bcb57235fcc7 100644 |
2245 | --- a/drivers/video/cfbcopyarea.c |
2246 | +++ b/drivers/video/cfbcopyarea.c |
2247 | @@ -43,13 +43,22 @@ |
2248 | */ |
2249 | |
2250 | static void |
2251 | -bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, |
2252 | - const unsigned long __iomem *src, int src_idx, int bits, |
2253 | +bitcpy(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx, |
2254 | + const unsigned long __iomem *src, unsigned src_idx, int bits, |
2255 | unsigned n, u32 bswapmask) |
2256 | { |
2257 | unsigned long first, last; |
2258 | int const shift = dst_idx-src_idx; |
2259 | - int left, right; |
2260 | + |
2261 | +#if 0 |
2262 | + /* |
2263 | + * If you suspect bug in this function, compare it with this simple |
2264 | + * memmove implementation. |
2265 | + */ |
2266 | + fb_memmove((char *)dst + ((dst_idx & (bits - 1))) / 8, |
2267 | + (char *)src + ((src_idx & (bits - 1))) / 8, n / 8); |
2268 | + return; |
2269 | +#endif |
2270 | |
2271 | first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask); |
2272 | last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask); |
2273 | @@ -98,9 +107,8 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, |
2274 | unsigned long d0, d1; |
2275 | int m; |
2276 | |
2277 | - right = shift & (bits - 1); |
2278 | - left = -shift & (bits - 1); |
2279 | - bswapmask &= shift; |
2280 | + int const left = shift & (bits - 1); |
2281 | + int const right = -shift & (bits - 1); |
2282 | |
2283 | if (dst_idx+n <= bits) { |
2284 | // Single destination word |
2285 | @@ -110,15 +118,15 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, |
2286 | d0 = fb_rev_pixels_in_long(d0, bswapmask); |
2287 | if (shift > 0) { |
2288 | // Single source word |
2289 | - d0 >>= right; |
2290 | + d0 <<= left; |
2291 | } else if (src_idx+n <= bits) { |
2292 | // Single source word |
2293 | - d0 <<= left; |
2294 | + d0 >>= right; |
2295 | } else { |
2296 | // 2 source words |
2297 | d1 = FB_READL(src + 1); |
2298 | d1 = fb_rev_pixels_in_long(d1, bswapmask); |
2299 | - d0 = d0<<left | d1>>right; |
2300 | + d0 = d0 >> right | d1 << left; |
2301 | } |
2302 | d0 = fb_rev_pixels_in_long(d0, bswapmask); |
2303 | FB_WRITEL(comp(d0, FB_READL(dst), first), dst); |
2304 | @@ -135,60 +143,59 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, |
2305 | if (shift > 0) { |
2306 | // Single source word |
2307 | d1 = d0; |
2308 | - d0 >>= right; |
2309 | - dst++; |
2310 | + d0 <<= left; |
2311 | n -= bits - dst_idx; |
2312 | } else { |
2313 | // 2 source words |
2314 | d1 = FB_READL(src++); |
2315 | d1 = fb_rev_pixels_in_long(d1, bswapmask); |
2316 | |
2317 | - d0 = d0<<left | d1>>right; |
2318 | - dst++; |
2319 | + d0 = d0 >> right | d1 << left; |
2320 | n -= bits - dst_idx; |
2321 | } |
2322 | d0 = fb_rev_pixels_in_long(d0, bswapmask); |
2323 | FB_WRITEL(comp(d0, FB_READL(dst), first), dst); |
2324 | d0 = d1; |
2325 | + dst++; |
2326 | |
2327 | // Main chunk |
2328 | m = n % bits; |
2329 | n /= bits; |
2330 | while ((n >= 4) && !bswapmask) { |
2331 | d1 = FB_READL(src++); |
2332 | - FB_WRITEL(d0 << left | d1 >> right, dst++); |
2333 | + FB_WRITEL(d0 >> right | d1 << left, dst++); |
2334 | d0 = d1; |
2335 | d1 = FB_READL(src++); |
2336 | - FB_WRITEL(d0 << left | d1 >> right, dst++); |
2337 | + FB_WRITEL(d0 >> right | d1 << left, dst++); |
2338 | d0 = d1; |
2339 | d1 = FB_READL(src++); |
2340 | - FB_WRITEL(d0 << left | d1 >> right, dst++); |
2341 | + FB_WRITEL(d0 >> right | d1 << left, dst++); |
2342 | d0 = d1; |
2343 | d1 = FB_READL(src++); |
2344 | - FB_WRITEL(d0 << left | d1 >> right, dst++); |
2345 | + FB_WRITEL(d0 >> right | d1 << left, dst++); |
2346 | d0 = d1; |
2347 | n -= 4; |
2348 | } |
2349 | while (n--) { |
2350 | d1 = FB_READL(src++); |
2351 | d1 = fb_rev_pixels_in_long(d1, bswapmask); |
2352 | - d0 = d0 << left | d1 >> right; |
2353 | + d0 = d0 >> right | d1 << left; |
2354 | d0 = fb_rev_pixels_in_long(d0, bswapmask); |
2355 | FB_WRITEL(d0, dst++); |
2356 | d0 = d1; |
2357 | } |
2358 | |
2359 | // Trailing bits |
2360 | - if (last) { |
2361 | - if (m <= right) { |
2362 | + if (m) { |
2363 | + if (m <= bits - right) { |
2364 | // Single source word |
2365 | - d0 <<= left; |
2366 | + d0 >>= right; |
2367 | } else { |
2368 | // 2 source words |
2369 | d1 = FB_READL(src); |
2370 | d1 = fb_rev_pixels_in_long(d1, |
2371 | bswapmask); |
2372 | - d0 = d0<<left | d1>>right; |
2373 | + d0 = d0 >> right | d1 << left; |
2374 | } |
2375 | d0 = fb_rev_pixels_in_long(d0, bswapmask); |
2376 | FB_WRITEL(comp(d0, FB_READL(dst), last), dst); |
2377 | @@ -202,43 +209,46 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, |
2378 | */ |
2379 | |
2380 | static void |
2381 | -bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, |
2382 | - const unsigned long __iomem *src, int src_idx, int bits, |
2383 | +bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx, |
2384 | + const unsigned long __iomem *src, unsigned src_idx, int bits, |
2385 | unsigned n, u32 bswapmask) |
2386 | { |
2387 | unsigned long first, last; |
2388 | int shift; |
2389 | |
2390 | - dst += (n-1)/bits; |
2391 | - src += (n-1)/bits; |
2392 | - if ((n-1) % bits) { |
2393 | - dst_idx += (n-1) % bits; |
2394 | - dst += dst_idx >> (ffs(bits) - 1); |
2395 | - dst_idx &= bits - 1; |
2396 | - src_idx += (n-1) % bits; |
2397 | - src += src_idx >> (ffs(bits) - 1); |
2398 | - src_idx &= bits - 1; |
2399 | - } |
2400 | +#if 0 |
2401 | + /* |
2402 | + * If you suspect bug in this function, compare it with this simple |
2403 | + * memmove implementation. |
2404 | + */ |
2405 | + fb_memmove((char *)dst + ((dst_idx & (bits - 1))) / 8, |
2406 | + (char *)src + ((src_idx & (bits - 1))) / 8, n / 8); |
2407 | + return; |
2408 | +#endif |
2409 | + |
2410 | + dst += (dst_idx + n - 1) / bits; |
2411 | + src += (src_idx + n - 1) / bits; |
2412 | + dst_idx = (dst_idx + n - 1) % bits; |
2413 | + src_idx = (src_idx + n - 1) % bits; |
2414 | |
2415 | shift = dst_idx-src_idx; |
2416 | |
2417 | - first = fb_shifted_pixels_mask_long(p, bits - 1 - dst_idx, bswapmask); |
2418 | - last = ~fb_shifted_pixels_mask_long(p, bits - 1 - ((dst_idx-n) % bits), |
2419 | - bswapmask); |
2420 | + first = ~fb_shifted_pixels_mask_long(p, (dst_idx + 1) % bits, bswapmask); |
2421 | + last = fb_shifted_pixels_mask_long(p, (bits + dst_idx + 1 - n) % bits, bswapmask); |
2422 | |
2423 | if (!shift) { |
2424 | // Same alignment for source and dest |
2425 | |
2426 | if ((unsigned long)dst_idx+1 >= n) { |
2427 | // Single word |
2428 | - if (last) |
2429 | - first &= last; |
2430 | - FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst); |
2431 | + if (first) |
2432 | + last &= first; |
2433 | + FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst); |
2434 | } else { |
2435 | // Multiple destination words |
2436 | |
2437 | // Leading bits |
2438 | - if (first != ~0UL) { |
2439 | + if (first) { |
2440 | FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst); |
2441 | dst--; |
2442 | src--; |
2443 | @@ -262,7 +272,7 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, |
2444 | FB_WRITEL(FB_READL(src--), dst--); |
2445 | |
2446 | // Trailing bits |
2447 | - if (last) |
2448 | + if (last != -1UL) |
2449 | FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst); |
2450 | } |
2451 | } else { |
2452 | @@ -270,29 +280,28 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, |
2453 | unsigned long d0, d1; |
2454 | int m; |
2455 | |
2456 | - int const left = -shift & (bits-1); |
2457 | - int const right = shift & (bits-1); |
2458 | - bswapmask &= shift; |
2459 | + int const left = shift & (bits-1); |
2460 | + int const right = -shift & (bits-1); |
2461 | |
2462 | if ((unsigned long)dst_idx+1 >= n) { |
2463 | // Single destination word |
2464 | - if (last) |
2465 | - first &= last; |
2466 | + if (first) |
2467 | + last &= first; |
2468 | d0 = FB_READL(src); |
2469 | if (shift < 0) { |
2470 | // Single source word |
2471 | - d0 <<= left; |
2472 | + d0 >>= right; |
2473 | } else if (1+(unsigned long)src_idx >= n) { |
2474 | // Single source word |
2475 | - d0 >>= right; |
2476 | + d0 <<= left; |
2477 | } else { |
2478 | // 2 source words |
2479 | d1 = FB_READL(src - 1); |
2480 | d1 = fb_rev_pixels_in_long(d1, bswapmask); |
2481 | - d0 = d0>>right | d1<<left; |
2482 | + d0 = d0 << left | d1 >> right; |
2483 | } |
2484 | d0 = fb_rev_pixels_in_long(d0, bswapmask); |
2485 | - FB_WRITEL(comp(d0, FB_READL(dst), first), dst); |
2486 | + FB_WRITEL(comp(d0, FB_READL(dst), last), dst); |
2487 | } else { |
2488 | // Multiple destination words |
2489 | /** We must always remember the last value read, because in case |
2490 | @@ -307,12 +316,12 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, |
2491 | if (shift < 0) { |
2492 | // Single source word |
2493 | d1 = d0; |
2494 | - d0 <<= left; |
2495 | + d0 >>= right; |
2496 | } else { |
2497 | // 2 source words |
2498 | d1 = FB_READL(src--); |
2499 | d1 = fb_rev_pixels_in_long(d1, bswapmask); |
2500 | - d0 = d0>>right | d1<<left; |
2501 | + d0 = d0 << left | d1 >> right; |
2502 | } |
2503 | d0 = fb_rev_pixels_in_long(d0, bswapmask); |
2504 | FB_WRITEL(comp(d0, FB_READL(dst), first), dst); |
2505 | @@ -325,39 +334,39 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx, |
2506 | n /= bits; |
2507 | while ((n >= 4) && !bswapmask) { |
2508 | d1 = FB_READL(src--); |
2509 | - FB_WRITEL(d0 >> right | d1 << left, dst--); |
2510 | + FB_WRITEL(d0 << left | d1 >> right, dst--); |
2511 | d0 = d1; |
2512 | d1 = FB_READL(src--); |
2513 | - FB_WRITEL(d0 >> right | d1 << left, dst--); |
2514 | + FB_WRITEL(d0 << left | d1 >> right, dst--); |
2515 | d0 = d1; |
2516 | d1 = FB_READL(src--); |
2517 | - FB_WRITEL(d0 >> right | d1 << left, dst--); |
2518 | + FB_WRITEL(d0 << left | d1 >> right, dst--); |
2519 | d0 = d1; |
2520 | d1 = FB_READL(src--); |
2521 | - FB_WRITEL(d0 >> right | d1 << left, dst--); |
2522 | + FB_WRITEL(d0 << left | d1 >> right, dst--); |
2523 | d0 = d1; |
2524 | n -= 4; |
2525 | } |
2526 | while (n--) { |
2527 | d1 = FB_READL(src--); |
2528 | d1 = fb_rev_pixels_in_long(d1, bswapmask); |
2529 | - d0 = d0 >> right | d1 << left; |
2530 | + d0 = d0 << left | d1 >> right; |
2531 | d0 = fb_rev_pixels_in_long(d0, bswapmask); |
2532 | FB_WRITEL(d0, dst--); |
2533 | d0 = d1; |
2534 | } |
2535 | |
2536 | // Trailing bits |
2537 | - if (last) { |
2538 | - if (m <= left) { |
2539 | + if (m) { |
2540 | + if (m <= bits - left) { |
2541 | // Single source word |
2542 | - d0 >>= right; |
2543 | + d0 <<= left; |
2544 | } else { |
2545 | // 2 source words |
2546 | d1 = FB_READL(src); |
2547 | d1 = fb_rev_pixels_in_long(d1, |
2548 | bswapmask); |
2549 | - d0 = d0>>right | d1<<left; |
2550 | + d0 = d0 << left | d1 >> right; |
2551 | } |
2552 | d0 = fb_rev_pixels_in_long(d0, bswapmask); |
2553 | FB_WRITEL(comp(d0, FB_READL(dst), last), dst); |
2554 | @@ -371,9 +380,9 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area) |
2555 | u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy; |
2556 | u32 height = area->height, width = area->width; |
2557 | unsigned long const bits_per_line = p->fix.line_length*8u; |
2558 | - unsigned long __iomem *dst = NULL, *src = NULL; |
2559 | + unsigned long __iomem *base = NULL; |
2560 | int bits = BITS_PER_LONG, bytes = bits >> 3; |
2561 | - int dst_idx = 0, src_idx = 0, rev_copy = 0; |
2562 | + unsigned dst_idx = 0, src_idx = 0, rev_copy = 0; |
2563 | u32 bswapmask = fb_compute_bswapmask(p); |
2564 | |
2565 | if (p->state != FBINFO_STATE_RUNNING) |
2566 | @@ -389,7 +398,7 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area) |
2567 | |
2568 | // split the base of the framebuffer into a long-aligned address and the |
2569 | // index of the first bit |
2570 | - dst = src = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1)); |
2571 | + base = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1)); |
2572 | dst_idx = src_idx = 8*((unsigned long)p->screen_base & (bytes-1)); |
2573 | // add offset of source and target area |
2574 | dst_idx += dy*bits_per_line + dx*p->var.bits_per_pixel; |
2575 | @@ -402,20 +411,14 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area) |
2576 | while (height--) { |
2577 | dst_idx -= bits_per_line; |
2578 | src_idx -= bits_per_line; |
2579 | - dst += dst_idx >> (ffs(bits) - 1); |
2580 | - dst_idx &= (bytes - 1); |
2581 | - src += src_idx >> (ffs(bits) - 1); |
2582 | - src_idx &= (bytes - 1); |
2583 | - bitcpy_rev(p, dst, dst_idx, src, src_idx, bits, |
2584 | + bitcpy_rev(p, base + (dst_idx / bits), dst_idx % bits, |
2585 | + base + (src_idx / bits), src_idx % bits, bits, |
2586 | width*p->var.bits_per_pixel, bswapmask); |
2587 | } |
2588 | } else { |
2589 | while (height--) { |
2590 | - dst += dst_idx >> (ffs(bits) - 1); |
2591 | - dst_idx &= (bytes - 1); |
2592 | - src += src_idx >> (ffs(bits) - 1); |
2593 | - src_idx &= (bytes - 1); |
2594 | - bitcpy(p, dst, dst_idx, src, src_idx, bits, |
2595 | + bitcpy(p, base + (dst_idx / bits), dst_idx % bits, |
2596 | + base + (src_idx / bits), src_idx % bits, bits, |
2597 | width*p->var.bits_per_pixel, bswapmask); |
2598 | dst_idx += bits_per_line; |
2599 | src_idx += bits_per_line; |
2600 | diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c |
2601 | index 8335a6fe303e..0d5cb85d071a 100644 |
2602 | --- a/drivers/video/matrox/matroxfb_accel.c |
2603 | +++ b/drivers/video/matrox/matroxfb_accel.c |
2604 | @@ -192,10 +192,18 @@ void matrox_cfbX_init(struct matrox_fb_info *minfo) |
2605 | minfo->accel.m_dwg_rect = M_DWG_TRAP | M_DWG_SOLID | M_DWG_ARZERO | M_DWG_SGNZERO | M_DWG_SHIFTZERO; |
2606 | if (isMilleniumII(minfo)) minfo->accel.m_dwg_rect |= M_DWG_TRANSC; |
2607 | minfo->accel.m_opmode = mopmode; |
2608 | + minfo->accel.m_access = maccess; |
2609 | + minfo->accel.m_pitch = mpitch; |
2610 | } |
2611 | |
2612 | EXPORT_SYMBOL(matrox_cfbX_init); |
2613 | |
2614 | +static void matrox_accel_restore_maccess(struct matrox_fb_info *minfo) |
2615 | +{ |
2616 | + mga_outl(M_MACCESS, minfo->accel.m_access); |
2617 | + mga_outl(M_PITCH, minfo->accel.m_pitch); |
2618 | +} |
2619 | + |
2620 | static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy, |
2621 | int sx, int dy, int dx, int height, int width) |
2622 | { |
2623 | @@ -207,7 +215,8 @@ static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy, |
2624 | CRITBEGIN |
2625 | |
2626 | if ((dy < sy) || ((dy == sy) && (dx <= sx))) { |
2627 | - mga_fifo(2); |
2628 | + mga_fifo(4); |
2629 | + matrox_accel_restore_maccess(minfo); |
2630 | mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_SGNZERO | |
2631 | M_DWG_BFCOL | M_DWG_REPLACE); |
2632 | mga_outl(M_AR5, vxres); |
2633 | @@ -215,7 +224,8 @@ static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy, |
2634 | start = sy*vxres+sx+curr_ydstorg(minfo); |
2635 | end = start+width; |
2636 | } else { |
2637 | - mga_fifo(3); |
2638 | + mga_fifo(5); |
2639 | + matrox_accel_restore_maccess(minfo); |
2640 | mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_BFCOL | M_DWG_REPLACE); |
2641 | mga_outl(M_SGN, 5); |
2642 | mga_outl(M_AR5, -vxres); |
2643 | @@ -224,7 +234,8 @@ static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy, |
2644 | start = end+width; |
2645 | dy += height-1; |
2646 | } |
2647 | - mga_fifo(4); |
2648 | + mga_fifo(6); |
2649 | + matrox_accel_restore_maccess(minfo); |
2650 | mga_outl(M_AR0, end); |
2651 | mga_outl(M_AR3, start); |
2652 | mga_outl(M_FXBNDRY, ((dx+width)<<16) | dx); |
2653 | @@ -246,7 +257,8 @@ static void matrox_accel_bmove_lin(struct matrox_fb_info *minfo, int vxres, |
2654 | CRITBEGIN |
2655 | |
2656 | if ((dy < sy) || ((dy == sy) && (dx <= sx))) { |
2657 | - mga_fifo(2); |
2658 | + mga_fifo(4); |
2659 | + matrox_accel_restore_maccess(minfo); |
2660 | mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_SGNZERO | |
2661 | M_DWG_BFCOL | M_DWG_REPLACE); |
2662 | mga_outl(M_AR5, vxres); |
2663 | @@ -254,7 +266,8 @@ static void matrox_accel_bmove_lin(struct matrox_fb_info *minfo, int vxres, |
2664 | start = sy*vxres+sx+curr_ydstorg(minfo); |
2665 | end = start+width; |
2666 | } else { |
2667 | - mga_fifo(3); |
2668 | + mga_fifo(5); |
2669 | + matrox_accel_restore_maccess(minfo); |
2670 | mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_BFCOL | M_DWG_REPLACE); |
2671 | mga_outl(M_SGN, 5); |
2672 | mga_outl(M_AR5, -vxres); |
2673 | @@ -263,7 +276,8 @@ static void matrox_accel_bmove_lin(struct matrox_fb_info *minfo, int vxres, |
2674 | start = end+width; |
2675 | dy += height-1; |
2676 | } |
2677 | - mga_fifo(5); |
2678 | + mga_fifo(7); |
2679 | + matrox_accel_restore_maccess(minfo); |
2680 | mga_outl(M_AR0, end); |
2681 | mga_outl(M_AR3, start); |
2682 | mga_outl(M_FXBNDRY, ((dx+width)<<16) | dx); |
2683 | @@ -298,7 +312,8 @@ static void matroxfb_accel_clear(struct matrox_fb_info *minfo, u_int32_t color, |
2684 | |
2685 | CRITBEGIN |
2686 | |
2687 | - mga_fifo(5); |
2688 | + mga_fifo(7); |
2689 | + matrox_accel_restore_maccess(minfo); |
2690 | mga_outl(M_DWGCTL, minfo->accel.m_dwg_rect | M_DWG_REPLACE); |
2691 | mga_outl(M_FCOL, color); |
2692 | mga_outl(M_FXBNDRY, ((sx + width) << 16) | sx); |
2693 | @@ -341,7 +356,8 @@ static void matroxfb_cfb4_clear(struct matrox_fb_info *minfo, u_int32_t bgx, |
2694 | width >>= 1; |
2695 | sx >>= 1; |
2696 | if (width) { |
2697 | - mga_fifo(5); |
2698 | + mga_fifo(7); |
2699 | + matrox_accel_restore_maccess(minfo); |
2700 | mga_outl(M_DWGCTL, minfo->accel.m_dwg_rect | M_DWG_REPLACE2); |
2701 | mga_outl(M_FCOL, bgx); |
2702 | mga_outl(M_FXBNDRY, ((sx + width) << 16) | sx); |
2703 | @@ -415,7 +431,8 @@ static void matroxfb_1bpp_imageblit(struct matrox_fb_info *minfo, u_int32_t fgx, |
2704 | |
2705 | CRITBEGIN |
2706 | |
2707 | - mga_fifo(3); |
2708 | + mga_fifo(5); |
2709 | + matrox_accel_restore_maccess(minfo); |
2710 | if (easy) |
2711 | mga_outl(M_DWGCTL, M_DWG_ILOAD | M_DWG_SGNZERO | M_DWG_SHIFTZERO | M_DWG_BMONOWF | M_DWG_LINEAR | M_DWG_REPLACE); |
2712 | else |
2713 | @@ -425,7 +442,8 @@ static void matroxfb_1bpp_imageblit(struct matrox_fb_info *minfo, u_int32_t fgx, |
2714 | fxbndry = ((xx + width - 1) << 16) | xx; |
2715 | mmio = minfo->mmio.vbase; |
2716 | |
2717 | - mga_fifo(6); |
2718 | + mga_fifo(8); |
2719 | + matrox_accel_restore_maccess(minfo); |
2720 | mga_writel(mmio, M_FXBNDRY, fxbndry); |
2721 | mga_writel(mmio, M_AR0, ar0); |
2722 | mga_writel(mmio, M_AR3, 0); |
2723 | diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h |
2724 | index 11ed57bb704e..556d96ce40bf 100644 |
2725 | --- a/drivers/video/matrox/matroxfb_base.h |
2726 | +++ b/drivers/video/matrox/matroxfb_base.h |
2727 | @@ -307,6 +307,8 @@ struct matrox_accel_data { |
2728 | #endif |
2729 | u_int32_t m_dwg_rect; |
2730 | u_int32_t m_opmode; |
2731 | + u_int32_t m_access; |
2732 | + u_int32_t m_pitch; |
2733 | }; |
2734 | |
2735 | struct v4l2_queryctrl; |
2736 | diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c |
2737 | index 07c7df9ee77b..3d5bce517d8e 100644 |
2738 | --- a/drivers/video/tgafb.c |
2739 | +++ b/drivers/video/tgafb.c |
2740 | @@ -182,6 +182,8 @@ tgafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) |
2741 | |
2742 | if (var->xres_virtual != var->xres || var->yres_virtual != var->yres) |
2743 | return -EINVAL; |
2744 | + if (var->xres * var->yres * (var->bits_per_pixel >> 3) > info->fix.smem_len) |
2745 | + return -EINVAL; |
2746 | if (var->nonstd) |
2747 | return -EINVAL; |
2748 | if (1000000000 / var->pixclock > TGA_PLL_MAX_FREQ) |
2749 | @@ -262,6 +264,7 @@ tgafb_set_par(struct fb_info *info) |
2750 | par->yres = info->var.yres; |
2751 | par->pll_freq = pll_freq = 1000000000 / info->var.pixclock; |
2752 | par->bits_per_pixel = info->var.bits_per_pixel; |
2753 | + info->fix.line_length = par->xres * (par->bits_per_pixel >> 3); |
2754 | |
2755 | tga_type = par->tga_type; |
2756 | |
2757 | @@ -1136,222 +1139,57 @@ copyarea_line_32bpp(struct fb_info *info, u32 dy, u32 sy, |
2758 | __raw_writel(TGA_MODE_SBM_24BPP|TGA_MODE_SIMPLE, tga_regs+TGA_MODE_REG); |
2759 | } |
2760 | |
2761 | -/* The general case of forward copy in 8bpp mode. */ |
2762 | +/* The (almost) general case of backward copy in 8bpp mode. */ |
2763 | static inline void |
2764 | -copyarea_foreward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy, |
2765 | - u32 height, u32 width, u32 line_length) |
2766 | +copyarea_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy, |
2767 | + u32 height, u32 width, u32 line_length, |
2768 | + const struct fb_copyarea *area) |
2769 | { |
2770 | struct tga_par *par = (struct tga_par *) info->par; |
2771 | - unsigned long i, copied, left; |
2772 | - unsigned long dpos, spos, dalign, salign, yincr; |
2773 | - u32 smask_first, dmask_first, dmask_last; |
2774 | - int pixel_shift, need_prime, need_second; |
2775 | - unsigned long n64, n32, xincr_first; |
2776 | + unsigned i, yincr; |
2777 | + int depos, sepos, backward, last_step, step; |
2778 | + u32 mask_last; |
2779 | + unsigned n32; |
2780 | void __iomem *tga_regs; |
2781 | void __iomem *tga_fb; |
2782 | |
2783 | - yincr = line_length; |
2784 | - if (dy > sy) { |
2785 | - dy += height - 1; |
2786 | - sy += height - 1; |
2787 | - yincr = -yincr; |
2788 | - } |
2789 | - |
2790 | - /* Compute the offsets and alignments in the frame buffer. |
2791 | - More than anything else, these control how we do copies. */ |
2792 | - dpos = dy * line_length + dx; |
2793 | - spos = sy * line_length + sx; |
2794 | - dalign = dpos & 7; |
2795 | - salign = spos & 7; |
2796 | - dpos &= -8; |
2797 | - spos &= -8; |
2798 | - |
2799 | - /* Compute the value for the PIXELSHIFT register. This controls |
2800 | - both non-co-aligned source and destination and copy direction. */ |
2801 | - if (dalign >= salign) |
2802 | - pixel_shift = dalign - salign; |
2803 | - else |
2804 | - pixel_shift = 8 - (salign - dalign); |
2805 | - |
2806 | - /* Figure out if we need an additional priming step for the |
2807 | - residue register. */ |
2808 | - need_prime = (salign > dalign); |
2809 | - if (need_prime) |
2810 | - dpos -= 8; |
2811 | - |
2812 | - /* Begin by copying the leading unaligned destination. Copy enough |
2813 | - to make the next destination address 32-byte aligned. */ |
2814 | - copied = 32 - (dalign + (dpos & 31)); |
2815 | - if (copied == 32) |
2816 | - copied = 0; |
2817 | - xincr_first = (copied + 7) & -8; |
2818 | - smask_first = dmask_first = (1ul << copied) - 1; |
2819 | - smask_first <<= salign; |
2820 | - dmask_first <<= dalign + need_prime*8; |
2821 | - if (need_prime && copied > 24) |
2822 | - copied -= 8; |
2823 | - left = width - copied; |
2824 | - |
2825 | - /* Care for small copies. */ |
2826 | - if (copied > width) { |
2827 | - u32 t; |
2828 | - t = (1ul << width) - 1; |
2829 | - t <<= dalign + need_prime*8; |
2830 | - dmask_first &= t; |
2831 | - left = 0; |
2832 | - } |
2833 | - |
2834 | - /* Attempt to use 64-byte copies. This is only possible if the |
2835 | - source and destination are co-aligned at 64 bytes. */ |
2836 | - n64 = need_second = 0; |
2837 | - if ((dpos & 63) == (spos & 63) |
2838 | - && (height == 1 || line_length % 64 == 0)) { |
2839 | - /* We may need a 32-byte copy to ensure 64 byte alignment. */ |
2840 | - need_second = (dpos + xincr_first) & 63; |
2841 | - if ((need_second & 32) != need_second) |
2842 | - printk(KERN_ERR "tgafb: need_second wrong\n"); |
2843 | - if (left >= need_second + 64) { |
2844 | - left -= need_second; |
2845 | - n64 = left / 64; |
2846 | - left %= 64; |
2847 | - } else |
2848 | - need_second = 0; |
2849 | - } |
2850 | - |
2851 | - /* Copy trailing full 32-byte sections. This will be the main |
2852 | - loop if the 64 byte loop can't be used. */ |
2853 | - n32 = left / 32; |
2854 | - left %= 32; |
2855 | - |
2856 | - /* Copy the trailing unaligned destination. */ |
2857 | - dmask_last = (1ul << left) - 1; |
2858 | - |
2859 | - tga_regs = par->tga_regs_base; |
2860 | - tga_fb = par->tga_fb_base; |
2861 | - |
2862 | - /* Set up the MODE and PIXELSHIFT registers. */ |
2863 | - __raw_writel(TGA_MODE_SBM_8BPP|TGA_MODE_COPY, tga_regs+TGA_MODE_REG); |
2864 | - __raw_writel(pixel_shift, tga_regs+TGA_PIXELSHIFT_REG); |
2865 | - wmb(); |
2866 | - |
2867 | - for (i = 0; i < height; ++i) { |
2868 | - unsigned long j; |
2869 | - void __iomem *sfb; |
2870 | - void __iomem *dfb; |
2871 | - |
2872 | - sfb = tga_fb + spos; |
2873 | - dfb = tga_fb + dpos; |
2874 | - if (dmask_first) { |
2875 | - __raw_writel(smask_first, sfb); |
2876 | - wmb(); |
2877 | - __raw_writel(dmask_first, dfb); |
2878 | - wmb(); |
2879 | - sfb += xincr_first; |
2880 | - dfb += xincr_first; |
2881 | - } |
2882 | - |
2883 | - if (need_second) { |
2884 | - __raw_writel(0xffffffff, sfb); |
2885 | - wmb(); |
2886 | - __raw_writel(0xffffffff, dfb); |
2887 | - wmb(); |
2888 | - sfb += 32; |
2889 | - dfb += 32; |
2890 | - } |
2891 | - |
2892 | - if (n64 && (((unsigned long)sfb | (unsigned long)dfb) & 63)) |
2893 | - printk(KERN_ERR |
2894 | - "tgafb: misaligned copy64 (s:%p, d:%p)\n", |
2895 | - sfb, dfb); |
2896 | - |
2897 | - for (j = 0; j < n64; ++j) { |
2898 | - __raw_writel(sfb - tga_fb, tga_regs+TGA_COPY64_SRC); |
2899 | - wmb(); |
2900 | - __raw_writel(dfb - tga_fb, tga_regs+TGA_COPY64_DST); |
2901 | - wmb(); |
2902 | - sfb += 64; |
2903 | - dfb += 64; |
2904 | - } |
2905 | - |
2906 | - for (j = 0; j < n32; ++j) { |
2907 | - __raw_writel(0xffffffff, sfb); |
2908 | - wmb(); |
2909 | - __raw_writel(0xffffffff, dfb); |
2910 | - wmb(); |
2911 | - sfb += 32; |
2912 | - dfb += 32; |
2913 | - } |
2914 | - |
2915 | - if (dmask_last) { |
2916 | - __raw_writel(0xffffffff, sfb); |
2917 | - wmb(); |
2918 | - __raw_writel(dmask_last, dfb); |
2919 | - wmb(); |
2920 | - } |
2921 | - |
2922 | - spos += yincr; |
2923 | - dpos += yincr; |
2924 | + /* Do acceleration only if we are aligned on 8 pixels */ |
2925 | + if ((dx | sx | width) & 7) { |
2926 | + cfb_copyarea(info, area); |
2927 | + return; |
2928 | } |
2929 | |
2930 | - /* Reset the MODE register to normal. */ |
2931 | - __raw_writel(TGA_MODE_SBM_8BPP|TGA_MODE_SIMPLE, tga_regs+TGA_MODE_REG); |
2932 | -} |
2933 | - |
2934 | -/* The (almost) general case of backward copy in 8bpp mode. */ |
2935 | -static inline void |
2936 | -copyarea_backward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy, |
2937 | - u32 height, u32 width, u32 line_length, |
2938 | - const struct fb_copyarea *area) |
2939 | -{ |
2940 | - struct tga_par *par = (struct tga_par *) info->par; |
2941 | - unsigned long i, left, yincr; |
2942 | - unsigned long depos, sepos, dealign, sealign; |
2943 | - u32 mask_first, mask_last; |
2944 | - unsigned long n32; |
2945 | - void __iomem *tga_regs; |
2946 | - void __iomem *tga_fb; |
2947 | - |
2948 | yincr = line_length; |
2949 | if (dy > sy) { |
2950 | dy += height - 1; |
2951 | sy += height - 1; |
2952 | yincr = -yincr; |
2953 | } |
2954 | + backward = dy == sy && dx > sx && dx < sx + width; |
2955 | |
2956 | /* Compute the offsets and alignments in the frame buffer. |
2957 | More than anything else, these control how we do copies. */ |
2958 | - depos = dy * line_length + dx + width; |
2959 | - sepos = sy * line_length + sx + width; |
2960 | - dealign = depos & 7; |
2961 | - sealign = sepos & 7; |
2962 | - |
2963 | - /* ??? The documentation appears to be incorrect (or very |
2964 | - misleading) wrt how pixel shifting works in backward copy |
2965 | - mode, i.e. when PIXELSHIFT is negative. I give up for now. |
2966 | - Do handle the common case of co-aligned backward copies, |
2967 | - but frob everything else back on generic code. */ |
2968 | - if (dealign != sealign) { |
2969 | - cfb_copyarea(info, area); |
2970 | - return; |
2971 | - } |
2972 | - |
2973 | - /* We begin the copy with the trailing pixels of the |
2974 | - unaligned destination. */ |
2975 | - mask_first = (1ul << dealign) - 1; |
2976 | - left = width - dealign; |
2977 | - |
2978 | - /* Care for small copies. */ |
2979 | - if (dealign > width) { |
2980 | - mask_first ^= (1ul << (dealign - width)) - 1; |
2981 | - left = 0; |
2982 | - } |
2983 | + depos = dy * line_length + dx; |
2984 | + sepos = sy * line_length + sx; |
2985 | + if (backward) |
2986 | + depos += width, sepos += width; |
2987 | |
2988 | /* Next copy full words at a time. */ |
2989 | - n32 = left / 32; |
2990 | - left %= 32; |
2991 | + n32 = width / 32; |
2992 | + last_step = width % 32; |
2993 | |
2994 | /* Finally copy the unaligned head of the span. */ |
2995 | - mask_last = -1 << (32 - left); |
2996 | + mask_last = (1ul << last_step) - 1; |
2997 | + |
2998 | + if (!backward) { |
2999 | + step = 32; |
3000 | + last_step = 32; |
3001 | + } else { |
3002 | + step = -32; |
3003 | + last_step = -last_step; |
3004 | + sepos -= 32; |
3005 | + depos -= 32; |
3006 | + } |
3007 | |
3008 | tga_regs = par->tga_regs_base; |
3009 | tga_fb = par->tga_fb_base; |
3010 | @@ -1368,25 +1206,33 @@ copyarea_backward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy, |
3011 | |
3012 | sfb = tga_fb + sepos; |
3013 | dfb = tga_fb + depos; |
3014 | - if (mask_first) { |
3015 | - __raw_writel(mask_first, sfb); |
3016 | - wmb(); |
3017 | - __raw_writel(mask_first, dfb); |
3018 | - wmb(); |
3019 | - } |
3020 | |
3021 | - for (j = 0; j < n32; ++j) { |
3022 | - sfb -= 32; |
3023 | - dfb -= 32; |
3024 | + for (j = 0; j < n32; j++) { |
3025 | + if (j < 2 && j + 1 < n32 && !backward && |
3026 | + !(((unsigned long)sfb | (unsigned long)dfb) & 63)) { |
3027 | + do { |
3028 | + __raw_writel(sfb - tga_fb, tga_regs+TGA_COPY64_SRC); |
3029 | + wmb(); |
3030 | + __raw_writel(dfb - tga_fb, tga_regs+TGA_COPY64_DST); |
3031 | + wmb(); |
3032 | + sfb += 64; |
3033 | + dfb += 64; |
3034 | + j += 2; |
3035 | + } while (j + 1 < n32); |
3036 | + j--; |
3037 | + continue; |
3038 | + } |
3039 | __raw_writel(0xffffffff, sfb); |
3040 | wmb(); |
3041 | __raw_writel(0xffffffff, dfb); |
3042 | wmb(); |
3043 | + sfb += step; |
3044 | + dfb += step; |
3045 | } |
3046 | |
3047 | if (mask_last) { |
3048 | - sfb -= 32; |
3049 | - dfb -= 32; |
3050 | + sfb += last_step - step; |
3051 | + dfb += last_step - step; |
3052 | __raw_writel(mask_last, sfb); |
3053 | wmb(); |
3054 | __raw_writel(mask_last, dfb); |
3055 | @@ -1447,14 +1293,9 @@ tgafb_copyarea(struct fb_info *info, const struct fb_copyarea *area) |
3056 | else if (bpp == 32) |
3057 | cfb_copyarea(info, area); |
3058 | |
3059 | - /* Detect overlapping source and destination that requires |
3060 | - a backward copy. */ |
3061 | - else if (dy == sy && dx > sx && dx < sx + width) |
3062 | - copyarea_backward_8bpp(info, dx, dy, sx, sy, height, |
3063 | - width, line_length, area); |
3064 | else |
3065 | - copyarea_foreward_8bpp(info, dx, dy, sx, sy, height, |
3066 | - width, line_length); |
3067 | + copyarea_8bpp(info, dx, dy, sx, sy, height, |
3068 | + width, line_length, area); |
3069 | } |
3070 | |
3071 | |
3072 | @@ -1470,6 +1311,7 @@ tgafb_init_fix(struct fb_info *info) |
3073 | int tga_bus_tc = TGA_BUS_TC(par->dev); |
3074 | u8 tga_type = par->tga_type; |
3075 | const char *tga_type_name = NULL; |
3076 | + unsigned memory_size; |
3077 | |
3078 | switch (tga_type) { |
3079 | case TGA_TYPE_8PLANE: |
3080 | @@ -1477,22 +1319,27 @@ tgafb_init_fix(struct fb_info *info) |
3081 | tga_type_name = "Digital ZLXp-E1"; |
3082 | if (tga_bus_tc) |
3083 | tga_type_name = "Digital ZLX-E1"; |
3084 | + memory_size = 2097152; |
3085 | break; |
3086 | case TGA_TYPE_24PLANE: |
3087 | if (tga_bus_pci) |
3088 | tga_type_name = "Digital ZLXp-E2"; |
3089 | if (tga_bus_tc) |
3090 | tga_type_name = "Digital ZLX-E2"; |
3091 | + memory_size = 8388608; |
3092 | break; |
3093 | case TGA_TYPE_24PLUSZ: |
3094 | if (tga_bus_pci) |
3095 | tga_type_name = "Digital ZLXp-E3"; |
3096 | if (tga_bus_tc) |
3097 | tga_type_name = "Digital ZLX-E3"; |
3098 | + memory_size = 16777216; |
3099 | break; |
3100 | } |
3101 | - if (!tga_type_name) |
3102 | + if (!tga_type_name) { |
3103 | tga_type_name = "Unknown"; |
3104 | + memory_size = 16777216; |
3105 | + } |
3106 | |
3107 | strlcpy(info->fix.id, tga_type_name, sizeof(info->fix.id)); |
3108 | |
3109 | @@ -1502,9 +1349,8 @@ tgafb_init_fix(struct fb_info *info) |
3110 | ? FB_VISUAL_PSEUDOCOLOR |
3111 | : FB_VISUAL_DIRECTCOLOR); |
3112 | |
3113 | - info->fix.line_length = par->xres * (par->bits_per_pixel >> 3); |
3114 | info->fix.smem_start = (size_t) par->tga_fb_base; |
3115 | - info->fix.smem_len = info->fix.line_length * par->yres; |
3116 | + info->fix.smem_len = memory_size; |
3117 | info->fix.mmio_start = (size_t) par->tga_regs_base; |
3118 | info->fix.mmio_len = 512; |
3119 | |
3120 | @@ -1628,6 +1474,9 @@ static int tgafb_register(struct device *dev) |
3121 | modedb_tga = &modedb_tc; |
3122 | modedbsize_tga = 1; |
3123 | } |
3124 | + |
3125 | + tgafb_init_fix(info); |
3126 | + |
3127 | ret = fb_find_mode(&info->var, info, |
3128 | mode_option ? mode_option : mode_option_tga, |
3129 | modedb_tga, modedbsize_tga, NULL, |
3130 | @@ -1645,7 +1494,6 @@ static int tgafb_register(struct device *dev) |
3131 | } |
3132 | |
3133 | tgafb_set_par(info); |
3134 | - tgafb_init_fix(info); |
3135 | |
3136 | if (register_framebuffer(info) < 0) { |
3137 | printk(KERN_ERR "tgafb: Could not register framebuffer\n"); |
3138 | diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c |
3139 | index 34bdabaecbd6..36e7859a31aa 100644 |
3140 | --- a/drivers/virtio/virtio_balloon.c |
3141 | +++ b/drivers/virtio/virtio_balloon.c |
3142 | @@ -310,6 +310,12 @@ static int balloon(void *_vballoon) |
3143 | else if (diff < 0) |
3144 | leak_balloon(vb, -diff); |
3145 | update_balloon_size(vb); |
3146 | + |
3147 | + /* |
3148 | + * For large balloon changes, we could spend a lot of time |
3149 | + * and always have work to do. Be nice if preempt disabled. |
3150 | + */ |
3151 | + cond_resched(); |
3152 | } |
3153 | return 0; |
3154 | } |
3155 | diff --git a/fs/aio.c b/fs/aio.c |
3156 | index 062a5f6a1448..12a3de0ee6da 100644 |
3157 | --- a/fs/aio.c |
3158 | +++ b/fs/aio.c |
3159 | @@ -52,7 +52,8 @@ |
3160 | struct aio_ring { |
3161 | unsigned id; /* kernel internal index number */ |
3162 | unsigned nr; /* number of io_events */ |
3163 | - unsigned head; |
3164 | + unsigned head; /* Written to by userland or under ring_lock |
3165 | + * mutex by aio_read_events_ring(). */ |
3166 | unsigned tail; |
3167 | |
3168 | unsigned magic; |
3169 | @@ -243,6 +244,11 @@ static void aio_free_ring(struct kioctx *ctx) |
3170 | { |
3171 | int i; |
3172 | |
3173 | + /* Disconnect the kiotx from the ring file. This prevents future |
3174 | + * accesses to the kioctx from page migration. |
3175 | + */ |
3176 | + put_aio_ring_file(ctx); |
3177 | + |
3178 | for (i = 0; i < ctx->nr_pages; i++) { |
3179 | struct page *page; |
3180 | pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, |
3181 | @@ -254,8 +260,6 @@ static void aio_free_ring(struct kioctx *ctx) |
3182 | put_page(page); |
3183 | } |
3184 | |
3185 | - put_aio_ring_file(ctx); |
3186 | - |
3187 | if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) { |
3188 | kfree(ctx->ring_pages); |
3189 | ctx->ring_pages = NULL; |
3190 | @@ -283,29 +287,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new, |
3191 | { |
3192 | struct kioctx *ctx; |
3193 | unsigned long flags; |
3194 | + pgoff_t idx; |
3195 | int rc; |
3196 | |
3197 | rc = 0; |
3198 | |
3199 | - /* Make sure the old page hasn't already been changed */ |
3200 | + /* mapping->private_lock here protects against the kioctx teardown. */ |
3201 | spin_lock(&mapping->private_lock); |
3202 | ctx = mapping->private_data; |
3203 | - if (ctx) { |
3204 | - pgoff_t idx; |
3205 | - spin_lock_irqsave(&ctx->completion_lock, flags); |
3206 | - idx = old->index; |
3207 | - if (idx < (pgoff_t)ctx->nr_pages) { |
3208 | - if (ctx->ring_pages[idx] != old) |
3209 | - rc = -EAGAIN; |
3210 | - } else |
3211 | - rc = -EINVAL; |
3212 | - spin_unlock_irqrestore(&ctx->completion_lock, flags); |
3213 | + if (!ctx) { |
3214 | + rc = -EINVAL; |
3215 | + goto out; |
3216 | + } |
3217 | + |
3218 | + /* The ring_lock mutex. The prevents aio_read_events() from writing |
3219 | + * to the ring's head, and prevents page migration from mucking in |
3220 | + * a partially initialized kiotx. |
3221 | + */ |
3222 | + if (!mutex_trylock(&ctx->ring_lock)) { |
3223 | + rc = -EAGAIN; |
3224 | + goto out; |
3225 | + } |
3226 | + |
3227 | + idx = old->index; |
3228 | + if (idx < (pgoff_t)ctx->nr_pages) { |
3229 | + /* Make sure the old page hasn't already been changed */ |
3230 | + if (ctx->ring_pages[idx] != old) |
3231 | + rc = -EAGAIN; |
3232 | } else |
3233 | rc = -EINVAL; |
3234 | - spin_unlock(&mapping->private_lock); |
3235 | |
3236 | if (rc != 0) |
3237 | - return rc; |
3238 | + goto out_unlock; |
3239 | |
3240 | /* Writeback must be complete */ |
3241 | BUG_ON(PageWriteback(old)); |
3242 | @@ -314,38 +327,26 @@ static int aio_migratepage(struct address_space *mapping, struct page *new, |
3243 | rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1); |
3244 | if (rc != MIGRATEPAGE_SUCCESS) { |
3245 | put_page(new); |
3246 | - return rc; |
3247 | + goto out_unlock; |
3248 | } |
3249 | |
3250 | - /* We can potentially race against kioctx teardown here. Use the |
3251 | - * address_space's private data lock to protect the mapping's |
3252 | - * private_data. |
3253 | + /* Take completion_lock to prevent other writes to the ring buffer |
3254 | + * while the old page is copied to the new. This prevents new |
3255 | + * events from being lost. |
3256 | */ |
3257 | - spin_lock(&mapping->private_lock); |
3258 | - ctx = mapping->private_data; |
3259 | - if (ctx) { |
3260 | - pgoff_t idx; |
3261 | - spin_lock_irqsave(&ctx->completion_lock, flags); |
3262 | - migrate_page_copy(new, old); |
3263 | - idx = old->index; |
3264 | - if (idx < (pgoff_t)ctx->nr_pages) { |
3265 | - /* And only do the move if things haven't changed */ |
3266 | - if (ctx->ring_pages[idx] == old) |
3267 | - ctx->ring_pages[idx] = new; |
3268 | - else |
3269 | - rc = -EAGAIN; |
3270 | - } else |
3271 | - rc = -EINVAL; |
3272 | - spin_unlock_irqrestore(&ctx->completion_lock, flags); |
3273 | - } else |
3274 | - rc = -EBUSY; |
3275 | - spin_unlock(&mapping->private_lock); |
3276 | + spin_lock_irqsave(&ctx->completion_lock, flags); |
3277 | + migrate_page_copy(new, old); |
3278 | + BUG_ON(ctx->ring_pages[idx] != old); |
3279 | + ctx->ring_pages[idx] = new; |
3280 | + spin_unlock_irqrestore(&ctx->completion_lock, flags); |
3281 | |
3282 | - if (rc == MIGRATEPAGE_SUCCESS) |
3283 | - put_page(old); |
3284 | - else |
3285 | - put_page(new); |
3286 | + /* The old page is no longer accessible. */ |
3287 | + put_page(old); |
3288 | |
3289 | +out_unlock: |
3290 | + mutex_unlock(&ctx->ring_lock); |
3291 | +out: |
3292 | + spin_unlock(&mapping->private_lock); |
3293 | return rc; |
3294 | } |
3295 | #endif |
3296 | @@ -380,7 +381,7 @@ static int aio_setup_ring(struct kioctx *ctx) |
3297 | file = aio_private_file(ctx, nr_pages); |
3298 | if (IS_ERR(file)) { |
3299 | ctx->aio_ring_file = NULL; |
3300 | - return -EAGAIN; |
3301 | + return -ENOMEM; |
3302 | } |
3303 | |
3304 | ctx->aio_ring_file = file; |
3305 | @@ -415,7 +416,7 @@ static int aio_setup_ring(struct kioctx *ctx) |
3306 | |
3307 | if (unlikely(i != nr_pages)) { |
3308 | aio_free_ring(ctx); |
3309 | - return -EAGAIN; |
3310 | + return -ENOMEM; |
3311 | } |
3312 | |
3313 | ctx->mmap_size = nr_pages * PAGE_SIZE; |
3314 | @@ -429,7 +430,7 @@ static int aio_setup_ring(struct kioctx *ctx) |
3315 | if (IS_ERR((void *)ctx->mmap_base)) { |
3316 | ctx->mmap_size = 0; |
3317 | aio_free_ring(ctx); |
3318 | - return -EAGAIN; |
3319 | + return -ENOMEM; |
3320 | } |
3321 | |
3322 | pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); |
3323 | @@ -556,6 +557,10 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) |
3324 | rcu_read_unlock(); |
3325 | spin_unlock(&mm->ioctx_lock); |
3326 | |
3327 | + /* While kioctx setup is in progress, |
3328 | + * we are protected from page migration |
3329 | + * changes ring_pages by ->ring_lock. |
3330 | + */ |
3331 | ring = kmap_atomic(ctx->ring_pages[0]); |
3332 | ring->id = ctx->id; |
3333 | kunmap_atomic(ring); |
3334 | @@ -640,24 +645,28 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) |
3335 | |
3336 | ctx->max_reqs = nr_events; |
3337 | |
3338 | - if (percpu_ref_init(&ctx->users, free_ioctx_users)) |
3339 | - goto err; |
3340 | - |
3341 | - if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs)) |
3342 | - goto err; |
3343 | - |
3344 | spin_lock_init(&ctx->ctx_lock); |
3345 | spin_lock_init(&ctx->completion_lock); |
3346 | mutex_init(&ctx->ring_lock); |
3347 | + /* Protect against page migration throughout kiotx setup by keeping |
3348 | + * the ring_lock mutex held until setup is complete. */ |
3349 | + mutex_lock(&ctx->ring_lock); |
3350 | init_waitqueue_head(&ctx->wait); |
3351 | |
3352 | INIT_LIST_HEAD(&ctx->active_reqs); |
3353 | |
3354 | + if (percpu_ref_init(&ctx->users, free_ioctx_users)) |
3355 | + goto err; |
3356 | + |
3357 | + if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs)) |
3358 | + goto err; |
3359 | + |
3360 | ctx->cpu = alloc_percpu(struct kioctx_cpu); |
3361 | if (!ctx->cpu) |
3362 | goto err; |
3363 | |
3364 | - if (aio_setup_ring(ctx) < 0) |
3365 | + err = aio_setup_ring(ctx); |
3366 | + if (err < 0) |
3367 | goto err; |
3368 | |
3369 | atomic_set(&ctx->reqs_available, ctx->nr_events - 1); |
3370 | @@ -683,6 +692,9 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) |
3371 | if (err) |
3372 | goto err_cleanup; |
3373 | |
3374 | + /* Release the ring_lock mutex now that all setup is complete. */ |
3375 | + mutex_unlock(&ctx->ring_lock); |
3376 | + |
3377 | pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n", |
3378 | ctx, ctx->user_id, mm, ctx->nr_events); |
3379 | return ctx; |
3380 | @@ -692,6 +704,7 @@ err_cleanup: |
3381 | err_ctx: |
3382 | aio_free_ring(ctx); |
3383 | err: |
3384 | + mutex_unlock(&ctx->ring_lock); |
3385 | free_percpu(ctx->cpu); |
3386 | free_percpu(ctx->reqs.pcpu_count); |
3387 | free_percpu(ctx->users.pcpu_count); |
3388 | @@ -1024,6 +1037,7 @@ static long aio_read_events_ring(struct kioctx *ctx, |
3389 | |
3390 | mutex_lock(&ctx->ring_lock); |
3391 | |
3392 | + /* Access to ->ring_pages here is protected by ctx->ring_lock. */ |
3393 | ring = kmap_atomic(ctx->ring_pages[0]); |
3394 | head = ring->head; |
3395 | tail = ring->tail; |
3396 | diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c |
3397 | index 10d6c41aecad..6bf06a07f3e0 100644 |
3398 | --- a/fs/lockd/svc.c |
3399 | +++ b/fs/lockd/svc.c |
3400 | @@ -235,6 +235,7 @@ out_err: |
3401 | if (warned++ == 0) |
3402 | printk(KERN_WARNING |
3403 | "lockd_up: makesock failed, error=%d\n", err); |
3404 | + svc_shutdown_net(serv, net); |
3405 | return err; |
3406 | } |
3407 | |
3408 | diff --git a/fs/locks.c b/fs/locks.c |
3409 | index 92a0f0a52b06..4dd39b98a6a3 100644 |
3410 | --- a/fs/locks.c |
3411 | +++ b/fs/locks.c |
3412 | @@ -1376,11 +1376,10 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) |
3413 | |
3414 | restart: |
3415 | break_time = flock->fl_break_time; |
3416 | - if (break_time != 0) { |
3417 | + if (break_time != 0) |
3418 | break_time -= jiffies; |
3419 | - if (break_time == 0) |
3420 | - break_time++; |
3421 | - } |
3422 | + if (break_time == 0) |
3423 | + break_time++; |
3424 | locks_insert_block(flock, new_fl); |
3425 | spin_unlock(&inode->i_lock); |
3426 | error = wait_event_interruptible_timeout(new_fl->fl_wait, |
3427 | diff --git a/include/linux/libata.h b/include/linux/libata.h |
3428 | index bec6dbe939a0..3fee55e73e5e 100644 |
3429 | --- a/include/linux/libata.h |
3430 | +++ b/include/linux/libata.h |
3431 | @@ -822,6 +822,7 @@ struct ata_port { |
3432 | unsigned long qc_allocated; |
3433 | unsigned int qc_active; |
3434 | int nr_active_links; /* #links with active qcs */ |
3435 | + unsigned int last_tag; /* track next tag hw expects */ |
3436 | |
3437 | struct ata_link link; /* host default link */ |
3438 | struct ata_link *slave_link; /* see ata_slave_link_init() */ |
3439 | diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c |
3440 | index 8280a5dd1727..7dd33577b905 100644 |
3441 | --- a/lib/percpu_counter.c |
3442 | +++ b/lib/percpu_counter.c |
3443 | @@ -169,7 +169,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb, |
3444 | struct percpu_counter *fbc; |
3445 | |
3446 | compute_batch_value(); |
3447 | - if (action != CPU_DEAD) |
3448 | + if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) |
3449 | return NOTIFY_OK; |
3450 | |
3451 | cpu = (unsigned long)hcpu; |
3452 | diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c |
3453 | index b703790b4e44..074bb2a5e675 100644 |
3454 | --- a/net/ceph/crush/mapper.c |
3455 | +++ b/net/ceph/crush/mapper.c |
3456 | @@ -292,8 +292,8 @@ static int is_out(const struct crush_map *map, |
3457 | * @outpos: our position in that vector |
3458 | * @tries: number of attempts to make |
3459 | * @recurse_tries: number of attempts to have recursive chooseleaf make |
3460 | - * @local_tries: localized retries |
3461 | - * @local_fallback_tries: localized fallback retries |
3462 | + * @local_retries: localized retries |
3463 | + * @local_fallback_retries: localized fallback retries |
3464 | * @recurse_to_leaf: true if we want one device under each item of given type (chooseleaf instead of choose) |
3465 | * @out2: second output vector for leaf items (if @recurse_to_leaf) |
3466 | */ |
3467 | @@ -304,8 +304,8 @@ static int crush_choose_firstn(const struct crush_map *map, |
3468 | int *out, int outpos, |
3469 | unsigned int tries, |
3470 | unsigned int recurse_tries, |
3471 | - unsigned int local_tries, |
3472 | - unsigned int local_fallback_tries, |
3473 | + unsigned int local_retries, |
3474 | + unsigned int local_fallback_retries, |
3475 | int recurse_to_leaf, |
3476 | int *out2) |
3477 | { |
3478 | @@ -344,9 +344,9 @@ static int crush_choose_firstn(const struct crush_map *map, |
3479 | reject = 1; |
3480 | goto reject; |
3481 | } |
3482 | - if (local_fallback_tries > 0 && |
3483 | + if (local_fallback_retries > 0 && |
3484 | flocal >= (in->size>>1) && |
3485 | - flocal > local_fallback_tries) |
3486 | + flocal > local_fallback_retries) |
3487 | item = bucket_perm_choose(in, x, r); |
3488 | else |
3489 | item = crush_bucket_choose(in, x, r); |
3490 | @@ -393,8 +393,8 @@ static int crush_choose_firstn(const struct crush_map *map, |
3491 | x, outpos+1, 0, |
3492 | out2, outpos, |
3493 | recurse_tries, 0, |
3494 | - local_tries, |
3495 | - local_fallback_tries, |
3496 | + local_retries, |
3497 | + local_fallback_retries, |
3498 | 0, |
3499 | NULL) <= outpos) |
3500 | /* didn't get leaf */ |
3501 | @@ -420,14 +420,14 @@ reject: |
3502 | ftotal++; |
3503 | flocal++; |
3504 | |
3505 | - if (collide && flocal <= local_tries) |
3506 | + if (collide && flocal <= local_retries) |
3507 | /* retry locally a few times */ |
3508 | retry_bucket = 1; |
3509 | - else if (local_fallback_tries > 0 && |
3510 | - flocal <= in->size + local_fallback_tries) |
3511 | + else if (local_fallback_retries > 0 && |
3512 | + flocal <= in->size + local_fallback_retries) |
3513 | /* exhaustive bucket search */ |
3514 | retry_bucket = 1; |
3515 | - else if (ftotal <= tries) |
3516 | + else if (ftotal < tries) |
3517 | /* then retry descent */ |
3518 | retry_descent = 1; |
3519 | else |
3520 | @@ -640,10 +640,18 @@ int crush_do_rule(const struct crush_map *map, |
3521 | __u32 step; |
3522 | int i, j; |
3523 | int numrep; |
3524 | - int choose_tries = map->choose_total_tries; |
3525 | - int choose_local_tries = map->choose_local_tries; |
3526 | - int choose_local_fallback_tries = map->choose_local_fallback_tries; |
3527 | + /* |
3528 | + * the original choose_total_tries value was off by one (it |
3529 | + * counted "retries" and not "tries"). add one. |
3530 | + */ |
3531 | + int choose_tries = map->choose_total_tries + 1; |
3532 | int choose_leaf_tries = 0; |
3533 | + /* |
3534 | + * the local tries values were counted as "retries", though, |
3535 | + * and need no adjustment |
3536 | + */ |
3537 | + int choose_local_retries = map->choose_local_tries; |
3538 | + int choose_local_fallback_retries = map->choose_local_fallback_tries; |
3539 | |
3540 | if ((__u32)ruleno >= map->max_rules) { |
3541 | dprintk(" bad ruleno %d\n", ruleno); |
3542 | @@ -677,12 +685,12 @@ int crush_do_rule(const struct crush_map *map, |
3543 | |
3544 | case CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES: |
3545 | if (curstep->arg1 > 0) |
3546 | - choose_local_tries = curstep->arg1; |
3547 | + choose_local_retries = curstep->arg1; |
3548 | break; |
3549 | |
3550 | case CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES: |
3551 | if (curstep->arg1 > 0) |
3552 | - choose_local_fallback_tries = curstep->arg1; |
3553 | + choose_local_fallback_retries = curstep->arg1; |
3554 | break; |
3555 | |
3556 | case CRUSH_RULE_CHOOSELEAF_FIRSTN: |
3557 | @@ -734,8 +742,8 @@ int crush_do_rule(const struct crush_map *map, |
3558 | o+osize, j, |
3559 | choose_tries, |
3560 | recurse_tries, |
3561 | - choose_local_tries, |
3562 | - choose_local_fallback_tries, |
3563 | + choose_local_retries, |
3564 | + choose_local_fallback_retries, |
3565 | recurse_to_leaf, |
3566 | c+osize); |
3567 | } else { |
3568 | diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h |
3569 | index 5e44e3179e02..6bd498470138 100644 |
3570 | --- a/net/mac80211/ieee80211_i.h |
3571 | +++ b/net/mac80211/ieee80211_i.h |
3572 | @@ -1385,6 +1385,7 @@ void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata); |
3573 | void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata); |
3574 | void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata, |
3575 | __le16 fc, bool acked); |
3576 | +void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata); |
3577 | void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); |
3578 | |
3579 | /* IBSS code */ |
3580 | diff --git a/net/mac80211/main.c b/net/mac80211/main.c |
3581 | index d767cfb9b45f..c7a7a86afdb8 100644 |
3582 | --- a/net/mac80211/main.c |
3583 | +++ b/net/mac80211/main.c |
3584 | @@ -148,6 +148,8 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local) |
3585 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
3586 | if (!rcu_access_pointer(sdata->vif.chanctx_conf)) |
3587 | continue; |
3588 | + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) |
3589 | + continue; |
3590 | power = min(power, sdata->vif.bss_conf.txpower); |
3591 | } |
3592 | rcu_read_unlock(); |
3593 | diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c |
3594 | index 245dce969b31..47059ac44e24 100644 |
3595 | --- a/net/mac80211/mlme.c |
3596 | +++ b/net/mac80211/mlme.c |
3597 | @@ -2708,8 +2708,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, |
3598 | bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems, |
3599 | channel); |
3600 | if (bss) { |
3601 | - ieee80211_rx_bss_put(local, bss); |
3602 | sdata->vif.bss_conf.beacon_rate = bss->beacon_rate; |
3603 | + ieee80211_rx_bss_put(local, bss); |
3604 | } |
3605 | } |
3606 | |
3607 | @@ -3504,6 +3504,32 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) |
3608 | } |
3609 | |
3610 | #ifdef CONFIG_PM |
3611 | +void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata) |
3612 | +{ |
3613 | + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
3614 | + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; |
3615 | + |
3616 | + sdata_lock(sdata); |
3617 | + |
3618 | + if (ifmgd->auth_data) { |
3619 | + /* |
3620 | + * If we are trying to authenticate while suspending, cfg80211 |
3621 | + * won't know and won't actually abort those attempts, thus we |
3622 | + * need to do that ourselves. |
3623 | + */ |
3624 | + ieee80211_send_deauth_disassoc(sdata, |
3625 | + ifmgd->auth_data->bss->bssid, |
3626 | + IEEE80211_STYPE_DEAUTH, |
3627 | + WLAN_REASON_DEAUTH_LEAVING, |
3628 | + false, frame_buf); |
3629 | + ieee80211_destroy_auth_data(sdata, false); |
3630 | + cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, |
3631 | + IEEE80211_DEAUTH_FRAME_LEN); |
3632 | + } |
3633 | + |
3634 | + sdata_unlock(sdata); |
3635 | +} |
3636 | + |
3637 | void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) |
3638 | { |
3639 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
3640 | diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c |
3641 | index 0c2a29484c07..6fb38558a5e6 100644 |
3642 | --- a/net/mac80211/offchannel.c |
3643 | +++ b/net/mac80211/offchannel.c |
3644 | @@ -355,6 +355,7 @@ void ieee80211_sw_roc_work(struct work_struct *work) |
3645 | struct ieee80211_roc_work *dep; |
3646 | |
3647 | /* start this ROC */ |
3648 | + ieee80211_offchannel_stop_vifs(local); |
3649 | |
3650 | /* switch channel etc */ |
3651 | ieee80211_recalc_idle(local); |
3652 | diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c |
3653 | index af64fb8e8add..d478b880a0af 100644 |
3654 | --- a/net/mac80211/pm.c |
3655 | +++ b/net/mac80211/pm.c |
3656 | @@ -100,10 +100,18 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) |
3657 | |
3658 | /* remove all interfaces that were created in the driver */ |
3659 | list_for_each_entry(sdata, &local->interfaces, list) { |
3660 | - if (!ieee80211_sdata_running(sdata) || |
3661 | - sdata->vif.type == NL80211_IFTYPE_AP_VLAN || |
3662 | - sdata->vif.type == NL80211_IFTYPE_MONITOR) |
3663 | + if (!ieee80211_sdata_running(sdata)) |
3664 | continue; |
3665 | + switch (sdata->vif.type) { |
3666 | + case NL80211_IFTYPE_AP_VLAN: |
3667 | + case NL80211_IFTYPE_MONITOR: |
3668 | + continue; |
3669 | + case NL80211_IFTYPE_STATION: |
3670 | + ieee80211_mgd_quiesce(sdata); |
3671 | + break; |
3672 | + default: |
3673 | + break; |
3674 | + } |
3675 | |
3676 | drv_remove_interface(local, sdata); |
3677 | } |
3678 | diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c |
3679 | index 4080c615636f..c14c16a6d62d 100644 |
3680 | --- a/net/mac80211/tx.c |
3681 | +++ b/net/mac80211/tx.c |
3682 | @@ -2909,7 +2909,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, |
3683 | cpu_to_le16(IEEE80211_FCTL_MOREDATA); |
3684 | } |
3685 | |
3686 | - if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) |
3687 | + if (sdata->vif.type == NL80211_IFTYPE_AP) |
3688 | sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); |
3689 | if (!ieee80211_tx_prepare(sdata, &tx, skb)) |
3690 | break; |
3691 | diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c |
3692 | index 0edada973434..3ea5cda787c7 100644 |
3693 | --- a/net/sunrpc/clnt.c |
3694 | +++ b/net/sunrpc/clnt.c |
3695 | @@ -1798,21 +1798,19 @@ call_connect_status(struct rpc_task *task) |
3696 | trace_rpc_connect_status(task, status); |
3697 | task->tk_status = 0; |
3698 | switch (status) { |
3699 | - /* if soft mounted, test if we've timed out */ |
3700 | - case -ETIMEDOUT: |
3701 | - task->tk_action = call_timeout; |
3702 | - return; |
3703 | case -ECONNREFUSED: |
3704 | case -ECONNRESET: |
3705 | case -ECONNABORTED: |
3706 | case -ENETUNREACH: |
3707 | case -EHOSTUNREACH: |
3708 | - /* retry with existing socket, after a delay */ |
3709 | - rpc_delay(task, 3*HZ); |
3710 | if (RPC_IS_SOFTCONN(task)) |
3711 | break; |
3712 | + /* retry with existing socket, after a delay */ |
3713 | + rpc_delay(task, 3*HZ); |
3714 | case -EAGAIN: |
3715 | - task->tk_action = call_bind; |
3716 | + /* Check for timeouts before looping back to call_bind */ |
3717 | + case -ETIMEDOUT: |
3718 | + task->tk_action = call_timeout; |
3719 | return; |
3720 | case 0: |
3721 | clnt->cl_stats->netreconn++; |
3722 | diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c |
3723 | index b9dc6acbba8c..910223782c5c 100644 |
3724 | --- a/sound/soc/soc-dapm.c |
3725 | +++ b/sound/soc/soc-dapm.c |
3726 | @@ -251,7 +251,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget, |
3727 | static void dapm_kcontrol_free(struct snd_kcontrol *kctl) |
3728 | { |
3729 | struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl); |
3730 | - kfree(data->widget); |
3731 | kfree(data->wlist); |
3732 | kfree(data); |
3733 | } |
3734 | diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c |
3735 | index bdb71a26ae35..00ea679b3826 100644 |
3736 | --- a/tools/virtio/virtio_test.c |
3737 | +++ b/tools/virtio/virtio_test.c |
3738 | @@ -172,7 +172,7 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq, |
3739 | GFP_ATOMIC); |
3740 | if (likely(r == 0)) { |
3741 | ++started; |
3742 | - if (unlikely(!virtqueue_kick(vq->vq)) |
3743 | + if (unlikely(!virtqueue_kick(vq->vq))) |
3744 | r = -1; |
3745 | } |
3746 | } else |
3747 | diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c |
3748 | index 8ca405cd7c1a..26954a7d9b03 100644 |
3749 | --- a/virt/kvm/arm/vgic.c |
3750 | +++ b/virt/kvm/arm/vgic.c |
3751 | @@ -916,6 +916,7 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) |
3752 | case 0: |
3753 | if (!target_cpus) |
3754 | return; |
3755 | + break; |
3756 | |
3757 | case 1: |
3758 | target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff; |
3759 | diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c |
3760 | index 8631d9c14320..f2c80d5451c3 100644 |
3761 | --- a/virt/kvm/async_pf.c |
3762 | +++ b/virt/kvm/async_pf.c |
3763 | @@ -85,7 +85,7 @@ static void async_pf_execute(struct work_struct *work) |
3764 | if (waitqueue_active(&vcpu->wq)) |
3765 | wake_up_interruptible(&vcpu->wq); |
3766 | |
3767 | - mmdrop(mm); |
3768 | + mmput(mm); |
3769 | kvm_put_kvm(vcpu->kvm); |
3770 | } |
3771 | |
3772 | @@ -98,7 +98,7 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) |
3773 | typeof(*work), queue); |
3774 | list_del(&work->queue); |
3775 | if (cancel_work_sync(&work->work)) { |
3776 | - mmdrop(work->mm); |
3777 | + mmput(work->mm); |
3778 | kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */ |
3779 | kmem_cache_free(async_pf_cache, work); |
3780 | } |
3781 | @@ -162,7 +162,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, |
3782 | work->addr = gfn_to_hva(vcpu->kvm, gfn); |
3783 | work->arch = *arch; |
3784 | work->mm = current->mm; |
3785 | - atomic_inc(&work->mm->mm_count); |
3786 | + atomic_inc(&work->mm->mm_users); |
3787 | kvm_get_kvm(work->vcpu->kvm); |
3788 | |
3789 | /* this can't really happen otherwise gfn_to_pfn_async |
3790 | @@ -180,7 +180,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, |
3791 | return 1; |
3792 | retry_sync: |
3793 | kvm_put_kvm(work->vcpu->kvm); |
3794 | - mmdrop(work->mm); |
3795 | + mmput(work->mm); |
3796 | kmem_cache_free(async_pf_cache, work); |
3797 | return 0; |
3798 | } |
3799 | diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c |
3800 | index ce9ed99ad7dc..8c805a071507 100644 |
3801 | --- a/virt/kvm/ioapic.c |
3802 | +++ b/virt/kvm/ioapic.c |
3803 | @@ -306,7 +306,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status) |
3804 | BUG_ON(ioapic->rtc_status.pending_eoi != 0); |
3805 | ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, |
3806 | ioapic->rtc_status.dest_map); |
3807 | - ioapic->rtc_status.pending_eoi = ret; |
3808 | + ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret); |
3809 | } else |
3810 | ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL); |
3811 |