Magellan Linux

Contents of /trunk/kernel26-magellan-server/patches-2.6.31-r3/0100-2.6.31.1-all-fixes.patch



Revision 946
Thu Dec 10 13:02:09 2009 UTC by niro
File size: 83954 bytes
-2.6.31-magellan-r3: updated to linux-2.6.31.7

1 diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
2 index a34954d..73cae57 100644
3 --- a/arch/arm/mm/highmem.c
4 +++ b/arch/arm/mm/highmem.c
5 @@ -40,11 +40,16 @@ void *kmap_atomic(struct page *page, enum km_type type)
6 {
7 unsigned int idx;
8 unsigned long vaddr;
9 + void *kmap;
10
11 pagefault_disable();
12 if (!PageHighMem(page))
13 return page_address(page);
14
15 + kmap = kmap_high_get(page);
16 + if (kmap)
17 + return kmap;
18 +
19 idx = type + KM_TYPE_NR * smp_processor_id();
20 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21 #ifdef CONFIG_DEBUG_HIGHMEM
22 @@ -80,6 +85,9 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
23 #else
24 (void) idx; /* to kill a warning */
25 #endif
26 + } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
27 + /* this address was obtained through kmap_high_get() */
28 + kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
29 }
30 pagefault_enable();
31 }
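
The hunk above closes an aliasing hole: when a highmem page already has a permanent kernel mapping, kmap_atomic() now returns that mapping via kmap_high_get() instead of installing a second fixmap entry, because two live virtual addresses for one page can alias on VIVT caches (and kunmap_atomic() learns to recognize such addresses by their PKMAP range). A minimal user-space sketch of the "look up before you map" fast path; all names are illustrative, not kernel API:

#include <assert.h>

#define NPAGES 4

static char page_mem[NPAGES][64];   /* the pages themselves */
static void *permanent[NPAGES];     /* kmap_high()-style mappings */
static char fixmap[NPAGES][64];     /* would-be temporary slots */

static void *map_atomic(int page)
{
    if (permanent[page])            /* kmap_high_get() analogue */
        return permanent[page];     /* reuse, no second mapping */
    return fixmap[page];            /* otherwise take a fixmap slot */
}

int main(void)
{
    permanent[1] = page_mem[1];     /* page 1 already kmapped */
    assert(map_atomic(1) == page_mem[1]);       /* existing vaddr reused */
    assert(map_atomic(2) == (void *)fixmap[2]); /* new temporary slot */
    return 0;
}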
32 diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
33 index 98c104a..edab67e 100644
34 --- a/arch/powerpc/include/asm/mmu-hash64.h
35 +++ b/arch/powerpc/include/asm/mmu-hash64.h
36 @@ -41,6 +41,7 @@ extern char initial_stab[];
37
38 #define SLB_NUM_BOLTED 3
39 #define SLB_CACHE_ENTRIES 8
40 +#define SLB_MIN_SIZE 32
41
42 /* Bits in the SLB ESID word */
43 #define SLB_ESID_V ASM_CONST(0x0000000008000000) /* valid */
44 @@ -296,6 +297,7 @@ extern void slb_flush_and_rebolt(void);
45 extern void stab_initialize(unsigned long stab);
46
47 extern void slb_vmalloc_update(void);
48 +extern void slb_set_size(u16 size);
49 #endif /* __ASSEMBLY__ */
50
51 /*
52 diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
53 index d6a616a..ccc68b5 100644
54 --- a/arch/powerpc/include/asm/pmc.h
55 +++ b/arch/powerpc/include/asm/pmc.h
56 @@ -27,10 +27,22 @@ extern perf_irq_t perf_irq;
57
58 int reserve_pmc_hardware(perf_irq_t new_perf_irq);
59 void release_pmc_hardware(void);
60 +void ppc_enable_pmcs(void);
61
62 #ifdef CONFIG_PPC64
63 -void power4_enable_pmcs(void);
64 -void pasemi_enable_pmcs(void);
65 +#include <asm/lppaca.h>
66 +
67 +static inline void ppc_set_pmu_inuse(int inuse)
68 +{
69 + get_lppaca()->pmcregs_in_use = inuse;
70 +}
71 +
72 +extern void power4_enable_pmcs(void);
73 +
74 +#else /* CONFIG_PPC64 */
75 +
76 +static inline void ppc_set_pmu_inuse(int inuse) { }
77 +
78 #endif
79
80 #endif /* __KERNEL__ */
81 diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
82 index 2419cc7..ed0ac4e 100644
83 --- a/arch/powerpc/kernel/lparcfg.c
84 +++ b/arch/powerpc/kernel/lparcfg.c
85 @@ -35,6 +35,7 @@
86 #include <asm/prom.h>
87 #include <asm/vdso_datapage.h>
88 #include <asm/vio.h>
89 +#include <asm/mmu.h>
90
91 #define MODULE_VERS "1.8"
92 #define MODULE_NAME "lparcfg"
93 @@ -537,6 +538,8 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
94
95 seq_printf(m, "shared_processor_mode=%d\n", lppaca[0].shared_proc);
96
97 + seq_printf(m, "slb_size=%d\n", mmu_slb_size);
98 +
99 return 0;
100 }
101
102 diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
103 index 70e1f57..7ceefaf 100644
104 --- a/arch/powerpc/kernel/perf_counter.c
105 +++ b/arch/powerpc/kernel/perf_counter.c
106 @@ -32,6 +32,9 @@ struct cpu_hw_counters {
107 unsigned long mmcr[3];
108 struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
109 u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
110 + u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
111 + unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
112 + unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
113 };
114 DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
115
116 @@ -62,7 +65,6 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
117 {
118 return 0;
119 }
120 -static inline void perf_set_pmu_inuse(int inuse) { }
121 static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
122 static inline u32 perf_get_misc_flags(struct pt_regs *regs)
123 {
124 @@ -93,11 +95,6 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
125 return 0;
126 }
127
128 -static inline void perf_set_pmu_inuse(int inuse)
129 -{
130 - get_lppaca()->pmcregs_in_use = inuse;
131 -}
132 -
133 /*
134 * The user wants a data address recorded.
135 * If we're not doing instruction sampling, give them the SDAR
136 @@ -245,13 +242,11 @@ static void write_pmc(int idx, unsigned long val)
137 * and see if any combination of alternative codes is feasible.
138 * The feasible set is returned in event[].
139 */
140 -static int power_check_constraints(u64 event[], unsigned int cflags[],
141 +static int power_check_constraints(struct cpu_hw_counters *cpuhw,
142 + u64 event[], unsigned int cflags[],
143 int n_ev)
144 {
145 unsigned long mask, value, nv;
146 - u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
147 - unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
148 - unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
149 unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
150 int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
151 int i, j;
152 @@ -266,21 +261,23 @@ static int power_check_constraints(u64 event[], unsigned int cflags[],
153 if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
154 && !ppmu->limited_pmc_event(event[i])) {
155 ppmu->get_alternatives(event[i], cflags[i],
156 - alternatives[i]);
157 - event[i] = alternatives[i][0];
158 + cpuhw->alternatives[i]);
159 + event[i] = cpuhw->alternatives[i][0];
160 }
161 - if (ppmu->get_constraint(event[i], &amasks[i][0],
162 - &avalues[i][0]))
163 + if (ppmu->get_constraint(event[i], &cpuhw->amasks[i][0],
164 + &cpuhw->avalues[i][0]))
165 return -1;
166 }
167 value = mask = 0;
168 for (i = 0; i < n_ev; ++i) {
169 - nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
170 + nv = (value | cpuhw->avalues[i][0]) +
171 + (value & cpuhw->avalues[i][0] & addf);
172 if ((((nv + tadd) ^ value) & mask) != 0 ||
173 - (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
174 + (((nv + tadd) ^ cpuhw->avalues[i][0]) &
175 + cpuhw->amasks[i][0]) != 0)
176 break;
177 value = nv;
178 - mask |= amasks[i][0];
179 + mask |= cpuhw->amasks[i][0];
180 }
181 if (i == n_ev)
182 return 0; /* all OK */
183 @@ -291,10 +288,11 @@ static int power_check_constraints(u64 event[], unsigned int cflags[],
184 for (i = 0; i < n_ev; ++i) {
185 choice[i] = 0;
186 n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
187 - alternatives[i]);
188 + cpuhw->alternatives[i]);
189 for (j = 1; j < n_alt[i]; ++j)
190 - ppmu->get_constraint(alternatives[i][j],
191 - &amasks[i][j], &avalues[i][j]);
192 + ppmu->get_constraint(cpuhw->alternatives[i][j],
193 + &cpuhw->amasks[i][j],
194 + &cpuhw->avalues[i][j]);
195 }
196
197 /* enumerate all possibilities and see if any will work */
198 @@ -313,11 +311,11 @@ static int power_check_constraints(u64 event[], unsigned int cflags[],
199 * where k > j, will satisfy the constraints.
200 */
201 while (++j < n_alt[i]) {
202 - nv = (value | avalues[i][j]) +
203 - (value & avalues[i][j] & addf);
204 + nv = (value | cpuhw->avalues[i][j]) +
205 + (value & cpuhw->avalues[i][j] & addf);
206 if ((((nv + tadd) ^ value) & mask) == 0 &&
207 - (((nv + tadd) ^ avalues[i][j])
208 - & amasks[i][j]) == 0)
209 + (((nv + tadd) ^ cpuhw->avalues[i][j])
210 + & cpuhw->amasks[i][j]) == 0)
211 break;
212 }
213 if (j >= n_alt[i]) {
214 @@ -339,7 +337,7 @@ static int power_check_constraints(u64 event[], unsigned int cflags[],
215 svalues[i] = value;
216 smasks[i] = mask;
217 value = nv;
218 - mask |= amasks[i][j];
219 + mask |= cpuhw->amasks[i][j];
220 ++i;
221 j = -1;
222 }
223 @@ -347,7 +345,7 @@ static int power_check_constraints(u64 event[], unsigned int cflags[],
224
225 /* OK, we have a feasible combination, tell the caller the solution */
226 for (i = 0; i < n_ev; ++i)
227 - event[i] = alternatives[i][choice[i]];
228 + event[i] = cpuhw->alternatives[i][choice[i]];
229 return 0;
230 }
231
232 @@ -531,8 +529,7 @@ void hw_perf_disable(void)
233 * Check if we ever enabled the PMU on this cpu.
234 */
235 if (!cpuhw->pmcs_enabled) {
236 - if (ppc_md.enable_pmcs)
237 - ppc_md.enable_pmcs();
238 + ppc_enable_pmcs();
239 cpuhw->pmcs_enabled = 1;
240 }
241
242 @@ -594,7 +591,7 @@ void hw_perf_enable(void)
243 mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
244 mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
245 if (cpuhw->n_counters == 0)
246 - perf_set_pmu_inuse(0);
247 + ppc_set_pmu_inuse(0);
248 goto out_enable;
249 }
250
251 @@ -627,7 +624,7 @@ void hw_perf_enable(void)
252 * bit set and set the hardware counters to their initial values.
253 * Then unfreeze the counters.
254 */
255 - perf_set_pmu_inuse(1);
256 + ppc_set_pmu_inuse(1);
257 mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
258 mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
259 mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
260 @@ -752,7 +749,7 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader,
261 return -EAGAIN;
262 if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
263 return -EAGAIN;
264 - i = power_check_constraints(cpuhw->events, cpuhw->flags, n + n0);
265 + i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0);
266 if (i < 0)
267 return -EAGAIN;
268 cpuhw->n_counters = n0 + n;
269 @@ -807,7 +804,7 @@ static int power_pmu_enable(struct perf_counter *counter)
270 cpuhw->flags[n0] = counter->hw.counter_base;
271 if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
272 goto out;
273 - if (power_check_constraints(cpuhw->events, cpuhw->flags, n0 + 1))
274 + if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
275 goto out;
276
277 counter->hw.config = cpuhw->events[n0];
278 @@ -1012,6 +1009,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
279 unsigned int cflags[MAX_HWCOUNTERS];
280 int n;
281 int err;
282 + struct cpu_hw_counters *cpuhw;
283
284 if (!ppmu)
285 return ERR_PTR(-ENXIO);
286 @@ -1090,7 +1088,11 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
287 cflags[n] = flags;
288 if (check_excludes(ctrs, cflags, n, 1))
289 return ERR_PTR(-EINVAL);
290 - if (power_check_constraints(events, cflags, n + 1))
291 +
292 + cpuhw = &get_cpu_var(cpu_hw_counters);
293 + err = power_check_constraints(cpuhw, events, cflags, n + 1);
294 + put_cpu_var(cpu_hw_counters);
295 + if (err)
296 return ERR_PTR(-EINVAL);
297
298 counter->hw.config = events[n];
299 diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
300 index c434823..bf90361 100644
301 --- a/arch/powerpc/kernel/rtas.c
302 +++ b/arch/powerpc/kernel/rtas.c
303 @@ -39,6 +39,7 @@
304 #include <asm/smp.h>
305 #include <asm/atomic.h>
306 #include <asm/time.h>
307 +#include <asm/mmu.h>
308
309 struct rtas_t rtas = {
310 .lock = __RAW_SPIN_LOCK_UNLOCKED
311 @@ -713,6 +714,7 @@ static void rtas_percpu_suspend_me(void *info)
312 {
313 long rc = H_SUCCESS;
314 unsigned long msr_save;
315 + u16 slb_size = mmu_slb_size;
316 int cpu;
317 struct rtas_suspend_me_data *data =
318 (struct rtas_suspend_me_data *)info;
319 @@ -735,13 +737,16 @@ static void rtas_percpu_suspend_me(void *info)
320 /* All other cpus are in H_JOIN, this cpu does
321 * the suspend.
322 */
323 + slb_set_size(SLB_MIN_SIZE);
324 printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n",
325 smp_processor_id());
326 data->error = rtas_call(data->token, 0, 1, NULL);
327
328 - if (data->error)
329 + if (data->error) {
330 printk(KERN_DEBUG "ibm,suspend-me returned %d\n",
331 data->error);
332 + slb_set_size(slb_size);
333 + }
334 } else {
335 printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
336 smp_processor_id(), rc);
337 diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
338 index f41aec8..956ab33 100644
339 --- a/arch/powerpc/kernel/sysfs.c
340 +++ b/arch/powerpc/kernel/sysfs.c
341 @@ -17,6 +17,7 @@
342 #include <asm/prom.h>
343 #include <asm/machdep.h>
344 #include <asm/smp.h>
345 +#include <asm/pmc.h>
346
347 #include "cacheinfo.h"
348
349 @@ -123,6 +124,8 @@ static DEFINE_PER_CPU(char, pmcs_enabled);
350
351 void ppc_enable_pmcs(void)
352 {
353 + ppc_set_pmu_inuse(1);
354 +
355 /* Only need to enable them once */
356 if (__get_cpu_var(pmcs_enabled))
357 return;
358 diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
359 index 5b7038f..deb6193 100644
360 --- a/arch/powerpc/mm/slb.c
361 +++ b/arch/powerpc/mm/slb.c
362 @@ -240,14 +240,22 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
363 static inline void patch_slb_encoding(unsigned int *insn_addr,
364 unsigned int immed)
365 {
366 - /* Assume the instruction had a "0" immediate value, just
367 - * "or" in the new value
368 - */
369 - *insn_addr |= immed;
370 + *insn_addr = (*insn_addr & 0xffff0000) | immed;
371 flush_icache_range((unsigned long)insn_addr, 4+
372 (unsigned long)insn_addr);
373 }
374
375 +void slb_set_size(u16 size)
376 +{
377 + extern unsigned int *slb_compare_rr_to_size;
378 +
379 + if (mmu_slb_size == size)
380 + return;
381 +
382 + mmu_slb_size = size;
383 + patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
384 +}
385 +
386 void slb_initialize(void)
387 {
388 unsigned long linear_llp, vmalloc_llp, io_llp;
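
The patch_slb_encoding() change matters because slb_set_size() can now repatch the same instruction more than once at runtime: OR-ing the immediate into the instruction word is only correct while the 16-bit field is still zero, so a repatchable version must mask the field out first. A stand-alone demonstration (the addi-style opcode is used purely for illustration):

#include <assert.h>
#include <stdint.h>

static uint32_t patch_or(uint32_t insn, uint16_t imm)
{
    return insn | imm;                      /* old, write-once behaviour */
}

static uint32_t patch_mask(uint32_t insn, uint16_t imm)
{
    return (insn & 0xffff0000u) | imm;      /* new, idempotent behaviour */
}

int main(void)
{
    uint32_t insn = 0x38000000u;            /* "li rD,0": immediate field 0 */

    insn = patch_or(insn, 64);              /* first patch works... */
    assert((insn & 0xffffu) == 64);
    insn = patch_or(insn, 32);              /* ...second one ORs to 96 */
    assert((insn & 0xffffu) == 96);

    insn = patch_mask(insn, 32);            /* masking gives the right value */
    assert((insn & 0xffffu) == 32);
    return 0;
}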
389 diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
390 index b6f1b13..2e2bbe1 100644
391 --- a/arch/powerpc/platforms/pseries/reconfig.c
392 +++ b/arch/powerpc/platforms/pseries/reconfig.c
393 @@ -20,6 +20,7 @@
394 #include <asm/machdep.h>
395 #include <asm/uaccess.h>
396 #include <asm/pSeries_reconfig.h>
397 +#include <asm/mmu.h>
398
399
400
401 @@ -439,9 +440,15 @@ static int do_update_property(char *buf, size_t bufsize)
402 if (!newprop)
403 return -ENOMEM;
404
405 + if (!strcmp(name, "slb-size") || !strcmp(name, "ibm,slb-size"))
406 + slb_set_size(*(int *)value);
407 +
408 oldprop = of_find_property(np, name,NULL);
409 - if (!oldprop)
410 + if (!oldprop) {
411 + if (strlen(name))
412 + return prom_add_property(np, newprop);
413 return -ENODEV;
414 + }
415
416 rc = prom_update_property(np, newprop, oldprop);
417 if (rc)
418 diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
419 index 8d75ea2..ca5f2e1 100644
420 --- a/arch/powerpc/platforms/pseries/setup.c
421 +++ b/arch/powerpc/platforms/pseries/setup.c
422 @@ -223,10 +223,6 @@ static void pseries_lpar_enable_pmcs(void)
423 set = 1UL << 63;
424 reset = 0;
425 plpar_hcall_norets(H_PERFMON, set, reset);
426 -
427 - /* instruct hypervisor to maintain PMCs */
428 - if (firmware_has_feature(FW_FEATURE_SPLPAR))
429 - get_lppaca()->pmcregs_in_use = 1;
430 }
431
432 static void __init pseries_discover_pic(void)
433 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
434 index eabdc1c..68d16d2 100644
435 --- a/arch/x86/include/asm/kvm_host.h
436 +++ b/arch/x86/include/asm/kvm_host.h
437 @@ -618,6 +618,7 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
438 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
439 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
440 u32 error_code);
441 +bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
442
443 int kvm_pic_set_irq(void *opaque, int irq, int level);
444
445 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
446 index c776826..e597ecc 100644
447 --- a/arch/x86/include/asm/processor.h
448 +++ b/arch/x86/include/asm/processor.h
449 @@ -403,7 +403,17 @@ extern unsigned long kernel_eflags;
450 extern asmlinkage void ignore_sysret(void);
451 #else /* X86_64 */
452 #ifdef CONFIG_CC_STACKPROTECTOR
453 -DECLARE_PER_CPU(unsigned long, stack_canary);
454 +/*
455 + * Make sure stack canary segment base is cached-aligned:
456 + * "For Intel Atom processors, avoid non zero segment base address
457 + * that is not aligned to cache line boundary at all cost."
458 + * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
459 + */
460 +struct stack_canary {
461 + char __pad[20]; /* canary at %gs:20 */
462 + unsigned long canary;
463 +};
464 +DECLARE_PER_CPU(struct stack_canary, stack_canary) ____cacheline_aligned;
465 #endif
466 #endif /* X86_64 */
467
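
The 20-byte pad is the load-bearing part: i386 GCC emits stack-protector checks against the fixed address %gs:20, so the canary must stay at offset 20 within whatever the %gs base points at, while wrapping it in a cache-line-aligned struct satisfies the Atom segment-base rule quoted in the comment. A portable layout check (uint32_t stands in for the 32-bit kernel's 4-byte unsigned long):

#include <stddef.h>
#include <stdint.h>

struct stack_canary {
    char pad[20];       /* filler so the canary lands at offset 20 */
    uint32_t canary;    /* 'unsigned long' in the 32-bit kernel */
};

_Static_assert(offsetof(struct stack_canary, canary) == 20,
               "i386 GCC expects the canary at %gs:20");

int main(void)
{
    return 0;
}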
468 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
469 index c2d742c..decad97 100644
470 --- a/arch/x86/include/asm/stackprotector.h
471 +++ b/arch/x86/include/asm/stackprotector.h
472 @@ -78,14 +78,14 @@ static __always_inline void boot_init_stack_canary(void)
473 #ifdef CONFIG_X86_64
474 percpu_write(irq_stack_union.stack_canary, canary);
475 #else
476 - percpu_write(stack_canary, canary);
477 + percpu_write(stack_canary.canary, canary);
478 #endif
479 }
480
481 static inline void setup_stack_canary_segment(int cpu)
482 {
483 #ifdef CONFIG_X86_32
484 - unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20;
485 + unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu);
486 struct desc_struct *gdt_table = get_cpu_gdt_table(cpu);
487 struct desc_struct desc;
488
489 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
490 index 643c59b..5bd119b 100644
491 --- a/arch/x86/include/asm/system.h
492 +++ b/arch/x86/include/asm/system.h
493 @@ -31,7 +31,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
494 "movl %P[task_canary](%[next]), %%ebx\n\t" \
495 "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
496 #define __switch_canary_oparam \
497 - , [stack_canary] "=m" (per_cpu_var(stack_canary))
498 + , [stack_canary] "=m" (per_cpu_var(stack_canary.canary))
499 #define __switch_canary_iparam \
500 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
501 #else /* CC_STACKPROTECTOR */
502 diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
503 index 6c99f50..4607241 100644
504 --- a/arch/x86/kernel/amd_iommu.c
505 +++ b/arch/x86/kernel/amd_iommu.c
506 @@ -485,8 +485,6 @@ void amd_iommu_flush_all_devices(void)
507 int i;
508
509 for (i = 0; i <= amd_iommu_last_bdf; ++i) {
510 - if (amd_iommu_pd_table[i] == NULL)
511 - continue;
512
513 iommu = amd_iommu_rlookup_table[i];
514 if (!iommu)
515 diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
516 index 8952a58..89174f8 100644
517 --- a/arch/x86/kernel/apic/es7000_32.c
518 +++ b/arch/x86/kernel/apic/es7000_32.c
519 @@ -167,7 +167,7 @@ static int es7000_apic_is_cluster(void)
520 {
521 /* MPENTIUMIII */
522 if (boot_cpu_data.x86 == 6 &&
523 - (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11))
524 + (boot_cpu_data.x86_model >= 7 && boot_cpu_data.x86_model <= 11))
525 return 1;
526
527 return 0;
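
The one-character es7000 fix deserves spelling out: a range test written with "||" is true for every input, so the old code treated every Pentium III model as cluster-capable. A stand-alone illustration:

#include <assert.h>

static int in_range_broken(int m) { return m >= 7 || m <= 11; }
static int in_range_fixed(int m)  { return m >= 7 && m <= 11; }

int main(void)
{
    assert(in_range_broken(42));    /* matches models it should not */
    assert(!in_range_fixed(42));
    assert(in_range_fixed(9));
    return 0;
}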
528 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
529 index 5ce60a8..e338b5c 100644
530 --- a/arch/x86/kernel/cpu/common.c
531 +++ b/arch/x86/kernel/cpu/common.c
532 @@ -1043,7 +1043,7 @@ DEFINE_PER_CPU(struct orig_ist, orig_ist);
533 #else /* CONFIG_X86_64 */
534
535 #ifdef CONFIG_CC_STACKPROTECTOR
536 -DEFINE_PER_CPU(unsigned long, stack_canary);
537 +DEFINE_PER_CPU(struct stack_canary, stack_canary) ____cacheline_aligned;
538 #endif
539
540 /* Make sure %fs and %gs are initialized properly in idle threads */
541 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
542 index cc827ac..7ffec6b 100644
543 --- a/arch/x86/kernel/head_32.S
544 +++ b/arch/x86/kernel/head_32.S
545 @@ -439,7 +439,6 @@ is386: movl $2,%ecx # set MP
546 jne 1f
547 movl $per_cpu__gdt_page,%eax
548 movl $per_cpu__stack_canary,%ecx
549 - subl $20, %ecx
550 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
551 shrl $16, %ecx
552 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
553 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
554 index c664d51..63b0ec8 100644
555 --- a/arch/x86/kernel/kvm.c
556 +++ b/arch/x86/kernel/kvm.c
557 @@ -34,7 +34,6 @@
558 struct kvm_para_state {
559 u8 mmu_queue[MMU_QUEUE_SIZE];
560 int mmu_queue_len;
561 - enum paravirt_lazy_mode mode;
562 };
563
564 static DEFINE_PER_CPU(struct kvm_para_state, para_state);
565 @@ -77,7 +76,7 @@ static void kvm_deferred_mmu_op(void *buffer, int len)
566 {
567 struct kvm_para_state *state = kvm_para_state();
568
569 - if (state->mode != PARAVIRT_LAZY_MMU) {
570 + if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) {
571 kvm_mmu_op(buffer, len);
572 return;
573 }
574 @@ -185,10 +184,7 @@ static void kvm_release_pt(unsigned long pfn)
575
576 static void kvm_enter_lazy_mmu(void)
577 {
578 - struct kvm_para_state *state = kvm_para_state();
579 -
580 paravirt_enter_lazy_mmu();
581 - state->mode = paravirt_get_lazy_mode();
582 }
583
584 static void kvm_leave_lazy_mmu(void)
585 @@ -197,7 +193,6 @@ static void kvm_leave_lazy_mmu(void)
586
587 mmu_queue_flush(state);
588 paravirt_leave_lazy_mmu();
589 - state->mode = paravirt_get_lazy_mode();
590 }
591
592 static void __init paravirt_ops_setup(void)
593 diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
594 index 223af43..e5efcdc 100644
595 --- a/arch/x86/kernel/kvmclock.c
596 +++ b/arch/x86/kernel/kvmclock.c
597 @@ -50,8 +50,8 @@ static unsigned long kvm_get_wallclock(void)
598 struct timespec ts;
599 int low, high;
600
601 - low = (int)__pa(&wall_clock);
602 - high = ((u64)__pa(&wall_clock) >> 32);
603 + low = (int)__pa_symbol(&wall_clock);
604 + high = ((u64)__pa_symbol(&wall_clock) >> 32);
605 native_write_msr(MSR_KVM_WALL_CLOCK, low, high);
606
607 vcpu_time = &get_cpu_var(hv_clock);
608 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
609 index ae99d83..bb6277d 100644
610 --- a/arch/x86/kvm/lapic.c
611 +++ b/arch/x86/kvm/lapic.c
612 @@ -573,6 +573,15 @@ static void start_apic_timer(struct kvm_lapic *apic)
613
614 if (!apic->lapic_timer.period)
615 return;
616 + /*
617 + * Do not allow the guest to program periodic timers with small
618 + * interval, since the hrtimers are not throttled by the host
619 + * scheduler.
620 + */
621 + if (apic_lvtt_period(apic)) {
622 + if (apic->lapic_timer.period < NSEC_PER_MSEC/2)
623 + apic->lapic_timer.period = NSEC_PER_MSEC/2;
624 + }
625
626 hrtimer_start(&apic->lapic_timer.timer,
627 ktime_add_ns(now, apic->lapic_timer.period),
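
The lapic hunk is a plain lower bound on the guest-programmed period: host hrtimers fire exactly as often as the guest asks, so a tiny period would let one guest flood the host with timer callbacks. The same logic as a self-contained check, with the constant from the hunk:

#include <assert.h>
#include <stdint.h>

#define NSEC_PER_MSEC 1000000ULL

static uint64_t clamp_apic_period(uint64_t period_ns)
{
    if (period_ns < NSEC_PER_MSEC / 2)      /* floor of 500 microseconds */
        period_ns = NSEC_PER_MSEC / 2;
    return period_ns;
}

int main(void)
{
    assert(clamp_apic_period(1000) == 500000);      /* 1 us -> 500 us */
    assert(clamp_apic_period(2000000) == 2000000);  /* 2 ms unchanged */
    return 0;
}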
628 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
629 index 0ef5bb2..a5cdb35 100644
630 --- a/arch/x86/kvm/mmu.c
631 +++ b/arch/x86/kvm/mmu.c
632 @@ -2633,7 +2633,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
633
634 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
635 {
636 - while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
637 + while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES &&
638 + !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
639 struct kvm_mmu_page *sp;
640
641 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
642 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
643 index 29f9129..b5fa966 100644
644 --- a/arch/x86/kvm/vmx.c
645 +++ b/arch/x86/kvm/vmx.c
646 @@ -1217,12 +1217,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
647 if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
648 /* CR3 accesses and invlpg don't need to cause VM Exits when EPT
649 enabled */
650 - min &= ~(CPU_BASED_CR3_LOAD_EXITING |
651 - CPU_BASED_CR3_STORE_EXITING |
652 - CPU_BASED_INVLPG_EXITING);
653 - if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
654 - &_cpu_based_exec_control) < 0)
655 - return -EIO;
656 + _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
657 + CPU_BASED_CR3_STORE_EXITING |
658 + CPU_BASED_INVLPG_EXITING);
659 rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
660 vmx_capability.ept, vmx_capability.vpid);
661 }
662 @@ -2841,6 +2838,8 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
663 unsigned long val;
664 int dr, reg;
665
666 + if (!kvm_require_cpl(vcpu, 0))
667 + return 1;
668 dr = vmcs_readl(GUEST_DR7);
669 if (dr & DR7_GD) {
670 /*
671 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
672 index 3d45290..3d36045 100644
673 --- a/arch/x86/kvm/x86.c
674 +++ b/arch/x86/kvm/x86.c
675 @@ -215,6 +215,19 @@ static void __queue_exception(struct kvm_vcpu *vcpu)
676 }
677
678 /*
679 + * Checks if cpl <= required_cpl; if true, return true. Otherwise queue
680 + * a #GP and return false.
681 + */
682 +bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
683 +{
684 + if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
685 + return true;
686 + kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
687 + return false;
688 +}
689 +EXPORT_SYMBOL_GPL(kvm_require_cpl);
690 +
691 +/*
692 * Load the pae pdptrs. Return true is they are all valid.
693 */
694 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
695 @@ -2898,6 +2911,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
696 a3 &= 0xFFFFFFFF;
697 }
698
699 + if (kvm_x86_ops->get_cpl(vcpu) != 0) {
700 + ret = -KVM_EPERM;
701 + goto out;
702 + }
703 +
704 switch (nr) {
705 case KVM_HC_VAPIC_POLL_IRQ:
706 ret = 0;
707 @@ -2909,6 +2927,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
708 ret = -KVM_ENOSYS;
709 break;
710 }
711 +out:
712 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
713 ++vcpu->stat.hypercalls;
714 return r;
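
kvm_emulate_hypercall() now refuses hypercalls from guest user mode: only CPL 0 may issue them, so unprivileged guest code can no longer reach the hypercall handlers directly. The gate reduces to a sketch like this (KVM_EPERM redefined locally as a stand-in for the kvm_para.h error code):

#include <assert.h>

#define KVM_EPERM 1     /* stand-in, local to this sketch */

static int emulate_hypercall(int cpl)
{
    if (cpl != 0)
        return -KVM_EPERM;  /* ring-3 guest code gets an error back */
    return 0;               /* ring 0 proceeds to the dispatch switch */
}

int main(void)
{
    assert(emulate_hypercall(3) == -KVM_EPERM);
    assert(emulate_hypercall(0) == 0);
    return 0;
}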
715 diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
716 index 616de46..ef4dfca 100644
717 --- a/arch/x86/kvm/x86_emulate.c
718 +++ b/arch/x86/kvm/x86_emulate.c
719 @@ -60,6 +60,7 @@
720 #define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
721 #define SrcOne (7<<4) /* Implied '1' */
722 #define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
723 +#define SrcImmU (9<<4) /* Immediate operand, unsigned */
724 #define SrcMask (0xf<<4)
725 /* Generic ModRM decode. */
726 #define ModRM (1<<8)
727 @@ -195,7 +196,7 @@ static u32 opcode_table[256] = {
728 ByteOp | SrcImmUByte, SrcImmUByte,
729 /* 0xE8 - 0xEF */
730 SrcImm | Stack, SrcImm | ImplicitOps,
731 - SrcImm | Src2Imm16, SrcImmByte | ImplicitOps,
732 + SrcImmU | Src2Imm16, SrcImmByte | ImplicitOps,
733 SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
734 SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
735 /* 0xF0 - 0xF7 */
736 @@ -1027,6 +1028,7 @@ done_prefixes:
737 c->src.type = OP_MEM;
738 break;
739 case SrcImm:
740 + case SrcImmU:
741 c->src.type = OP_IMM;
742 c->src.ptr = (unsigned long *)c->eip;
743 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
744 @@ -1044,6 +1046,19 @@ done_prefixes:
745 c->src.val = insn_fetch(s32, 4, c->eip);
746 break;
747 }
748 + if ((c->d & SrcMask) == SrcImmU) {
749 + switch (c->src.bytes) {
750 + case 1:
751 + c->src.val &= 0xff;
752 + break;
753 + case 2:
754 + c->src.val &= 0xffff;
755 + break;
756 + case 4:
757 + c->src.val &= 0xffffffff;
758 + break;
759 + }
760 + }
761 break;
762 case SrcImmByte:
763 case SrcImmUByte:
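
The new SrcImmU handling exists because the decoder fetches immediates through sign-extending types, so a 16-bit 0xFFFF would otherwise arrive as an all-ones 64-bit value; masking by operand size zero-extends it again. Roughly:

#include <assert.h>
#include <stdint.h>

static uint64_t zero_extend(int64_t fetched, int bytes)
{
    switch (bytes) {
    case 1: return (uint64_t)fetched & 0xff;
    case 2: return (uint64_t)fetched & 0xffff;
    case 4: return (uint64_t)fetched & 0xffffffff;
    }
    return (uint64_t)fetched;
}

int main(void)
{
    int64_t fetched = (int16_t)0xffff;          /* sign-extended fetch: -1 */
    assert(zero_extend(fetched, 2) == 0xffff);  /* unsigned value restored */
    return 0;
}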
764 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
765 index 7e600c1..e245775 100644
766 --- a/arch/x86/mm/pageattr.c
767 +++ b/arch/x86/mm/pageattr.c
768 @@ -822,6 +822,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
769 {
770 struct cpa_data cpa;
771 int ret, cache, checkalias;
772 + unsigned long baddr = 0;
773
774 /*
775 * Check, if we are requested to change a not supported
776 @@ -853,6 +854,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
777 */
778 WARN_ON_ONCE(1);
779 }
780 + /*
781 + * Save address for cache flush. *addr is modified in the call
782 + * to __change_page_attr_set_clr() below.
783 + */
784 + baddr = *addr;
785 }
786
787 /* Must avoid aliasing mappings in the highmem code */
788 @@ -900,7 +906,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
789 cpa_flush_array(addr, numpages, cache,
790 cpa.flags, pages);
791 } else
792 - cpa_flush_range(*addr, numpages, cache);
793 + cpa_flush_range(baddr, numpages, cache);
794 } else
795 cpa_flush_all(cache);
796
797 diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
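
The pageattr fix is a classic in/out-parameter pitfall: __change_page_attr_set_clr() advances *addr as it walks the range, so the later cache flush has to use a copy saved before the call, not the moved cursor. The shape of the bug:

#include <assert.h>

static void walk_range(unsigned long *addr, int numpages)
{
    *addr += numpages * 4096UL;     /* callee advances the cursor */
}

int main(void)
{
    unsigned long addr = 0x100000, baddr = addr;    /* save first */

    walk_range(&addr, 4);
    assert(addr == 0x104000);       /* cursor now points past the range */
    assert(baddr == 0x100000);      /* the flush must start here */
    return 0;
}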
798 index d3aa2aa..b78c9c3 100644
799 --- a/block/blk-sysfs.c
800 +++ b/block/blk-sysfs.c
801 @@ -40,7 +40,12 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
802 {
803 struct request_list *rl = &q->rq;
804 unsigned long nr;
805 - int ret = queue_var_store(&nr, page, count);
806 + int ret;
807 +
808 + if (!q->request_fn)
809 + return -EINVAL;
810 +
811 + ret = queue_var_store(&nr, page, count);
812 if (nr < BLKDEV_MIN_RQ)
813 nr = BLKDEV_MIN_RQ;
814
815 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
816 index 072ba5e..e71149b 100644
817 --- a/drivers/ata/libata-core.c
818 +++ b/drivers/ata/libata-core.c
819 @@ -709,7 +709,13 @@ u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
820 head = tf->device & 0xf;
821 sect = tf->lbal;
822
823 - block = (cyl * dev->heads + head) * dev->sectors + sect;
824 + if (!sect) {
825 + ata_dev_printk(dev, KERN_WARNING, "device reported "
826 + "invalid CHS sector 0\n");
827 + sect = 1; /* oh well */
828 + }
829 +
830 + block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
831 }
832
833 return block;
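
The libata hunk fixes an off-by-one in CHS addressing: sector numbers are 1-based, so converting to a 0-based block number needs the "- 1", and a device reporting sector 0 is lying (hence the warning and the sect = 1 fallback). A small worked check with a toy 16-head, 63-sectors-per-track geometry:

#include <assert.h>
#include <stdint.h>

static uint64_t chs_to_block(uint32_t cyl, uint32_t head, uint32_t sect,
                             uint32_t heads, uint32_t sectors)
{
    return ((uint64_t)cyl * heads + head) * sectors + sect - 1;
}

int main(void)
{
    /* C/H/S = 0/0/1 is the first sector on the medium: block 0 */
    assert(chs_to_block(0, 0, 1, 16, 63) == 0);
    /* last sector of the first track */
    assert(chs_to_block(0, 0, 63, 16, 63) == 62);
    return 0;
}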
834 diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
835 index c585577..dee0f1f 100644
836 --- a/drivers/char/agp/intel-agp.c
837 +++ b/drivers/char/agp/intel-agp.c
838 @@ -2313,15 +2313,6 @@ static int agp_intel_resume(struct pci_dev *pdev)
839 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
840 int ret_val;
841
842 - pci_restore_state(pdev);
843 -
844 - /* We should restore our graphics device's config space,
845 - * as host bridge (00:00) resumes before graphics device (02:00),
846 - * then our access to its pci space can work right.
847 - */
848 - if (intel_private.pcidev)
849 - pci_restore_state(intel_private.pcidev);
850 -
851 if (bridge->driver == &intel_generic_driver)
852 intel_configure();
853 else if (bridge->driver == &intel_850_driver)
854 diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
855 index aec1931..0b73e4e 100644
856 --- a/drivers/char/tpm/tpm_tis.c
857 +++ b/drivers/char/tpm/tpm_tis.c
858 @@ -450,6 +450,12 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
859 goto out_err;
860 }
861
862 + /* Default timeouts */
863 + chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
864 + chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
865 + chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
866 + chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
867 +
868 if (request_locality(chip, 0) != 0) {
869 rc = -ENODEV;
870 goto out_err;
871 @@ -457,12 +463,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
872
873 vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
874
875 - /* Default timeouts */
876 - chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
877 - chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
878 - chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
879 - chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
880 -
881 dev_info(dev,
882 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
883 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
884 diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
885 index 6e186b1..652bd33 100644
886 --- a/drivers/md/dm-log-userspace-base.c
887 +++ b/drivers/md/dm-log-userspace-base.c
888 @@ -582,7 +582,7 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
889 break;
890 case STATUSTYPE_TABLE:
891 sz = 0;
892 - table_args = strstr(lc->usr_argv_str, " ");
893 + table_args = strchr(lc->usr_argv_str, ' ');
894 BUG_ON(!table_args); /* There will always be a ' ' */
895 table_args++;
896
897 diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
898 index b9ceddd..4ff665c 100644
899 --- a/drivers/net/mlx4/eq.c
900 +++ b/drivers/net/mlx4/eq.c
901 @@ -526,48 +526,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
902 iounmap(priv->clr_base);
903 }
904
905 -int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
906 -{
907 - struct mlx4_priv *priv = mlx4_priv(dev);
908 - int ret;
909 -
910 - /*
911 - * We assume that mapping one page is enough for the whole EQ
912 - * context table. This is fine with all current HCAs, because
913 - * we only use 32 EQs and each EQ uses 64 bytes of context
914 - * memory, or 1 KB total.
915 - */
916 - priv->eq_table.icm_virt = icm_virt;
917 - priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
918 - if (!priv->eq_table.icm_page)
919 - return -ENOMEM;
920 - priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
921 - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
922 - if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
923 - __free_page(priv->eq_table.icm_page);
924 - return -ENOMEM;
925 - }
926 -
927 - ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
928 - if (ret) {
929 - pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
930 - PCI_DMA_BIDIRECTIONAL);
931 - __free_page(priv->eq_table.icm_page);
932 - }
933 -
934 - return ret;
935 -}
936 -
937 -void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
938 -{
939 - struct mlx4_priv *priv = mlx4_priv(dev);
940 -
941 - mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
942 - pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
943 - PCI_DMA_BIDIRECTIONAL);
944 - __free_page(priv->eq_table.icm_page);
945 -}
946 -
947 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
948 {
949 struct mlx4_priv *priv = mlx4_priv(dev);
950 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
951 index dac621b..8e8b79f 100644
952 --- a/drivers/net/mlx4/main.c
953 +++ b/drivers/net/mlx4/main.c
954 @@ -525,7 +525,10 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
955 goto err_unmap_aux;
956 }
957
958 - err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
959 + err = mlx4_init_icm_table(dev, &priv->eq_table.table,
960 + init_hca->eqc_base, dev_cap->eqc_entry_sz,
961 + dev->caps.num_eqs, dev->caps.num_eqs,
962 + 0, 0);
963 if (err) {
964 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
965 goto err_unmap_cmpt;
966 @@ -668,7 +671,7 @@ err_unmap_mtt:
967 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
968
969 err_unmap_eq:
970 - mlx4_unmap_eq_icm(dev);
971 + mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
972
973 err_unmap_cmpt:
974 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
975 @@ -698,11 +701,11 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
976 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
977 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
978 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
979 + mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
980 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
981 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
982 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
983 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
984 - mlx4_unmap_eq_icm(dev);
985
986 mlx4_UNMAP_ICM_AUX(dev);
987 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
988 diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
989 index 5bd79c2..bc72d6e 100644
990 --- a/drivers/net/mlx4/mlx4.h
991 +++ b/drivers/net/mlx4/mlx4.h
992 @@ -205,9 +205,7 @@ struct mlx4_eq_table {
993 void __iomem **uar_map;
994 u32 clr_mask;
995 struct mlx4_eq *eq;
996 - u64 icm_virt;
997 - struct page *icm_page;
998 - dma_addr_t icm_dma;
999 + struct mlx4_icm_table table;
1000 struct mlx4_icm_table cmpt_table;
1001 int have_irq;
1002 u8 inta_pin;
1003 @@ -373,9 +371,6 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
1004 struct mlx4_dev_cap *dev_cap,
1005 struct mlx4_init_hca_param *init_hca);
1006
1007 -int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt);
1008 -void mlx4_unmap_eq_icm(struct mlx4_dev *dev);
1009 -
1010 int mlx4_cmd_init(struct mlx4_dev *dev);
1011 void mlx4_cmd_cleanup(struct mlx4_dev *dev);
1012 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
1013 diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
1014 index 029c1bc..ba6d225 100644
1015 --- a/drivers/net/wireless/ath/ath5k/base.c
1016 +++ b/drivers/net/wireless/ath/ath5k/base.c
1017 @@ -2676,7 +2676,7 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
1018 sc->curchan = chan;
1019 sc->curband = &sc->sbands[chan->band];
1020 }
1021 - ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, true);
1022 + ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL);
1023 if (ret) {
1024 ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
1025 goto err;
1026 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
1027 index 06b9656..1073137 100644
1028 --- a/drivers/pci/quirks.c
1029 +++ b/drivers/pci/quirks.c
1030 @@ -1201,6 +1201,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
1031 switch(dev->subsystem_device) {
1032 case 0x00b8: /* Compaq Evo D510 CMT */
1033 case 0x00b9: /* Compaq Evo D510 SFF */
1034 + case 0x00ba: /* Compaq Evo D510 USDT */
1035 /* Motherboard doesn't have Host bridge
1036 * subvendor/subdevice IDs and on-board VGA
1037 * controller is disabled if an AGP card is
1038 @@ -2382,8 +2383,10 @@ static void __devinit nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
1039 }
1040
1041 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
1042 +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
1043
1044 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
1045 +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
1046
1047 static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev)
1048 {
1049 @@ -2492,6 +2495,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov);
1050 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
1051 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
1052 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
1053 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov);
1054
1055 #endif /* CONFIG_PCI_IOV */
1056
1057 diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c
1058 index 18066d5..af0afa1 100644
1059 --- a/drivers/ps3/ps3stor_lib.c
1060 +++ b/drivers/ps3/ps3stor_lib.c
1061 @@ -23,6 +23,65 @@
1062 #include <asm/lv1call.h>
1063 #include <asm/ps3stor.h>
1064
1065 +/*
1066 + * A workaround for flash memory I/O errors when the internal hard disk
1067 + * has not been formatted for OtherOS use. Delay disk close until flash
1068 + * memory is closed.
1069 + */
1070 +
1071 +static struct ps3_flash_workaround {
1072 + int flash_open;
1073 + int disk_open;
1074 + struct ps3_system_bus_device *disk_sbd;
1075 +} ps3_flash_workaround;
1076 +
1077 +static int ps3stor_open_hv_device(struct ps3_system_bus_device *sbd)
1078 +{
1079 + int error = ps3_open_hv_device(sbd);
1080 +
1081 + if (error)
1082 + return error;
1083 +
1084 + if (sbd->match_id == PS3_MATCH_ID_STOR_FLASH)
1085 + ps3_flash_workaround.flash_open = 1;
1086 +
1087 + if (sbd->match_id == PS3_MATCH_ID_STOR_DISK)
1088 + ps3_flash_workaround.disk_open = 1;
1089 +
1090 + return 0;
1091 +}
1092 +
1093 +static int ps3stor_close_hv_device(struct ps3_system_bus_device *sbd)
1094 +{
1095 + int error;
1096 +
1097 + if (sbd->match_id == PS3_MATCH_ID_STOR_DISK
1098 + && ps3_flash_workaround.disk_open
1099 + && ps3_flash_workaround.flash_open) {
1100 + ps3_flash_workaround.disk_sbd = sbd;
1101 + return 0;
1102 + }
1103 +
1104 + error = ps3_close_hv_device(sbd);
1105 +
1106 + if (error)
1107 + return error;
1108 +
1109 + if (sbd->match_id == PS3_MATCH_ID_STOR_DISK)
1110 + ps3_flash_workaround.disk_open = 0;
1111 +
1112 + if (sbd->match_id == PS3_MATCH_ID_STOR_FLASH) {
1113 + ps3_flash_workaround.flash_open = 0;
1114 +
1115 + if (ps3_flash_workaround.disk_sbd) {
1116 + ps3_close_hv_device(ps3_flash_workaround.disk_sbd);
1117 + ps3_flash_workaround.disk_open = 0;
1118 + ps3_flash_workaround.disk_sbd = NULL;
1119 + }
1120 + }
1121 +
1122 + return 0;
1123 +}
1124
1125 static int ps3stor_probe_access(struct ps3_storage_device *dev)
1126 {
1127 @@ -90,7 +149,7 @@ int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler)
1128 int error, res, alignment;
1129 enum ps3_dma_page_size page_size;
1130
1131 - error = ps3_open_hv_device(&dev->sbd);
1132 + error = ps3stor_open_hv_device(&dev->sbd);
1133 if (error) {
1134 dev_err(&dev->sbd.core,
1135 "%s:%u: ps3_open_hv_device failed %d\n", __func__,
1136 @@ -166,7 +225,7 @@ fail_free_irq:
1137 fail_sb_event_receive_port_destroy:
1138 ps3_sb_event_receive_port_destroy(&dev->sbd, dev->irq);
1139 fail_close_device:
1140 - ps3_close_hv_device(&dev->sbd);
1141 + ps3stor_close_hv_device(&dev->sbd);
1142 fail:
1143 return error;
1144 }
1145 @@ -193,7 +252,7 @@ void ps3stor_teardown(struct ps3_storage_device *dev)
1146 "%s:%u: destroy event receive port failed %d\n",
1147 __func__, __LINE__, error);
1148
1149 - error = ps3_close_hv_device(&dev->sbd);
1150 + error = ps3stor_close_hv_device(&dev->sbd);
1151 if (error)
1152 dev_err(&dev->sbd.core,
1153 "%s:%u: ps3_close_hv_device failed %d\n", __func__,
1154 diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
1155 index 2742ae8..9ad38e8 100644
1156 --- a/drivers/scsi/libsrp.c
1157 +++ b/drivers/scsi/libsrp.c
1158 @@ -124,6 +124,7 @@ static void srp_ring_free(struct device *dev, struct srp_buf **ring, size_t max,
1159 dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
1160 kfree(ring[i]);
1161 }
1162 + kfree(ring);
1163 }
1164
1165 int srp_target_alloc(struct srp_target *target, struct device *dev,
1166 diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
1167 index 35a1386..2e4bc3d 100644
1168 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c
1169 +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
1170 @@ -94,7 +94,7 @@ _base_fault_reset_work(struct work_struct *work)
1171 int rc;
1172
1173 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
1174 - if (ioc->ioc_reset_in_progress)
1175 + if (ioc->shost_recovery)
1176 goto rearm_timer;
1177 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1178
1179 @@ -1542,6 +1542,8 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
1180 (ioc->bios_pg3.BiosVersion & 0x0000FF00) >> 8,
1181 ioc->bios_pg3.BiosVersion & 0x000000FF);
1182
1183 + _base_display_dell_branding(ioc);
1184 +
1185 printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
1186
1187 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
1188 @@ -1554,8 +1556,6 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
1189 i++;
1190 }
1191
1192 - _base_display_dell_branding(ioc);
1193 -
1194 i = 0;
1195 printk("), ");
1196 printk("Capabilities=(");
1197 @@ -1627,6 +1627,9 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
1198 u32 iounit_pg1_flags;
1199
1200 mpt2sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
1201 + if (ioc->ir_firmware)
1202 + mpt2sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
1203 + &ioc->manu_pg10);
1204 mpt2sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
1205 mpt2sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
1206 mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
1207 @@ -3501,20 +3504,13 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
1208 __func__));
1209
1210 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
1211 - if (ioc->ioc_reset_in_progress) {
1212 + if (ioc->shost_recovery) {
1213 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1214 printk(MPT2SAS_ERR_FMT "%s: busy\n",
1215 ioc->name, __func__);
1216 return -EBUSY;
1217 }
1218 - ioc->ioc_reset_in_progress = 1;
1219 ioc->shost_recovery = 1;
1220 - if (ioc->shost->shost_state == SHOST_RUNNING) {
1221 - /* set back to SHOST_RUNNING in mpt2sas_scsih.c */
1222 - scsi_host_set_state(ioc->shost, SHOST_RECOVERY);
1223 - printk(MPT2SAS_INFO_FMT "putting controller into "
1224 - "SHOST_RECOVERY\n", ioc->name);
1225 - }
1226 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1227
1228 _base_reset_handler(ioc, MPT2_IOC_PRE_RESET);
1229 @@ -3534,7 +3530,10 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
1230 ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
1231
1232 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
1233 - ioc->ioc_reset_in_progress = 0;
1234 + ioc->shost_recovery = 0;
1235 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1236 +
1237 + if (!r)
1238 + _base_reset_handler(ioc, MPT2_IOC_RUNNING);
1239 return r;
1240 }
1241 diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
1242 index acdcff1..22f84d3 100644
1243 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h
1244 +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
1245 @@ -119,6 +119,7 @@
1246 #define MPT2_IOC_PRE_RESET 1 /* prior to host reset */
1247 #define MPT2_IOC_AFTER_RESET 2 /* just after host reset */
1248 #define MPT2_IOC_DONE_RESET 3 /* links re-initialized */
1249 +#define MPT2_IOC_RUNNING 4 /* shost running */
1250
1251 /*
1252 * logging format
1253 @@ -196,6 +197,38 @@ struct MPT2SAS_TARGET {
1254 * @block: device is in SDEV_BLOCK state
1255 * @tlr_snoop_check: flag used in determining whether to disable TLR
1256 */
1257 +
1258 +/* OEM Identifiers */
1259 +#define MFG10_OEM_ID_INVALID (0x00000000)
1260 +#define MFG10_OEM_ID_DELL (0x00000001)
1261 +#define MFG10_OEM_ID_FSC (0x00000002)
1262 +#define MFG10_OEM_ID_SUN (0x00000003)
1263 +#define MFG10_OEM_ID_IBM (0x00000004)
1264 +
1265 +/* GENERIC Flags 0*/
1266 +#define MFG10_GF0_OCE_DISABLED (0x00000001)
1267 +#define MFG10_GF0_R1E_DRIVE_COUNT (0x00000002)
1268 +#define MFG10_GF0_R10_DISPLAY (0x00000004)
1269 +#define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008)
1270 +#define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010)
1271 +
1272 +/* OEM Specific Flags will come from OEM specific header files */
1273 +typedef struct _MPI2_CONFIG_PAGE_MAN_10 {
1274 + MPI2_CONFIG_PAGE_HEADER Header; /* 00h */
1275 + U8 OEMIdentifier; /* 04h */
1276 + U8 Reserved1; /* 05h */
1277 + U16 Reserved2; /* 08h */
1278 + U32 Reserved3; /* 0Ch */
1279 + U32 GenericFlags0; /* 10h */
1280 + U32 GenericFlags1; /* 14h */
1281 + U32 Reserved4; /* 18h */
1282 + U32 OEMSpecificFlags0; /* 1Ch */
1283 + U32 OEMSpecificFlags1; /* 20h */
1284 + U32 Reserved5[18]; /* 24h-60h*/
1285 +} MPI2_CONFIG_PAGE_MAN_10,
1286 + MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_10,
1287 + Mpi2ManufacturingPage10_t, MPI2_POINTER pMpi2ManufacturingPage10_t;
1288 +
1289 struct MPT2SAS_DEVICE {
1290 struct MPT2SAS_TARGET *sas_target;
1291 unsigned int lun;
1292 @@ -431,7 +464,7 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
1293 * @fw_event_list: list of fw events
1294 * @aen_event_read_flag: event log was read
1295 * @broadcast_aen_busy: broadcast aen waiting to be serviced
1296 - * @ioc_reset_in_progress: host reset in progress
1297 + * @shost_recovery: host reset in progress
1298 * @ioc_reset_in_progress_lock:
1299 * @ioc_link_reset_in_progress: phy/hard reset in progress
1300 * @ignore_loginfos: ignore loginfos during task managment
1301 @@ -460,6 +493,7 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
1302 * @facts: static facts data
1303 * @pfacts: static port facts data
1304 * @manu_pg0: static manufacturing page 0
1305 + * @manu_pg10: static manufacturing page 10
1306 * @bios_pg2: static bios page 2
1307 * @bios_pg3: static bios page 3
1308 * @ioc_pg8: static ioc page 8
1309 @@ -544,7 +578,6 @@ struct MPT2SAS_ADAPTER {
1310 /* misc flags */
1311 int aen_event_read_flag;
1312 u8 broadcast_aen_busy;
1313 - u8 ioc_reset_in_progress;
1314 u8 shost_recovery;
1315 spinlock_t ioc_reset_in_progress_lock;
1316 u8 ioc_link_reset_in_progress;
1317 @@ -663,6 +696,7 @@ struct MPT2SAS_ADAPTER {
1318 dma_addr_t diag_buffer_dma[MPI2_DIAG_BUF_TYPE_COUNT];
1319 u8 diag_buffer_status[MPI2_DIAG_BUF_TYPE_COUNT];
1320 u32 unique_id[MPI2_DIAG_BUF_TYPE_COUNT];
1321 + Mpi2ManufacturingPage10_t manu_pg10;
1322 u32 product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23];
1323 u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT];
1324 };
1325 @@ -734,6 +768,8 @@ void mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 re
1326 int mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys);
1327 int mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
1328 Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page);
1329 +int mpt2sas_config_get_manufacturing_pg10(struct MPT2SAS_ADAPTER *ioc,
1330 + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage10_t *config_page);
1331 int mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1332 *mpi_reply, Mpi2BiosPage2_t *config_page);
1333 int mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1334 @@ -776,7 +812,6 @@ int mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
1335 u16 *volume_handle);
1336 int mpt2sas_config_get_volume_wwid(struct MPT2SAS_ADAPTER *ioc, u16 volume_handle,
1337 u64 *wwid);
1338 -
1339 /* ctl shared API */
1340 extern struct device_attribute *mpt2sas_host_attrs[];
1341 extern struct device_attribute *mpt2sas_dev_attrs[];
1342 @@ -802,5 +837,7 @@ void mpt2sas_transport_update_phy_link_change(struct MPT2SAS_ADAPTER *ioc, u16 h
1343 u16 attached_handle, u8 phy_number, u8 link_rate);
1344 extern struct sas_function_template mpt2sas_transport_functions;
1345 extern struct scsi_transport_template *mpt2sas_transport_template;
1346 +extern int scsi_internal_device_block(struct scsi_device *sdev);
1347 +extern int scsi_internal_device_unblock(struct scsi_device *sdev);
1348
1349 #endif /* MPT2SAS_BASE_H_INCLUDED */
1350 diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
1351 index 6ddee16..b9f4d0f 100644
1352 --- a/drivers/scsi/mpt2sas/mpt2sas_config.c
1353 +++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
1354 @@ -426,6 +426,67 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
1355 }
1356
1357 /**
1358 + * mpt2sas_config_get_manufacturing_pg10 - obtain manufacturing page 10
1359 + * @ioc: per adapter object
1360 + * @mpi_reply: reply mf payload returned from firmware
1361 + * @config_page: contents of the config page
1362 + * Context: sleep.
1363 + *
1364 + * Returns 0 for success, non-zero for failure.
1365 + */
1366 +int
1367 +mpt2sas_config_get_manufacturing_pg10(struct MPT2SAS_ADAPTER *ioc,
1368 + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage10_t *config_page)
1369 +{
1370 + Mpi2ConfigRequest_t mpi_request;
1371 + int r;
1372 + struct config_request mem;
1373 +
1374 + memset(config_page, 0, sizeof(Mpi2ManufacturingPage10_t));
1375 + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1376 + mpi_request.Function = MPI2_FUNCTION_CONFIG;
1377 + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1378 + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
1379 + mpi_request.Header.PageNumber = 10;
1380 + mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
1381 + mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1382 + r = _config_request(ioc, &mpi_request, mpi_reply,
1383 + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1384 + if (r)
1385 + goto out;
1386 +
1387 + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1388 + mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1389 + mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1390 + mpi_request.Header.PageType = mpi_reply->Header.PageType;
1391 + mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
1392 + mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
1393 + if (mem.config_page_sz > ioc->config_page_sz) {
1394 + r = _config_alloc_config_dma_memory(ioc, &mem);
1395 + if (r)
1396 + goto out;
1397 + } else {
1398 + mem.config_page_dma = ioc->config_page_dma;
1399 + mem.config_page = ioc->config_page;
1400 + }
1401 + ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1402 + MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1403 + mem.config_page_dma);
1404 + r = _config_request(ioc, &mpi_request, mpi_reply,
1405 + MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
1406 + if (!r)
1407 + memcpy(config_page, mem.config_page,
1408 + min_t(u16, mem.config_page_sz,
1409 + sizeof(Mpi2ManufacturingPage10_t)));
1410 +
1411 + if (mem.config_page_sz > ioc->config_page_sz)
1412 + _config_free_config_dma_memory(ioc, &mem);
1413 +
1414 + out:
1415 + return r;
1416 +}
1417 +
1418 +/**
1419 * mpt2sas_config_get_bios_pg2 - obtain bios page 2
1420 * @ioc: per adapter object
1421 * @mpi_reply: reply mf payload returned from firmware
1422 diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
1423 index 14e473d..c2a5101 100644
1424 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
1425 +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
1426 @@ -1963,7 +1963,6 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg)
1427 {
1428 enum block_state state;
1429 long ret = -EINVAL;
1430 - unsigned long flags;
1431
1432 state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING :
1433 BLOCKING;
1434 @@ -1989,13 +1988,8 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg)
1435 !ioc)
1436 return -ENODEV;
1437
1438 - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
1439 - if (ioc->shost_recovery) {
1440 - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
1441 - flags);
1442 + if (ioc->shost_recovery)
1443 return -EAGAIN;
1444 - }
1445 - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1446
1447 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) {
1448 uarg = arg;
1449 @@ -2098,7 +2092,6 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
1450 struct mpt2_ioctl_command karg;
1451 struct MPT2SAS_ADAPTER *ioc;
1452 enum block_state state;
1453 - unsigned long flags;
1454
1455 if (_IOC_SIZE(cmd) != sizeof(struct mpt2_ioctl_command32))
1456 return -EINVAL;
1457 @@ -2113,13 +2106,8 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
1458 if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc)
1459 return -ENODEV;
1460
1461 - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
1462 - if (ioc->shost_recovery) {
1463 - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
1464 - flags);
1465 + if (ioc->shost_recovery)
1466 return -EAGAIN;
1467 - }
1468 - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1469
1470 memset(&karg, 0, sizeof(struct mpt2_ioctl_command));
1471 karg.hdr.ioc_number = karg32.hdr.ioc_number;
1472 diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
1473 index 2e9a444..c7a0870 100644
1474 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
1475 +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
1476 @@ -103,7 +103,6 @@ struct sense_info {
1477 };
1478
1479
1480 -#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
1481 /**
1482 * struct fw_event_work - firmware event struct
1483 * @list: link list framework
1484 @@ -1502,7 +1501,13 @@ _scsih_slave_configure(struct scsi_device *sdev)
1485 break;
1486 case MPI2_RAID_VOL_TYPE_RAID1E:
1487 qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
1488 - r_level = "RAID1E";
1489 + if (ioc->manu_pg10.OEMIdentifier &&
1490 + (ioc->manu_pg10.GenericFlags0 &
1491 + MFG10_GF0_R10_DISPLAY) &&
1492 + !(raid_device->num_pds % 2))
1493 + r_level = "RAID10";
1494 + else
1495 + r_level = "RAID1E";
1496 break;
1497 case MPI2_RAID_VOL_TYPE_RAID1:
1498 qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
1499 @@ -1786,17 +1791,18 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
1500 u32 ioc_state;
1501 unsigned long timeleft;
1502 u8 VF_ID = 0;
1503 - unsigned long flags;
1504
1505 - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
1506 - if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED ||
1507 - ioc->shost_recovery) {
1508 - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1509 + if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) {
1510 + printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n",
1511 + __func__, ioc->name);
1512 + return;
1513 + }
1514 +
1515 + if (ioc->shost_recovery) {
1516 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
1517 __func__, ioc->name);
1518 return;
1519 }
1520 - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1521
1522 ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
1523 if (ioc_state & MPI2_DOORBELL_USED) {
1524 @@ -2222,7 +2228,7 @@ _scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
1525 MPT2SAS_INFO_FMT "SDEV_RUNNING: "
1526 "handle(0x%04x)\n", ioc->name, handle));
1527 sas_device_priv_data->block = 0;
1528 - scsi_device_set_state(sdev, SDEV_RUNNING);
1529 + scsi_internal_device_unblock(sdev);
1530 }
1531 }
1532 }
1533 @@ -2251,7 +2257,7 @@ _scsih_block_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
1534 MPT2SAS_INFO_FMT "SDEV_BLOCK: "
1535 "handle(0x%04x)\n", ioc->name, handle));
1536 sas_device_priv_data->block = 1;
1537 - scsi_device_set_state(sdev, SDEV_BLOCK);
1538 + scsi_internal_device_block(sdev);
1539 }
1540 }
1541 }
1542 @@ -2327,6 +2333,7 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc,
1543 u16 handle;
1544 u16 reason_code;
1545 u8 phy_number;
1546 + u8 link_rate;
1547
1548 for (i = 0; i < event_data->NumEntries; i++) {
1549 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
1550 @@ -2337,6 +2344,11 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc,
1551 MPI2_EVENT_SAS_TOPO_RC_MASK;
1552 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
1553 _scsih_block_io_device(ioc, handle);
1554 + if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED) {
1555 + link_rate = event_data->PHY[i].LinkRate >> 4;
1556 + if (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)
1557 + _scsih_ublock_io_device(ioc, handle);
1558 + }
1559 }
1560 }
1561
1562 @@ -2405,27 +2417,6 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
1563 }
1564
1565 /**
1566 - * _scsih_queue_rescan - queue a topology rescan from user context
1567 - * @ioc: per adapter object
1568 - *
1569 - * Return nothing.
1570 - */
1571 -static void
1572 -_scsih_queue_rescan(struct MPT2SAS_ADAPTER *ioc)
1573 -{
1574 - struct fw_event_work *fw_event;
1575 -
1576 - if (ioc->wait_for_port_enable_to_complete)
1577 - return;
1578 - fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
1579 - if (!fw_event)
1580 - return;
1581 - fw_event->event = MPT2SAS_RESCAN_AFTER_HOST_RESET;
1582 - fw_event->ioc = ioc;
1583 - _scsih_fw_event_add(ioc, fw_event);
1584 -}
1585 -
1586 -/**
1587 * _scsih_flush_running_cmds - completing outstanding commands.
1588 * @ioc: per adapter object
1589 *
1590 @@ -2456,46 +2447,6 @@ _scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc)
1591 }
1592
1593 /**
1594 - * mpt2sas_scsih_reset_handler - reset callback handler (for scsih)
1595 - * @ioc: per adapter object
1596 - * @reset_phase: phase
1597 - *
1598 - * The handler for doing any required cleanup or initialization.
1599 - *
1600 - * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
1601 - * MPT2_IOC_DONE_RESET
1602 - *
1603 - * Return nothing.
1604 - */
1605 -void
1606 -mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
1607 -{
1608 - switch (reset_phase) {
1609 - case MPT2_IOC_PRE_RESET:
1610 - dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
1611 - "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
1612 - _scsih_fw_event_off(ioc);
1613 - break;
1614 - case MPT2_IOC_AFTER_RESET:
1615 - dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
1616 - "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
1617 - if (ioc->tm_cmds.status & MPT2_CMD_PENDING) {
1618 - ioc->tm_cmds.status |= MPT2_CMD_RESET;
1619 - mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid);
1620 - complete(&ioc->tm_cmds.done);
1621 - }
1622 - _scsih_fw_event_on(ioc);
1623 - _scsih_flush_running_cmds(ioc);
1624 - break;
1625 - case MPT2_IOC_DONE_RESET:
1626 - dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
1627 - "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
1628 - _scsih_queue_rescan(ioc);
1629 - break;
1630 - }
1631 -}
1632 -
1633 -/**
1634 * _scsih_setup_eedp - setup MPI request for EEDP transfer
1635 * @scmd: pointer to scsi command object
1636 * @mpi_request: pointer to the SCSI_IO request message frame
1637 @@ -2615,7 +2566,6 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
1638 Mpi2SCSIIORequest_t *mpi_request;
1639 u32 mpi_control;
1640 u16 smid;
1641 - unsigned long flags;
1642
1643 scmd->scsi_done = done;
1644 sas_device_priv_data = scmd->device->hostdata;
1645 @@ -2634,13 +2584,10 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
1646 }
1647
1648 /* see if we are busy with task management stuff */
1649 - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
1650 - if (sas_target_priv_data->tm_busy ||
1651 - ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
1652 - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1653 + if (sas_target_priv_data->tm_busy)
1654 + return SCSI_MLQUEUE_DEVICE_BUSY;
1655 + else if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
1656 return SCSI_MLQUEUE_HOST_BUSY;
1657 - }
1658 - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1659
1660 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1661 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1662 @@ -3436,6 +3383,9 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
1663 if (!handle)
1664 return -1;
1665
1666 + if (ioc->shost_recovery)
1667 + return -1;
1668 +
1669 if ((mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
1670 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
1671 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1672 @@ -3572,6 +3522,9 @@ _scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u16 handle)
1673 struct _sas_node *sas_expander;
1674 unsigned long flags;
1675
1676 + if (ioc->shost_recovery)
1677 + return;
1678 +
1679 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1680 sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, handle);
1681 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1682 @@ -3743,6 +3696,8 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
1683 mutex_unlock(&ioc->tm_cmds.mutex);
1684 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset "
1685 "done: handle(0x%04x)\n", ioc->name, device_handle));
1686 + if (ioc->shost_recovery)
1687 + goto out;
1688 }
1689
1690 /* SAS_IO_UNIT_CNTR - send REMOVE_DEVICE */
1691 @@ -3765,6 +3720,9 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
1692 le32_to_cpu(mpi_reply.IOCLogInfo)));
1693
1694 out:
1695 +
1696 + _scsih_ublock_io_device(ioc, handle);
1697 +
1698 mpt2sas_transport_port_remove(ioc, sas_device->sas_address,
1699 sas_device->parent_handle);
1700
1701 @@ -3908,6 +3866,8 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
1702 "expander event\n", ioc->name));
1703 return;
1704 }
1705 + if (ioc->shost_recovery)
1706 + return;
1707 if (event_data->PHY[i].PhyStatus &
1708 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
1709 continue;
1710 @@ -3942,10 +3902,6 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
1711 link_rate_);
1712 }
1713 }
1714 - if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED) {
1715 - if (link_rate_ >= MPI2_SAS_NEG_LINK_RATE_1_5)
1716 - _scsih_ublock_io_device(ioc, handle);
1717 - }
1718 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED) {
1719 if (link_rate_ < MPI2_SAS_NEG_LINK_RATE_1_5)
1720 break;
1721 @@ -5156,22 +5112,9 @@ static void
1722 _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
1723 {
1724 struct _sas_device *sas_device, *sas_device_next;
1725 - struct _sas_node *sas_expander, *sas_expander_next;
1726 + struct _sas_node *sas_expander;
1727 struct _raid_device *raid_device, *raid_device_next;
1728 - unsigned long flags;
1729
1730 - _scsih_search_responding_sas_devices(ioc);
1731 - _scsih_search_responding_raid_devices(ioc);
1732 - _scsih_search_responding_expanders(ioc);
1733 -
1734 - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
1735 - ioc->shost_recovery = 0;
1736 - if (ioc->shost->shost_state == SHOST_RECOVERY) {
1737 - printk(MPT2SAS_INFO_FMT "putting controller into "
1738 - "SHOST_RUNNING\n", ioc->name);
1739 - scsi_host_set_state(ioc->shost, SHOST_RUNNING);
1740 - }
1741 - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1742
1743 list_for_each_entry_safe(sas_device, sas_device_next,
1744 &ioc->sas_device_list, list) {
1745 @@ -5207,16 +5150,63 @@ _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
1746 _scsih_raid_device_remove(ioc, raid_device);
1747 }
1748
1749 - list_for_each_entry_safe(sas_expander, sas_expander_next,
1750 - &ioc->sas_expander_list, list) {
1751 + retry_expander_search:
1752 + sas_expander = NULL;
1753 + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1754 if (sas_expander->responding) {
1755 sas_expander->responding = 0;
1756 continue;
1757 }
1758 - printk("\tremoving expander: handle(0x%04x), "
1759 - " sas_addr(0x%016llx)\n", sas_expander->handle,
1760 - (unsigned long long)sas_expander->sas_address);
1761 _scsih_expander_remove(ioc, sas_expander->handle);
1762 + goto retry_expander_search;
1763 + }
1764 +}
1765 +
1766 +/**
1767 + * mpt2sas_scsih_reset_handler - reset callback handler (for scsih)
1768 + * @ioc: per adapter object
1769 + * @reset_phase: phase
1770 + *
1771 + * The handler for doing any required cleanup or initialization.
1772 + *
1773 + * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
1774 + * MPT2_IOC_DONE_RESET
1775 + *
1776 + * Return nothing.
1777 + */
1778 +void
1779 +mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
1780 +{
1781 + switch (reset_phase) {
1782 + case MPT2_IOC_PRE_RESET:
1783 + dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
1784 + "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
1785 + _scsih_fw_event_off(ioc);
1786 + break;
1787 + case MPT2_IOC_AFTER_RESET:
1788 + dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
1789 + "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
1790 + if (ioc->tm_cmds.status & MPT2_CMD_PENDING) {
1791 + ioc->tm_cmds.status |= MPT2_CMD_RESET;
1792 + mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid);
1793 + complete(&ioc->tm_cmds.done);
1794 + }
1795 + _scsih_fw_event_on(ioc);
1796 + _scsih_flush_running_cmds(ioc);
1797 + break;
1798 + case MPT2_IOC_DONE_RESET:
1799 + dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
1800 + "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
1801 + _scsih_sas_host_refresh(ioc, 0);
1802 + _scsih_search_responding_sas_devices(ioc);
1803 + _scsih_search_responding_raid_devices(ioc);
1804 + _scsih_search_responding_expanders(ioc);
1805 + break;
1806 + case MPT2_IOC_RUNNING:
1807 + dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
1808 + "MPT2_IOC_RUNNING\n", ioc->name, __func__));
1809 + _scsih_remove_unresponding_devices(ioc);
1810 + break;
1811 }
1812 }
1813
1814 @@ -5236,14 +5226,6 @@ _firmware_event_work(struct work_struct *work)
1815 unsigned long flags;
1816 struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
1817
1818 - /* This is invoked by calling _scsih_queue_rescan(). */
1819 - if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) {
1820 - _scsih_fw_event_free(ioc, fw_event);
1821 - _scsih_sas_host_refresh(ioc, 1);
1822 - _scsih_remove_unresponding_devices(ioc);
1823 - return;
1824 - }
1825 -
1826 /* the queue is being flushed so ignore this event */
1827 spin_lock_irqsave(&ioc->fw_event_lock, flags);
1828 if (ioc->fw_events_off || ioc->remove_host) {
1829 @@ -5253,13 +5235,10 @@ _firmware_event_work(struct work_struct *work)
1830 }
1831 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
1832
1833 - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
1834 if (ioc->shost_recovery) {
1835 - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1836 _scsih_fw_event_requeue(ioc, fw_event, 1000);
1837 return;
1838 }
1839 - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1840
1841 switch (fw_event->event) {
1842 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1843 @@ -5461,6 +5440,8 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
1844 if (!sas_device)
1845 continue;
1846 _scsih_remove_device(ioc, sas_device->handle);
1847 + if (ioc->shost_recovery)
1848 + return;
1849 goto retry_device_search;
1850 }
1851 }
1852 @@ -5482,6 +5463,8 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
1853 if (!expander_sibling)
1854 continue;
1855 _scsih_expander_remove(ioc, expander_sibling->handle);
1856 + if (ioc->shost_recovery)
1857 + return;
1858 goto retry_expander_search;
1859 }
1860 }
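
Taken together, the scsih hunks retire the MPT2SAS_RESCAN_AFTER_HOST_RESET pseudo-event: instead of queueing a rescan work item from the reset callback, mpt2sas_scsih_reset_handler is moved below _scsih_remove_unresponding_devices and grows an MPT2_IOC_RUNNING phase, so responding devices are re-marked during MPT2_IOC_DONE_RESET and stale ones are pruned only once the host is running again. A compilable stub of that four-phase dispatch shape (the puts() strings paraphrase the driver's actions; everything else is a stand-in):

    #include <stdio.h>

    enum reset_phase { IOC_PRE_RESET, IOC_AFTER_RESET, IOC_DONE_RESET, IOC_RUNNING };

    static void reset_handler(enum reset_phase phase)
    {
        switch (phase) {
        case IOC_PRE_RESET:
            puts("quiesce: turn firmware event processing off");
            break;
        case IOC_AFTER_RESET:
            puts("fail back any pending task-management command");
            break;
        case IOC_DONE_RESET:
            puts("refresh host state, mark still-responding devices");
            break;
        case IOC_RUNNING:
            puts("host live again: remove unresponding devices");
            break;
        }
    }

    int main(void)
    {
        enum reset_phase p;

        for (p = IOC_PRE_RESET; p <= IOC_RUNNING; p++)
            reset_handler(p);      /* phases arrive in this order */
        return 0;
    }
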
1861 diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
1862 index 686695b..a53086d 100644
1863 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
1864 +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
1865 @@ -140,11 +140,18 @@ _transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle,
1866 u32 device_info;
1867 u32 ioc_status;
1868
1869 + if (ioc->shost_recovery) {
1870 + printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
1871 + __func__, ioc->name);
1872 + return -EFAULT;
1873 + }
1874 +
1875 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
1876 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
1877 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1878 +
1879 ioc->name, __FILE__, __LINE__, __func__);
1880 - return -1;
1881 + return -ENXIO;
1882 }
1883
1884 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1885 @@ -153,7 +160,7 @@ _transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle,
1886 printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)"
1887 "\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status,
1888 __FILE__, __LINE__, __func__);
1889 - return -1;
1890 + return -EIO;
1891 }
1892
1893 memset(identify, 0, sizeof(identify));
1894 @@ -288,21 +295,17 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
1895 void *psge;
1896 u32 sgl_flags;
1897 u8 issue_reset = 0;
1898 - unsigned long flags;
1899 void *data_out = NULL;
1900 dma_addr_t data_out_dma;
1901 u32 sz;
1902 u64 *sas_address_le;
1903 u16 wait_state_count;
1904
1905 - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
1906 - if (ioc->ioc_reset_in_progress) {
1907 - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1908 + if (ioc->shost_recovery) {
1909 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
1910 __func__, ioc->name);
1911 return -EFAULT;
1912 }
1913 - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1914
1915 mutex_lock(&ioc->transport_cmds.mutex);
1916
1917 @@ -806,6 +809,12 @@ mpt2sas_transport_update_phy_link_change(struct MPT2SAS_ADAPTER *ioc,
1918 struct _sas_node *sas_node;
1919 struct _sas_phy *mpt2sas_phy;
1920
1921 + if (ioc->shost_recovery) {
1922 + printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
1923 + __func__, ioc->name);
1924 + return;
1925 + }
1926 +
1927 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1928 sas_node = _transport_sas_node_find_by_handle(ioc, handle);
1929 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1930 @@ -1025,7 +1034,6 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1931 void *psge;
1932 u32 sgl_flags;
1933 u8 issue_reset = 0;
1934 - unsigned long flags;
1935 dma_addr_t dma_addr_in = 0;
1936 dma_addr_t dma_addr_out = 0;
1937 u16 wait_state_count;
1938 @@ -1045,14 +1053,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1939 return -EINVAL;
1940 }
1941
1942 - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
1943 - if (ioc->ioc_reset_in_progress) {
1944 - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1945 + if (ioc->shost_recovery) {
1946 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
1947 __func__, ioc->name);
1948 return -EFAULT;
1949 }
1950 - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1951
1952 rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
1953 if (rc)
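
Besides gating on shost_recovery, _transport_set_identify now returns distinct errno values (-ENXIO, -EIO) instead of a bare -1, so callers can tell a missing device from a firmware failure. A self-contained toy showing why that distinction is useful (set_identify and its two flags are invented for the example):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int set_identify(int config_read_ok, int ioc_status_ok)
    {
        if (!config_read_ok)
            return -ENXIO;     /* config page could not be fetched */
        if (!ioc_status_ok)
            return -EIO;       /* firmware reported a failure status */
        return 0;
    }

    int main(void)
    {
        /* "No such device or address" vs "Input/output error" */
        printf("%s\n", strerror(-set_identify(0, 1)));
        printf("%s\n", strerror(-set_identify(1, 0)));
        return 0;
    }
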
1954 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
1955 index b7b9fec..a89c421 100644
1956 --- a/drivers/scsi/sd.c
1957 +++ b/drivers/scsi/sd.c
1958 @@ -2021,6 +2021,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
1959
1960 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
1961 sdp->removable ? "removable " : "");
1962 + put_device(&sdkp->dev);
1963 }
1964
1965 /**
1966 @@ -2106,6 +2107,7 @@ static int sd_probe(struct device *dev)
1967
1968 get_device(&sdp->sdev_gendev);
1969
1970 + get_device(&sdkp->dev); /* prevent release before async_schedule */
1971 async_schedule(sd_probe_async, sdkp);
1972
1973 return 0;
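
The sd.c hunk closes a use-after-free window: sd_probe() hands sdkp to async_schedule(), so it must hold an extra reference that sd_probe_async() drops when it finishes. A user-space model of the same pairing, with a plain integer refcount standing in for the kobject machinery (all names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct disk { int refs; };

    static void get_disk(struct disk *d) { d->refs++; }

    static void put_disk(struct disk *d)
    {
        if (--d->refs == 0) {
            printf("freeing disk\n");
            free(d);
        }
    }

    static void probe_async(struct disk *d)
    {
        /* ... attach the disk ... */
        put_disk(d);           /* matches the get taken at schedule time */
    }

    int main(void)
    {
        struct disk *d = calloc(1, sizeof(*d));

        get_disk(d);           /* creator's reference */
        get_disk(d);           /* reference owned by the async work */
        probe_async(d);        /* would normally run later, on another thread */
        put_disk(d);           /* creator drops its reference; disk is freed */
        return 0;
    }
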
1974 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
1975 index 9230402..4968c4c 100644
1976 --- a/drivers/scsi/sg.c
1977 +++ b/drivers/scsi/sg.c
1978 @@ -1811,7 +1811,7 @@ retry:
1979 return 0;
1980 out:
1981 for (i = 0; i < k; i++)
1982 - __free_pages(schp->pages[k], order);
1983 + __free_pages(schp->pages[i], order);
1984
1985 if (--order >= 0)
1986 goto retry;
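
The sg.c change is a one-character unwind bug: on allocation failure the old loop freed schp->pages[k], the slot that just failed, k times over, instead of freeing pages[0..k-1] once each, leaking the real pages and double-freeing garbage. A standalone reproduction of the corrected pattern, with malloc/free in place of page allocation:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        void *pages[4] = { 0 };
        int k, i;

        for (k = 0; k < 4; k++) {
            pages[k] = (k < 3) ? malloc(64) : NULL;  /* simulate failure at k==3 */
            if (!pages[k])
                goto out;
        }
        return 0;
    out:
        for (i = 0; i < k; i++)
            free(pages[i]);    /* fixed: index with i, not k */
        return 1;
    }
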
1987 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
1988 index b7c1603..7c1e65d 100644
1989 --- a/fs/binfmt_elf.c
1990 +++ b/fs/binfmt_elf.c
1991 @@ -501,22 +501,22 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
1992 }
1993 }
1994
1995 - /*
1996 - * Now fill out the bss section. First pad the last page up
1997 - * to the page boundary, and then perform a mmap to make sure
1998 - * that there are zero-mapped pages up to and including the
1999 - * last bss page.
2000 - */
2001 - if (padzero(elf_bss)) {
2002 - error = -EFAULT;
2003 - goto out_close;
2004 - }
2005 + if (last_bss > elf_bss) {
2006 + /*
2007 + * Now fill out the bss section. First pad the last page up
2008 + * to the page boundary, and then perform a mmap to make sure
2009 + * that there are zero-mapped pages up to and including the
2010 + * last bss page.
2011 + */
2012 + if (padzero(elf_bss)) {
2013 + error = -EFAULT;
2014 + goto out_close;
2015 + }
2016
2017 - /* What we have mapped so far */
2018 - elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
2019 + /* What we have mapped so far */
2020 + elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
2021
2022 - /* Map the last of the bss segment */
2023 - if (last_bss > elf_bss) {
2024 + /* Map the last of the bss segment */
2025 down_write(&current->mm->mmap_sem);
2026 error = do_brk(elf_bss, last_bss - elf_bss);
2027 up_write(&current->mm->mmap_sem);
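
The load_elf_interp() hunk moves the padzero() call under the existing last_bss > elf_bss test: when the interpreter has no bss beyond its file-backed data there is nothing to zero, and padzero() could fail with a spurious -EFAULT (for instance on a non-writable final page). A small worked example of the guarded arithmetic, assuming 4 KiB pages; PAGESTART here mimics ELF_PAGESTART with ELF_MIN_ALIGN equal to the page size:

    #include <stdio.h>

    #define PAGE 4096UL
    #define PAGESTART(a) ((a) & ~(PAGE - 1))

    int main(void)
    {
        unsigned long elf_bss = 0x5123, last_bss = 0x7000;

        if (last_bss > elf_bss) {              /* only then is there bss to map */
            unsigned long pad_from = elf_bss;  /* zero from here to page end */
            unsigned long brk_from = PAGESTART(elf_bss + PAGE - 1);

            printf("pad at 0x%lx, map [0x%lx, 0x%lx)\n",
                   pad_from, brk_from, last_bss);
        }
        return 0;
    }
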
2028 diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h
2029 index 3ddce03..d731092 100644
2030 --- a/include/linux/kvm_para.h
2031 +++ b/include/linux/kvm_para.h
2032 @@ -13,6 +13,7 @@
2033 #define KVM_ENOSYS 1000
2034 #define KVM_EFAULT EFAULT
2035 #define KVM_E2BIG E2BIG
2036 +#define KVM_EPERM EPERM
2037
2038 #define KVM_HC_VAPIC_POLL_IRQ 1
2039 #define KVM_HC_MMU_OP 2
2040 diff --git a/ipc/shm.c b/ipc/shm.c
2041 index 1bc4701..30162a5 100644
2042 --- a/ipc/shm.c
2043 +++ b/ipc/shm.c
2044 @@ -410,7 +410,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
2045 return error;
2046
2047 no_id:
2048 - if (shp->mlock_user) /* shmflg & SHM_HUGETLB case */
2049 + if (is_file_hugepages(file) && shp->mlock_user)
2050 user_shm_unlock(size, shp->mlock_user);
2051 fput(file);
2052 no_file:
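
In newseg(), user_shm_lock() is only taken for SHM_HUGETLB segments, yet the error path unlocked whenever shp->mlock_user happened to be set; the fix mirrors the lock condition with is_file_hugepages(). A toy model of keeping such a lock/unlock pair balanced on the failure path (the counter and names are invented for the example):

    #include <stdbool.h>
    #include <stdio.h>

    static int locked_pages;

    static void user_lock(int n)   { locked_pages += n; }
    static void user_unlock(int n) { locked_pages -= n; }

    static int newseg(bool hugetlb, bool fail)
    {
        if (hugetlb)
            user_lock(1);
        if (fail) {
            if (hugetlb)       /* fixed: mirror the lock condition exactly */
                user_unlock(1);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        newseg(true, true);
        newseg(false, true);
        printf("locked_pages=%d\n", locked_pages);  /* 0: balanced */
        return 0;
    }
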
2053 diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
2054 index d7cbc57..3f49f53 100644
2055 --- a/kernel/perf_counter.c
2056 +++ b/kernel/perf_counter.c
2057 @@ -469,7 +469,8 @@ static void update_counter_times(struct perf_counter *counter)
2058 struct perf_counter_context *ctx = counter->ctx;
2059 u64 run_end;
2060
2061 - if (counter->state < PERF_COUNTER_STATE_INACTIVE)
2062 + if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
2063 + counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE)
2064 return;
2065
2066 counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
2067 @@ -518,7 +519,7 @@ static void __perf_counter_disable(void *info)
2068 */
2069 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
2070 update_context_time(ctx);
2071 - update_counter_times(counter);
2072 + update_group_times(counter);
2073 if (counter == counter->group_leader)
2074 group_sched_out(counter, cpuctx, ctx);
2075 else
2076 @@ -573,7 +574,7 @@ static void perf_counter_disable(struct perf_counter *counter)
2077 * in, so we can change the state safely.
2078 */
2079 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
2080 - update_counter_times(counter);
2081 + update_group_times(counter);
2082 counter->state = PERF_COUNTER_STATE_OFF;
2083 }
2084
2085 @@ -851,6 +852,27 @@ retry:
2086 }
2087
2088 /*
2089 + * Put a counter into inactive state and update time fields.
2090 + * Enabling the leader of a group effectively enables all
2091 + * the group members that aren't explicitly disabled, so we
2092 + * have to update their ->tstamp_enabled also.
2093 + * Note: this works for group members as well as group leaders
2094 + * since the non-leader members' sibling_lists will be empty.
2095 + */
2096 +static void __perf_counter_mark_enabled(struct perf_counter *counter,
2097 + struct perf_counter_context *ctx)
2098 +{
2099 + struct perf_counter *sub;
2100 +
2101 + counter->state = PERF_COUNTER_STATE_INACTIVE;
2102 + counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
2103 + list_for_each_entry(sub, &counter->sibling_list, list_entry)
2104 + if (sub->state >= PERF_COUNTER_STATE_INACTIVE)
2105 + sub->tstamp_enabled =
2106 + ctx->time - sub->total_time_enabled;
2107 +}
2108 +
2109 +/*
2110 * Cross CPU call to enable a performance counter
2111 */
2112 static void __perf_counter_enable(void *info)
2113 @@ -877,8 +899,7 @@ static void __perf_counter_enable(void *info)
2114
2115 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
2116 goto unlock;
2117 - counter->state = PERF_COUNTER_STATE_INACTIVE;
2118 - counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
2119 + __perf_counter_mark_enabled(counter, ctx);
2120
2121 /*
2122 * If the counter is in a group and isn't the group leader,
2123 @@ -971,11 +992,9 @@ static void perf_counter_enable(struct perf_counter *counter)
2124 * Since we have the lock this context can't be scheduled
2125 * in, so we can change the state safely.
2126 */
2127 - if (counter->state == PERF_COUNTER_STATE_OFF) {
2128 - counter->state = PERF_COUNTER_STATE_INACTIVE;
2129 - counter->tstamp_enabled =
2130 - ctx->time - counter->total_time_enabled;
2131 - }
2132 + if (counter->state == PERF_COUNTER_STATE_OFF)
2133 + __perf_counter_mark_enabled(counter, ctx);
2134 +
2135 out:
2136 spin_unlock_irq(&ctx->lock);
2137 }
2138 @@ -1479,9 +1498,7 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
2139 counter->attr.enable_on_exec = 0;
2140 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
2141 continue;
2142 - counter->state = PERF_COUNTER_STATE_INACTIVE;
2143 - counter->tstamp_enabled =
2144 - ctx->time - counter->total_time_enabled;
2145 + __perf_counter_mark_enabled(counter, ctx);
2146 enabled = 1;
2147 }
2148
2149 @@ -4171,6 +4188,7 @@ static int perf_copy_attr(struct perf_counter_attr __user *uattr,
2150 if (val)
2151 goto err_size;
2152 }
2153 + size = sizeof(*attr);
2154 }
2155
2156 ret = copy_from_user(attr, uattr, size);
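
The perf_counter hunks all funnel through the new __perf_counter_mark_enabled(): the invariant is that, while a counter is enabled, total_time_enabled == ctx->time - tstamp_enabled, so (re)enabling must rebase tstamp_enabled to now - total_time_enabled, and enabling a group leader must rebase every sibling too. A standalone check of that bookkeeping (plain doubles stand in for the u64 nanosecond clocks):

    #include <stdio.h>

    struct counter { double total_enabled, tstamp_enabled; };

    static void mark_enabled(struct counter *c, double now)
    {
        c->tstamp_enabled = now - c->total_enabled;  /* rebase on enable */
    }

    static double total_at(struct counter *c, double now)
    {
        return now - c->tstamp_enabled;              /* holds while enabled */
    }

    int main(void)
    {
        struct counter sib = { .total_enabled = 3.0 };  /* ran 3s before disable */

        mark_enabled(&sib, 10.0);                       /* re-enabled at t=10 */
        printf("%.1f\n", total_at(&sib, 12.0));         /* 5.0 = 3s old + 2s new */
        return 0;
    }
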
2157 diff --git a/net/wireless/scan.c b/net/wireless/scan.c
2158 index 7e595ce..46c101a 100644
2159 --- a/net/wireless/scan.c
2160 +++ b/net/wireless/scan.c
2161 @@ -97,7 +97,7 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
2162 dev->bss_generation++;
2163 }
2164
2165 -static u8 *find_ie(u8 num, u8 *ies, size_t len)
2166 +static u8 *find_ie(u8 num, u8 *ies, int len)
2167 {
2168 while (len > 2 && ies[0] != num) {
2169 len -= ies[1] + 2;
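
find_ie()'s length parameter changes from size_t to int because the loop decrements it by an attacker-controlled ies[1] + 2; with an unsigned type a malformed element makes the subtraction wrap to a huge value, len > 2 stays true, and the walk runs off the end of the buffer. A signed int simply goes negative and terminates, as this standalone demonstration shows:

    #include <stdio.h>

    int main(void)
    {
        unsigned char ies[] = { 5, 200, 0 };  /* IE id 5 claims 200 bytes */
        int len = sizeof(ies);

        len -= ies[1] + 2;                    /* 3 - 202 = -199 */
        printf("signed len=%d -> loop stops (len > 2 is false)\n", len);
        printf("as size_t it would wrap to %zu\n", (size_t)len);
        return 0;
    }
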
2170 diff --git a/sound/pci/cs46xx/cs46xx_lib.h b/sound/pci/cs46xx/cs46xx_lib.h
2171 index 4eb55aa..b518949 100644
2172 --- a/sound/pci/cs46xx/cs46xx_lib.h
2173 +++ b/sound/pci/cs46xx/cs46xx_lib.h
2174 @@ -35,7 +35,7 @@
2175
2176
2177 #ifdef CONFIG_SND_CS46XX_NEW_DSP
2178 -#define CS46XX_MIN_PERIOD_SIZE 1
2179 +#define CS46XX_MIN_PERIOD_SIZE 64
2180 #define CS46XX_MAX_PERIOD_SIZE 1024*1024
2181 #else
2182 #define CS46XX_MIN_PERIOD_SIZE 2048
2183 diff --git a/sound/pci/oxygen/oxygen_io.c b/sound/pci/oxygen/oxygen_io.c
2184 index c1eb923..09b2b2a 100644
2185 --- a/sound/pci/oxygen/oxygen_io.c
2186 +++ b/sound/pci/oxygen/oxygen_io.c
2187 @@ -215,17 +215,8 @@ EXPORT_SYMBOL(oxygen_write_spi);
2188
2189 void oxygen_write_i2c(struct oxygen *chip, u8 device, u8 map, u8 data)
2190 {
2191 - unsigned long timeout;
2192 -
2193 /* should not need more than about 300 us */
2194 - timeout = jiffies + msecs_to_jiffies(1);
2195 - do {
2196 - if (!(oxygen_read16(chip, OXYGEN_2WIRE_BUS_STATUS)
2197 - & OXYGEN_2WIRE_BUSY))
2198 - break;
2199 - udelay(1);
2200 - cond_resched();
2201 - } while (time_after_eq(timeout, jiffies));
2202 + msleep(1);
2203
2204 oxygen_write8(chip, OXYGEN_2WIRE_MAP, map);
2205 oxygen_write8(chip, OXYGEN_2WIRE_DATA, data);
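
In oxygen_write_i2c() the status poll is dropped entirely: rather than reading OXYGEN_2WIRE_BUS_STATUS in a loop, the driver just sleeps 1 ms, comfortably past the ~300 us worst case its own comment cites, which also avoids touching the bus-status register while a transaction is still in flight. A user-space sketch of the sleep-past-worst-case strategy (nanosleep standing in for msleep):

    #include <time.h>

    static void wait_i2c_idle(void)
    {
        /* no status read at all: sleep longer than any transaction takes */
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };
        nanosleep(&ts, NULL);
    }

    int main(void)
    {
        wait_i2c_idle();
        return 0;
    }
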
2206 diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
2207 index e7348d3..b3a2de8 100644
2208 --- a/sound/soc/codecs/wm8350.c
2209 +++ b/sound/soc/codecs/wm8350.c
2210 @@ -613,7 +613,7 @@ SOC_DAPM_SINGLE("Switch", WM8350_BEEP_VOLUME, 15, 1, 1);
2211
2212 /* Out4 Capture Mux */
2213 static const struct snd_kcontrol_new wm8350_out4_capture_controls =
2214 -SOC_DAPM_ENUM("Route", wm8350_enum[8]);
2215 +SOC_DAPM_ENUM("Route", wm8350_enum[7]);
2216
2217 static const struct snd_soc_dapm_widget wm8350_dapm_widgets[] = {
2218
2219 diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
2220 index b4b06c7..e874ad4 100644
2221 --- a/tools/perf/builtin-stat.c
2222 +++ b/tools/perf/builtin-stat.c
2223 @@ -82,19 +82,32 @@ static u64 runtime_cycles[MAX_RUN];
2224 static u64 event_res[MAX_RUN][MAX_COUNTERS][3];
2225 static u64 event_scaled[MAX_RUN][MAX_COUNTERS];
2226
2227 -static u64 event_res_avg[MAX_COUNTERS][3];
2228 -static u64 event_res_noise[MAX_COUNTERS][3];
2229 +struct stats
2230 +{
2231 + double sum;
2232 + double sum_sq;
2233 +};
2234
2235 -static u64 event_scaled_avg[MAX_COUNTERS];
2236 +static double avg_stats(struct stats *stats)
2237 +{
2238 + return stats->sum / run_count;
2239 +}
2240
2241 -static u64 runtime_nsecs_avg;
2242 -static u64 runtime_nsecs_noise;
2243 +/*
2244 + * stddev = sqrt(1/N (\Sum n_i^2) - avg(n)^2)
2245 + */
2246 +static double stddev_stats(struct stats *stats)
2247 +{
2248 + double avg = stats->sum / run_count;
2249
2250 -static u64 walltime_nsecs_avg;
2251 -static u64 walltime_nsecs_noise;
2252 + return sqrt(stats->sum_sq/run_count - avg*avg);
2253 +}
2254
2255 -static u64 runtime_cycles_avg;
2256 -static u64 runtime_cycles_noise;
2257 +struct stats event_res_stats[MAX_COUNTERS][3];
2258 +struct stats event_scaled_stats[MAX_COUNTERS];
2259 +struct stats runtime_nsecs_stats;
2260 +struct stats walltime_nsecs_stats;
2261 +struct stats runtime_cycles_stats;
2262
2263 #define MATCH_EVENT(t, c, counter) \
2264 (attrs[counter].type == PERF_TYPE_##t && \
2265 @@ -278,42 +291,37 @@ static int run_perf_stat(int argc __used, const char **argv)
2266 return WEXITSTATUS(status);
2267 }
2268
2269 -static void print_noise(u64 *count, u64 *noise)
2270 +static void print_noise(double avg, double stddev)
2271 {
2272 if (run_count > 1)
2273 - fprintf(stderr, " ( +- %7.3f%% )",
2274 - (double)noise[0]/(count[0]+1)*100.0);
2275 + fprintf(stderr, " ( +- %7.3f%% )", 100*stddev / avg);
2276 }
2277
2278 -static void nsec_printout(int counter, u64 *count, u64 *noise)
2279 +static void nsec_printout(int counter, double avg, double stddev)
2280 {
2281 - double msecs = (double)count[0] / 1000000;
2282 + double msecs = avg / 1e6;
2283
2284 fprintf(stderr, " %14.6f %-24s", msecs, event_name(counter));
2285
2286 if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) {
2287 - if (walltime_nsecs_avg)
2288 - fprintf(stderr, " # %10.3f CPUs ",
2289 - (double)count[0] / (double)walltime_nsecs_avg);
2290 + fprintf(stderr, " # %10.3f CPUs ",
2291 + avg / avg_stats(&walltime_nsecs_stats));
2292 }
2293 - print_noise(count, noise);
2294 + print_noise(avg, stddev);
2295 }
2296
2297 -static void abs_printout(int counter, u64 *count, u64 *noise)
2298 +static void abs_printout(int counter, double avg, double stddev)
2299 {
2300 - fprintf(stderr, " %14Ld %-24s", count[0], event_name(counter));
2301 + fprintf(stderr, " %14.0f %-24s", avg, event_name(counter));
2302
2303 - if (runtime_cycles_avg &&
2304 - MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
2305 + if (MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
2306 fprintf(stderr, " # %10.3f IPC ",
2307 - (double)count[0] / (double)runtime_cycles_avg);
2308 + avg / avg_stats(&runtime_cycles_stats));
2309 } else {
2310 - if (runtime_nsecs_avg) {
2311 - fprintf(stderr, " # %10.3f M/sec",
2312 - (double)count[0]/runtime_nsecs_avg*1000.0);
2313 - }
2314 + fprintf(stderr, " # %10.3f M/sec",
2315 + 1000.0 * avg / avg_stats(&runtime_nsecs_stats));
2316 }
2317 - print_noise(count, noise);
2318 + print_noise(avg, stddev);
2319 }
2320
2321 /*
2322 @@ -321,12 +329,12 @@ static void abs_printout(int counter, u64 *count, u64 *noise)
2323 */
2324 static void print_counter(int counter)
2325 {
2326 - u64 *count, *noise;
2327 + double avg, stddev;
2328 int scaled;
2329
2330 - count = event_res_avg[counter];
2331 - noise = event_res_noise[counter];
2332 - scaled = event_scaled_avg[counter];
2333 + avg = avg_stats(&event_res_stats[counter][0]);
2334 + stddev = stddev_stats(&event_res_stats[counter][0]);
2335 + scaled = avg_stats(&event_scaled_stats[counter]);
2336
2337 if (scaled == -1) {
2338 fprintf(stderr, " %14s %-24s\n",
2339 @@ -335,36 +343,34 @@ static void print_counter(int counter)
2340 }
2341
2342 if (nsec_counter(counter))
2343 - nsec_printout(counter, count, noise);
2344 + nsec_printout(counter, avg, stddev);
2345 else
2346 - abs_printout(counter, count, noise);
2347 + abs_printout(counter, avg, stddev);
2348 +
2349 + if (scaled) {
2350 + double avg_enabled, avg_running;
2351 +
2352 + avg_enabled = avg_stats(&event_res_stats[counter][1]);
2353 + avg_running = avg_stats(&event_res_stats[counter][2]);
2354
2355 - if (scaled)
2356 fprintf(stderr, " (scaled from %.2f%%)",
2357 - (double) count[2] / count[1] * 100);
2358 + 100 * avg_running / avg_enabled);
2359 + }
2360
2361 fprintf(stderr, "\n");
2362 }
2363
2364 -/*
2365 - * normalize_noise noise values down to stddev:
2366 - */
2367 -static void normalize_noise(u64 *val)
2368 +static void update_stats(const char *name, int idx, struct stats *stats, u64 *val)
2369 {
2370 - double res;
2371 + double sq = *val;
2372
2373 - res = (double)*val / (run_count * sqrt((double)run_count));
2374 -
2375 - *val = (u64)res;
2376 -}
2377 -
2378 -static void update_avg(const char *name, int idx, u64 *avg, u64 *val)
2379 -{
2380 - *avg += *val;
2381 + stats->sum += *val;
2382 + stats->sum_sq += sq * sq;
2383
2384 if (verbose > 1)
2385 fprintf(stderr, "debug: %20s[%d]: %Ld\n", name, idx, *val);
2386 }
2387 +
2388 /*
2389 * Calculate the averages and noises:
2390 */
2391 @@ -376,61 +382,22 @@ static void calc_avg(void)
2392 fprintf(stderr, "\n");
2393
2394 for (i = 0; i < run_count; i++) {
2395 - update_avg("runtime", 0, &runtime_nsecs_avg, runtime_nsecs + i);
2396 - update_avg("walltime", 0, &walltime_nsecs_avg, walltime_nsecs + i);
2397 - update_avg("runtime_cycles", 0, &runtime_cycles_avg, runtime_cycles + i);
2398 + update_stats("runtime", 0, &runtime_nsecs_stats, runtime_nsecs + i);
2399 + update_stats("walltime", 0, &walltime_nsecs_stats, walltime_nsecs + i);
2400 + update_stats("runtime_cycles", 0, &runtime_cycles_stats, runtime_cycles + i);
2401
2402 for (j = 0; j < nr_counters; j++) {
2403 - update_avg("counter/0", j,
2404 - event_res_avg[j]+0, event_res[i][j]+0);
2405 - update_avg("counter/1", j,
2406 - event_res_avg[j]+1, event_res[i][j]+1);
2407 - update_avg("counter/2", j,
2408 - event_res_avg[j]+2, event_res[i][j]+2);
2409 + update_stats("counter/0", j,
2410 + event_res_stats[j]+0, event_res[i][j]+0);
2411 + update_stats("counter/1", j,
2412 + event_res_stats[j]+1, event_res[i][j]+1);
2413 + update_stats("counter/2", j,
2414 + event_res_stats[j]+2, event_res[i][j]+2);
2415 if (event_scaled[i][j] != (u64)-1)
2416 - update_avg("scaled", j,
2417 - event_scaled_avg + j, event_scaled[i]+j);
2418 - else
2419 - event_scaled_avg[j] = -1;
2420 + update_stats("scaled", j,
2421 + event_scaled_stats + j, event_scaled[i]+j);
2422 }
2423 }
2424 - runtime_nsecs_avg /= run_count;
2425 - walltime_nsecs_avg /= run_count;
2426 - runtime_cycles_avg /= run_count;
2427 -
2428 - for (j = 0; j < nr_counters; j++) {
2429 - event_res_avg[j][0] /= run_count;
2430 - event_res_avg[j][1] /= run_count;
2431 - event_res_avg[j][2] /= run_count;
2432 - }
2433 -
2434 - for (i = 0; i < run_count; i++) {
2435 - runtime_nsecs_noise +=
2436 - abs((s64)(runtime_nsecs[i] - runtime_nsecs_avg));
2437 - walltime_nsecs_noise +=
2438 - abs((s64)(walltime_nsecs[i] - walltime_nsecs_avg));
2439 - runtime_cycles_noise +=
2440 - abs((s64)(runtime_cycles[i] - runtime_cycles_avg));
2441 -
2442 - for (j = 0; j < nr_counters; j++) {
2443 - event_res_noise[j][0] +=
2444 - abs((s64)(event_res[i][j][0] - event_res_avg[j][0]));
2445 - event_res_noise[j][1] +=
2446 - abs((s64)(event_res[i][j][1] - event_res_avg[j][1]));
2447 - event_res_noise[j][2] +=
2448 - abs((s64)(event_res[i][j][2] - event_res_avg[j][2]));
2449 - }
2450 - }
2451 -
2452 - normalize_noise(&runtime_nsecs_noise);
2453 - normalize_noise(&walltime_nsecs_noise);
2454 - normalize_noise(&runtime_cycles_noise);
2455 -
2456 - for (j = 0; j < nr_counters; j++) {
2457 - normalize_noise(&event_res_noise[j][0]);
2458 - normalize_noise(&event_res_noise[j][1]);
2459 - normalize_noise(&event_res_noise[j][2]);
2460 - }
2461 }
2462
2463 static void print_stat(int argc, const char **argv)
2464 @@ -457,10 +424,11 @@ static void print_stat(int argc, const char **argv)
2465
2466 fprintf(stderr, "\n");
2467 fprintf(stderr, " %14.9f seconds time elapsed",
2468 - (double)walltime_nsecs_avg/1e9);
2469 + avg_stats(&walltime_nsecs_stats)/1e9);
2470 if (run_count > 1) {
2471 fprintf(stderr, " ( +- %7.3f%% )",
2472 - 100.0*(double)walltime_nsecs_noise/(double)walltime_nsecs_avg);
2473 + 100*stddev_stats(&walltime_nsecs_stats) /
2474 + avg_stats(&walltime_nsecs_stats));
2475 }
2476 fprintf(stderr, "\n\n");
2477 }
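
The builtin-stat rewrite replaces the ad-hoc avg/noise arrays with two running sums per quantity, from which the mean and the population standard deviation fall out as avg = sum/N and stddev = sqrt(sum_sq/N - avg^2), with no need to keep per-run samples around. A standalone numeric check of those two formulas (compile with -lm):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double samples[] = { 10.0, 12.0, 14.0 };
        double sum = 0.0, sum_sq = 0.0;
        int n = 3, i;

        for (i = 0; i < n; i++) {
            sum += samples[i];                  /* running first moment */
            sum_sq += samples[i] * samples[i];  /* running second moment */
        }
        double avg = sum / n;
        printf("avg=%.3f stddev=%.3f\n", avg, sqrt(sum_sq / n - avg * avg));
        /* prints avg=12.000 stddev=1.633 (population stddev) */
        return 0;
    }
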
2478 diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
2479 index 1150c6d..5f39805 100644
2480 --- a/virt/kvm/ioapic.c
2481 +++ b/virt/kvm/ioapic.c
2482 @@ -188,6 +188,8 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
2483 if ((edge && old_irr != ioapic->irr) ||
2484 (!edge && !entry.fields.remote_irr))
2485 ret = ioapic_service(ioapic, irq);
2486 + else
2487 + ret = 0; /* report coalesced interrupt */
2488 }
2489 }
2490 return ret;
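
The ioapic hunk makes kvm_ioapic_set_irq() return 0 when an asserted line did not actually get serviced, i.e. the interrupt was coalesced with one still pending, instead of leaving the earlier return value in place; callers can use the distinction, for example to re-inject coalesced timer ticks. A toy model of that delivered-versus-coalesced return convention (a bare IRR bitmask stands in for the ioapic state):

    #include <stdio.h>

    static int set_irq(int *irr, int irq)
    {
        int mask = 1 << irq, old = *irr;

        *irr |= mask;
        if (old != *irr)
            return 1;          /* bit newly set: the interrupt was serviced */
        return 0;              /* already pending: report it as coalesced */
    }

    int main(void)
    {
        int irr = 0;

        printf("%d\n", set_irq(&irr, 3));  /* 1: delivered */
        printf("%d\n", set_irq(&irr, 3));  /* 0: coalesced */
        return 0;
    }
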