Annotation of /trunk/kernel-magellan/patches-4.15/0111-4.15.12-all-fixes.patch
Revision 3095
Wed Mar 21 14:53:00 2018 UTC (6 years, 7 months ago) by niro
File size: 68697 byte(s)
-linux-4.15.12
1 | niro | 3095 | diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt |
2 | index e64d903bcbe8..46da5f184460 100644 | ||
3 | --- a/Documentation/devicetree/bindings/usb/dwc2.txt | ||
4 | +++ b/Documentation/devicetree/bindings/usb/dwc2.txt | ||
5 | @@ -19,7 +19,7 @@ Required properties: | ||
6 | configured in FS mode; | ||
7 | - "st,stm32f4x9-hsotg": The DWC2 USB HS controller instance in STM32F4x9 SoCs | ||
8 | configured in HS mode; | ||
9 | - - "st,stm32f7xx-hsotg": The DWC2 USB HS controller instance in STM32F7xx SoCs | ||
10 | + - "st,stm32f7-hsotg": The DWC2 USB HS controller instance in STM32F7 SoCs | ||
11 | configured in HS mode; | ||
12 | - reg : Should contain 1 register range (address and length) | ||
13 | - interrupts : Should contain 1 interrupt | ||
14 | diff --git a/Makefile b/Makefile | ||
15 | index 74c0f5e8dd55..2e6ba1553dff 100644 | ||
16 | --- a/Makefile | ||
17 | +++ b/Makefile | ||
18 | @@ -1,7 +1,7 @@ | ||
19 | # SPDX-License-Identifier: GPL-2.0 | ||
20 | VERSION = 4 | ||
21 | PATCHLEVEL = 15 | ||
22 | -SUBLEVEL = 11 | ||
23 | +SUBLEVEL = 12 | ||
24 | EXTRAVERSION = | ||
25 | NAME = Fearless Coyote | ||
26 | |||
27 | diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c | ||
28 | index 79089778725b..e3b45546d589 100644 | ||
29 | --- a/arch/parisc/kernel/cache.c | ||
30 | +++ b/arch/parisc/kernel/cache.c | ||
31 | @@ -543,7 +543,8 @@ void flush_cache_mm(struct mm_struct *mm) | ||
32 | rp3440, etc. So, avoid it if the mm isn't too big. */ | ||
33 | if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && | ||
34 | mm_total_size(mm) >= parisc_cache_flush_threshold) { | ||
35 | - flush_tlb_all(); | ||
36 | + if (mm->context) | ||
37 | + flush_tlb_all(); | ||
38 | flush_cache_all(); | ||
39 | return; | ||
40 | } | ||
41 | @@ -571,6 +572,8 @@ void flush_cache_mm(struct mm_struct *mm) | ||
42 | pfn = pte_pfn(*ptep); | ||
43 | if (!pfn_valid(pfn)) | ||
44 | continue; | ||
45 | + if (unlikely(mm->context)) | ||
46 | + flush_tlb_page(vma, addr); | ||
47 | __flush_cache_page(vma, addr, PFN_PHYS(pfn)); | ||
48 | } | ||
49 | } | ||
50 | @@ -579,26 +582,46 @@ void flush_cache_mm(struct mm_struct *mm) | ||
51 | void flush_cache_range(struct vm_area_struct *vma, | ||
52 | unsigned long start, unsigned long end) | ||
53 | { | ||
54 | + pgd_t *pgd; | ||
55 | + unsigned long addr; | ||
56 | + | ||
57 | if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && | ||
58 | end - start >= parisc_cache_flush_threshold) { | ||
59 | - flush_tlb_range(vma, start, end); | ||
60 | + if (vma->vm_mm->context) | ||
61 | + flush_tlb_range(vma, start, end); | ||
62 | flush_cache_all(); | ||
63 | return; | ||
64 | } | ||
65 | |||
66 | - flush_user_dcache_range_asm(start, end); | ||
67 | - if (vma->vm_flags & VM_EXEC) | ||
68 | - flush_user_icache_range_asm(start, end); | ||
69 | - flush_tlb_range(vma, start, end); | ||
70 | + if (vma->vm_mm->context == mfsp(3)) { | ||
71 | + flush_user_dcache_range_asm(start, end); | ||
72 | + if (vma->vm_flags & VM_EXEC) | ||
73 | + flush_user_icache_range_asm(start, end); | ||
74 | + flush_tlb_range(vma, start, end); | ||
75 | + return; | ||
76 | + } | ||
77 | + | ||
78 | + pgd = vma->vm_mm->pgd; | ||
79 | + for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) { | ||
80 | + unsigned long pfn; | ||
81 | + pte_t *ptep = get_ptep(pgd, addr); | ||
82 | + if (!ptep) | ||
83 | + continue; | ||
84 | + pfn = pte_pfn(*ptep); | ||
85 | + if (pfn_valid(pfn)) { | ||
86 | + if (unlikely(vma->vm_mm->context)) | ||
87 | + flush_tlb_page(vma, addr); | ||
88 | + __flush_cache_page(vma, addr, PFN_PHYS(pfn)); | ||
89 | + } | ||
90 | + } | ||
91 | } | ||
92 | |||
93 | void | ||
94 | flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) | ||
95 | { | ||
96 | - BUG_ON(!vma->vm_mm->context); | ||
97 | - | ||
98 | if (pfn_valid(pfn)) { | ||
99 | - flush_tlb_page(vma, vmaddr); | ||
100 | + if (likely(vma->vm_mm->context)) | ||
101 | + flush_tlb_page(vma, vmaddr); | ||
102 | __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); | ||
103 | } | ||
104 | } | ||
105 | diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h | ||
106 | index 66c14347c502..23a65439c37c 100644 | ||
107 | --- a/arch/x86/include/asm/cpufeatures.h | ||
108 | +++ b/arch/x86/include/asm/cpufeatures.h | ||
109 | @@ -314,6 +314,7 @@ | ||
110 | #define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */ | ||
111 | #define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */ | ||
112 | #define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */ | ||
113 | +#define X86_FEATURE_TME (16*32+13) /* Intel Total Memory Encryption */ | ||
114 | #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ | ||
115 | #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ | ||
116 | #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ | ||
117 | @@ -326,6 +327,7 @@ | ||
118 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ | ||
119 | #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ | ||
120 | #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ | ||
121 | +#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ | ||
122 | #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ | ||
123 | #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ | ||
124 | #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ | ||
125 | diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h | ||
126 | index d0dabeae0505..f928ad9b143f 100644 | ||
127 | --- a/arch/x86/include/asm/nospec-branch.h | ||
128 | +++ b/arch/x86/include/asm/nospec-branch.h | ||
129 | @@ -183,7 +183,10 @@ | ||
130 | * otherwise we'll run out of registers. We don't care about CET | ||
131 | * here, anyway. | ||
132 | */ | ||
133 | -# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n", \ | ||
134 | +# define CALL_NOSPEC \ | ||
135 | + ALTERNATIVE( \ | ||
136 | + ANNOTATE_RETPOLINE_SAFE \ | ||
137 | + "call *%[thunk_target]\n", \ | ||
138 | " jmp 904f;\n" \ | ||
139 | " .align 16\n" \ | ||
140 | "901: call 903f;\n" \ | ||
141 | diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c | ||
142 | index 4aa9fd379390..c3af167d0a70 100644 | ||
143 | --- a/arch/x86/kernel/cpu/intel.c | ||
144 | +++ b/arch/x86/kernel/cpu/intel.c | ||
145 | @@ -105,7 +105,7 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c) | ||
146 | /* | ||
147 | * Early microcode releases for the Spectre v2 mitigation were broken. | ||
148 | * Information taken from; | ||
149 | - * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf | ||
150 | + * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf | ||
151 | * - https://kb.vmware.com/s/article/52345 | ||
152 | * - Microcode revisions observed in the wild | ||
153 | * - Release note from 20180108 microcode release | ||
154 | @@ -123,7 +123,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = { | ||
155 | { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 }, | ||
156 | { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, | ||
157 | { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, | ||
158 | - { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 }, | ||
159 | { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 }, | ||
160 | { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, | ||
161 | { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, | ||
162 | diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c | ||
163 | index 5edb27f1a2c4..9d0b5af7db91 100644 | ||
164 | --- a/arch/x86/kernel/vm86_32.c | ||
165 | +++ b/arch/x86/kernel/vm86_32.c | ||
166 | @@ -727,7 +727,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) | ||
167 | return; | ||
168 | |||
169 | check_vip: | ||
170 | - if (VEFLAGS & X86_EFLAGS_VIP) { | ||
171 | + if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) == | ||
172 | + (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) { | ||
173 | save_v86_state(regs, VM86_STI); | ||
174 | return; | ||
175 | } | ||
176 | diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c | ||
177 | index fe2cb4cfa75b..37277859a2a1 100644 | ||
178 | --- a/arch/x86/kvm/mmu.c | ||
179 | +++ b/arch/x86/kvm/mmu.c | ||
180 | @@ -2758,8 +2758,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, | ||
181 | else | ||
182 | pte_access &= ~ACC_WRITE_MASK; | ||
183 | |||
184 | + if (!kvm_is_mmio_pfn(pfn)) | ||
185 | + spte |= shadow_me_mask; | ||
186 | + | ||
187 | spte |= (u64)pfn << PAGE_SHIFT; | ||
188 | - spte |= shadow_me_mask; | ||
189 | |||
190 | if (pte_access & ACC_WRITE_MASK) { | ||
191 | |||
192 | diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c | ||
193 | index c88573d90f3e..25a30b5d6582 100644 | ||
194 | --- a/arch/x86/mm/fault.c | ||
195 | +++ b/arch/x86/mm/fault.c | ||
196 | @@ -330,7 +330,7 @@ static noinline int vmalloc_fault(unsigned long address) | ||
197 | if (!pmd_k) | ||
198 | return -1; | ||
199 | |||
200 | - if (pmd_huge(*pmd_k)) | ||
201 | + if (pmd_large(*pmd_k)) | ||
202 | return 0; | ||
203 | |||
204 | pte_k = pte_offset_kernel(pmd_k, address); | ||
205 | @@ -475,7 +475,7 @@ static noinline int vmalloc_fault(unsigned long address) | ||
206 | if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref)) | ||
207 | BUG(); | ||
208 | |||
209 | - if (pud_huge(*pud)) | ||
210 | + if (pud_large(*pud)) | ||
211 | return 0; | ||
212 | |||
213 | pmd = pmd_offset(pud, address); | ||
214 | @@ -486,7 +486,7 @@ static noinline int vmalloc_fault(unsigned long address) | ||
215 | if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref)) | ||
216 | BUG(); | ||
217 | |||
218 | - if (pmd_huge(*pmd)) | ||
219 | + if (pmd_large(*pmd)) | ||
220 | return 0; | ||
221 | |||
222 | pte_ref = pte_offset_kernel(pmd_ref, address); | ||
223 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | ||
224 | index 21e7ae159dff..9f72993a6175 100644 | ||
225 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | ||
226 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | ||
227 | @@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector) | ||
228 | /* don't do anything if sink is not display port, i.e., | ||
229 | * passive dp->(dvi|hdmi) adaptor | ||
230 | */ | ||
231 | - if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { | ||
232 | - int saved_dpms = connector->dpms; | ||
233 | - /* Only turn off the display if it's physically disconnected */ | ||
234 | - if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) { | ||
235 | - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | ||
236 | - } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { | ||
237 | - /* Don't try to start link training before we | ||
238 | - * have the dpcd */ | ||
239 | - if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) | ||
240 | - return; | ||
241 | - | ||
242 | - /* set it to OFF so that drm_helper_connector_dpms() | ||
243 | - * won't return immediately since the current state | ||
244 | - * is ON at this point. | ||
245 | - */ | ||
246 | - connector->dpms = DRM_MODE_DPMS_OFF; | ||
247 | - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
248 | - } | ||
249 | - connector->dpms = saved_dpms; | ||
250 | + if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT && | ||
251 | + amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) && | ||
252 | + amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { | ||
253 | + /* Don't start link training before we have the DPCD */ | ||
254 | + if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) | ||
255 | + return; | ||
256 | + | ||
257 | + /* Turn the connector off and back on immediately, which | ||
258 | + * will trigger link training | ||
259 | + */ | ||
260 | + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | ||
261 | + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
262 | } | ||
263 | } | ||
264 | } | ||
265 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | ||
266 | index 1eac7c3c687b..e0eef2c41190 100644 | ||
267 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | ||
268 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | ||
269 | @@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj) | ||
270 | struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj); | ||
271 | |||
272 | if (robj) { | ||
273 | - if (robj->gem_base.import_attach) | ||
274 | - drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg); | ||
275 | amdgpu_mn_unregister(robj); | ||
276 | amdgpu_bo_unref(&robj); | ||
277 | } | ||
278 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | ||
279 | index ea25164e7f4b..828252dc1d91 100644 | ||
280 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | ||
281 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | ||
282 | @@ -44,6 +44,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) | ||
283 | |||
284 | amdgpu_bo_kunmap(bo); | ||
285 | |||
286 | + if (bo->gem_base.import_attach) | ||
287 | + drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg); | ||
288 | drm_gem_object_release(&bo->gem_base); | ||
289 | amdgpu_bo_unref(&bo->parent); | ||
290 | if (!list_empty(&bo->shadow_list)) { | ||
291 | diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c | ||
292 | index 380f340204e8..f56f60f695e1 100644 | ||
293 | --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c | ||
294 | +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c | ||
295 | @@ -268,13 +268,13 @@ nouveau_backlight_init(struct drm_device *dev) | ||
296 | struct nvif_device *device = &drm->client.device; | ||
297 | struct drm_connector *connector; | ||
298 | |||
299 | + INIT_LIST_HEAD(&drm->bl_connectors); | ||
300 | + | ||
301 | if (apple_gmux_present()) { | ||
302 | NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n"); | ||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | - INIT_LIST_HEAD(&drm->bl_connectors); | ||
307 | - | ||
308 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
309 | if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && | ||
310 | connector->connector_type != DRM_MODE_CONNECTOR_eDP) | ||
311 | diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | ||
312 | index e35d3e17cd7c..c6e3d0dd1070 100644 | ||
313 | --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | ||
314 | +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | ||
315 | @@ -1354,7 +1354,7 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse, | ||
316 | |||
317 | tail = this->addr + this->size; | ||
318 | if (vmm->func->page_block && next && next->page != p) | ||
319 | - tail = ALIGN_DOWN(addr, vmm->func->page_block); | ||
320 | + tail = ALIGN_DOWN(tail, vmm->func->page_block); | ||
321 | |||
322 | if (addr <= tail && tail - addr >= size) { | ||
323 | rb_erase(&this->tree, &vmm->free); | ||
324 | diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c | ||
325 | index cf3deb283da5..065c058f7b5f 100644 | ||
326 | --- a/drivers/gpu/drm/radeon/radeon_gem.c | ||
327 | +++ b/drivers/gpu/drm/radeon/radeon_gem.c | ||
328 | @@ -34,8 +34,6 @@ void radeon_gem_object_free(struct drm_gem_object *gobj) | ||
329 | struct radeon_bo *robj = gem_to_radeon_bo(gobj); | ||
330 | |||
331 | if (robj) { | ||
332 | - if (robj->gem_base.import_attach) | ||
333 | - drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg); | ||
334 | radeon_mn_unregister(robj); | ||
335 | radeon_bo_unref(&robj); | ||
336 | } | ||
337 | diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c | ||
338 | index 093594976126..baadb706c276 100644 | ||
339 | --- a/drivers/gpu/drm/radeon/radeon_object.c | ||
340 | +++ b/drivers/gpu/drm/radeon/radeon_object.c | ||
341 | @@ -82,6 +82,8 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) | ||
342 | mutex_unlock(&bo->rdev->gem.mutex); | ||
343 | radeon_bo_clear_surface_reg(bo); | ||
344 | WARN_ON_ONCE(!list_empty(&bo->va)); | ||
345 | + if (bo->gem_base.import_attach) | ||
346 | + drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg); | ||
347 | drm_gem_object_release(&bo->gem_base); | ||
348 | kfree(bo); | ||
349 | } | ||
350 | diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c | ||
351 | index 42713511b53b..524e6134642e 100644 | ||
352 | --- a/drivers/infiniband/sw/rdmavt/mr.c | ||
353 | +++ b/drivers/infiniband/sw/rdmavt/mr.c | ||
354 | @@ -489,11 +489,13 @@ static int rvt_check_refs(struct rvt_mregion *mr, const char *t) | ||
355 | unsigned long timeout; | ||
356 | struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device); | ||
357 | |||
358 | - if (percpu_ref_is_zero(&mr->refcount)) | ||
359 | - return 0; | ||
360 | - /* avoid dma mr */ | ||
361 | - if (mr->lkey) | ||
362 | + if (mr->lkey) { | ||
363 | + /* avoid dma mr */ | ||
364 | rvt_dereg_clean_qps(mr); | ||
365 | + /* @mr was indexed on rcu protected @lkey_table */ | ||
366 | + synchronize_rcu(); | ||
367 | + } | ||
368 | + | ||
369 | timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ); | ||
370 | if (!timeout) { | ||
371 | rvt_pr_err(rdi, | ||
372 | diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c | ||
373 | index 06f025fd5726..12c325066deb 100644 | ||
374 | --- a/drivers/irqchip/irq-gic-v3-its.c | ||
375 | +++ b/drivers/irqchip/irq-gic-v3-its.c | ||
376 | @@ -1412,7 +1412,7 @@ static struct irq_chip its_irq_chip = { | ||
377 | * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. | ||
378 | */ | ||
379 | #define IRQS_PER_CHUNK_SHIFT 5 | ||
380 | -#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT) | ||
381 | +#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT) | ||
382 | #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ | ||
383 | |||
384 | static unsigned long *lpi_bitmap; | ||
385 | @@ -2119,11 +2119,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | ||
386 | |||
387 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | ||
388 | /* | ||
389 | - * At least one bit of EventID is being used, hence a minimum | ||
390 | - * of two entries. No, the architecture doesn't let you | ||
391 | - * express an ITT with a single entry. | ||
392 | + * We allocate at least one chunk worth of LPIs bet device, | ||
393 | + * and thus that many ITEs. The device may require less though. | ||
394 | */ | ||
395 | - nr_ites = max(2UL, roundup_pow_of_two(nvecs)); | ||
396 | + nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs)); | ||
397 | sz = nr_ites * its->ite_size; | ||
398 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; | ||
399 | itt = kzalloc(sz, GFP_KERNEL); | ||
400 | diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c | ||
401 | index 3551fbd6fe41..935593032123 100644 | ||
402 | --- a/drivers/nvme/host/core.c | ||
403 | +++ b/drivers/nvme/host/core.c | ||
404 | @@ -2052,6 +2052,22 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = { | ||
405 | NULL, | ||
406 | }; | ||
407 | |||
408 | +static int nvme_active_ctrls(struct nvme_subsystem *subsys) | ||
409 | +{ | ||
410 | + int count = 0; | ||
411 | + struct nvme_ctrl *ctrl; | ||
412 | + | ||
413 | + mutex_lock(&subsys->lock); | ||
414 | + list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { | ||
415 | + if (ctrl->state != NVME_CTRL_DELETING && | ||
416 | + ctrl->state != NVME_CTRL_DEAD) | ||
417 | + count++; | ||
418 | + } | ||
419 | + mutex_unlock(&subsys->lock); | ||
420 | + | ||
421 | + return count; | ||
422 | +} | ||
423 | + | ||
424 | static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) | ||
425 | { | ||
426 | struct nvme_subsystem *subsys, *found; | ||
427 | @@ -2090,7 +2106,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) | ||
428 | * Verify that the subsystem actually supports multiple | ||
429 | * controllers, else bail out. | ||
430 | */ | ||
431 | - if (!(id->cmic & (1 << 1))) { | ||
432 | + if (nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) { | ||
433 | dev_err(ctrl->device, | ||
434 | "ignoring ctrl due to duplicate subnqn (%s).\n", | ||
435 | found->subnqn); | ||
436 | diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c | ||
437 | index 1e7ce0b6f299..1b7febc43da9 100644 | ||
438 | --- a/drivers/phy/broadcom/phy-brcm-usb-init.c | ||
439 | +++ b/drivers/phy/broadcom/phy-brcm-usb-init.c | ||
440 | @@ -50,6 +50,8 @@ | ||
441 | #define USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK 0x80000000 /* option */ | ||
442 | #define USB_CTRL_EBRIDGE 0x0c | ||
443 | #define USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK 0x00020000 /* option */ | ||
444 | +#define USB_CTRL_OBRIDGE 0x10 | ||
445 | +#define USB_CTRL_OBRIDGE_LS_KEEP_ALIVE_MASK 0x08000000 | ||
446 | #define USB_CTRL_MDIO 0x14 | ||
447 | #define USB_CTRL_MDIO2 0x18 | ||
448 | #define USB_CTRL_UTMI_CTL_1 0x2c | ||
449 | @@ -71,6 +73,7 @@ | ||
450 | #define USB_CTRL_USB30_CTL1_USB3_IPP_MASK 0x20000000 /* option */ | ||
451 | #define USB_CTRL_USB30_PCTL 0x70 | ||
452 | #define USB_CTRL_USB30_PCTL_PHY3_SOFT_RESETB_MASK 0x00000002 | ||
453 | +#define USB_CTRL_USB30_PCTL_PHY3_IDDQ_OVERRIDE_MASK 0x00008000 | ||
454 | #define USB_CTRL_USB30_PCTL_PHY3_SOFT_RESETB_P1_MASK 0x00020000 | ||
455 | #define USB_CTRL_USB_DEVICE_CTL1 0x90 | ||
456 | #define USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK 0x00000003 /* option */ | ||
457 | @@ -116,7 +119,6 @@ enum { | ||
458 | USB_CTRL_SETUP_STRAP_IPP_SEL_SELECTOR, | ||
459 | USB_CTRL_SETUP_OC3_DISABLE_SELECTOR, | ||
460 | USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_SELECTOR, | ||
461 | - USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_SELECTOR, | ||
462 | USB_CTRL_USB_PM_BDC_SOFT_RESETB_SELECTOR, | ||
463 | USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR, | ||
464 | USB_CTRL_USB_PM_USB_PWRDN_SELECTOR, | ||
465 | @@ -203,7 +205,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = { | ||
466 | USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, | ||
467 | USB_CTRL_SETUP_OC3_DISABLE_MASK, | ||
468 | 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ | ||
469 | - USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK, | ||
470 | 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ | ||
471 | USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, | ||
472 | USB_CTRL_USB_PM_USB_PWRDN_MASK, | ||
473 | @@ -225,7 +226,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = { | ||
474 | 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ | ||
475 | USB_CTRL_SETUP_OC3_DISABLE_MASK, | ||
476 | USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, | ||
477 | - USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK, | ||
478 | 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ | ||
479 | USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK, | ||
480 | 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */ | ||
481 | @@ -247,7 +247,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = { | ||
482 | USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, | ||
483 | USB_CTRL_SETUP_OC3_DISABLE_MASK, | ||
484 | 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ | ||
485 | - USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK, | ||
486 | USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, | ||
487 | USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, | ||
488 | USB_CTRL_USB_PM_USB_PWRDN_MASK, | ||
489 | @@ -269,7 +268,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = { | ||
490 | 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ | ||
491 | USB_CTRL_SETUP_OC3_DISABLE_MASK, | ||
492 | USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, | ||
493 | - USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK, | ||
494 | 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ | ||
495 | USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK, | ||
496 | 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */ | ||
497 | @@ -291,7 +289,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = { | ||
498 | 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ | ||
499 | USB_CTRL_SETUP_OC3_DISABLE_MASK, | ||
500 | 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ | ||
501 | - USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK, | ||
502 | 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ | ||
503 | USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK, | ||
504 | USB_CTRL_USB_PM_USB_PWRDN_MASK, | ||
505 | @@ -313,7 +310,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = { | ||
506 | 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ | ||
507 | 0, /* USB_CTRL_SETUP_OC3_DISABLE_MASK */ | ||
508 | USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, | ||
509 | - 0, /* USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK */ | ||
510 | 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ | ||
511 | 0, /* USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK */ | ||
512 | 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */ | ||
513 | @@ -335,7 +331,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = { | ||
514 | USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, | ||
515 | USB_CTRL_SETUP_OC3_DISABLE_MASK, | ||
516 | 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ | ||
517 | - 0, /* USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK */ | ||
518 | USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, | ||
519 | USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, | ||
520 | USB_CTRL_USB_PM_USB_PWRDN_MASK, | ||
521 | @@ -357,7 +352,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = { | ||
522 | 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */ | ||
523 | USB_CTRL_SETUP_OC3_DISABLE_MASK, | ||
524 | USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK, | ||
525 | - 0, /* USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK */ | ||
526 | 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */ | ||
527 | 0, /* USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK */ | ||
528 | 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */ | ||
529 | @@ -379,7 +373,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = { | ||
530 | USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, | ||
531 | USB_CTRL_SETUP_OC3_DISABLE_MASK, | ||
532 | 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ | ||
533 | - USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK, | ||
534 | USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, | ||
535 | USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, | ||
536 | USB_CTRL_USB_PM_USB_PWRDN_MASK, | ||
537 | @@ -401,7 +394,6 @@ usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = { | ||
538 | USB_CTRL_SETUP_STRAP_IPP_SEL_MASK, | ||
539 | USB_CTRL_SETUP_OC3_DISABLE_MASK, | ||
540 | 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */ | ||
541 | - USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK, | ||
542 | USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK, | ||
543 | USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK, | ||
544 | USB_CTRL_USB_PM_USB_PWRDN_MASK, | ||
545 | @@ -926,6 +918,7 @@ void brcm_usb_init_common(struct brcm_usb_init_params *params) | ||
546 | USB_CTRL_UNSET_FAMILY(params, USB_PM, BDC_SOFT_RESETB); | ||
547 | break; | ||
548 | default: | ||
549 | + USB_CTRL_UNSET_FAMILY(params, USB_PM, BDC_SOFT_RESETB); | ||
550 | USB_CTRL_SET_FAMILY(params, USB_PM, BDC_SOFT_RESETB); | ||
551 | break; | ||
552 | } | ||
553 | @@ -952,13 +945,17 @@ void brcm_usb_init_eohci(struct brcm_usb_init_params *params) | ||
554 | * Don't enable this so the memory controller doesn't read | ||
555 | * into memory holes. NOTE: This bit is low true on 7366C0. | ||
556 | */ | ||
557 | - USB_CTRL_SET_FAMILY(params, EBRIDGE, ESTOP_SCB_REQ); | ||
558 | + USB_CTRL_SET(ctrl, EBRIDGE, ESTOP_SCB_REQ); | ||
559 | |||
560 | /* Setup the endian bits */ | ||
561 | reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP)); | ||
562 | reg &= ~USB_CTRL_SETUP_ENDIAN_BITS; | ||
563 | reg |= USB_CTRL_MASK_FAMILY(params, SETUP, ENDIAN); | ||
564 | brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP)); | ||
565 | + | ||
566 | + if (params->selected_family == BRCM_FAMILY_7271A0) | ||
567 | + /* Enable LS keep alive fix for certain keyboards */ | ||
568 | + USB_CTRL_SET(ctrl, OBRIDGE, LS_KEEP_ALIVE); | ||
569 | } | ||
570 | |||
571 | void brcm_usb_init_xhci(struct brcm_usb_init_params *params) | ||
572 | @@ -1003,6 +1000,7 @@ void brcm_usb_uninit_eohci(struct brcm_usb_init_params *params) | ||
573 | void brcm_usb_uninit_xhci(struct brcm_usb_init_params *params) | ||
574 | { | ||
575 | brcmusb_xhci_soft_reset(params, 1); | ||
576 | + USB_CTRL_SET(params->ctrl_regs, USB30_PCTL, PHY3_IDDQ_OVERRIDE); | ||
577 | } | ||
578 | |||
579 | void brcm_usb_set_family_map(struct brcm_usb_init_params *params) | ||
580 | diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c | ||
581 | index 195b98139e5f..d1dab36fa5b7 100644 | ||
582 | --- a/drivers/phy/broadcom/phy-brcm-usb.c | ||
583 | +++ b/drivers/phy/broadcom/phy-brcm-usb.c | ||
584 | @@ -338,9 +338,9 @@ static int brcm_usb_phy_probe(struct platform_device *pdev) | ||
585 | ARRAY_SIZE(brcm_dr_mode_to_name), | ||
586 | mode, &priv->ini.mode); | ||
587 | } | ||
588 | - if (of_property_read_bool(dn, "brcm,has_xhci")) | ||
589 | + if (of_property_read_bool(dn, "brcm,has-xhci")) | ||
590 | priv->has_xhci = true; | ||
591 | - if (of_property_read_bool(dn, "brcm,has_eohci")) | ||
592 | + if (of_property_read_bool(dn, "brcm,has-eohci")) | ||
593 | priv->has_eohci = true; | ||
594 | |||
595 | err = brcm_usb_phy_dvr_init(dev, priv, dn); | ||
596 | diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c | ||
597 | index 6082389f25c3..7b44a2c68a45 100644 | ||
598 | --- a/drivers/scsi/qla2xxx/qla_init.c | ||
599 | +++ b/drivers/scsi/qla2xxx/qla_init.c | ||
600 | @@ -102,11 +102,16 @@ qla2x00_async_iocb_timeout(void *data) | ||
601 | struct srb_iocb *lio = &sp->u.iocb_cmd; | ||
602 | struct event_arg ea; | ||
603 | |||
604 | - ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, | ||
605 | - "Async-%s timeout - hdl=%x portid=%06x %8phC.\n", | ||
606 | - sp->name, sp->handle, fcport->d_id.b24, fcport->port_name); | ||
607 | + if (fcport) { | ||
608 | + ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, | ||
609 | + "Async-%s timeout - hdl=%x portid=%06x %8phC.\n", | ||
610 | + sp->name, sp->handle, fcport->d_id.b24, fcport->port_name); | ||
611 | |||
612 | - fcport->flags &= ~FCF_ASYNC_SENT; | ||
613 | + fcport->flags &= ~FCF_ASYNC_SENT; | ||
614 | + } else { | ||
615 | + pr_info("Async-%s timeout - hdl=%x.\n", | ||
616 | + sp->name, sp->handle); | ||
617 | + } | ||
618 | |||
619 | switch (sp->type) { | ||
620 | case SRB_LOGIN_CMD: | ||
621 | diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c | ||
622 | index e538e6308885..522d585a1a08 100644 | ||
623 | --- a/drivers/scsi/qla2xxx/qla_mid.c | ||
624 | +++ b/drivers/scsi/qla2xxx/qla_mid.c | ||
625 | @@ -582,8 +582,9 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req) | ||
626 | ret = qla25xx_init_req_que(vha, req); | ||
627 | if (ret != QLA_SUCCESS) | ||
628 | return QLA_FUNCTION_FAILED; | ||
629 | + | ||
630 | + qla25xx_free_req_que(vha, req); | ||
631 | } | ||
632 | - qla25xx_free_req_que(vha, req); | ||
633 | |||
634 | return ret; | ||
635 | } | ||
636 | @@ -598,8 +599,9 @@ qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) | ||
637 | ret = qla25xx_init_rsp_que(vha, rsp); | ||
638 | if (ret != QLA_SUCCESS) | ||
639 | return QLA_FUNCTION_FAILED; | ||
640 | + | ||
641 | + qla25xx_free_rsp_que(vha, rsp); | ||
642 | } | ||
643 | - qla25xx_free_rsp_que(vha, rsp); | ||
644 | |||
645 | return ret; | ||
646 | } | ||
647 | diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c | ||
648 | index 1f69e89b950f..1204c1d59bc4 100644 | ||
649 | --- a/drivers/scsi/qla2xxx/qla_os.c | ||
650 | +++ b/drivers/scsi/qla2xxx/qla_os.c | ||
651 | @@ -449,7 +449,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, | ||
652 | ha->req_q_map[0] = req; | ||
653 | set_bit(0, ha->rsp_qid_map); | ||
654 | set_bit(0, ha->req_qid_map); | ||
655 | - return 1; | ||
656 | + return 0; | ||
657 | |||
658 | fail_qpair_map: | ||
659 | kfree(ha->base_qpair); | ||
660 | @@ -466,6 +466,9 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, | ||
661 | |||
662 | static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) | ||
663 | { | ||
664 | + if (!ha->req_q_map) | ||
665 | + return; | ||
666 | + | ||
667 | if (IS_QLAFX00(ha)) { | ||
668 | if (req && req->ring_fx00) | ||
669 | dma_free_coherent(&ha->pdev->dev, | ||
670 | @@ -476,14 +479,17 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) | ||
671 | (req->length + 1) * sizeof(request_t), | ||
672 | req->ring, req->dma); | ||
673 | |||
674 | - if (req) | ||
675 | + if (req) { | ||
676 | kfree(req->outstanding_cmds); | ||
677 | - | ||
678 | - kfree(req); | ||
679 | + kfree(req); | ||
680 | + } | ||
681 | } | ||
682 | |||
683 | static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) | ||
684 | { | ||
685 | + if (!ha->rsp_q_map) | ||
686 | + return; | ||
687 | + | ||
688 | if (IS_QLAFX00(ha)) { | ||
689 | if (rsp && rsp->ring) | ||
690 | dma_free_coherent(&ha->pdev->dev, | ||
691 | @@ -494,7 +500,8 @@ static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) | ||
692 | (rsp->length + 1) * sizeof(response_t), | ||
693 | rsp->ring, rsp->dma); | ||
694 | } | ||
695 | - kfree(rsp); | ||
696 | + if (rsp) | ||
697 | + kfree(rsp); | ||
698 | } | ||
699 | |||
700 | static void qla2x00_free_queues(struct qla_hw_data *ha) | ||
701 | @@ -1717,6 +1724,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) | ||
702 | struct qla_tgt_cmd *cmd; | ||
703 | uint8_t trace = 0; | ||
704 | |||
705 | + if (!ha->req_q_map) | ||
706 | + return; | ||
707 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
708 | for (que = 0; que < ha->max_req_queues; que++) { | ||
709 | req = ha->req_q_map[que]; | ||
710 | @@ -3071,14 +3080,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | ||
711 | /* Set up the irqs */ | ||
712 | ret = qla2x00_request_irqs(ha, rsp); | ||
713 | if (ret) | ||
714 | - goto probe_hw_failed; | ||
715 | + goto probe_failed; | ||
716 | |||
717 | /* Alloc arrays of request and response ring ptrs */ | ||
718 | - if (!qla2x00_alloc_queues(ha, req, rsp)) { | ||
719 | + if (qla2x00_alloc_queues(ha, req, rsp)) { | ||
720 | ql_log(ql_log_fatal, base_vha, 0x003d, | ||
721 | "Failed to allocate memory for queue pointers..." | ||
722 | "aborting.\n"); | ||
723 | - goto probe_init_failed; | ||
724 | + goto probe_failed; | ||
725 | } | ||
726 | |||
727 | if (ha->mqenable && shost_use_blk_mq(host)) { | ||
728 | @@ -3363,15 +3372,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | ||
729 | |||
730 | return 0; | ||
731 | |||
732 | -probe_init_failed: | ||
733 | - qla2x00_free_req_que(ha, req); | ||
734 | - ha->req_q_map[0] = NULL; | ||
735 | - clear_bit(0, ha->req_qid_map); | ||
736 | - qla2x00_free_rsp_que(ha, rsp); | ||
737 | - ha->rsp_q_map[0] = NULL; | ||
738 | - clear_bit(0, ha->rsp_qid_map); | ||
739 | - ha->max_req_queues = ha->max_rsp_queues = 0; | ||
740 | - | ||
741 | probe_failed: | ||
742 | if (base_vha->timer_active) | ||
743 | qla2x00_stop_timer(base_vha); | ||
744 | @@ -4451,11 +4451,17 @@ qla2x00_mem_free(struct qla_hw_data *ha) | ||
745 | if (ha->init_cb) | ||
746 | dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, | ||
747 | ha->init_cb, ha->init_cb_dma); | ||
748 | - vfree(ha->optrom_buffer); | ||
749 | - kfree(ha->nvram); | ||
750 | - kfree(ha->npiv_info); | ||
751 | - kfree(ha->swl); | ||
752 | - kfree(ha->loop_id_map); | ||
753 | + | ||
754 | + if (ha->optrom_buffer) | ||
755 | + vfree(ha->optrom_buffer); | ||
756 | + if (ha->nvram) | ||
757 | + kfree(ha->nvram); | ||
758 | + if (ha->npiv_info) | ||
759 | + kfree(ha->npiv_info); | ||
760 | + if (ha->swl) | ||
761 | + kfree(ha->swl); | ||
762 | + if (ha->loop_id_map) | ||
763 | + kfree(ha->loop_id_map); | ||
764 | |||
765 | ha->srb_mempool = NULL; | ||
766 | ha->ctx_mempool = NULL; | ||
767 | @@ -4471,6 +4477,15 @@ qla2x00_mem_free(struct qla_hw_data *ha) | ||
768 | ha->ex_init_cb_dma = 0; | ||
769 | ha->async_pd = NULL; | ||
770 | ha->async_pd_dma = 0; | ||
771 | + ha->loop_id_map = NULL; | ||
772 | + ha->npiv_info = NULL; | ||
773 | + ha->optrom_buffer = NULL; | ||
774 | + ha->swl = NULL; | ||
775 | + ha->nvram = NULL; | ||
776 | + ha->mctp_dump = NULL; | ||
777 | + ha->dcbx_tlv = NULL; | ||
778 | + ha->xgmac_data = NULL; | ||
779 | + ha->sfp_data = NULL; | ||
780 | |||
781 | ha->s_dma_pool = NULL; | ||
782 | ha->dl_dma_pool = NULL; | ||
783 | diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c | ||
784 | index cb35bb1ae305..46bb4d057293 100644 | ||
785 | --- a/drivers/scsi/qla2xxx/qla_target.c | ||
786 | +++ b/drivers/scsi/qla2xxx/qla_target.c | ||
787 | @@ -982,6 +982,7 @@ static void qlt_free_session_done(struct work_struct *work) | ||
788 | |||
789 | logo.id = sess->d_id; | ||
790 | logo.cmd_count = 0; | ||
791 | + sess->send_els_logo = 0; | ||
792 | qlt_send_first_logo(vha, &logo); | ||
793 | } | ||
794 | |||
795 | diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c | ||
796 | index 03fd20f0b496..c4a47496d2fb 100644 | ||
797 | --- a/drivers/usb/dwc2/params.c | ||
798 | +++ b/drivers/usb/dwc2/params.c | ||
799 | @@ -137,7 +137,7 @@ static void dwc2_set_stm32f4x9_fsotg_params(struct dwc2_hsotg *hsotg) | ||
800 | p->activate_stm_fs_transceiver = true; | ||
801 | } | ||
802 | |||
803 | -static void dwc2_set_stm32f7xx_hsotg_params(struct dwc2_hsotg *hsotg) | ||
804 | +static void dwc2_set_stm32f7_hsotg_params(struct dwc2_hsotg *hsotg) | ||
805 | { | ||
806 | struct dwc2_core_params *p = &hsotg->params; | ||
807 | |||
808 | @@ -164,8 +164,8 @@ const struct of_device_id dwc2_of_match_table[] = { | ||
809 | { .compatible = "st,stm32f4x9-fsotg", | ||
810 | .data = dwc2_set_stm32f4x9_fsotg_params }, | ||
811 | { .compatible = "st,stm32f4x9-hsotg" }, | ||
812 | - { .compatible = "st,stm32f7xx-hsotg", | ||
813 | - .data = dwc2_set_stm32f7xx_hsotg_params }, | ||
814 | + { .compatible = "st,stm32f7-hsotg", | ||
815 | + .data = dwc2_set_stm32f7_hsotg_params }, | ||
816 | {}, | ||
817 | }; | ||
818 | MODULE_DEVICE_TABLE(of, dwc2_of_match_table); | ||
819 | diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c | ||
820 | index 51de21ef3cdc..b417d9aeaeeb 100644 | ||
821 | --- a/drivers/usb/dwc3/core.c | ||
822 | +++ b/drivers/usb/dwc3/core.c | ||
823 | @@ -100,6 +100,8 @@ static void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode) | ||
824 | reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG)); | ||
825 | reg |= DWC3_GCTL_PRTCAPDIR(mode); | ||
826 | dwc3_writel(dwc->regs, DWC3_GCTL, reg); | ||
827 | + | ||
828 | + dwc->current_dr_role = mode; | ||
829 | } | ||
830 | |||
831 | static void __dwc3_set_mode(struct work_struct *work) | ||
832 | @@ -133,8 +135,6 @@ static void __dwc3_set_mode(struct work_struct *work) | ||
833 | |||
834 | dwc3_set_prtcap(dwc, dwc->desired_dr_role); | ||
835 | |||
836 | - dwc->current_dr_role = dwc->desired_dr_role; | ||
837 | - | ||
838 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
839 | |||
840 | switch (dwc->desired_dr_role) { | ||
841 | @@ -218,7 +218,7 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc) | ||
842 | * XHCI driver will reset the host block. If dwc3 was configured for | ||
843 | * host-only mode, then we can return early. | ||
844 | */ | ||
845 | - if (dwc->dr_mode == USB_DR_MODE_HOST) | ||
846 | + if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) | ||
847 | return 0; | ||
848 | |||
849 | reg = dwc3_readl(dwc->regs, DWC3_DCTL); | ||
850 | @@ -915,7 +915,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc) | ||
851 | |||
852 | switch (dwc->dr_mode) { | ||
853 | case USB_DR_MODE_PERIPHERAL: | ||
854 | - dwc->current_dr_role = DWC3_GCTL_PRTCAP_DEVICE; | ||
855 | dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); | ||
856 | |||
857 | if (dwc->usb2_phy) | ||
858 | @@ -931,7 +930,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc) | ||
859 | } | ||
860 | break; | ||
861 | case USB_DR_MODE_HOST: | ||
862 | - dwc->current_dr_role = DWC3_GCTL_PRTCAP_HOST; | ||
863 | dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); | ||
864 | |||
865 | if (dwc->usb2_phy) | ||
866 | @@ -1279,7 +1277,7 @@ static int dwc3_remove(struct platform_device *pdev) | ||
867 | } | ||
868 | |||
869 | #ifdef CONFIG_PM | ||
870 | -static int dwc3_suspend_common(struct dwc3 *dwc) | ||
871 | +static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) | ||
872 | { | ||
873 | unsigned long flags; | ||
874 | |||
875 | @@ -1291,6 +1289,10 @@ static int dwc3_suspend_common(struct dwc3 *dwc) | ||
876 | dwc3_core_exit(dwc); | ||
877 | break; | ||
878 | case DWC3_GCTL_PRTCAP_HOST: | ||
879 | + /* do nothing during host runtime_suspend */ | ||
880 | + if (!PMSG_IS_AUTO(msg)) | ||
881 | + dwc3_core_exit(dwc); | ||
882 | + break; | ||
883 | default: | ||
884 | /* do nothing */ | ||
885 | break; | ||
886 | @@ -1299,7 +1301,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc) | ||
887 | return 0; | ||
888 | } | ||
889 | |||
890 | -static int dwc3_resume_common(struct dwc3 *dwc) | ||
891 | +static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) | ||
892 | { | ||
893 | unsigned long flags; | ||
894 | int ret; | ||
895 | @@ -1315,6 +1317,13 @@ static int dwc3_resume_common(struct dwc3 *dwc) | ||
896 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
897 | break; | ||
898 | case DWC3_GCTL_PRTCAP_HOST: | ||
899 | + /* nothing to do on host runtime_resume */ | ||
900 | + if (!PMSG_IS_AUTO(msg)) { | ||
901 | + ret = dwc3_core_init(dwc); | ||
902 | + if (ret) | ||
903 | + return ret; | ||
904 | + } | ||
905 | + break; | ||
906 | default: | ||
907 | /* do nothing */ | ||
908 | break; | ||
909 | @@ -1326,12 +1335,11 @@ static int dwc3_resume_common(struct dwc3 *dwc) | ||
910 | static int dwc3_runtime_checks(struct dwc3 *dwc) | ||
911 | { | ||
912 | switch (dwc->current_dr_role) { | ||
913 | - case USB_DR_MODE_PERIPHERAL: | ||
914 | - case USB_DR_MODE_OTG: | ||
915 | + case DWC3_GCTL_PRTCAP_DEVICE: | ||
916 | if (dwc->connected) | ||
917 | return -EBUSY; | ||
918 | break; | ||
919 | - case USB_DR_MODE_HOST: | ||
920 | + case DWC3_GCTL_PRTCAP_HOST: | ||
921 | default: | ||
922 | /* do nothing */ | ||
923 | break; | ||
924 | @@ -1348,7 +1356,7 @@ static int dwc3_runtime_suspend(struct device *dev) | ||
925 | if (dwc3_runtime_checks(dwc)) | ||
926 | return -EBUSY; | ||
927 | |||
928 | - ret = dwc3_suspend_common(dwc); | ||
929 | + ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND); | ||
930 | if (ret) | ||
931 | return ret; | ||
932 | |||
933 | @@ -1364,7 +1372,7 @@ static int dwc3_runtime_resume(struct device *dev) | ||
934 | |||
935 | device_init_wakeup(dev, false); | ||
936 | |||
937 | - ret = dwc3_resume_common(dwc); | ||
938 | + ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME); | ||
939 | if (ret) | ||
940 | return ret; | ||
941 | |||
942 | @@ -1411,7 +1419,7 @@ static int dwc3_suspend(struct device *dev) | ||
943 | struct dwc3 *dwc = dev_get_drvdata(dev); | ||
944 | int ret; | ||
945 | |||
946 | - ret = dwc3_suspend_common(dwc); | ||
947 | + ret = dwc3_suspend_common(dwc, PMSG_SUSPEND); | ||
948 | if (ret) | ||
949 | return ret; | ||
950 | |||
951 | @@ -1427,7 +1435,7 @@ static int dwc3_resume(struct device *dev) | ||
952 | |||
953 | pinctrl_pm_select_default_state(dev); | ||
954 | |||
955 | - ret = dwc3_resume_common(dwc); | ||
956 | + ret = dwc3_resume_common(dwc, PMSG_RESUME); | ||
957 | if (ret) | ||
958 | return ret; | ||
959 | |||
960 | diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h | ||
961 | index 4a4a4c98508c..6d4e7a66cedd 100644 | ||
962 | --- a/drivers/usb/dwc3/core.h | ||
963 | +++ b/drivers/usb/dwc3/core.h | ||
964 | @@ -158,13 +158,15 @@ | ||
965 | #define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0) | ||
966 | #define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff) | ||
967 | |||
968 | -#define DWC3_TXFIFOQ 1 | ||
969 | -#define DWC3_RXFIFOQ 3 | ||
970 | -#define DWC3_TXREQQ 5 | ||
971 | -#define DWC3_RXREQQ 7 | ||
972 | -#define DWC3_RXINFOQ 9 | ||
973 | -#define DWC3_DESCFETCHQ 13 | ||
974 | -#define DWC3_EVENTQ 15 | ||
975 | +#define DWC3_TXFIFOQ 0 | ||
976 | +#define DWC3_RXFIFOQ 1 | ||
977 | +#define DWC3_TXREQQ 2 | ||
978 | +#define DWC3_RXREQQ 3 | ||
979 | +#define DWC3_RXINFOQ 4 | ||
980 | +#define DWC3_PSTATQ 5 | ||
981 | +#define DWC3_DESCFETCHQ 6 | ||
982 | +#define DWC3_EVENTQ 7 | ||
983 | +#define DWC3_AUXEVENTQ 8 | ||
984 | |||
985 | /* Global RX Threshold Configuration Register */ | ||
986 | #define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19) | ||
987 | diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c | ||
988 | index 7ae0eefc7cc7..e54c3622eb28 100644 | ||
989 | --- a/drivers/usb/dwc3/dwc3-of-simple.c | ||
990 | +++ b/drivers/usb/dwc3/dwc3-of-simple.c | ||
991 | @@ -143,6 +143,7 @@ static int dwc3_of_simple_remove(struct platform_device *pdev) | ||
992 | clk_disable_unprepare(simple->clks[i]); | ||
993 | clk_put(simple->clks[i]); | ||
994 | } | ||
995 | + simple->num_clocks = 0; | ||
996 | |||
997 | reset_control_assert(simple->resets); | ||
998 | reset_control_put(simple->resets); | ||
999 | diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c | ||
1000 | index 1e940f054cb8..6dbc489513cd 100644 | ||
1001 | --- a/drivers/usb/gadget/udc/bdc/bdc_pci.c | ||
1002 | +++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c | ||
1003 | @@ -77,6 +77,7 @@ static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) | ||
1004 | if (ret) { | ||
1005 | dev_err(&pci->dev, | ||
1006 | "couldn't add resources to bdc device\n"); | ||
1007 | + platform_device_put(bdc); | ||
1008 | return ret; | ||
1009 | } | ||
1010 | |||
1011 | diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c | ||
1012 | index 6e87af248367..409cde4e6a51 100644 | ||
1013 | --- a/drivers/usb/gadget/udc/renesas_usb3.c | ||
1014 | +++ b/drivers/usb/gadget/udc/renesas_usb3.c | ||
1015 | @@ -2410,7 +2410,7 @@ static int renesas_usb3_remove(struct platform_device *pdev) | ||
1016 | __renesas_usb3_ep_free_request(usb3->ep0_req); | ||
1017 | if (usb3->phy) | ||
1018 | phy_put(usb3->phy); | ||
1019 | - pm_runtime_disable(usb3_to_dev(usb3)); | ||
1020 | + pm_runtime_disable(&pdev->dev); | ||
1021 | |||
1022 | return 0; | ||
1023 | } | ||
1024 | diff --git a/fs/aio.c b/fs/aio.c | ||
1025 | index a062d75109cb..6bcd3fb5265a 100644 | ||
1026 | --- a/fs/aio.c | ||
1027 | +++ b/fs/aio.c | ||
1028 | @@ -68,9 +68,9 @@ struct aio_ring { | ||
1029 | #define AIO_RING_PAGES 8 | ||
1030 | |||
1031 | struct kioctx_table { | ||
1032 | - struct rcu_head rcu; | ||
1033 | - unsigned nr; | ||
1034 | - struct kioctx *table[]; | ||
1035 | + struct rcu_head rcu; | ||
1036 | + unsigned nr; | ||
1037 | + struct kioctx __rcu *table[]; | ||
1038 | }; | ||
1039 | |||
1040 | struct kioctx_cpu { | ||
1041 | @@ -115,7 +115,8 @@ struct kioctx { | ||
1042 | struct page **ring_pages; | ||
1043 | long nr_pages; | ||
1044 | |||
1045 | - struct work_struct free_work; | ||
1046 | + struct rcu_head free_rcu; | ||
1047 | + struct work_struct free_work; /* see free_ioctx() */ | ||
1048 | |||
1049 | /* | ||
1050 | * signals when all in-flight requests are done | ||
1051 | @@ -329,7 +330,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma) | ||
1052 | for (i = 0; i < table->nr; i++) { | ||
1053 | struct kioctx *ctx; | ||
1054 | |||
1055 | - ctx = table->table[i]; | ||
1056 | + ctx = rcu_dereference(table->table[i]); | ||
1057 | if (ctx && ctx->aio_ring_file == file) { | ||
1058 | if (!atomic_read(&ctx->dead)) { | ||
1059 | ctx->user_id = ctx->mmap_base = vma->vm_start; | ||
1060 | @@ -588,6 +589,12 @@ static int kiocb_cancel(struct aio_kiocb *kiocb) | ||
1061 | return cancel(&kiocb->common); | ||
1062 | } | ||
1063 | |||
1064 | +/* | ||
1065 | + * free_ioctx() should be RCU delayed to synchronize against the RCU | ||
1066 | + * protected lookup_ioctx() and also needs process context to call | ||
1067 | + * aio_free_ring(), so the double bouncing through kioctx->free_rcu and | ||
1068 | + * ->free_work. | ||
1069 | + */ | ||
1070 | static void free_ioctx(struct work_struct *work) | ||
1071 | { | ||
1072 | struct kioctx *ctx = container_of(work, struct kioctx, free_work); | ||
1073 | @@ -601,6 +608,14 @@ static void free_ioctx(struct work_struct *work) | ||
1074 | kmem_cache_free(kioctx_cachep, ctx); | ||
1075 | } | ||
1076 | |||
1077 | +static void free_ioctx_rcufn(struct rcu_head *head) | ||
1078 | +{ | ||
1079 | + struct kioctx *ctx = container_of(head, struct kioctx, free_rcu); | ||
1080 | + | ||
1081 | + INIT_WORK(&ctx->free_work, free_ioctx); | ||
1082 | + schedule_work(&ctx->free_work); | ||
1083 | +} | ||
1084 | + | ||
1085 | static void free_ioctx_reqs(struct percpu_ref *ref) | ||
1086 | { | ||
1087 | struct kioctx *ctx = container_of(ref, struct kioctx, reqs); | ||
1088 | @@ -609,8 +624,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref) | ||
1089 | if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) | ||
1090 | complete(&ctx->rq_wait->comp); | ||
1091 | |||
1092 | - INIT_WORK(&ctx->free_work, free_ioctx); | ||
1093 | - schedule_work(&ctx->free_work); | ||
1094 | + /* Synchronize against RCU protected table->table[] dereferences */ | ||
1095 | + call_rcu(&ctx->free_rcu, free_ioctx_rcufn); | ||
1096 | } | ||
1097 | |||
1098 | /* | ||
1099 | @@ -651,9 +666,9 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) | ||
1100 | while (1) { | ||
1101 | if (table) | ||
1102 | for (i = 0; i < table->nr; i++) | ||
1103 | - if (!table->table[i]) { | ||
1104 | + if (!rcu_access_pointer(table->table[i])) { | ||
1105 | ctx->id = i; | ||
1106 | - table->table[i] = ctx; | ||
1107 | + rcu_assign_pointer(table->table[i], ctx); | ||
1108 | spin_unlock(&mm->ioctx_lock); | ||
1109 | |||
1110 | /* While kioctx setup is in progress, | ||
1111 | @@ -834,11 +849,11 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, | ||
1112 | } | ||
1113 | |||
1114 | table = rcu_dereference_raw(mm->ioctx_table); | ||
1115 | - WARN_ON(ctx != table->table[ctx->id]); | ||
1116 | - table->table[ctx->id] = NULL; | ||
1117 | + WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id])); | ||
1118 | + RCU_INIT_POINTER(table->table[ctx->id], NULL); | ||
1119 | spin_unlock(&mm->ioctx_lock); | ||
1120 | |||
1121 | - /* percpu_ref_kill() will do the necessary call_rcu() */ | ||
1122 | + /* free_ioctx_reqs() will do the necessary RCU synchronization */ | ||
1123 | wake_up_all(&ctx->wait); | ||
1124 | |||
1125 | /* | ||
1126 | @@ -880,7 +895,8 @@ void exit_aio(struct mm_struct *mm) | ||
1127 | |||
1128 | skipped = 0; | ||
1129 | for (i = 0; i < table->nr; ++i) { | ||
1130 | - struct kioctx *ctx = table->table[i]; | ||
1131 | + struct kioctx *ctx = | ||
1132 | + rcu_dereference_protected(table->table[i], true); | ||
1133 | |||
1134 | if (!ctx) { | ||
1135 | skipped++; | ||
1136 | @@ -1069,7 +1085,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id) | ||
1137 | if (!table || id >= table->nr) | ||
1138 | goto out; | ||
1139 | |||
1140 | - ctx = table->table[id]; | ||
1141 | + ctx = rcu_dereference(table->table[id]); | ||
1142 | if (ctx && ctx->user_id == ctx_id) { | ||
1143 | percpu_ref_get(&ctx->users); | ||
1144 | ret = ctx; | ||
1145 | diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c | ||
1146 | index 7d0dc100a09a..8a9df8003345 100644 | ||
1147 | --- a/fs/btrfs/backref.c | ||
1148 | +++ b/fs/btrfs/backref.c | ||
1149 | @@ -1263,7 +1263,16 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans, | ||
1150 | while (node) { | ||
1151 | ref = rb_entry(node, struct prelim_ref, rbnode); | ||
1152 | node = rb_next(&ref->rbnode); | ||
1153 | - WARN_ON(ref->count < 0); | ||
1154 | + /* | ||
1155 | + * ref->count < 0 can happen here if there are delayed | ||
1156 | + * refs with a node->action of BTRFS_DROP_DELAYED_REF. | ||
1157 | + * prelim_ref_insert() relies on this when merging | ||
1158 | + * identical refs to keep the overall count correct. | ||
1159 | + * prelim_ref_insert() will merge only those refs | ||
1160 | + * which compare identically. Any refs having | ||
1161 | + * e.g. different offsets would not be merged, | ||
1162 | + * and would retain their original ref->count < 0. | ||
1163 | + */ | ||
1164 | if (roots && ref->count && ref->root_id && ref->parent == 0) { | ||
1165 | if (sc && sc->root_objectid && | ||
1166 | ref->root_id != sc->root_objectid) { | ||
1167 | @@ -1509,6 +1518,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr) | ||
1168 | if (!node) | ||
1169 | break; | ||
1170 | bytenr = node->val; | ||
1171 | + shared.share_count = 0; | ||
1172 | cond_resched(); | ||
1173 | } | ||
1174 | |||
1175 | diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c | ||
1176 | index 8903c4fbf7e6..8a3e42412506 100644 | ||
1177 | --- a/fs/btrfs/raid56.c | ||
1178 | +++ b/fs/btrfs/raid56.c | ||
1179 | @@ -1351,6 +1351,7 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio, | ||
1180 | stripe_start = stripe->physical; | ||
1181 | if (physical >= stripe_start && | ||
1182 | physical < stripe_start + rbio->stripe_len && | ||
1183 | + stripe->dev->bdev && | ||
1184 | bio->bi_disk == stripe->dev->bdev->bd_disk && | ||
1185 | bio->bi_partno == stripe->dev->bdev->bd_partno) { | ||
1186 | return i; | ||
1187 | diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c | ||
1188 | index a25684287501..6631f48c6a11 100644 | ||
1189 | --- a/fs/btrfs/volumes.c | ||
1190 | +++ b/fs/btrfs/volumes.c | ||
1191 | @@ -574,6 +574,7 @@ static void btrfs_free_stale_device(struct btrfs_device *cur_dev) | ||
1192 | btrfs_sysfs_remove_fsid(fs_devs); | ||
1193 | list_del(&fs_devs->list); | ||
1194 | free_fs_devices(fs_devs); | ||
1195 | + break; | ||
1196 | } else { | ||
1197 | fs_devs->num_devices--; | ||
1198 | list_del(&dev->dev_list); | ||
1199 | @@ -4737,10 +4738,13 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, | ||
1200 | ndevs = min(ndevs, devs_max); | ||
1201 | |||
1202 | /* | ||
1203 | - * the primary goal is to maximize the number of stripes, so use as many | ||
1204 | - * devices as possible, even if the stripes are not maximum sized. | ||
1205 | + * The primary goal is to maximize the number of stripes, so use as | ||
1206 | + * many devices as possible, even if the stripes are not maximum sized. | ||
1207 | + * | ||
1208 | + * The DUP profile stores more than one stripe per device, the | ||
1209 | + * max_avail is the total size so we have to adjust. | ||
1210 | */ | ||
1211 | - stripe_size = devices_info[ndevs-1].max_avail; | ||
1212 | + stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes); | ||
1213 | num_stripes = ndevs * dev_stripes; | ||
1214 | |||
1215 | /* | ||
1216 | @@ -4775,8 +4779,6 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, | ||
1217 | stripe_size = devices_info[ndevs-1].max_avail; | ||
1218 | } | ||
1219 | |||
1220 | - stripe_size = div_u64(stripe_size, dev_stripes); | ||
1221 | - | ||
1222 | /* align to BTRFS_STRIPE_LEN */ | ||
1223 | stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN); | ||
1224 | |||
1225 | @@ -7091,10 +7093,24 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans, | ||
1226 | |||
1227 | mutex_lock(&fs_devices->device_list_mutex); | ||
1228 | list_for_each_entry(device, &fs_devices->devices, dev_list) { | ||
1229 | - if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device)) | ||
1230 | + stats_cnt = atomic_read(&device->dev_stats_ccnt); | ||
1231 | + if (!device->dev_stats_valid || stats_cnt == 0) | ||
1232 | continue; | ||
1233 | |||
1234 | - stats_cnt = atomic_read(&device->dev_stats_ccnt); | ||
1235 | + | ||
1236 | + /* | ||
1237 | + * There is a LOAD-LOAD control dependency between the value of | ||
1238 | + * dev_stats_ccnt and updating the on-disk values which requires | ||
1239 | + * reading the in-memory counters. Such control dependencies | ||
1240 | + * require explicit read memory barriers. | ||
1241 | + * | ||
1242 | + * This memory barriers pairs with smp_mb__before_atomic in | ||
1243 | + * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full | ||
1244 | + * barrier implied by atomic_xchg in | ||
1245 | + * btrfs_dev_stats_read_and_reset | ||
1246 | + */ | ||
1247 | + smp_rmb(); | ||
1248 | + | ||
1249 | ret = update_dev_stat_item(trans, fs_info, device); | ||
1250 | if (!ret) | ||
1251 | atomic_sub(stats_cnt, &device->dev_stats_ccnt); | ||
1252 | diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h | ||
1253 | index ff15208344a7..52ee7b094f3f 100644 | ||
1254 | --- a/fs/btrfs/volumes.h | ||
1255 | +++ b/fs/btrfs/volumes.h | ||
1256 | @@ -498,6 +498,12 @@ static inline void btrfs_dev_stat_inc(struct btrfs_device *dev, | ||
1257 | int index) | ||
1258 | { | ||
1259 | atomic_inc(dev->dev_stat_values + index); | ||
1260 | + /* | ||
1261 | + * This memory barrier orders stores updating statistics before stores | ||
1262 | + * updating dev_stats_ccnt. | ||
1263 | + * | ||
1264 | + * It pairs with smp_rmb() in btrfs_run_dev_stats(). | ||
1265 | + */ | ||
1266 | smp_mb__before_atomic(); | ||
1267 | atomic_inc(&dev->dev_stats_ccnt); | ||
1268 | } | ||
1269 | @@ -523,6 +529,12 @@ static inline void btrfs_dev_stat_set(struct btrfs_device *dev, | ||
1270 | int index, unsigned long val) | ||
1271 | { | ||
1272 | atomic_set(dev->dev_stat_values + index, val); | ||
1273 | + /* | ||
1274 | + * This memory barrier orders stores updating statistics before stores | ||
1275 | + * updating dev_stats_ccnt. | ||
1276 | + * | ||
1277 | + * It pairs with smp_rmb() in btrfs_run_dev_stats(). | ||
1278 | + */ | ||
1279 | smp_mb__before_atomic(); | ||
1280 | atomic_inc(&dev->dev_stats_ccnt); | ||
1281 | } | ||
1282 | diff --git a/fs/dcache.c b/fs/dcache.c | ||
1283 | index 5c7df1df81ff..eb2c297a87d0 100644 | ||
1284 | --- a/fs/dcache.c | ||
1285 | +++ b/fs/dcache.c | ||
1286 | @@ -644,11 +644,16 @@ static inline struct dentry *lock_parent(struct dentry *dentry) | ||
1287 | spin_unlock(&parent->d_lock); | ||
1288 | goto again; | ||
1289 | } | ||
1290 | - rcu_read_unlock(); | ||
1291 | - if (parent != dentry) | ||
1292 | + if (parent != dentry) { | ||
1293 | spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); | ||
1294 | - else | ||
1295 | + if (unlikely(dentry->d_lockref.count < 0)) { | ||
1296 | + spin_unlock(&parent->d_lock); | ||
1297 | + parent = NULL; | ||
1298 | + } | ||
1299 | + } else { | ||
1300 | parent = NULL; | ||
1301 | + } | ||
1302 | + rcu_read_unlock(); | ||
1303 | return parent; | ||
1304 | } | ||
1305 | |||
1306 | diff --git a/fs/namei.c b/fs/namei.c | ||
1307 | index 4e3fc58dae72..ee19c4ef24b2 100644 | ||
1308 | --- a/fs/namei.c | ||
1309 | +++ b/fs/namei.c | ||
1310 | @@ -578,9 +578,10 @@ static int __nd_alloc_stack(struct nameidata *nd) | ||
1311 | static bool path_connected(const struct path *path) | ||
1312 | { | ||
1313 | struct vfsmount *mnt = path->mnt; | ||
1314 | + struct super_block *sb = mnt->mnt_sb; | ||
1315 | |||
1316 | - /* Only bind mounts can have disconnected paths */ | ||
1317 | - if (mnt->mnt_root == mnt->mnt_sb->s_root) | ||
1318 | + /* Bind mounts and multi-root filesystems can have disconnected paths */ | ||
1319 | + if (!(sb->s_iflags & SB_I_MULTIROOT) && (mnt->mnt_root == sb->s_root)) | ||
1320 | return true; | ||
1321 | |||
1322 | return is_subdir(path->dentry, mnt->mnt_root); | ||
1323 | diff --git a/fs/nfs/super.c b/fs/nfs/super.c | ||
1324 | index 29bacdc56f6a..5e470e233c83 100644 | ||
1325 | --- a/fs/nfs/super.c | ||
1326 | +++ b/fs/nfs/super.c | ||
1327 | @@ -2631,6 +2631,8 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server, | ||
1328 | /* initial superblock/root creation */ | ||
1329 | mount_info->fill_super(s, mount_info); | ||
1330 | nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned); | ||
1331 | + if (!(server->flags & NFS_MOUNT_UNSHARED)) | ||
1332 | + s->s_iflags |= SB_I_MULTIROOT; | ||
1333 | } | ||
1334 | |||
1335 | mntroot = nfs_get_root(s, mount_info->mntfh, dev_name); | ||
1336 | diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c | ||
1337 | index 3861d61fb265..3ce946063ffe 100644 | ||
1338 | --- a/fs/xfs/xfs_icache.c | ||
1339 | +++ b/fs/xfs/xfs_icache.c | ||
1340 | @@ -295,6 +295,7 @@ xfs_reinit_inode( | ||
1341 | uint32_t generation = inode->i_generation; | ||
1342 | uint64_t version = inode->i_version; | ||
1343 | umode_t mode = inode->i_mode; | ||
1344 | + dev_t dev = inode->i_rdev; | ||
1345 | |||
1346 | error = inode_init_always(mp->m_super, inode); | ||
1347 | |||
1348 | @@ -302,6 +303,7 @@ xfs_reinit_inode( | ||
1349 | inode->i_generation = generation; | ||
1350 | inode->i_version = version; | ||
1351 | inode->i_mode = mode; | ||
1352 | + inode->i_rdev = dev; | ||
1353 | return error; | ||
1354 | } | ||
1355 | |||
1356 | diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h | ||
1357 | index 8c896540a72c..ff58c2933fdf 100644 | ||
1358 | --- a/include/kvm/arm_vgic.h | ||
1359 | +++ b/include/kvm/arm_vgic.h | ||
1360 | @@ -349,6 +349,7 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu); | ||
1361 | bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu); | ||
1362 | void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); | ||
1363 | void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); | ||
1364 | +void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid); | ||
1365 | |||
1366 | void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); | ||
1367 | |||
1368 | diff --git a/include/linux/fs.h b/include/linux/fs.h | ||
1369 | index 79421287ff5e..d8af431d9c91 100644 | ||
1370 | --- a/include/linux/fs.h | ||
1371 | +++ b/include/linux/fs.h | ||
1372 | @@ -1312,6 +1312,7 @@ extern int send_sigurg(struct fown_struct *fown); | ||
1373 | #define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */ | ||
1374 | #define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */ | ||
1375 | #define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */ | ||
1376 | +#define SB_I_MULTIROOT 0x00000008 /* Multiple roots to the dentry tree */ | ||
1377 | |||
1378 | /* sb->s_iflags to limit user namespace mounts */ | ||
1379 | #define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */ | ||
1380 | diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h | ||
1381 | index c00c4c33e432..b26eccc78fb1 100644 | ||
1382 | --- a/include/linux/irqchip/arm-gic-v3.h | ||
1383 | +++ b/include/linux/irqchip/arm-gic-v3.h | ||
1384 | @@ -503,6 +503,7 @@ | ||
1385 | |||
1386 | #define ICH_HCR_EN (1 << 0) | ||
1387 | #define ICH_HCR_UIE (1 << 1) | ||
1388 | +#define ICH_HCR_NPIE (1 << 3) | ||
1389 | #define ICH_HCR_TC (1 << 10) | ||
1390 | #define ICH_HCR_TALL0 (1 << 11) | ||
1391 | #define ICH_HCR_TALL1 (1 << 12) | ||
1392 | diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h | ||
1393 | index d3453ee072fc..68d8b1f73682 100644 | ||
1394 | --- a/include/linux/irqchip/arm-gic.h | ||
1395 | +++ b/include/linux/irqchip/arm-gic.h | ||
1396 | @@ -84,6 +84,7 @@ | ||
1397 | |||
1398 | #define GICH_HCR_EN (1 << 0) | ||
1399 | #define GICH_HCR_UIE (1 << 1) | ||
1400 | +#define GICH_HCR_NPIE (1 << 3) | ||
1401 | |||
1402 | #define GICH_LR_VIRTUALID (0x3ff << 0) | ||
1403 | #define GICH_LR_PHYSID_CPUID_SHIFT (10) | ||
1404 | diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c | ||
1405 | index c2db7e905f7d..012881461058 100644 | ||
1406 | --- a/sound/core/oss/pcm_oss.c | ||
1407 | +++ b/sound/core/oss/pcm_oss.c | ||
1408 | @@ -1762,10 +1762,9 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file) | ||
1409 | return -ENOMEM; | ||
1410 | _snd_pcm_hw_params_any(params); | ||
1411 | err = snd_pcm_hw_refine(substream, params); | ||
1412 | - format_mask = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT); | ||
1413 | - kfree(params); | ||
1414 | if (err < 0) | ||
1415 | - return err; | ||
1416 | + goto error; | ||
1417 | + format_mask = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT); | ||
1418 | for (fmt = 0; fmt < 32; ++fmt) { | ||
1419 | if (snd_mask_test(format_mask, fmt)) { | ||
1420 | int f = snd_pcm_oss_format_to(fmt); | ||
1421 | @@ -1773,7 +1772,10 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file) | ||
1422 | formats |= f; | ||
1423 | } | ||
1424 | } | ||
1425 | - return formats; | ||
1426 | + | ||
1427 | + error: | ||
1428 | + kfree(params); | ||
1429 | + return err < 0 ? err : formats; | ||
1430 | } | ||
1431 | |||
1432 | static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format) | ||
1433 | diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c | ||
1434 | index 35ff97bfd492..6204b886309a 100644 | ||
1435 | --- a/sound/core/seq/seq_clientmgr.c | ||
1436 | +++ b/sound/core/seq/seq_clientmgr.c | ||
1437 | @@ -255,12 +255,12 @@ static int seq_free_client1(struct snd_seq_client *client) | ||
1438 | |||
1439 | if (!client) | ||
1440 | return 0; | ||
1441 | - snd_seq_delete_all_ports(client); | ||
1442 | - snd_seq_queue_client_leave(client->number); | ||
1443 | spin_lock_irqsave(&clients_lock, flags); | ||
1444 | clienttablock[client->number] = 1; | ||
1445 | clienttab[client->number] = NULL; | ||
1446 | spin_unlock_irqrestore(&clients_lock, flags); | ||
1447 | + snd_seq_delete_all_ports(client); | ||
1448 | + snd_seq_queue_client_leave(client->number); | ||
1449 | snd_use_lock_sync(&client->use_lock); | ||
1450 | snd_seq_queue_client_termination(client->number); | ||
1451 | if (client->pool) | ||
1452 | diff --git a/sound/core/seq/seq_prioq.c b/sound/core/seq/seq_prioq.c | ||
1453 | index bc1c8488fc2a..2bc6759e4adc 100644 | ||
1454 | --- a/sound/core/seq/seq_prioq.c | ||
1455 | +++ b/sound/core/seq/seq_prioq.c | ||
1456 | @@ -87,7 +87,7 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo) | ||
1457 | if (f->cells > 0) { | ||
1458 | /* drain prioQ */ | ||
1459 | while (f->cells > 0) | ||
1460 | - snd_seq_cell_free(snd_seq_prioq_cell_out(f)); | ||
1461 | + snd_seq_cell_free(snd_seq_prioq_cell_out(f, NULL)); | ||
1462 | } | ||
1463 | |||
1464 | kfree(f); | ||
1465 | @@ -214,8 +214,18 @@ int snd_seq_prioq_cell_in(struct snd_seq_prioq * f, | ||
1466 | return 0; | ||
1467 | } | ||
1468 | |||
1469 | +/* return 1 if the current time >= event timestamp */ | ||
1470 | +static int event_is_ready(struct snd_seq_event *ev, void *current_time) | ||
1471 | +{ | ||
1472 | + if ((ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) == SNDRV_SEQ_TIME_STAMP_TICK) | ||
1473 | + return snd_seq_compare_tick_time(current_time, &ev->time.tick); | ||
1474 | + else | ||
1475 | + return snd_seq_compare_real_time(current_time, &ev->time.time); | ||
1476 | +} | ||
1477 | + | ||
1478 | /* dequeue cell from prioq */ | ||
1479 | -struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f) | ||
1480 | +struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f, | ||
1481 | + void *current_time) | ||
1482 | { | ||
1483 | struct snd_seq_event_cell *cell; | ||
1484 | unsigned long flags; | ||
1485 | @@ -227,6 +237,8 @@ struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f) | ||
1486 | spin_lock_irqsave(&f->lock, flags); | ||
1487 | |||
1488 | cell = f->head; | ||
1489 | + if (cell && current_time && !event_is_ready(&cell->event, current_time)) | ||
1490 | + cell = NULL; | ||
1491 | if (cell) { | ||
1492 | f->head = cell->next; | ||
1493 | |||
1494 | @@ -252,18 +264,6 @@ int snd_seq_prioq_avail(struct snd_seq_prioq * f) | ||
1495 | return f->cells; | ||
1496 | } | ||
1497 | |||
1498 | - | ||
1499 | -/* peek at cell at the head of the prioq */ | ||
1500 | -struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq * f) | ||
1501 | -{ | ||
1502 | - if (f == NULL) { | ||
1503 | - pr_debug("ALSA: seq: snd_seq_prioq_cell_in() called with NULL prioq\n"); | ||
1504 | - return NULL; | ||
1505 | - } | ||
1506 | - return f->head; | ||
1507 | -} | ||
1508 | - | ||
1509 | - | ||
1510 | static inline int prioq_match(struct snd_seq_event_cell *cell, | ||
1511 | int client, int timestamp) | ||
1512 | { | ||
1513 | diff --git a/sound/core/seq/seq_prioq.h b/sound/core/seq/seq_prioq.h | ||
1514 | index d38bb78d9345..2c315ca10fc4 100644 | ||
1515 | --- a/sound/core/seq/seq_prioq.h | ||
1516 | +++ b/sound/core/seq/seq_prioq.h | ||
1517 | @@ -44,14 +44,12 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo); | ||
1518 | int snd_seq_prioq_cell_in(struct snd_seq_prioq *f, struct snd_seq_event_cell *cell); | ||
1519 | |||
1520 | /* dequeue cell from prioq */ | ||
1521 | -struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f); | ||
1522 | +struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f, | ||
1523 | + void *current_time); | ||
1524 | |||
1525 | /* return number of events available in prioq */ | ||
1526 | int snd_seq_prioq_avail(struct snd_seq_prioq *f); | ||
1527 | |||
1528 | -/* peek at cell at the head of the prioq */ | ||
1529 | -struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq *f); | ||
1530 | - | ||
1531 | /* client left queue */ | ||
1532 | void snd_seq_prioq_leave(struct snd_seq_prioq *f, int client, int timestamp); | ||
1533 | |||
1534 | diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c | ||
1535 | index 79e0c5604ef8..1a6dc4ff44a6 100644 | ||
1536 | --- a/sound/core/seq/seq_queue.c | ||
1537 | +++ b/sound/core/seq/seq_queue.c | ||
1538 | @@ -277,30 +277,20 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop) | ||
1539 | |||
1540 | __again: | ||
1541 | /* Process tick queue... */ | ||
1542 | - while ((cell = snd_seq_prioq_cell_peek(q->tickq)) != NULL) { | ||
1543 | - if (snd_seq_compare_tick_time(&q->timer->tick.cur_tick, | ||
1544 | - &cell->event.time.tick)) { | ||
1545 | - cell = snd_seq_prioq_cell_out(q->tickq); | ||
1546 | - if (cell) | ||
1547 | - snd_seq_dispatch_event(cell, atomic, hop); | ||
1548 | - } else { | ||
1549 | - /* event remains in the queue */ | ||
1550 | + for (;;) { | ||
1551 | + cell = snd_seq_prioq_cell_out(q->tickq, | ||
1552 | + &q->timer->tick.cur_tick); | ||
1553 | + if (!cell) | ||
1554 | break; | ||
1555 | - } | ||
1556 | + snd_seq_dispatch_event(cell, atomic, hop); | ||
1557 | } | ||
1558 | |||
1559 | - | ||
1560 | /* Process time queue... */ | ||
1561 | - while ((cell = snd_seq_prioq_cell_peek(q->timeq)) != NULL) { | ||
1562 | - if (snd_seq_compare_real_time(&q->timer->cur_time, | ||
1563 | - &cell->event.time.time)) { | ||
1564 | - cell = snd_seq_prioq_cell_out(q->timeq); | ||
1565 | - if (cell) | ||
1566 | - snd_seq_dispatch_event(cell, atomic, hop); | ||
1567 | - } else { | ||
1568 | - /* event remains in the queue */ | ||
1569 | + for (;;) { | ||
1570 | + cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time); | ||
1571 | + if (!cell) | ||
1572 | break; | ||
1573 | - } | ||
1574 | + snd_seq_dispatch_event(cell, atomic, hop); | ||
1575 | } | ||
1576 | |||
1577 | /* free lock */ | ||
1578 | diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c | ||
1579 | index 96143df19b21..d5017adf9feb 100644 | ||
1580 | --- a/sound/pci/hda/hda_intel.c | ||
1581 | +++ b/sound/pci/hda/hda_intel.c | ||
1582 | @@ -181,11 +181,15 @@ static const struct kernel_param_ops param_ops_xint = { | ||
1583 | }; | ||
1584 | #define param_check_xint param_check_int | ||
1585 | |||
1586 | -static int power_save = -1; | ||
1587 | +static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT; | ||
1588 | module_param(power_save, xint, 0644); | ||
1589 | MODULE_PARM_DESC(power_save, "Automatic power-saving timeout " | ||
1590 | "(in second, 0 = disable)."); | ||
1591 | |||
1592 | +static bool pm_blacklist = true; | ||
1593 | +module_param(pm_blacklist, bool, 0644); | ||
1594 | +MODULE_PARM_DESC(pm_blacklist, "Enable power-management blacklist"); | ||
1595 | + | ||
1596 | /* reset the HD-audio controller in power save mode. | ||
1597 | * this may give more power-saving, but will take longer time to | ||
1598 | * wake up. | ||
1599 | @@ -2300,10 +2304,9 @@ static int azx_probe_continue(struct azx *chip) | ||
1600 | |||
1601 | val = power_save; | ||
1602 | #ifdef CONFIG_PM | ||
1603 | - if (val == -1) { | ||
1604 | + if (pm_blacklist) { | ||
1605 | const struct snd_pci_quirk *q; | ||
1606 | |||
1607 | - val = CONFIG_SND_HDA_POWER_SAVE_DEFAULT; | ||
1608 | q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist); | ||
1609 | if (q && val) { | ||
1610 | dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n", | ||
1611 | diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c | ||
1612 | index 361466a2eaef..ade443a88421 100644 | ||
1613 | --- a/tools/testing/selftests/x86/entry_from_vm86.c | ||
1614 | +++ b/tools/testing/selftests/x86/entry_from_vm86.c | ||
1615 | @@ -95,6 +95,10 @@ asm ( | ||
1616 | "int3\n\t" | ||
1617 | "vmcode_int80:\n\t" | ||
1618 | "int $0x80\n\t" | ||
1619 | + "vmcode_popf_hlt:\n\t" | ||
1620 | + "push %ax\n\t" | ||
1621 | + "popf\n\t" | ||
1622 | + "hlt\n\t" | ||
1623 | "vmcode_umip:\n\t" | ||
1624 | /* addressing via displacements */ | ||
1625 | "smsw (2052)\n\t" | ||
1626 | @@ -124,8 +128,8 @@ asm ( | ||
1627 | |||
1628 | extern unsigned char vmcode[], end_vmcode[]; | ||
1629 | extern unsigned char vmcode_bound[], vmcode_sysenter[], vmcode_syscall[], | ||
1630 | - vmcode_sti[], vmcode_int3[], vmcode_int80[], vmcode_umip[], | ||
1631 | - vmcode_umip_str[], vmcode_umip_sldt[]; | ||
1632 | + vmcode_sti[], vmcode_int3[], vmcode_int80[], vmcode_popf_hlt[], | ||
1633 | + vmcode_umip[], vmcode_umip_str[], vmcode_umip_sldt[]; | ||
1634 | |||
1635 | /* Returns false if the test was skipped. */ | ||
1636 | static bool do_test(struct vm86plus_struct *v86, unsigned long eip, | ||
1637 | @@ -175,7 +179,7 @@ static bool do_test(struct vm86plus_struct *v86, unsigned long eip, | ||
1638 | (VM86_TYPE(ret) == rettype && VM86_ARG(ret) == retarg)) { | ||
1639 | printf("[OK]\tReturned correctly\n"); | ||
1640 | } else { | ||
1641 | - printf("[FAIL]\tIncorrect return reason\n"); | ||
1642 | + printf("[FAIL]\tIncorrect return reason (started at eip = 0x%lx, ended at eip = 0x%lx)\n", eip, v86->regs.eip); | ||
1643 | nerrs++; | ||
1644 | } | ||
1645 | |||
1646 | @@ -264,6 +268,9 @@ int main(void) | ||
1647 | v86.regs.ds = load_addr / 16; | ||
1648 | v86.regs.es = load_addr / 16; | ||
1649 | |||
1650 | + /* Use the end of the page as our stack. */ | ||
1651 | + v86.regs.esp = 4096; | ||
1652 | + | ||
1653 | assert((v86.regs.cs & 3) == 0); /* Looks like RPL = 0 */ | ||
1654 | |||
1655 | /* #BR -- should deliver SIG??? */ | ||
1656 | @@ -295,6 +302,23 @@ int main(void) | ||
1657 | v86.regs.eflags &= ~X86_EFLAGS_IF; | ||
1658 | do_test(&v86, vmcode_sti - vmcode, VM86_STI, 0, "STI with VIP set"); | ||
1659 | |||
1660 | + /* POPF with VIP set but IF clear: should not trap */ | ||
1661 | + v86.regs.eflags = X86_EFLAGS_VIP; | ||
1662 | + v86.regs.eax = 0; | ||
1663 | + do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP set and IF clear"); | ||
1664 | + | ||
1665 | + /* POPF with VIP set and IF set: should trap */ | ||
1666 | + v86.regs.eflags = X86_EFLAGS_VIP; | ||
1667 | + v86.regs.eax = X86_EFLAGS_IF; | ||
1668 | + do_test(&v86, vmcode_popf_hlt - vmcode, VM86_STI, 0, "POPF with VIP and IF set"); | ||
1669 | + | ||
1670 | + /* POPF with VIP clear and IF set: should not trap */ | ||
1671 | + v86.regs.eflags = 0; | ||
1672 | + v86.regs.eax = X86_EFLAGS_IF; | ||
1673 | + do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP clear and IF set"); | ||
1674 | + | ||
1675 | + v86.regs.eflags = 0; | ||
1676 | + | ||
1677 | /* INT3 -- should cause #BP */ | ||
1678 | do_test(&v86, vmcode_int3 - vmcode, VM86_TRAP, 3, "INT3"); | ||
1679 | |||
1680 | @@ -318,7 +342,7 @@ int main(void) | ||
1681 | clearhandler(SIGSEGV); | ||
1682 | |||
1683 | /* Make sure nothing explodes if we fork. */ | ||
1684 | - if (fork() > 0) | ||
1685 | + if (fork() == 0) | ||
1686 | return 0; | ||
1687 | |||
1688 | return (nerrs == 0 ? 0 : 1); | ||
1689 | diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c | ||
1690 | index cc29a8148328..811631a1296c 100644 | ||
1691 | --- a/virt/kvm/arm/arch_timer.c | ||
1692 | +++ b/virt/kvm/arm/arch_timer.c | ||
1693 | @@ -589,6 +589,7 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) | ||
1694 | |||
1695 | int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) | ||
1696 | { | ||
1697 | + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | ||
1698 | struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); | ||
1699 | struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); | ||
1700 | |||
1701 | @@ -602,6 +603,9 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) | ||
1702 | ptimer->cnt_ctl = 0; | ||
1703 | kvm_timer_update_state(vcpu); | ||
1704 | |||
1705 | + if (timer->enabled && irqchip_in_kernel(vcpu->kvm)) | ||
1706 | + kvm_vgic_reset_mapped_irq(vcpu, vtimer->irq.irq); | ||
1707 | + | ||
1708 | return 0; | ||
1709 | } | ||
1710 | |||
1711 | @@ -773,7 +777,7 @@ int kvm_timer_hyp_init(bool has_gic) | ||
1712 | } | ||
1713 | } | ||
1714 | |||
1715 | - kvm_info("virtual timer IRQ%d\n", host_vtimer_irq); | ||
1716 | + kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq); | ||
1717 | |||
1718 | cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING, | ||
1719 | "kvm/arm/timer:starting", kvm_timer_starting_cpu, | ||
1720 | diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c | ||
1721 | index f5c3d6d7019e..b89ce5432214 100644 | ||
1722 | --- a/virt/kvm/arm/hyp/vgic-v3-sr.c | ||
1723 | +++ b/virt/kvm/arm/hyp/vgic-v3-sr.c | ||
1724 | @@ -215,7 +215,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) | ||
1725 | * are now visible to the system register interface. | ||
1726 | */ | ||
1727 | if (!cpu_if->vgic_sre) { | ||
1728 | - dsb(st); | ||
1729 | + dsb(sy); | ||
1730 | + isb(); | ||
1731 | cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2); | ||
1732 | } | ||
1733 | |||
1734 | diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c | ||
1735 | index 9dea96380339..b69798a7880e 100644 | ||
1736 | --- a/virt/kvm/arm/mmu.c | ||
1737 | +++ b/virt/kvm/arm/mmu.c | ||
1738 | @@ -1760,9 +1760,9 @@ int kvm_mmu_init(void) | ||
1739 | */ | ||
1740 | BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK); | ||
1741 | |||
1742 | - kvm_info("IDMAP page: %lx\n", hyp_idmap_start); | ||
1743 | - kvm_info("HYP VA range: %lx:%lx\n", | ||
1744 | - kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL)); | ||
1745 | + kvm_debug("IDMAP page: %lx\n", hyp_idmap_start); | ||
1746 | + kvm_debug("HYP VA range: %lx:%lx\n", | ||
1747 | + kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL)); | ||
1748 | |||
1749 | if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) && | ||
1750 | hyp_idmap_start < kern_hyp_va(~0UL) && | ||
1751 | diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c | ||
1752 | index 80897102da26..028d2ba05b7b 100644 | ||
1753 | --- a/virt/kvm/arm/vgic/vgic-v2.c | ||
1754 | +++ b/virt/kvm/arm/vgic/vgic-v2.c | ||
1755 | @@ -37,6 +37,13 @@ void vgic_v2_init_lrs(void) | ||
1756 | vgic_v2_write_lr(i, 0); | ||
1757 | } | ||
1758 | |||
1759 | +void vgic_v2_set_npie(struct kvm_vcpu *vcpu) | ||
1760 | +{ | ||
1761 | + struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; | ||
1762 | + | ||
1763 | + cpuif->vgic_hcr |= GICH_HCR_NPIE; | ||
1764 | +} | ||
1765 | + | ||
1766 | void vgic_v2_set_underflow(struct kvm_vcpu *vcpu) | ||
1767 | { | ||
1768 | struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; | ||
1769 | @@ -64,7 +71,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) | ||
1770 | int lr; | ||
1771 | unsigned long flags; | ||
1772 | |||
1773 | - cpuif->vgic_hcr &= ~GICH_HCR_UIE; | ||
1774 | + cpuif->vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE); | ||
1775 | |||
1776 | for (lr = 0; lr < vgic_cpu->used_lrs; lr++) { | ||
1777 | u32 val = cpuif->vgic_lr[lr]; | ||
1778 | @@ -381,7 +388,7 @@ int vgic_v2_probe(const struct gic_kvm_info *info) | ||
1779 | kvm_vgic_global_state.type = VGIC_V2; | ||
1780 | kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS; | ||
1781 | |||
1782 | - kvm_info("vgic-v2@%llx\n", info->vctrl.start); | ||
1783 | + kvm_debug("vgic-v2@%llx\n", info->vctrl.start); | ||
1784 | |||
1785 | return 0; | ||
1786 | out: | ||
1787 | diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c | ||
1788 | index f47e8481fa45..f667c7e86b8f 100644 | ||
1789 | --- a/virt/kvm/arm/vgic/vgic-v3.c | ||
1790 | +++ b/virt/kvm/arm/vgic/vgic-v3.c | ||
1791 | @@ -26,6 +26,13 @@ static bool group1_trap; | ||
1792 | static bool common_trap; | ||
1793 | static bool gicv4_enable; | ||
1794 | |||
1795 | +void vgic_v3_set_npie(struct kvm_vcpu *vcpu) | ||
1796 | +{ | ||
1797 | + struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3; | ||
1798 | + | ||
1799 | + cpuif->vgic_hcr |= ICH_HCR_NPIE; | ||
1800 | +} | ||
1801 | + | ||
1802 | void vgic_v3_set_underflow(struct kvm_vcpu *vcpu) | ||
1803 | { | ||
1804 | struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3; | ||
1805 | @@ -47,7 +54,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) | ||
1806 | int lr; | ||
1807 | unsigned long flags; | ||
1808 | |||
1809 | - cpuif->vgic_hcr &= ~ICH_HCR_UIE; | ||
1810 | + cpuif->vgic_hcr &= ~(ICH_HCR_UIE | ICH_HCR_NPIE); | ||
1811 | |||
1812 | for (lr = 0; lr < vgic_cpu->used_lrs; lr++) { | ||
1813 | u64 val = cpuif->vgic_lr[lr]; | ||
1814 | diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c | ||
1815 | index ecb8e25f5fe5..04816ecdf9ce 100644 | ||
1816 | --- a/virt/kvm/arm/vgic/vgic.c | ||
1817 | +++ b/virt/kvm/arm/vgic/vgic.c | ||
1818 | @@ -460,6 +460,32 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq, | ||
1819 | return ret; | ||
1820 | } | ||
1821 | |||
1822 | +/** | ||
1823 | + * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ | ||
1824 | + * @vcpu: The VCPU pointer | ||
1825 | + * @vintid: The INTID of the interrupt | ||
1826 | + * | ||
1827 | + * Reset the active and pending states of a mapped interrupt. Kernel | ||
1828 | + * subsystems injecting mapped interrupts should reset their interrupt lines | ||
1829 | + * when we are doing a reset of the VM. | ||
1830 | + */ | ||
1831 | +void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid) | ||
1832 | +{ | ||
1833 | + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); | ||
1834 | + unsigned long flags; | ||
1835 | + | ||
1836 | + if (!irq->hw) | ||
1837 | + goto out; | ||
1838 | + | ||
1839 | + spin_lock_irqsave(&irq->irq_lock, flags); | ||
1840 | + irq->active = false; | ||
1841 | + irq->pending_latch = false; | ||
1842 | + irq->line_level = false; | ||
1843 | + spin_unlock_irqrestore(&irq->irq_lock, flags); | ||
1844 | +out: | ||
1845 | + vgic_put_irq(vcpu->kvm, irq); | ||
1846 | +} | ||
1847 | + | ||
1848 | int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid) | ||
1849 | { | ||
1850 | struct vgic_irq *irq; | ||
1851 | @@ -649,22 +675,37 @@ static inline void vgic_set_underflow(struct kvm_vcpu *vcpu) | ||
1852 | vgic_v3_set_underflow(vcpu); | ||
1853 | } | ||
1854 | |||
1855 | +static inline void vgic_set_npie(struct kvm_vcpu *vcpu) | ||
1856 | +{ | ||
1857 | + if (kvm_vgic_global_state.type == VGIC_V2) | ||
1858 | + vgic_v2_set_npie(vcpu); | ||
1859 | + else | ||
1860 | + vgic_v3_set_npie(vcpu); | ||
1861 | +} | ||
1862 | + | ||
1863 | /* Requires the ap_list_lock to be held. */ | ||
1864 | -static int compute_ap_list_depth(struct kvm_vcpu *vcpu) | ||
1865 | +static int compute_ap_list_depth(struct kvm_vcpu *vcpu, | ||
1866 | + bool *multi_sgi) | ||
1867 | { | ||
1868 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | ||
1869 | struct vgic_irq *irq; | ||
1870 | int count = 0; | ||
1871 | |||
1872 | + *multi_sgi = false; | ||
1873 | + | ||
1874 | DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); | ||
1875 | |||
1876 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { | ||
1877 | spin_lock(&irq->irq_lock); | ||
1878 | /* GICv2 SGIs can count for more than one... */ | ||
1879 | - if (vgic_irq_is_sgi(irq->intid) && irq->source) | ||
1880 | - count += hweight8(irq->source); | ||
1881 | - else | ||
1882 | + if (vgic_irq_is_sgi(irq->intid) && irq->source) { | ||
1883 | + int w = hweight8(irq->source); | ||
1884 | + | ||
1885 | + count += w; | ||
1886 | + *multi_sgi |= (w > 1); | ||
1887 | + } else { | ||
1888 | count++; | ||
1889 | + } | ||
1890 | spin_unlock(&irq->irq_lock); | ||
1891 | } | ||
1892 | return count; | ||
1893 | @@ -675,28 +716,43 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) | ||
1894 | { | ||
1895 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | ||
1896 | struct vgic_irq *irq; | ||
1897 | - int count = 0; | ||
1898 | + int count; | ||
1899 | + bool npie = false; | ||
1900 | + bool multi_sgi; | ||
1901 | + u8 prio = 0xff; | ||
1902 | |||
1903 | DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); | ||
1904 | |||
1905 | - if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr) | ||
1906 | + count = compute_ap_list_depth(vcpu, &multi_sgi); | ||
1907 | + if (count > kvm_vgic_global_state.nr_lr || multi_sgi) | ||
1908 | vgic_sort_ap_list(vcpu); | ||
1909 | |||
1910 | + count = 0; | ||
1911 | + | ||
1912 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { | ||
1913 | spin_lock(&irq->irq_lock); | ||
1914 | |||
1915 | - if (unlikely(vgic_target_oracle(irq) != vcpu)) | ||
1916 | - goto next; | ||
1917 | - | ||
1918 | /* | ||
1919 | - * If we get an SGI with multiple sources, try to get | ||
1920 | - * them in all at once. | ||
1921 | + * If we have multi-SGIs in the pipeline, we need to | ||
1922 | + * guarantee that they are all seen before any IRQ of | ||
1923 | + * lower priority. In that case, we need to filter out | ||
1924 | + * these interrupts by exiting early. This is easy as | ||
1925 | + * the AP list has been sorted already. | ||
1926 | */ | ||
1927 | - do { | ||
1928 | + if (multi_sgi && irq->priority > prio) { | ||
1929 | + spin_unlock(&irq->irq_lock); | ||
1930 | + break; | ||
1931 | + } | ||
1932 | + | ||
1933 | + if (likely(vgic_target_oracle(irq) == vcpu)) { | ||
1934 | vgic_populate_lr(vcpu, irq, count++); | ||
1935 | - } while (irq->source && count < kvm_vgic_global_state.nr_lr); | ||
1936 | |||
1937 | -next: | ||
1938 | + if (irq->source) { | ||
1939 | + npie = true; | ||
1940 | + prio = irq->priority; | ||
1941 | + } | ||
1942 | + } | ||
1943 | + | ||
1944 | spin_unlock(&irq->irq_lock); | ||
1945 | |||
1946 | if (count == kvm_vgic_global_state.nr_lr) { | ||
1947 | @@ -707,6 +763,9 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) | ||
1948 | } | ||
1949 | } | ||
1950 | |||
1951 | + if (npie) | ||
1952 | + vgic_set_npie(vcpu); | ||
1953 | + | ||
1954 | vcpu->arch.vgic_cpu.used_lrs = count; | ||
1955 | |||
1956 | /* Nuke remaining LRs */ | ||
1957 | diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h | ||
1958 | index efbcf8f96f9c..d434ebd67599 100644 | ||
1959 | --- a/virt/kvm/arm/vgic/vgic.h | ||
1960 | +++ b/virt/kvm/arm/vgic/vgic.h | ||
1961 | @@ -151,6 +151,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu); | ||
1962 | void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); | ||
1963 | void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr); | ||
1964 | void vgic_v2_set_underflow(struct kvm_vcpu *vcpu); | ||
1965 | +void vgic_v2_set_npie(struct kvm_vcpu *vcpu); | ||
1966 | int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr); | ||
1967 | int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write, | ||
1968 | int offset, u32 *val); | ||
1969 | @@ -180,6 +181,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu); | ||
1970 | void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); | ||
1971 | void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr); | ||
1972 | void vgic_v3_set_underflow(struct kvm_vcpu *vcpu); | ||
1973 | +void vgic_v3_set_npie(struct kvm_vcpu *vcpu); | ||
1974 | void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); | ||
1975 | void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); | ||
1976 | void vgic_v3_enable(struct kvm_vcpu *vcpu); |