Annotation of /trunk/kernel-alx/patches-4.9/0116-4.9.17-all-fixes.patch
Revision 2956 - Mon Jul 24 12:03:46 2017 UTC by niro
File size: 195764 byte(s)
-added patches-4.9
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 405da11fc3e4..d11af52427b4 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -42,24 +42,26 @@ file acts as a registry of software workarounds in the Linux Kernel and
 will be updated when new workarounds are committed and backported to
 stable kernels.
 
-| Implementor    | Component       | Erratum ID      | Kconfig                 |
-+----------------+-----------------+-----------------+-------------------------+
-| ARM            | Cortex-A53      | #826319         | ARM64_ERRATUM_826319    |
-| ARM            | Cortex-A53      | #827319         | ARM64_ERRATUM_827319    |
-| ARM            | Cortex-A53      | #824069         | ARM64_ERRATUM_824069    |
-| ARM            | Cortex-A53      | #819472         | ARM64_ERRATUM_819472    |
-| ARM            | Cortex-A53      | #845719         | ARM64_ERRATUM_845719    |
-| ARM            | Cortex-A53      | #843419         | ARM64_ERRATUM_843419    |
-| ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075    |
-| ARM            | Cortex-A57      | #852523         | N/A                     |
-| ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220    |
-| ARM            | Cortex-A72      | #853709         | N/A                     |
-| ARM            | MMU-500         | #841119,#826419 | N/A                     |
-|                |                 |                 |                         |
-| Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |
-| Cavium         | ThunderX ITS    | #23144          | CAVIUM_ERRATUM_23144    |
-| Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154    |
-| Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456    |
-| Cavium         | ThunderX SMMUv2 | #27704          | N/A                     |
-|                |                 |                 |                         |
-| Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585     |
+| Implementor    | Component       | Erratum ID      | Kconfig                     |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A53      | #826319         | ARM64_ERRATUM_826319        |
+| ARM            | Cortex-A53      | #827319         | ARM64_ERRATUM_827319        |
+| ARM            | Cortex-A53      | #824069         | ARM64_ERRATUM_824069        |
+| ARM            | Cortex-A53      | #819472         | ARM64_ERRATUM_819472        |
+| ARM            | Cortex-A53      | #845719         | ARM64_ERRATUM_845719        |
+| ARM            | Cortex-A53      | #843419         | ARM64_ERRATUM_843419        |
+| ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075        |
+| ARM            | Cortex-A57      | #852523         | N/A                         |
+| ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220        |
+| ARM            | Cortex-A72      | #853709         | N/A                         |
+| ARM            | MMU-500         | #841119,#826419 | N/A                         |
+|                |                 |                 |                             |
+| Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375        |
+| Cavium         | ThunderX ITS    | #23144          | CAVIUM_ERRATUM_23144        |
+| Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154        |
+| Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456        |
+| Cavium         | ThunderX SMMUv2 | #27704          | N/A                         |
+|                |                 |                 |                             |
+| Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
+|                |                 |                 |                             |
+| Qualcomm Tech. | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065   |
diff --git a/Makefile b/Makefile
index 4e0f962eb434..004f90a4e613 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 16
+SUBLEVEL = 17
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 969ef880d234..cf57a7799a0f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -474,6 +474,16 @@ config CAVIUM_ERRATUM_27456
 
	  If unsure, say Y.
 
+config QCOM_QDF2400_ERRATUM_0065
+	bool "QDF2400 E0065: Incorrect GITS_TYPER.ITT_Entry_size"
+	default y
+	help
+	  On Qualcomm Datacenter Technologies QDF2400 SoC, ITS hardware reports
+	  ITE size incorrectly. The GITS_TYPER.ITT_Entry_size field should have
+	  been indicated as 16Bytes (0xf), not 8Bytes (0x7).
+
+	  If unsure, say Y.
+
 endmenu
 
 
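The Kconfig help text above describes the erratum but not the workaround itself, which lives in the GICv3 ITS driver. As a rough sketch (assuming an its_node structure carrying an ite_size field, as in drivers/irqchip/irq-gic-v3-its.c), the quirk simply stops trusting the broken GITS_TYPER field and hard-codes the real entry size:

/* Sketch of the driver-side quirk, not a verbatim quote: once the
 * QDF2400 E0065 quirk matches, ignore GITS_TYPER.ITT_Entry_size and
 * use the real ITE size of 16 bytes. */
static void its_enable_quirk_qdf2400_e0065(void *data)
{
	struct its_node *its = data;

	/* On QDF2400, the size of the ITE is 16 bytes, not 8. */
	its->ite_size = 16;
}
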
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 88e2f2b938f0..55889d057757 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -17,14 +17,62 @@
 
 #include <asm/kvm_hyp.h>
 
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+{
+	u64 val;
+
+	/*
+	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
+	 * most TLB operations target EL2/EL0. In order to affect the
+	 * guest TLBs (EL1/EL0), we need to change one of these two
+	 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
+	 * let's flip TGE before executing the TLB operation.
+	 */
+	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+	val = read_sysreg(hcr_el2);
+	val &= ~HCR_TGE;
+	write_sysreg(val, hcr_el2);
+	isb();
+}
+
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+{
+	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+	isb();
+}
+
+static hyp_alternate_select(__tlb_switch_to_guest,
+			    __tlb_switch_to_guest_nvhe,
+			    __tlb_switch_to_guest_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+{
+	/*
+	 * We're done with the TLB operation, let's restore the host's
+	 * view of HCR_EL2.
+	 */
+	write_sysreg(0, vttbr_el2);
+	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+}
+
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+{
+	write_sysreg(0, vttbr_el2);
+}
+
+static hyp_alternate_select(__tlb_switch_to_host,
+			    __tlb_switch_to_host_nvhe,
+			    __tlb_switch_to_host_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
 void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
	dsb(ishst);
 
	/* Switch to requested VMID */
	kvm = kern_hyp_va(kvm);
-	write_sysreg(kvm->arch.vttbr, vttbr_el2);
-	isb();
+	__tlb_switch_to_guest()(kvm);
 
	/*
	 * We could do so much better if we had the VA as well.
@@ -45,7 +93,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
	dsb(ish);
	isb();
 
-	write_sysreg(0, vttbr_el2);
+	__tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@@ -54,14 +102,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 
	/* Switch to requested VMID */
	kvm = kern_hyp_va(kvm);
-	write_sysreg(kvm->arch.vttbr, vttbr_el2);
-	isb();
+	__tlb_switch_to_guest()(kvm);
 
	asm volatile("tlbi vmalls12e1is" : : );
	dsb(ish);
	isb();
 
-	write_sysreg(0, vttbr_el2);
+	__tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
@@ -69,14 +116,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
 
	/* Switch to requested VMID */
-	write_sysreg(kvm->arch.vttbr, vttbr_el2);
-	isb();
+	__tlb_switch_to_guest()(kvm);
 
	asm volatile("tlbi vmalle1" : : );
	dsb(nsh);
	isb();
 
-	write_sysreg(0, vttbr_el2);
+	__tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_flush_vm_context(void)
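
A note on the __tlb_switch_to_guest()(kvm) double-call syntax introduced above: hyp_alternate_select() generates a selector that returns either the nVHE or the VHE implementation, chosen by the ARM64_HAS_VIRT_HOST_EXTN capability patched in at boot, and the second pair of parentheses invokes whichever function pointer came back. A hypothetical plain-C equivalent (the real macro uses boot-time code patching, not a branch):

typedef void (*tlb_switch_fn)(struct kvm *kvm);

static tlb_switch_fn __tlb_switch_to_guest(void)
{
	/* has_vhe() stands in for the patched capability check */
	return has_vhe() ? __tlb_switch_to_guest_vhe
			 : __tlb_switch_to_guest_nvhe;
}

/* callers then write: __tlb_switch_to_guest()(kvm); */
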
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index 9fa046d56eba..411994551afc 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -52,7 +52,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
 {
	u32 *key = crypto_tfm_ctx(tfm);
 
-	*key = 0;
+	*key = ~0;
 
	return 0;
 }
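
The one-character fix above matters because CRC32C is defined with an all-ones seed and a final inversion; initialising the shash context to 0 produces wrong digests, most visibly for buffers that begin with zero bytes. A self-contained bitwise reference (the standard algorithm, independent of the kernel's vpmsum implementation) showing the seed this change restores:

#include <stddef.h>
#include <stdint.h>

/* Reference bitwise CRC32C (Castagnoli, reflected poly 0x82F63B78). */
static uint32_t crc32c(const uint8_t *p, size_t len)
{
	uint32_t crc = 0xFFFFFFFF;	/* the all-ones seed, i.e. ~0 */

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return ~crc;			/* final inversion */
}
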
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 5c451140660a..b9e3f0aca261 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -19,16 +19,18 @@ extern void destroy_context(struct mm_struct *mm);
 struct mm_iommu_table_group_mem_t;
 
 extern int isolate_lru_page(struct page *page);	/* from internal.h */
-extern bool mm_iommu_preregistered(void);
-extern long mm_iommu_get(unsigned long ua, unsigned long entries,
+extern bool mm_iommu_preregistered(struct mm_struct *mm);
+extern long mm_iommu_get(struct mm_struct *mm,
+		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
-extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem);
-extern void mm_iommu_init(mm_context_t *ctx);
-extern void mm_iommu_cleanup(mm_context_t *ctx);
-extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
-		unsigned long size);
-extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
-		unsigned long entries);
+extern long mm_iommu_put(struct mm_struct *mm,
+		struct mm_iommu_table_group_mem_t *mem);
+extern void mm_iommu_init(struct mm_struct *mm);
+extern void mm_iommu_cleanup(struct mm_struct *mm);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
+		unsigned long ua, unsigned long size);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+		unsigned long ua, unsigned long entries);
 extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
 extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 270ee30abdcf..f516ac508ae3 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -915,7 +915,7 @@ void __init setup_arch(char **cmdline_p)
	init_mm.context.pte_frag = NULL;
 #endif
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-	mm_iommu_init(&init_mm.context);
+	mm_iommu_init(&init_mm);
 #endif
	irqstack_early_init();
	exc_lvl_early_init();
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index b114f8b93ec9..73bf6e14c3aa 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -115,7 +115,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
	mm->context.pte_frag = NULL;
 #endif
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-	mm_iommu_init(&mm->context);
+	mm_iommu_init(mm);
 #endif
	return 0;
 }
@@ -156,13 +156,11 @@ static inline void destroy_pagetable_page(struct mm_struct *mm)
 }
 #endif
 
-
 void destroy_context(struct mm_struct *mm)
 {
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-	mm_iommu_cleanup(&mm->context);
+	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
 #endif
-
 #ifdef CONFIG_PPC_ICSWX
	drop_cop(mm->context.acop, mm);
	kfree(mm->context.cop_lockp);
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index e0f1c33601dd..7de7124ac91b 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -56,7 +56,7 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
	}
 
	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
-			current->pid,
+			current ? current->pid : 0,
			incr ? '+' : '-',
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
@@ -66,12 +66,9 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
	return ret;
 }
 
-bool mm_iommu_preregistered(void)
+bool mm_iommu_preregistered(struct mm_struct *mm)
 {
-	if (!current || !current->mm)
-		return false;
-
-	return !list_empty(&current->mm->context.iommu_group_mem_list);
+	return !list_empty(&mm->context.iommu_group_mem_list);
 }
 EXPORT_SYMBOL_GPL(mm_iommu_preregistered);
 
@@ -124,19 +121,16 @@ static int mm_iommu_move_page_from_cma(struct page *page)
	return 0;
 }
 
-long mm_iommu_get(unsigned long ua, unsigned long entries,
+long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem)
 {
	struct mm_iommu_table_group_mem_t *mem;
	long i, j, ret = 0, locked_entries = 0;
	struct page *page = NULL;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
-
	mutex_lock(&mem_list_mutex);
 
-	list_for_each_entry_rcu(mem, &current->mm->context.iommu_group_mem_list,
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			++mem->used;
@@ -154,7 +148,7 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
 
	}
 
-	ret = mm_iommu_adjust_locked_vm(current->mm, entries, true);
+	ret = mm_iommu_adjust_locked_vm(mm, entries, true);
	if (ret)
		goto unlock_exit;
 
@@ -190,7 +184,7 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
	 * of the CMA zone if possible. NOTE: faulting in + migration
	 * can be expensive. Batching can be considered later
	 */
-	if (get_pageblock_migratetype(page) == MIGRATE_CMA) {
+	if (is_migrate_cma_page(page)) {
		if (mm_iommu_move_page_from_cma(page))
			goto populate;
		if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
@@ -215,11 +209,11 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
	mem->entries = entries;
	*pmem = mem;
 
-	list_add_rcu(&mem->next, &current->mm->context.iommu_group_mem_list);
+	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
 
 unlock_exit:
	if (locked_entries && ret)
-		mm_iommu_adjust_locked_vm(current->mm, locked_entries, false);
+		mm_iommu_adjust_locked_vm(mm, locked_entries, false);
 
	mutex_unlock(&mem_list_mutex);
 
@@ -264,17 +258,13 @@ static void mm_iommu_free(struct rcu_head *head)
 static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
 {
	list_del_rcu(&mem->next);
-	mm_iommu_adjust_locked_vm(current->mm, mem->entries, false);
	call_rcu(&mem->rcu, mm_iommu_free);
 }
 
-long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
+long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 {
	long ret = 0;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
-
	mutex_lock(&mem_list_mutex);
 
	if (mem->used == 0) {
@@ -297,6 +287,8 @@ long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);
 
+	mm_iommu_adjust_locked_vm(mm, mem->entries, false);
+
 unlock_exit:
	mutex_unlock(&mem_list_mutex);
 
@@ -304,14 +296,12 @@ long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
 }
 EXPORT_SYMBOL_GPL(mm_iommu_put);
 
-struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
-		unsigned long size)
+struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
+		unsigned long ua, unsigned long size)
 {
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
 
-	list_for_each_entry_rcu(mem,
-			&current->mm->context.iommu_group_mem_list,
-			next) {
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
					(mem->entries << PAGE_SHIFT))) {
@@ -324,14 +314,12 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
 }
 EXPORT_SYMBOL_GPL(mm_iommu_lookup);
 
-struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
-		unsigned long entries)
+struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+		unsigned long ua, unsigned long entries)
 {
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
 
-	list_for_each_entry_rcu(mem,
-			&current->mm->context.iommu_group_mem_list,
-			next) {
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			ret = mem;
			break;
@@ -373,17 +361,7 @@ void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
 }
 EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);
 
-void mm_iommu_init(mm_context_t *ctx)
+void mm_iommu_init(struct mm_struct *mm)
 {
-	INIT_LIST_HEAD_RCU(&ctx->iommu_group_mem_list);
-}
-
-void mm_iommu_cleanup(mm_context_t *ctx)
-{
-	struct mm_iommu_table_group_mem_t *mem, *tmp;
-
-	list_for_each_entry_safe(mem, tmp, &ctx->iommu_group_mem_list, next) {
-		list_del_rcu(&mem->next);
-		mm_iommu_do_free(mem);
-	}
+	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
 }
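
The common thread in the powerpc hunks above: every mm_iommu_* entry point now takes the mm_struct explicitly instead of dereferencing current->mm, so the calls keep working from teardown paths where current->mm is already NULL (the old -ESRCH early-exits disappear for the same reason). A hypothetical caller sketch — the container type and function name here are illustrative, not the actual VFIO code:

struct tce_container {
	struct mm_struct *mm;		/* captured once, at ioctl time */
};

static long tce_preregister(struct tce_container *container,
			    unsigned long ua, unsigned long entries,
			    struct mm_iommu_table_group_mem_t **pmem)
{
	if (!container->mm)
		container->mm = current->mm;	/* still valid here */

	return mm_iommu_get(container->mm, ua, entries, pmem);
}

/* Release paths can then call mm_iommu_put(container->mm, mem) even
 * after the owning process has exited. */
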
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 7fe88bb57e36..38623e219816 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2096,8 +2096,8 @@ static int x86_pmu_event_init(struct perf_event *event)
 
 static void refresh_pce(void *ignored)
 {
-	if (current->mm)
-		load_mm_cr4(current->mm);
+	if (current->active_mm)
+		load_mm_cr4(current->active_mm);
 }
 
 static void x86_pmu_event_mapped(struct perf_event *event)
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 8f44c5a50ab8..f228f74051b6 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -31,6 +31,7 @@
 #include <asm/apic.h>
 #include <asm/timer.h>
 #include <asm/reboot.h>
+#include <asm/nmi.h>
 
 struct ms_hyperv_info ms_hyperv;
 EXPORT_SYMBOL_GPL(ms_hyperv);
@@ -158,6 +159,26 @@ static unsigned char hv_get_nmi_reason(void)
	return 0;
 }
 
+#ifdef CONFIG_X86_LOCAL_APIC
+/*
+ * Prior to WS2016 Debug-VM sends NMIs to all CPUs which makes
+ * it difficult to process CHANNELMSG_UNLOAD in case of crash. Handle
+ * unknown NMI on the first CPU which gets it.
+ */
+static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
+{
+	static atomic_t nmi_cpu = ATOMIC_INIT(-1);
+
+	if (!unknown_nmi_panic)
+		return NMI_DONE;
+
+	if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1)
+		return NMI_HANDLED;
+
+	return NMI_DONE;
+}
+#endif
+
 static void __init ms_hyperv_init_platform(void)
 {
	/*
@@ -183,6 +204,9 @@ static void __init ms_hyperv_init_platform(void)
		pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
			lapic_timer_frequency);
	}
+
+	register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
+			     "hv_nmi_unknown");
 #endif
 
	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
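
The atomic_cmpxchg() in the new handler is a first-claimant gate: atomic_cmpxchg() returns the old value, so the first CPU to arrive (old == -1) records itself and falls through to NMI_DONE, letting the normal unknown-NMI panic path run there, while every later CPU sees old != -1 and swallows the duplicate as NMI_HANDLED. Reduced to its essentials (a sketch, not new kernel API):

static atomic_t nmi_cpu = ATOMIC_INIT(-1);

static bool another_cpu_claimed_first(void)
{
	/* only the first caller swaps -1 for its CPU id */
	return atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1;
}
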
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 54a2372f5dbb..b5785c197e53 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -4,6 +4,7 @@
  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  */
 
+#define DISABLE_BRANCH_PROFILING
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/types.h>
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 46b2f41f8b05..eea88fe5d969 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1287,6 +1287,8 @@ static int __init init_tsc_clocksource(void)
	 * exporting a reliable TSC.
	 */
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
+		if (boot_cpu_has(X86_FEATURE_ART))
+			art_related_clocksource = &clocksource_tsc;
		clocksource_register_khz(&clocksource_tsc, tsc_khz);
		return 0;
	}
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 0493c17b8a51..333362f992e4 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -1,3 +1,4 @@
+#define DISABLE_BRANCH_PROFILING
 #define pr_fmt(fmt) "kasan: " fmt
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index bedfab98077a..a00a6c07bb6f 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -234,23 +234,14 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
		return 1;
 
	for_each_pci_msi_entry(msidesc, dev) {
-		__pci_read_msi_msg(msidesc, &msg);
-		pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
-			((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
-		if (msg.data != XEN_PIRQ_MSI_DATA ||
-		    xen_irq_from_pirq(pirq) < 0) {
-			pirq = xen_allocate_pirq_msi(dev, msidesc);
-			if (pirq < 0) {
-				irq = -ENODEV;
-				goto error;
-			}
-			xen_msi_compose_msg(dev, pirq, &msg);
-			__pci_write_msi_msg(msidesc, &msg);
-			dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
-		} else {
-			dev_dbg(&dev->dev,
-				"xen: msi already bound to pirq=%d\n", pirq);
+		pirq = xen_allocate_pirq_msi(dev, msidesc);
+		if (pirq < 0) {
+			irq = -ENODEV;
+			goto error;
		}
+		xen_msi_compose_msg(dev, pirq, &msg);
+		__pci_write_msi_msg(msidesc, &msg);
+		dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
		irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
					       (type == PCI_CAP_ID_MSI) ? nvec : 1,
					       (type == PCI_CAP_ID_MSIX) ?
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 0774799942e0..c6fee7437be4 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -182,6 +182,9 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
	__set_bit(WRITE_16, filter->write_ok);
	__set_bit(WRITE_LONG, filter->write_ok);
	__set_bit(WRITE_LONG_2, filter->write_ok);
+	__set_bit(WRITE_SAME, filter->write_ok);
+	__set_bit(WRITE_SAME_16, filter->write_ok);
+	__set_bit(WRITE_SAME_32, filter->write_ok);
	__set_bit(ERASE, filter->write_ok);
	__set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
	__set_bit(MODE_SELECT, filter->write_ok);
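
For context on what these bits gate: unprivileged SG_IO callers have the first CDB byte checked against the read_ok/write_ok bitmaps, so before this change WRITE SAME commands from a process without CAP_SYS_RAWIO were rejected even on a device opened for writing. A simplified sketch of the filter check (a hedged paraphrase of blk_verify_command, not a verbatim quote):

static int verify_command(struct blk_cmd_filter *filter,
			  const unsigned char *cmd, fmode_t mode)
{
	if (test_bit(cmd[0], filter->read_ok))
		return 0;
	if ((mode & FMODE_WRITE) && test_bit(cmd[0], filter->write_ok))
		return 0;
	return -EPERM;	/* opcode not whitelisted for unprivileged use */
}
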
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index bdc67bad61a7..4421f7c9981c 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -160,6 +160,34 @@ static struct dmi_system_id acpi_rev_dmi_table[] __initdata = {
		      DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"),
		},
	},
+	{
+	 .callback = dmi_enable_rev_override,
+	 .ident = "DELL Precision 5520",
+	 .matches = {
+		      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		      DMI_MATCH(DMI_PRODUCT_NAME, "Precision 5520"),
+		},
+	},
+	{
+	 .callback = dmi_enable_rev_override,
+	 .ident = "DELL Precision 3520",
+	 .matches = {
+		      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		      DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3520"),
+		},
+	},
+	/*
+	 * Resolves a quirk with the Dell Latitude 3350 that
+	 * causes the ethernet adapter to not function.
+	 */
+	{
+	 .callback = dmi_enable_rev_override,
+	 .ident = "DELL Latitude 3350",
+	 .matches = {
+		      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		      DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 3350"),
+		},
+	},
 #endif
	{}
 };
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 3bbd2a58db47..2acaa77ad482 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -1598,7 +1598,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
		.a2w_reg = A2W_PLLH_AUX,
		.load_mask = CM_PLLH_LOADAUX,
		.hold_mask = 0,
-		.fixed_divider = 10),
+		.fixed_divider = 1),
	[BCM2835_PLLH_PIX] = REGISTER_PLL_DIV(
		.name = "pllh_pix",
		.source_pll = "pllh",
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 015f7110b96d..d235fbe2564f 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -691,7 +691,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion =
		dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
-				GFP_KERNEL, &ioat_chan->completion_dma);
+				GFP_NOWAIT, &ioat_chan->completion_dma);
	if (!ioat_chan->completion)
		return -ENOMEM;
 
@@ -701,7 +701,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
	order = IOAT_MAX_ORDER;
-	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
+	ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
	if (!ring)
		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 77a52b54a31e..70f0344c508c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -95,9 +95,11 @@ nvkm-y += nvkm/engine/disp/cursg84.o
 nvkm-y += nvkm/engine/disp/cursgt215.o
 nvkm-y += nvkm/engine/disp/cursgf119.o
 nvkm-y += nvkm/engine/disp/cursgk104.o
+nvkm-y += nvkm/engine/disp/cursgp102.o
 
 nvkm-y += nvkm/engine/disp/oimmnv50.o
 nvkm-y += nvkm/engine/disp/oimmg84.o
 nvkm-y += nvkm/engine/disp/oimmgt215.o
 nvkm-y += nvkm/engine/disp/oimmgf119.o
 nvkm-y += nvkm/engine/disp/oimmgk104.o
+nvkm-y += nvkm/engine/disp/oimmgp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index dd2953bc9264..9d90d8b4b7e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -82,7 +82,7 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
 
	if (mthd->addr) {
		snprintf(cname_, sizeof(cname_), "%s %d",
-			 mthd->name, chan->chid);
+			 mthd->name, chan->chid.user);
		cname = cname_;
	}
 
@@ -139,7 +139,7 @@ nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
	if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
		notify->size = sizeof(struct nvif_notify_uevent_rep);
		notify->types = 1;
-		notify->index = chan->chid;
+		notify->index = chan->chid.user;
		return 0;
	}
 
@@ -159,7 +159,7 @@ nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	struct nv50_disp *disp = chan->root->disp;
	struct nvkm_device *device = disp->base.engine.subdev.device;
-	*data = nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
+	*data = nvkm_rd32(device, 0x640000 + (chan->chid.user * 0x1000) + addr);
	return 0;
 }
 
@@ -169,7 +169,7 @@ nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	struct nv50_disp *disp = chan->root->disp;
	struct nvkm_device *device = disp->base.engine.subdev.device;
-	nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
+	nvkm_wr32(device, 0x640000 + (chan->chid.user * 0x1000) + addr, data);
	return 0;
 }
 
@@ -196,7 +196,7 @@ nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
	struct nv50_disp *disp = chan->root->disp;
	struct nvkm_device *device = disp->base.engine.subdev.device;
	*addr = device->func->resource_addr(device, 0) +
-		0x640000 + (chan->chid * 0x1000);
+		0x640000 + (chan->chid.user * 0x1000);
	*size = 0x001000;
	return 0;
 }
@@ -243,8 +243,8 @@ nv50_disp_chan_dtor(struct nvkm_object *object)
 {
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	struct nv50_disp *disp = chan->root->disp;
-	if (chan->chid >= 0)
-		disp->chan[chan->chid] = NULL;
+	if (chan->chid.user >= 0)
+		disp->chan[chan->chid.user] = NULL;
	return chan->func->dtor ? chan->func->dtor(chan) : chan;
 }
 
@@ -263,7 +263,7 @@ nv50_disp_chan = {
 int
 nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
		    const struct nv50_disp_chan_mthd *mthd,
-		    struct nv50_disp_root *root, int chid, int head,
+		    struct nv50_disp_root *root, int ctrl, int user, int head,
		    const struct nvkm_oclass *oclass,
		    struct nv50_disp_chan *chan)
 {
@@ -273,21 +273,22 @@ nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
	chan->func = func;
	chan->mthd = mthd;
	chan->root = root;
-	chan->chid = chid;
+	chan->chid.ctrl = ctrl;
+	chan->chid.user = user;
	chan->head = head;
 
-	if (disp->chan[chan->chid]) {
-		chan->chid = -1;
+	if (disp->chan[chan->chid.user]) {
+		chan->chid.user = -1;
		return -EBUSY;
	}
-	disp->chan[chan->chid] = chan;
+	disp->chan[chan->chid.user] = chan;
	return 0;
 }
 
 int
 nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
		    const struct nv50_disp_chan_mthd *mthd,
-		    struct nv50_disp_root *root, int chid, int head,
+		    struct nv50_disp_root *root, int ctrl, int user, int head,
		    const struct nvkm_oclass *oclass,
		    struct nvkm_object **pobject)
 {
@@ -297,5 +298,6 @@ nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
		return -ENOMEM;
	*pobject = &chan->object;
 
-	return nv50_disp_chan_ctor(func, mthd, root, chid, head, oclass, chan);
+	return nv50_disp_chan_ctor(func, mthd, root, ctrl, user,
+				   head, oclass, chan);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index f5f683d9fd20..737b38f6fbd2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -7,7 +7,11 @@ struct nv50_disp_chan {
	const struct nv50_disp_chan_func *func;
	const struct nv50_disp_chan_mthd *mthd;
	struct nv50_disp_root *root;
-	int chid;
+
+	struct {
+		int ctrl;
+		int user;
+	} chid;
	int head;
 
	struct nvkm_object object;
@@ -25,11 +29,11 @@ struct nv50_disp_chan_func {
 
 int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *,
			const struct nv50_disp_chan_mthd *,
-			struct nv50_disp_root *, int chid, int head,
+			struct nv50_disp_root *, int ctrl, int user, int head,
			const struct nvkm_oclass *, struct nv50_disp_chan *);
 int nv50_disp_chan_new_(const struct nv50_disp_chan_func *,
			const struct nv50_disp_chan_mthd *,
-			struct nv50_disp_root *, int chid, int head,
+			struct nv50_disp_root *, int ctrl, int user, int head,
			const struct nvkm_oclass *, struct nvkm_object **);
 
 extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
@@ -90,13 +94,16 @@ extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd;
 struct nv50_disp_pioc_oclass {
	int (*ctor)(const struct nv50_disp_chan_func *,
		    const struct nv50_disp_chan_mthd *,
-		    struct nv50_disp_root *, int chid,
+		    struct nv50_disp_root *, int ctrl, int user,
		    const struct nvkm_oclass *, void *data, u32 size,
		    struct nvkm_object **);
	struct nvkm_sclass base;
	const struct nv50_disp_chan_func *func;
	const struct nv50_disp_chan_mthd *mthd;
-	int chid;
+	struct {
+		int ctrl;
+		int user;
+	} chid;
 };
 
 extern const struct nv50_disp_pioc_oclass nv50_disp_oimm_oclass;
@@ -114,15 +121,17 @@ extern const struct nv50_disp_pioc_oclass gf119_disp_curs_oclass;
 extern const struct nv50_disp_pioc_oclass gk104_disp_oimm_oclass;
 extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass;
 
+extern const struct nv50_disp_pioc_oclass gp102_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass gp102_disp_curs_oclass;
 
 int nv50_disp_curs_new(const struct nv50_disp_chan_func *,
		       const struct nv50_disp_chan_mthd *,
-		       struct nv50_disp_root *, int chid,
+		       struct nv50_disp_root *, int ctrl, int user,
		       const struct nvkm_oclass *, void *data, u32 size,
		       struct nvkm_object **);
 int nv50_disp_oimm_new(const struct nv50_disp_chan_func *,
		       const struct nv50_disp_chan_mthd *,
-		       struct nv50_disp_root *, int chid,
+		       struct nv50_disp_root *, int ctrl, int user,
		       const struct nvkm_oclass *, void *data, u32 size,
		       struct nvkm_object **);
 #endif
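
The struct split above is the core of the GP102 change: older GPUs used a single index for both the per-channel control registers and the user-visible channel, but on GP102 the two diverge (the new files below use { ctrl = 13, user = 17 } for the cursor and { ctrl = 9, user = 13 } for the overlay immediate channel). An illustrative sketch of how the two indices are consumed, loosely following the register math in the chan/dmac code — the helper names are invented:

struct chid { int ctrl; int user; };

static u32 ctrl_reg(struct chid id)
{
	return 0x610490 + id.ctrl * 0x0010;	/* gf119 channel control */
}

static u32 error_report_bit(struct chid id)
{
	return 0x00000001 << id.user;		/* 0x610090/0x6100a0 masks */
}

static u64 user_map_offset(struct chid id)
{
	return 0x640000 + (u64)id.user * 0x1000;	/* nv50_disp_chan_map() */
}
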
862 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c | ||
863 | index dd99fc7060b1..fa781b5a7e07 100644 | ||
864 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c | ||
865 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c | ||
866 | @@ -33,5 +33,5 @@ g84_disp_curs_oclass = { | ||
867 | .base.maxver = 0, | ||
868 | .ctor = nv50_disp_curs_new, | ||
869 | .func = &nv50_disp_pioc_func, | ||
870 | - .chid = 7, | ||
871 | + .chid = { 7, 7 }, | ||
872 | }; | ||
873 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c | ||
874 | index 2a1574e06ad6..2be6fb052c65 100644 | ||
875 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c | ||
876 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c | ||
877 | @@ -33,5 +33,5 @@ gf119_disp_curs_oclass = { | ||
878 | .base.maxver = 0, | ||
879 | .ctor = nv50_disp_curs_new, | ||
880 | .func = &gf119_disp_pioc_func, | ||
881 | - .chid = 13, | ||
882 | + .chid = { 13, 13 }, | ||
883 | }; | ||
884 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c | ||
885 | index 28e8f06c9472..2a99db4bf8f8 100644 | ||
886 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c | ||
887 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c | ||
888 | @@ -33,5 +33,5 @@ gk104_disp_curs_oclass = { | ||
889 | .base.maxver = 0, | ||
890 | .ctor = nv50_disp_curs_new, | ||
891 | .func = &gf119_disp_pioc_func, | ||
892 | - .chid = 13, | ||
893 | + .chid = { 13, 13 }, | ||
894 | }; | ||
895 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c | ||
896 | new file mode 100644 | ||
897 | index 000000000000..e958210d8105 | ||
898 | --- /dev/null | ||
899 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c | ||
900 | @@ -0,0 +1,37 @@ | ||
901 | +/* | ||
902 | + * Copyright 2016 Red Hat Inc. | ||
903 | + * | ||
904 | + * Permission is hereby granted, free of charge, to any person obtaining a | ||
905 | + * copy of this software and associated documentation files (the "Software"), | ||
906 | + * to deal in the Software without restriction, including without limitation | ||
907 | + * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
908 | + * and/or sell copies of the Software, and to permit persons to whom the | ||
909 | + * Software is furnished to do so, subject to the following conditions: | ||
910 | + * | ||
911 | + * The above copyright notice and this permission notice shall be included in | ||
912 | + * all copies or substantial portions of the Software. | ||
913 | + * | ||
914 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
915 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
916 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
917 | + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
918 | + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
919 | + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
920 | + * OTHER DEALINGS IN THE SOFTWARE. | ||
921 | + * | ||
922 | + * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
923 | + */ | ||
924 | +#include "channv50.h" | ||
925 | +#include "rootnv50.h" | ||
926 | + | ||
927 | +#include <nvif/class.h> | ||
928 | + | ||
929 | +const struct nv50_disp_pioc_oclass | ||
930 | +gp102_disp_curs_oclass = { | ||
931 | + .base.oclass = GK104_DISP_CURSOR, | ||
932 | + .base.minver = 0, | ||
933 | + .base.maxver = 0, | ||
934 | + .ctor = nv50_disp_curs_new, | ||
935 | + .func = &gf119_disp_pioc_func, | ||
936 | + .chid = { 13, 17 }, | ||
937 | +}; | ||
938 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c | ||
939 | index d8a4b9ca139c..00a7f3564450 100644 | ||
940 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c | ||
941 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c | ||
942 | @@ -33,5 +33,5 @@ gt215_disp_curs_oclass = { | ||
943 | .base.maxver = 0, | ||
944 | .ctor = nv50_disp_curs_new, | ||
945 | .func = &nv50_disp_pioc_func, | ||
946 | - .chid = 7, | ||
947 | + .chid = { 7, 7 }, | ||
948 | }; | ||
949 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c | ||
950 | index 8b1320499a0f..82ff82d8c1ab 100644 | ||
951 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c | ||
952 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c | ||
953 | @@ -33,7 +33,7 @@ | ||
954 | int | ||
955 | nv50_disp_curs_new(const struct nv50_disp_chan_func *func, | ||
956 | const struct nv50_disp_chan_mthd *mthd, | ||
957 | - struct nv50_disp_root *root, int chid, | ||
958 | + struct nv50_disp_root *root, int ctrl, int user, | ||
959 | const struct nvkm_oclass *oclass, void *data, u32 size, | ||
960 | struct nvkm_object **pobject) | ||
961 | { | ||
962 | @@ -54,7 +54,7 @@ nv50_disp_curs_new(const struct nv50_disp_chan_func *func, | ||
963 | } else | ||
964 | return ret; | ||
965 | |||
966 | - return nv50_disp_chan_new_(func, mthd, root, chid + head, | ||
967 | + return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head, | ||
968 | head, oclass, pobject); | ||
969 | } | ||
970 | |||
971 | @@ -65,5 +65,5 @@ nv50_disp_curs_oclass = { | ||
972 | .base.maxver = 0, | ||
973 | .ctor = nv50_disp_curs_new, | ||
974 | .func = &nv50_disp_pioc_func, | ||
975 | - .chid = 7, | ||
976 | + .chid = { 7, 7 }, | ||
977 | }; | ||
978 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c | ||
979 | index a57f7cef307a..ce7cd74fbd5d 100644 | ||
980 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c | ||
981 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c | ||
982 | @@ -32,8 +32,8 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan, | ||
983 | struct nvkm_object *object, u32 handle) | ||
984 | { | ||
985 | return nvkm_ramht_insert(chan->base.root->ramht, object, | ||
986 | - chan->base.chid, -9, handle, | ||
987 | - chan->base.chid << 27 | 0x00000001); | ||
988 | + chan->base.chid.user, -9, handle, | ||
989 | + chan->base.chid.user << 27 | 0x00000001); | ||
990 | } | ||
991 | |||
992 | void | ||
993 | @@ -42,22 +42,23 @@ gf119_disp_dmac_fini(struct nv50_disp_dmac *chan) | ||
994 | struct nv50_disp *disp = chan->base.root->disp; | ||
995 | struct nvkm_subdev *subdev = &disp->base.engine.subdev; | ||
996 | struct nvkm_device *device = subdev->device; | ||
997 | - int chid = chan->base.chid; | ||
998 | + int ctrl = chan->base.chid.ctrl; | ||
999 | + int user = chan->base.chid.user; | ||
1000 | |||
1001 | /* deactivate channel */ | ||
1002 | - nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000); | ||
1003 | - nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000); | ||
1004 | + nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00001010, 0x00001000); | ||
1005 | + nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000003, 0x00000000); | ||
1006 | if (nvkm_msec(device, 2000, | ||
1007 | - if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000)) | ||
1008 | + if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x001e0000)) | ||
1009 | break; | ||
1010 | ) < 0) { | ||
1011 | - nvkm_error(subdev, "ch %d fini: %08x\n", chid, | ||
1012 | - nvkm_rd32(device, 0x610490 + (chid * 0x10))); | ||
1013 | + nvkm_error(subdev, "ch %d fini: %08x\n", user, | ||
1014 | + nvkm_rd32(device, 0x610490 + (ctrl * 0x10))); | ||
1015 | } | ||
1016 | |||
1017 | /* disable error reporting and completion notification */ | ||
1018 | - nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000); | ||
1019 | - nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000); | ||
1020 | + nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000); | ||
1021 | + nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000); | ||
1022 | } | ||
1023 | |||
1024 | static int | ||
1025 | @@ -66,26 +67,27 @@ gf119_disp_dmac_init(struct nv50_disp_dmac *chan) | ||
1026 | struct nv50_disp *disp = chan->base.root->disp; | ||
1027 | struct nvkm_subdev *subdev = &disp->base.engine.subdev; | ||
1028 | struct nvkm_device *device = subdev->device; | ||
1029 | - int chid = chan->base.chid; | ||
1030 | + int ctrl = chan->base.chid.ctrl; | ||
1031 | + int user = chan->base.chid.user; | ||
1032 | |||
1033 | /* enable error reporting */ | ||
1034 | - nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); | ||
1035 | + nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user); | ||
1036 | |||
1037 | /* initialise channel for dma command submission */ | ||
1038 | - nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push); | ||
1039 | - nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000); | ||
1040 | - nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001); | ||
1041 | - nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010); | ||
1042 | - nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000); | ||
1043 | - nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013); | ||
1044 | + nvkm_wr32(device, 0x610494 + (ctrl * 0x0010), chan->push); | ||
1045 | + nvkm_wr32(device, 0x610498 + (ctrl * 0x0010), 0x00010000); | ||
1046 | + nvkm_wr32(device, 0x61049c + (ctrl * 0x0010), 0x00000001); | ||
1047 | + nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010); | ||
1048 | + nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000); | ||
1049 | + nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013); | ||
1050 | |||
1051 | /* wait for it to go inactive */ | ||
1052 | if (nvkm_msec(device, 2000, | ||
1053 | - if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000)) | ||
1054 | + if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000)) | ||
1055 | break; | ||
1056 | ) < 0) { | ||
1057 | - nvkm_error(subdev, "ch %d init: %08x\n", chid, | ||
1058 | - nvkm_rd32(device, 0x610490 + (chid * 0x10))); | ||
1059 | + nvkm_error(subdev, "ch %d init: %08x\n", user, | ||
1060 | + nvkm_rd32(device, 0x610490 + (ctrl * 0x10))); | ||
1061 | return -EBUSY; | ||
1062 | } | ||
1063 | |||
1064 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c | ||
1065 | index ad24c2c57696..d26d3b4c41a4 100644 | ||
1066 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c | ||
1067 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c | ||
1068 | @@ -32,26 +32,27 @@ gp104_disp_dmac_init(struct nv50_disp_dmac *chan) | ||
1069 | struct nv50_disp *disp = chan->base.root->disp; | ||
1070 | struct nvkm_subdev *subdev = &disp->base.engine.subdev; | ||
1071 | struct nvkm_device *device = subdev->device; | ||
1072 | - int chid = chan->base.chid; | ||
1073 | + int ctrl = chan->base.chid.ctrl; | ||
1074 | + int user = chan->base.chid.user; | ||
1075 | |||
1076 | /* enable error reporting */ | ||
1077 | - nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); | ||
1078 | + nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user); | ||
1079 | |||
1080 | /* initialise channel for dma command submission */ | ||
1081 | - nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push); | ||
1082 | - nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000); | ||
1083 | - nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001); | ||
1084 | - nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010); | ||
1085 | - nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000); | ||
1086 | - nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013); | ||
1087 | + nvkm_wr32(device, 0x611494 + (ctrl * 0x0010), chan->push); | ||
1088 | + nvkm_wr32(device, 0x611498 + (ctrl * 0x0010), 0x00010000); | ||
1089 | + nvkm_wr32(device, 0x61149c + (ctrl * 0x0010), 0x00000001); | ||
1090 | + nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010); | ||
1091 | + nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000); | ||
1092 | + nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013); | ||
1093 | |||
1094 | /* wait for it to go inactive */ | ||
1095 | if (nvkm_msec(device, 2000, | ||
1096 | - if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000)) | ||
1097 | + if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000)) | ||
1098 | break; | ||
1099 | ) < 0) { | ||
1100 | - nvkm_error(subdev, "ch %d init: %08x\n", chid, | ||
1101 | - nvkm_rd32(device, 0x610490 + (chid * 0x10))); | ||
1102 | + nvkm_error(subdev, "ch %d init: %08x\n", user, | ||
1103 | + nvkm_rd32(device, 0x610490 + (ctrl * 0x10))); | ||
1104 | return -EBUSY; | ||
1105 | } | ||
1106 | |||
1107 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c | ||
1108 | index 9c6645a357b9..0a1381a84552 100644 | ||
1109 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c | ||
1110 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c | ||
1111 | @@ -149,7 +149,7 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func, | ||
1112 | chan->func = func; | ||
1113 | |||
1114 | ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root, | ||
1115 | - chid, head, oclass, &chan->base); | ||
1116 | + chid, chid, head, oclass, &chan->base); | ||
1117 | if (ret) | ||
1118 | return ret; | ||
1119 | |||
1120 | @@ -179,9 +179,9 @@ nv50_disp_dmac_bind(struct nv50_disp_dmac *chan, | ||
1121 | struct nvkm_object *object, u32 handle) | ||
1122 | { | ||
1123 | return nvkm_ramht_insert(chan->base.root->ramht, object, | ||
1124 | - chan->base.chid, -10, handle, | ||
1125 | - chan->base.chid << 28 | | ||
1126 | - chan->base.chid); | ||
1127 | + chan->base.chid.user, -10, handle, | ||
1128 | + chan->base.chid.user << 28 | | ||
1129 | + chan->base.chid.user); | ||
1130 | } | ||
1131 | |||
1132 | static void | ||
1133 | @@ -190,21 +190,22 @@ nv50_disp_dmac_fini(struct nv50_disp_dmac *chan) | ||
1134 | struct nv50_disp *disp = chan->base.root->disp; | ||
1135 | struct nvkm_subdev *subdev = &disp->base.engine.subdev; | ||
1136 | struct nvkm_device *device = subdev->device; | ||
1137 | - int chid = chan->base.chid; | ||
1138 | + int ctrl = chan->base.chid.ctrl; | ||
1139 | + int user = chan->base.chid.user; | ||
1140 | |||
1141 | /* deactivate channel */ | ||
1142 | - nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000); | ||
1143 | - nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000); | ||
1144 | + nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000); | ||
1145 | + nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000); | ||
1146 | if (nvkm_msec(device, 2000, | ||
1147 | - if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000)) | ||
1148 | + if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000)) | ||
1149 | break; | ||
1150 | ) < 0) { | ||
1151 | - nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid, | ||
1152 | - nvkm_rd32(device, 0x610200 + (chid * 0x10))); | ||
1153 | + nvkm_error(subdev, "ch %d fini timeout, %08x\n", user, | ||
1154 | + nvkm_rd32(device, 0x610200 + (ctrl * 0x10))); | ||
1155 | } | ||
1156 | |||
1157 | /* disable error reporting and completion notifications */ | ||
1158 | - nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid); | ||
1159 | + nvkm_mask(device, 0x610028, 0x00010001 << user, 0x00000000 << user); | ||
1160 | } | ||
1161 | |||
1162 | static int | ||
1163 | @@ -213,26 +214,27 @@ nv50_disp_dmac_init(struct nv50_disp_dmac *chan) | ||
1164 | struct nv50_disp *disp = chan->base.root->disp; | ||
1165 | struct nvkm_subdev *subdev = &disp->base.engine.subdev; | ||
1166 | struct nvkm_device *device = subdev->device; | ||
1167 | - int chid = chan->base.chid; | ||
1168 | + int ctrl = chan->base.chid.ctrl; | ||
1169 | + int user = chan->base.chid.user; | ||
1170 | |||
1171 | /* enable error reporting */ | ||
1172 | - nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid); | ||
1173 | + nvkm_mask(device, 0x610028, 0x00010000 << user, 0x00010000 << user); | ||
1174 | |||
1175 | /* initialise channel for dma command submission */ | ||
1176 | - nvkm_wr32(device, 0x610204 + (chid * 0x0010), chan->push); | ||
1177 | - nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000); | ||
1178 | - nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid); | ||
1179 | - nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010); | ||
1180 | - nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000); | ||
1181 | - nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013); | ||
1182 | + nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push); | ||
1183 | + nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000); | ||
1184 | + nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl); | ||
1185 | + nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010); | ||
1186 | + nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000); | ||
1187 | + nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013); | ||
1188 | |||
1189 | /* wait for it to go inactive */ | ||
1190 | if (nvkm_msec(device, 2000, | ||
1191 | - if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000)) | ||
1192 | + if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000)) | ||
1193 | break; | ||
1194 | ) < 0) { | ||
1195 | - nvkm_error(subdev, "ch %d init timeout, %08x\n", chid, | ||
1196 | - nvkm_rd32(device, 0x610200 + (chid * 0x10))); | ||
1197 | + nvkm_error(subdev, "ch %d init timeout, %08x\n", user, | ||
1198 | + nvkm_rd32(device, 0x610200 + (ctrl * 0x10))); | ||
1199 | return -EBUSY; | ||
1200 | } | ||
1201 | |||
1202 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c | ||
1203 | index 54a4ae8d66c6..5ad5d0f5db05 100644 | ||
1204 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c | ||
1205 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c | ||
1206 | @@ -33,5 +33,5 @@ g84_disp_oimm_oclass = { | ||
1207 | .base.maxver = 0, | ||
1208 | .ctor = nv50_disp_oimm_new, | ||
1209 | .func = &nv50_disp_pioc_func, | ||
1210 | - .chid = 5, | ||
1211 | + .chid = { 5, 5 }, | ||
1212 | }; | ||
1213 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c | ||
1214 | index c658db54afc5..1f9fd3403f07 100644 | ||
1215 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c | ||
1216 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c | ||
1217 | @@ -33,5 +33,5 @@ gf119_disp_oimm_oclass = { | ||
1218 | .base.maxver = 0, | ||
1219 | .ctor = nv50_disp_oimm_new, | ||
1220 | .func = &gf119_disp_pioc_func, | ||
1221 | - .chid = 9, | ||
1222 | + .chid = { 9, 9 }, | ||
1223 | }; | ||
1224 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c | ||
1225 | index b1fde8c125d6..0c09fe85e952 100644 | ||
1226 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c | ||
1227 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c | ||
1228 | @@ -33,5 +33,5 @@ gk104_disp_oimm_oclass = { | ||
1229 | .base.maxver = 0, | ||
1230 | .ctor = nv50_disp_oimm_new, | ||
1231 | .func = &gf119_disp_pioc_func, | ||
1232 | - .chid = 9, | ||
1233 | + .chid = { 9, 9 }, | ||
1234 | }; | ||
1235 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c | ||
1236 | new file mode 100644 | ||
1237 | index 000000000000..abf82365c671 | ||
1238 | --- /dev/null | ||
1239 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c | ||
1240 | @@ -0,0 +1,37 @@ | ||
1241 | +/* | ||
1242 | + * Copyright 2016 Red Hat Inc. | ||
1243 | + * | ||
1244 | + * Permission is hereby granted, free of charge, to any person obtaining a | ||
1245 | + * copy of this software and associated documentation files (the "Software"), | ||
1246 | + * to deal in the Software without restriction, including without limitation | ||
1247 | + * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
1248 | + * and/or sell copies of the Software, and to permit persons to whom the | ||
1249 | + * Software is furnished to do so, subject to the following conditions: | ||
1250 | + * | ||
1251 | + * The above copyright notice and this permission notice shall be included in | ||
1252 | + * all copies or substantial portions of the Software. | ||
1253 | + * | ||
1254 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
1255 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
1256 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
1257 | + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
1258 | + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
1259 | + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
1260 | + * OTHER DEALINGS IN THE SOFTWARE. | ||
1261 | + * | ||
1262 | + * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
1263 | + */ | ||
1264 | +#include "channv50.h" | ||
1265 | +#include "rootnv50.h" | ||
1266 | + | ||
1267 | +#include <nvif/class.h> | ||
1268 | + | ||
1269 | +const struct nv50_disp_pioc_oclass | ||
1270 | +gp102_disp_oimm_oclass = { | ||
1271 | + .base.oclass = GK104_DISP_OVERLAY, | ||
1272 | + .base.minver = 0, | ||
1273 | + .base.maxver = 0, | ||
1274 | + .ctor = nv50_disp_oimm_new, | ||
1275 | + .func = &gf119_disp_pioc_func, | ||
1276 | + .chid = { 9, 13 }, | ||
1277 | +}; | ||
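
The { ctrl, user } pair above is the crux of this series: GP102 keeps the familiar control-register bank index (9 for the overlay immediate channel) but reports events under a different user-visible channel id (13); on earlier chips the two are equal. In the fini/init paths above, ctrl selects the per-channel register bank while user selects the error/completion mask bit. A minimal userspace sketch of how the two indices are consumed; the struct shape and register base mirror the patch, everything else is illustrative:

#include <stdio.h>

/* Mirrors the new chid pair on nv50_disp_chan; values from gp102_disp_oimm_oclass. */
struct disp_chid { int ctrl; int user; };

int main(void)
{
    struct disp_chid oimm = { 9, 13 };  /* GP102 overlay immediate channel */

    /* ctrl picks the per-channel control register bank ... */
    unsigned reg = 0x610490 + (oimm.ctrl * 0x10);
    /* ... while user picks the error/completion interrupt bit. */
    unsigned irq = 0x00000001u << oimm.user;

    printf("control reg %#x, user irq bit %#010x\n", reg, irq);
    return 0;
}
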
1278 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c | ||
1279 | index f4e7eb3d1177..1281db28aebd 100644 | ||
1280 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c | ||
1281 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c | ||
1282 | @@ -33,5 +33,5 @@ gt215_disp_oimm_oclass = { | ||
1283 | .base.maxver = 0, | ||
1284 | .ctor = nv50_disp_oimm_new, | ||
1285 | .func = &nv50_disp_pioc_func, | ||
1286 | - .chid = 5, | ||
1287 | + .chid = { 5, 5 }, | ||
1288 | }; | ||
1289 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c | ||
1290 | index 3940b9c966ec..07540f3d32dc 100644 | ||
1291 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c | ||
1292 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c | ||
1293 | @@ -33,7 +33,7 @@ | ||
1294 | int | ||
1295 | nv50_disp_oimm_new(const struct nv50_disp_chan_func *func, | ||
1296 | const struct nv50_disp_chan_mthd *mthd, | ||
1297 | - struct nv50_disp_root *root, int chid, | ||
1298 | + struct nv50_disp_root *root, int ctrl, int user, | ||
1299 | const struct nvkm_oclass *oclass, void *data, u32 size, | ||
1300 | struct nvkm_object **pobject) | ||
1301 | { | ||
1302 | @@ -54,7 +54,7 @@ nv50_disp_oimm_new(const struct nv50_disp_chan_func *func, | ||
1303 | } else | ||
1304 | return ret; | ||
1305 | |||
1306 | - return nv50_disp_chan_new_(func, mthd, root, chid + head, | ||
1307 | + return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head, | ||
1308 | head, oclass, pobject); | ||
1309 | } | ||
1310 | |||
1311 | @@ -65,5 +65,5 @@ nv50_disp_oimm_oclass = { | ||
1312 | .base.maxver = 0, | ||
1313 | .ctor = nv50_disp_oimm_new, | ||
1314 | .func = &nv50_disp_pioc_func, | ||
1315 | - .chid = 5, | ||
1316 | + .chid = { 5, 5 }, | ||
1317 | }; | ||
1318 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c | ||
1319 | index a625a9876e34..0abaa6431943 100644 | ||
1320 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c | ||
1321 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c | ||
1322 | @@ -32,20 +32,21 @@ gf119_disp_pioc_fini(struct nv50_disp_chan *chan) | ||
1323 | struct nv50_disp *disp = chan->root->disp; | ||
1324 | struct nvkm_subdev *subdev = &disp->base.engine.subdev; | ||
1325 | struct nvkm_device *device = subdev->device; | ||
1326 | - int chid = chan->chid; | ||
1327 | + int ctrl = chan->chid.ctrl; | ||
1328 | + int user = chan->chid.user; | ||
1329 | |||
1330 | - nvkm_mask(device, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000); | ||
1331 | + nvkm_mask(device, 0x610490 + (ctrl * 0x10), 0x00000001, 0x00000000); | ||
1332 | if (nvkm_msec(device, 2000, | ||
1333 | - if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x00030000)) | ||
1334 | + if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x00030000)) | ||
1335 | break; | ||
1336 | ) < 0) { | ||
1337 | - nvkm_error(subdev, "ch %d fini: %08x\n", chid, | ||
1338 | - nvkm_rd32(device, 0x610490 + (chid * 0x10))); | ||
1339 | + nvkm_error(subdev, "ch %d fini: %08x\n", user, | ||
1340 | + nvkm_rd32(device, 0x610490 + (ctrl * 0x10))); | ||
1341 | } | ||
1342 | |||
1343 | /* disable error reporting and completion notification */ | ||
1344 | - nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000); | ||
1345 | - nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000); | ||
1346 | + nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000); | ||
1347 | + nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000); | ||
1348 | } | ||
1349 | |||
1350 | static int | ||
1351 | @@ -54,20 +55,21 @@ gf119_disp_pioc_init(struct nv50_disp_chan *chan) | ||
1352 | struct nv50_disp *disp = chan->root->disp; | ||
1353 | struct nvkm_subdev *subdev = &disp->base.engine.subdev; | ||
1354 | struct nvkm_device *device = subdev->device; | ||
1355 | - int chid = chan->chid; | ||
1356 | + int ctrl = chan->chid.ctrl; | ||
1357 | + int user = chan->chid.user; | ||
1358 | |||
1359 | /* enable error reporting */ | ||
1360 | - nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); | ||
1361 | + nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user); | ||
1362 | |||
1363 | /* activate channel */ | ||
1364 | - nvkm_wr32(device, 0x610490 + (chid * 0x10), 0x00000001); | ||
1365 | + nvkm_wr32(device, 0x610490 + (ctrl * 0x10), 0x00000001); | ||
1366 | if (nvkm_msec(device, 2000, | ||
1367 | - u32 tmp = nvkm_rd32(device, 0x610490 + (chid * 0x10)); | ||
1368 | + u32 tmp = nvkm_rd32(device, 0x610490 + (ctrl * 0x10)); | ||
1369 | if ((tmp & 0x00030000) == 0x00010000) | ||
1370 | break; | ||
1371 | ) < 0) { | ||
1372 | - nvkm_error(subdev, "ch %d init: %08x\n", chid, | ||
1373 | - nvkm_rd32(device, 0x610490 + (chid * 0x10))); | ||
1374 | + nvkm_error(subdev, "ch %d init: %08x\n", user, | ||
1375 | + nvkm_rd32(device, 0x610490 + (ctrl * 0x10))); | ||
1376 | return -EBUSY; | ||
1377 | } | ||
1378 | |||
1379 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c | ||
1380 | index 9d2618dacf20..0211e0e8a35f 100644 | ||
1381 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c | ||
1382 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c | ||
1383 | @@ -32,15 +32,16 @@ nv50_disp_pioc_fini(struct nv50_disp_chan *chan) | ||
1384 | struct nv50_disp *disp = chan->root->disp; | ||
1385 | struct nvkm_subdev *subdev = &disp->base.engine.subdev; | ||
1386 | struct nvkm_device *device = subdev->device; | ||
1387 | - int chid = chan->chid; | ||
1388 | + int ctrl = chan->chid.ctrl; | ||
1389 | + int user = chan->chid.user; | ||
1390 | |||
1391 | - nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000); | ||
1392 | + nvkm_mask(device, 0x610200 + (ctrl * 0x10), 0x00000001, 0x00000000); | ||
1393 | if (nvkm_msec(device, 2000, | ||
1394 | - if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000)) | ||
1395 | + if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000)) | ||
1396 | break; | ||
1397 | ) < 0) { | ||
1398 | - nvkm_error(subdev, "ch %d timeout: %08x\n", chid, | ||
1399 | - nvkm_rd32(device, 0x610200 + (chid * 0x10))); | ||
1400 | + nvkm_error(subdev, "ch %d timeout: %08x\n", user, | ||
1401 | + nvkm_rd32(device, 0x610200 + (ctrl * 0x10))); | ||
1402 | } | ||
1403 | } | ||
1404 | |||
1405 | @@ -50,26 +51,27 @@ nv50_disp_pioc_init(struct nv50_disp_chan *chan) | ||
1406 | struct nv50_disp *disp = chan->root->disp; | ||
1407 | struct nvkm_subdev *subdev = &disp->base.engine.subdev; | ||
1408 | struct nvkm_device *device = subdev->device; | ||
1409 | - int chid = chan->chid; | ||
1410 | + int ctrl = chan->chid.ctrl; | ||
1411 | + int user = chan->chid.user; | ||
1412 | |||
1413 | - nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000); | ||
1414 | + nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00002000); | ||
1415 | if (nvkm_msec(device, 2000, | ||
1416 | - if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000)) | ||
1417 | + if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000)) | ||
1418 | break; | ||
1419 | ) < 0) { | ||
1420 | - nvkm_error(subdev, "ch %d timeout0: %08x\n", chid, | ||
1421 | - nvkm_rd32(device, 0x610200 + (chid * 0x10))); | ||
1422 | + nvkm_error(subdev, "ch %d timeout0: %08x\n", user, | ||
1423 | + nvkm_rd32(device, 0x610200 + (ctrl * 0x10))); | ||
1424 | return -EBUSY; | ||
1425 | } | ||
1426 | |||
1427 | - nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001); | ||
1428 | + nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00000001); | ||
1429 | if (nvkm_msec(device, 2000, | ||
1430 | - u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10)); | ||
1431 | + u32 tmp = nvkm_rd32(device, 0x610200 + (ctrl * 0x10)); | ||
1432 | if ((tmp & 0x00030000) == 0x00010000) | ||
1433 | break; | ||
1434 | ) < 0) { | ||
1435 | - nvkm_error(subdev, "ch %d timeout1: %08x\n", chid, | ||
1436 | - nvkm_rd32(device, 0x610200 + (chid * 0x10))); | ||
1437 | + nvkm_error(subdev, "ch %d timeout1: %08x\n", user, | ||
1438 | + nvkm_rd32(device, 0x610200 + (ctrl * 0x10))); | ||
1439 | return -EBUSY; | ||
1440 | } | ||
1441 | |||
1442 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c | ||
1443 | index 8443e04dc626..b053b291cd94 100644 | ||
1444 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c | ||
1445 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c | ||
1446 | @@ -36,8 +36,8 @@ gp104_disp_root = { | ||
1447 | &gp104_disp_ovly_oclass, | ||
1448 | }, | ||
1449 | .pioc = { | ||
1450 | - &gk104_disp_oimm_oclass, | ||
1451 | - &gk104_disp_curs_oclass, | ||
1452 | + &gp102_disp_oimm_oclass, | ||
1453 | + &gp102_disp_curs_oclass, | ||
1454 | }, | ||
1455 | }; | ||
1456 | |||
1457 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c | ||
1458 | index 2f9cecd81d04..05c829a603d1 100644 | ||
1459 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c | ||
1460 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c | ||
1461 | @@ -207,8 +207,8 @@ nv50_disp_root_pioc_new_(const struct nvkm_oclass *oclass, | ||
1462 | { | ||
1463 | const struct nv50_disp_pioc_oclass *sclass = oclass->priv; | ||
1464 | struct nv50_disp_root *root = nv50_disp_root(oclass->parent); | ||
1465 | - return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid, | ||
1466 | - oclass, data, size, pobject); | ||
1467 | + return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid.ctrl, | ||
1468 | + sclass->chid.user, oclass, data, size, pobject); | ||
1469 | } | ||
1470 | |||
1471 | static int | ||
1472 | diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c | ||
1473 | index d544ff9b0d46..7aadce1f7e7a 100644 | ||
1474 | --- a/drivers/gpu/drm/vc4/vc4_crtc.c | ||
1475 | +++ b/drivers/gpu/drm/vc4/vc4_crtc.c | ||
1476 | @@ -83,8 +83,7 @@ struct vc4_crtc_data { | ||
1477 | /* Which channel of the HVS this pixelvalve sources from. */ | ||
1478 | int hvs_channel; | ||
1479 | |||
1480 | - enum vc4_encoder_type encoder0_type; | ||
1481 | - enum vc4_encoder_type encoder1_type; | ||
1482 | + enum vc4_encoder_type encoder_types[4]; | ||
1483 | }; | ||
1484 | |||
1485 | #define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset)) | ||
1486 | @@ -669,6 +668,14 @@ void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id) | ||
1487 | CRTC_WRITE(PV_INTEN, 0); | ||
1488 | } | ||
1489 | |||
1490 | +/* Must be called with the event lock held */ | ||
1491 | +bool vc4_event_pending(struct drm_crtc *crtc) | ||
1492 | +{ | ||
1493 | + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); | ||
1494 | + | ||
1495 | + return !!vc4_crtc->event; | ||
1496 | +} | ||
1497 | + | ||
1498 | static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) | ||
1499 | { | ||
1500 | struct drm_crtc *crtc = &vc4_crtc->base; | ||
1501 | @@ -859,20 +866,26 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = { | ||
1502 | |||
1503 | static const struct vc4_crtc_data pv0_data = { | ||
1504 | .hvs_channel = 0, | ||
1505 | - .encoder0_type = VC4_ENCODER_TYPE_DSI0, | ||
1506 | - .encoder1_type = VC4_ENCODER_TYPE_DPI, | ||
1507 | + .encoder_types = { | ||
1508 | + [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0, | ||
1509 | + [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI, | ||
1510 | + }, | ||
1511 | }; | ||
1512 | |||
1513 | static const struct vc4_crtc_data pv1_data = { | ||
1514 | .hvs_channel = 2, | ||
1515 | - .encoder0_type = VC4_ENCODER_TYPE_DSI1, | ||
1516 | - .encoder1_type = VC4_ENCODER_TYPE_SMI, | ||
1517 | + .encoder_types = { | ||
1518 | + [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1, | ||
1519 | + [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_SMI, | ||
1520 | + }, | ||
1521 | }; | ||
1522 | |||
1523 | static const struct vc4_crtc_data pv2_data = { | ||
1524 | .hvs_channel = 1, | ||
1525 | - .encoder0_type = VC4_ENCODER_TYPE_VEC, | ||
1526 | - .encoder1_type = VC4_ENCODER_TYPE_HDMI, | ||
1527 | + .encoder_types = { | ||
1528 | + [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI, | ||
1529 | + [PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC, | ||
1530 | + }, | ||
1531 | }; | ||
1532 | |||
1533 | static const struct of_device_id vc4_crtc_dt_match[] = { | ||
1534 | @@ -886,17 +899,20 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm, | ||
1535 | struct drm_crtc *crtc) | ||
1536 | { | ||
1537 | struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); | ||
1538 | + const struct vc4_crtc_data *crtc_data = vc4_crtc->data; | ||
1539 | + const enum vc4_encoder_type *encoder_types = crtc_data->encoder_types; | ||
1540 | struct drm_encoder *encoder; | ||
1541 | |||
1542 | drm_for_each_encoder(encoder, drm) { | ||
1543 | struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); | ||
1544 | - | ||
1545 | - if (vc4_encoder->type == vc4_crtc->data->encoder0_type) { | ||
1546 | - vc4_encoder->clock_select = 0; | ||
1547 | - encoder->possible_crtcs |= drm_crtc_mask(crtc); | ||
1548 | - } else if (vc4_encoder->type == vc4_crtc->data->encoder1_type) { | ||
1549 | - vc4_encoder->clock_select = 1; | ||
1550 | - encoder->possible_crtcs |= drm_crtc_mask(crtc); | ||
1551 | + int i; | ||
1552 | + | ||
1553 | + for (i = 0; i < ARRAY_SIZE(crtc_data->encoder_types); i++) { | ||
1554 | + if (vc4_encoder->type == encoder_types[i]) { | ||
1555 | + vc4_encoder->clock_select = i; | ||
1556 | + encoder->possible_crtcs |= drm_crtc_mask(crtc); | ||
1557 | + break; | ||
1558 | + } | ||
1559 | } | ||
1560 | } | ||
1561 | } | ||
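
Replacing the two encoder*_type fields with an array indexed by the PV_CONTROL_CLK_SELECT_* values lets clock_select fall straight out of the array index, and the new VC4_ENCODER_TYPE_NONE zero value keeps unset array slots from matching a real encoder. A self-contained sketch of the lookup, with simplified enum values (the real ones live in vc4_drv.h and vc4_regs.h):

#include <stdio.h>

enum encoder_type { NONE, HDMI, VEC, DSI0, DSI1, DPI, SMI };

#define CLK_SELECT_DSI           0
#define CLK_SELECT_DPI_SMI_HDMI  1
#define CLK_SELECT_VEC           2

int main(void)
{
    /* pv2: HDMI on clock source 1, VEC on the new clock source 2 */
    enum encoder_type encoder_types[4] = {
        [CLK_SELECT_DPI_SMI_HDMI] = HDMI,
        [CLK_SELECT_VEC]          = VEC,
    };
    enum encoder_type want = VEC;

    for (int i = 0; i < 4; i++) {
        if (encoder_types[i] == want) {
            printf("clock_select = %d\n", i);   /* prints 2 */
            break;
        }
    }
    return 0;
}
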
1562 | diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h | ||
1563 | index 7c1e4d97486f..50a55ef999d6 100644 | ||
1564 | --- a/drivers/gpu/drm/vc4/vc4_drv.h | ||
1565 | +++ b/drivers/gpu/drm/vc4/vc4_drv.h | ||
1566 | @@ -194,6 +194,7 @@ to_vc4_plane(struct drm_plane *plane) | ||
1567 | } | ||
1568 | |||
1569 | enum vc4_encoder_type { | ||
1570 | + VC4_ENCODER_TYPE_NONE, | ||
1571 | VC4_ENCODER_TYPE_HDMI, | ||
1572 | VC4_ENCODER_TYPE_VEC, | ||
1573 | VC4_ENCODER_TYPE_DSI0, | ||
1574 | @@ -440,6 +441,7 @@ int vc4_bo_stats_debugfs(struct seq_file *m, void *arg); | ||
1575 | extern struct platform_driver vc4_crtc_driver; | ||
1576 | int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id); | ||
1577 | void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id); | ||
1578 | +bool vc4_event_pending(struct drm_crtc *crtc); | ||
1579 | int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg); | ||
1580 | int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id, | ||
1581 | unsigned int flags, int *vpos, int *hpos, | ||
1582 | diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c | ||
1583 | index c1f65c6c8e60..67af2af70af0 100644 | ||
1584 | --- a/drivers/gpu/drm/vc4/vc4_kms.c | ||
1585 | +++ b/drivers/gpu/drm/vc4/vc4_kms.c | ||
1586 | @@ -119,17 +119,34 @@ static int vc4_atomic_commit(struct drm_device *dev, | ||
1587 | |||
1588 | /* Make sure that any outstanding modesets have finished. */ | ||
1589 | if (nonblock) { | ||
1590 | - ret = down_trylock(&vc4->async_modeset); | ||
1591 | - if (ret) { | ||
1592 | + struct drm_crtc *crtc; | ||
1593 | + struct drm_crtc_state *crtc_state; | ||
1594 | + unsigned long flags; | ||
1595 | + bool busy = false; | ||
1596 | + | ||
1597 | + /* | ||
1598 | + * If there's an undispatched event to send then we're | ||
1599 | + * obviously still busy. If there isn't, then we can | ||
1600 | + * unconditionally wait for the semaphore because it | ||
1601 | + * shouldn't be contended (for long). | ||
1602 | + * | ||
1603 | + * This is to prevent a race where queuing a new flip | ||
1604 | + * from userspace immediately on receipt of an event | ||
1605 | + * beats our clean-up and returns EBUSY. | ||
1606 | + */ | ||
1607 | + spin_lock_irqsave(&dev->event_lock, flags); | ||
1608 | + for_each_crtc_in_state(state, crtc, crtc_state, i) | ||
1609 | + busy |= vc4_event_pending(crtc); | ||
1610 | + spin_unlock_irqrestore(&dev->event_lock, flags); | ||
1611 | + if (busy) { | ||
1612 | kfree(c); | ||
1613 | return -EBUSY; | ||
1614 | } | ||
1615 | - } else { | ||
1616 | - ret = down_interruptible(&vc4->async_modeset); | ||
1617 | - if (ret) { | ||
1618 | - kfree(c); | ||
1619 | - return ret; | ||
1620 | - } | ||
1621 | + } | ||
1622 | + ret = down_interruptible(&vc4->async_modeset); | ||
1623 | + if (ret) { | ||
1624 | + kfree(c); | ||
1625 | + return ret; | ||
1626 | } | ||
1627 | |||
1628 | ret = drm_atomic_helper_prepare_planes(dev, state); | ||
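
The rework above stops treating a held semaphore as "busy": only an undispatched flip event means userspace can still race the driver's cleanup, and the semaphore itself is now only held briefly, so both the blocking and nonblocking paths may wait on it. A userspace analogue using POSIX semaphores (the kernel uses its own semaphore API; the stubbed event check and the immediate sem_post() are simplifications):

#include <errno.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

static sem_t async_modeset;
static bool event_pending;      /* would be read under dev->event_lock */

static int atomic_commit(bool nonblock)
{
    if (nonblock && event_pending)
        return -EBUSY;          /* userspace raced ahead of event cleanup */

    sem_wait(&async_modeset);   /* held only briefly now, safe to wait */
    /* ... prepare planes, queue or run the commit ... */
    sem_post(&async_modeset);
    return 0;
}

int main(void)
{
    sem_init(&async_modeset, 0, 1);
    printf("commit -> %d\n", atomic_commit(true)); /* 0: no pending event */
    return 0;
}
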
1629 | diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h | ||
1630 | index 1aa44c2db556..39f6886b2410 100644 | ||
1631 | --- a/drivers/gpu/drm/vc4/vc4_regs.h | ||
1632 | +++ b/drivers/gpu/drm/vc4/vc4_regs.h | ||
1633 | @@ -177,8 +177,9 @@ | ||
1634 | # define PV_CONTROL_WAIT_HSTART BIT(12) | ||
1635 | # define PV_CONTROL_PIXEL_REP_MASK VC4_MASK(5, 4) | ||
1636 | # define PV_CONTROL_PIXEL_REP_SHIFT 4 | ||
1637 | -# define PV_CONTROL_CLK_SELECT_DSI_VEC 0 | ||
1638 | +# define PV_CONTROL_CLK_SELECT_DSI 0 | ||
1639 | # define PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI 1 | ||
1640 | +# define PV_CONTROL_CLK_SELECT_VEC 2 | ||
1641 | # define PV_CONTROL_CLK_SELECT_MASK VC4_MASK(3, 2) | ||
1642 | # define PV_CONTROL_CLK_SELECT_SHIFT 2 | ||
1643 | # define PV_CONTROL_FIFO_CLR BIT(1) | ||
1644 | diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c | ||
1645 | index c5dee300e8a3..acb9d250a905 100644 | ||
1646 | --- a/drivers/irqchip/irq-gic-v3-its.c | ||
1647 | +++ b/drivers/irqchip/irq-gic-v3-its.c | ||
1648 | @@ -1598,6 +1598,14 @@ static void __maybe_unused its_enable_quirk_cavium_23144(void *data) | ||
1649 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; | ||
1650 | } | ||
1651 | |||
1652 | +static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) | ||
1653 | +{ | ||
1654 | + struct its_node *its = data; | ||
1655 | + | ||
1656 | + /* On QDF2400, the size of the ITE is 16Bytes */ | ||
1657 | + its->ite_size = 16; | ||
1658 | +} | ||
1659 | + | ||
1660 | static const struct gic_quirk its_quirks[] = { | ||
1661 | #ifdef CONFIG_CAVIUM_ERRATUM_22375 | ||
1662 | { | ||
1663 | @@ -1615,6 +1623,14 @@ static const struct gic_quirk its_quirks[] = { | ||
1664 | .init = its_enable_quirk_cavium_23144, | ||
1665 | }, | ||
1666 | #endif | ||
1667 | +#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 | ||
1668 | + { | ||
1669 | + .desc = "ITS: QDF2400 erratum 0065", | ||
1670 | + .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ | ||
1671 | + .mask = 0xffffffff, | ||
1672 | + .init = its_enable_quirk_qdf2400_e0065, | ||
1673 | + }, | ||
1674 | +#endif | ||
1675 | { | ||
1676 | } | ||
1677 | }; | ||
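
The quirk is keyed on the ITS IIDR register: the probed value is masked and compared against each table entry, and a match runs the init hook, here shrinking the interrupt translation entry to 16 bytes. A small sketch of the match, reusing the iidr/mask values from the hunk above:

#include <stdint.h>
#include <stdio.h>

struct quirk { uint32_t iidr, mask; const char *desc; };

int main(void)
{
    static const struct quirk quirks[] = {
        { 0x00001070, 0xffffffff, "ITS: QDF2400 erratum 0065" },
    };
    uint32_t iidr = 0x00001070;         /* as if read from GITS_IIDR */

    for (unsigned i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
        if ((iidr & quirks[i].mask) == quirks[i].iidr)
            printf("%s -> ITE size forced to 16 bytes\n", quirks[i].desc);
    return 0;
}
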
1678 | diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c | ||
1679 | index 302e284a95eb..cde43b63c3da 100644 | ||
1680 | --- a/drivers/media/usb/uvc/uvc_driver.c | ||
1681 | +++ b/drivers/media/usb/uvc/uvc_driver.c | ||
1682 | @@ -1595,6 +1595,114 @@ static const char *uvc_print_chain(struct uvc_video_chain *chain) | ||
1683 | return buffer; | ||
1684 | } | ||
1685 | |||
1686 | +static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev) | ||
1687 | +{ | ||
1688 | + struct uvc_video_chain *chain; | ||
1689 | + | ||
1690 | + chain = kzalloc(sizeof(*chain), GFP_KERNEL); | ||
1691 | + if (chain == NULL) | ||
1692 | + return NULL; | ||
1693 | + | ||
1694 | + INIT_LIST_HEAD(&chain->entities); | ||
1695 | + mutex_init(&chain->ctrl_mutex); | ||
1696 | + chain->dev = dev; | ||
1697 | + v4l2_prio_init(&chain->prio); | ||
1698 | + | ||
1699 | + return chain; | ||
1700 | +} | ||
1701 | + | ||
1702 | +/* | ||
1703 | + * Fallback heuristic for devices that don't connect units and terminals in a | ||
1704 | + * valid chain. | ||
1705 | + * | ||
1706 | + * Some devices have invalid baSourceID references, causing uvc_scan_chain() | ||
1707 | + * to fail, but if we just take the entities we can find and put them together | ||
1708 | + * in the most sensible chain we can think of, turns out they do work anyway. | ||
1709 | + * Note: This heuristic assumes there is a single chain. | ||
1710 | + * | ||
1711 | + * At the time of writing, devices known to have such a broken chain are | ||
1712 | + * - Acer Integrated Camera (5986:055a) | ||
1713 | + * - Realtek rtl157a7 (0bda:57a7) | ||
1714 | + */ | ||
1715 | +static int uvc_scan_fallback(struct uvc_device *dev) | ||
1716 | +{ | ||
1717 | + struct uvc_video_chain *chain; | ||
1718 | + struct uvc_entity *iterm = NULL; | ||
1719 | + struct uvc_entity *oterm = NULL; | ||
1720 | + struct uvc_entity *entity; | ||
1721 | + struct uvc_entity *prev; | ||
1722 | + | ||
1723 | + /* | ||
1724 | + * Start by locating the input and output terminals. We only support | ||
1725 | + * devices with exactly one of each for now. | ||
1726 | + */ | ||
1727 | + list_for_each_entry(entity, &dev->entities, list) { | ||
1728 | + if (UVC_ENTITY_IS_ITERM(entity)) { | ||
1729 | + if (iterm) | ||
1730 | + return -EINVAL; | ||
1731 | + iterm = entity; | ||
1732 | + } | ||
1733 | + | ||
1734 | + if (UVC_ENTITY_IS_OTERM(entity)) { | ||
1735 | + if (oterm) | ||
1736 | + return -EINVAL; | ||
1737 | + oterm = entity; | ||
1738 | + } | ||
1739 | + } | ||
1740 | + | ||
1741 | + if (iterm == NULL || oterm == NULL) | ||
1742 | + return -EINVAL; | ||
1743 | + | ||
1744 | + /* Allocate the chain and fill it. */ | ||
1745 | + chain = uvc_alloc_chain(dev); | ||
1746 | + if (chain == NULL) | ||
1747 | + return -ENOMEM; | ||
1748 | + | ||
1749 | + if (uvc_scan_chain_entity(chain, oterm) < 0) | ||
1750 | + goto error; | ||
1751 | + | ||
1752 | + prev = oterm; | ||
1753 | + | ||
1754 | + /* | ||
1755 | + * Add all Processing and Extension Units with two pads. The order | ||
1756 | + * doesn't matter much, use reverse list traversal to connect units in | ||
1757 | + * UVC descriptor order as we build the chain from output to input. This | ||
1758 | + * leads to units appearing in the order meant by the manufacturer for | ||
1759 | + * the cameras known to require this heuristic. | ||
1760 | + */ | ||
1761 | + list_for_each_entry_reverse(entity, &dev->entities, list) { | ||
1762 | + if (entity->type != UVC_VC_PROCESSING_UNIT && | ||
1763 | + entity->type != UVC_VC_EXTENSION_UNIT) | ||
1764 | + continue; | ||
1765 | + | ||
1766 | + if (entity->num_pads != 2) | ||
1767 | + continue; | ||
1768 | + | ||
1769 | + if (uvc_scan_chain_entity(chain, entity) < 0) | ||
1770 | + goto error; | ||
1771 | + | ||
1772 | + prev->baSourceID[0] = entity->id; | ||
1773 | + prev = entity; | ||
1774 | + } | ||
1775 | + | ||
1776 | + if (uvc_scan_chain_entity(chain, iterm) < 0) | ||
1777 | + goto error; | ||
1778 | + | ||
1779 | + prev->baSourceID[0] = iterm->id; | ||
1780 | + | ||
1781 | + list_add_tail(&chain->list, &dev->chains); | ||
1782 | + | ||
1783 | + uvc_trace(UVC_TRACE_PROBE, | ||
1784 | + "Found a video chain by fallback heuristic (%s).\n", | ||
1785 | + uvc_print_chain(chain)); | ||
1786 | + | ||
1787 | + return 0; | ||
1788 | + | ||
1789 | +error: | ||
1790 | + kfree(chain); | ||
1791 | + return -EINVAL; | ||
1792 | +} | ||
1793 | + | ||
1794 | /* | ||
1795 | * Scan the device for video chains and register video devices. | ||
1796 | * | ||
1797 | @@ -1617,15 +1725,10 @@ static int uvc_scan_device(struct uvc_device *dev) | ||
1798 | if (term->chain.next || term->chain.prev) | ||
1799 | continue; | ||
1800 | |||
1801 | - chain = kzalloc(sizeof(*chain), GFP_KERNEL); | ||
1802 | + chain = uvc_alloc_chain(dev); | ||
1803 | if (chain == NULL) | ||
1804 | return -ENOMEM; | ||
1805 | |||
1806 | - INIT_LIST_HEAD(&chain->entities); | ||
1807 | - mutex_init(&chain->ctrl_mutex); | ||
1808 | - chain->dev = dev; | ||
1809 | - v4l2_prio_init(&chain->prio); | ||
1810 | - | ||
1811 | term->flags |= UVC_ENTITY_FLAG_DEFAULT; | ||
1812 | |||
1813 | if (uvc_scan_chain(chain, term) < 0) { | ||
1814 | @@ -1639,6 +1742,9 @@ static int uvc_scan_device(struct uvc_device *dev) | ||
1815 | list_add_tail(&chain->list, &dev->chains); | ||
1816 | } | ||
1817 | |||
1818 | + if (list_empty(&dev->chains)) | ||
1819 | + uvc_scan_fallback(dev); | ||
1820 | + | ||
1821 | if (list_empty(&dev->chains)) { | ||
1822 | uvc_printk(KERN_INFO, "No valid video chain found.\n"); | ||
1823 | return -1; | ||
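
The fallback heuristic builds the chain from the output terminal back to the input terminal, overwriting each entity's first baSourceID as it goes; traversing the entity list in reverse keeps the units in UVC descriptor order, as the comment in the hunk explains. A toy model of the linking with made-up entity ids:

#include <stdio.h>

int main(void)
{
    int iterm = 1, oterm = 3;           /* input/output terminal ids */
    int units[] = { 5, 6 };             /* descriptor order: PU 5, XU 6 */
    int nunits = 2;
    int source_of[8] = { 0 };           /* per-entity baSourceID[0] */
    int prev = oterm;

    for (int i = nunits - 1; i >= 0; i--) {     /* reverse traversal */
        source_of[prev] = units[i];
        prev = units[i];
    }
    source_of[prev] = iterm;

    /* Walk the result: OT 3 <- XU 6 <- PU 5 <- IT 1 */
    for (int e = oterm; source_of[e]; e = source_of[e])
        printf("entity %d sources from %d\n", e, source_of[e]);
    return 0;
}
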
1824 | diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c | ||
1825 | index a36022ba4e42..03dca732e4c6 100644 | ||
1826 | --- a/drivers/net/ethernet/ibm/ibmveth.c | ||
1827 | +++ b/drivers/net/ethernet/ibm/ibmveth.c | ||
1828 | @@ -1181,7 +1181,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, | ||
1829 | |||
1830 | static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt) | ||
1831 | { | ||
1832 | + struct tcphdr *tcph; | ||
1833 | int offset = 0; | ||
1834 | + int hdr_len; | ||
1835 | |||
1836 | /* only TCP packets will be aggregated */ | ||
1837 | if (skb->protocol == htons(ETH_P_IP)) { | ||
1838 | @@ -1208,14 +1210,20 @@ static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt) | ||
1839 | /* if mss is not set through Large Packet bit/mss in rx buffer, | ||
1840 | * expect that the mss will be written to the tcp header checksum. | ||
1841 | */ | ||
1842 | + tcph = (struct tcphdr *)(skb->data + offset); | ||
1843 | if (lrg_pkt) { | ||
1844 | skb_shinfo(skb)->gso_size = mss; | ||
1845 | } else if (offset) { | ||
1846 | - struct tcphdr *tcph = (struct tcphdr *)(skb->data + offset); | ||
1847 | - | ||
1848 | skb_shinfo(skb)->gso_size = ntohs(tcph->check); | ||
1849 | tcph->check = 0; | ||
1850 | } | ||
1851 | + | ||
1852 | + if (skb_shinfo(skb)->gso_size) { | ||
1853 | + hdr_len = offset + tcph->doff * 4; | ||
1854 | + skb_shinfo(skb)->gso_segs = | ||
1855 | + DIV_ROUND_UP(skb->len - hdr_len, | ||
1856 | + skb_shinfo(skb)->gso_size); | ||
1857 | + } | ||
1858 | } | ||
1859 | |||
1860 | static int ibmveth_poll(struct napi_struct *napi, int budget) | ||
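
With the MSS recovered (either from the receive buffer's large-packet metadata or smuggled through the TCP checksum field), the hunk above also derives gso_segs so the stack accounts segments correctly. A worked example of the arithmetic with made-up sizes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned skb_len  = 20 + 20 + 32000; /* IP hdr + TCP hdr + payload */
    unsigned offset   = 20;              /* start of the TCP header */
    unsigned doff     = 5;               /* TCP header length in words */
    unsigned gso_size = 1448;            /* MSS recovered from rx buffer */

    unsigned hdr_len  = offset + doff * 4;
    unsigned gso_segs = DIV_ROUND_UP(skb_len - hdr_len, gso_size);
    printf("hdr_len=%u gso_segs=%u\n", hdr_len, gso_segs); /* 40, 23 */
    return 0;
}
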
1861 | diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c | ||
1862 | index 5b54254aed4f..2788a5409023 100644 | ||
1863 | --- a/drivers/net/ethernet/intel/igb/e1000_phy.c | ||
1864 | +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c | ||
1865 | @@ -77,6 +77,10 @@ s32 igb_get_phy_id(struct e1000_hw *hw) | ||
1866 | s32 ret_val = 0; | ||
1867 | u16 phy_id; | ||
1868 | |||
1869 | + /* ensure PHY page selection to fix misconfigured i210 */ | ||
1870 | + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) | ||
1871 | + phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0); | ||
1872 | + | ||
1873 | ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); | ||
1874 | if (ret_val) | ||
1875 | goto out; | ||
1876 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | ||
1877 | index b30671376a3d..d4fa851ced2a 100644 | ||
1878 | --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | ||
1879 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | ||
1880 | @@ -81,6 +81,7 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) | ||
1881 | static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type) | ||
1882 | { | ||
1883 | priv->params.rq_wq_type = rq_type; | ||
1884 | + priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; | ||
1885 | switch (priv->params.rq_wq_type) { | ||
1886 | case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: | ||
1887 | priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; | ||
1888 | @@ -92,6 +93,10 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type) | ||
1889 | break; | ||
1890 | default: /* MLX5_WQ_TYPE_LINKED_LIST */ | ||
1891 | priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; | ||
1892 | + | ||
1893 | + /* Extra room needed for build_skb */ | ||
1894 | + priv->params.lro_wqe_sz -= MLX5_RX_HEADROOM + | ||
1895 | + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | ||
1896 | } | ||
1897 | priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, | ||
1898 | BIT(priv->params.log_rq_size)); | ||
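
Moving the lro_wqe_sz setup into mlx5e_set_rq_type_params() (completed by the removal a few hunks below) ties the build_skb reservation to the actual RQ type: only the linked-list type needs the headroom and skb_shared_info slack subtracted. A sketch of the arithmetic with illustrative constants; the real values come from the mlx5 headers:

#include <stdio.h>

#define DEFAULT_LRO_WQE_SZ  (64 * 1024)
#define RX_HEADROOM         256   /* illustrative, not the driver's value */
#define SKB_SHARED_INFO_SZ  320   /* stands in for SKB_DATA_ALIGN(...) */

int main(void)
{
    unsigned lro_wqe_sz = DEFAULT_LRO_WQE_SZ;
    int striding_rq = 0;                 /* MLX5_WQ_TYPE_LINKED_LIST */

    if (!striding_rq)                    /* extra room needed for build_skb */
        lro_wqe_sz -= RX_HEADROOM + SKB_SHARED_INFO_SZ;
    printf("lro_wqe_sz = %u\n", lro_wqe_sz);
    return 0;
}
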
1899 | @@ -3473,12 +3478,6 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, | ||
1900 | mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt, | ||
1901 | MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev)); | ||
1902 | |||
1903 | - priv->params.lro_wqe_sz = | ||
1904 | - MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - | ||
1905 | - /* Extra room needed for build_skb */ | ||
1906 | - MLX5_RX_HEADROOM - | ||
1907 | - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | ||
1908 | - | ||
1909 | /* Initialize pflags */ | ||
1910 | MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, | ||
1911 | priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE); | ||
1912 | @@ -3936,6 +3935,19 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev) | ||
1913 | } | ||
1914 | } | ||
1915 | |||
1916 | +static void mlx5e_unregister_vport_rep(struct mlx5_core_dev *mdev) | ||
1917 | +{ | ||
1918 | + struct mlx5_eswitch *esw = mdev->priv.eswitch; | ||
1919 | + int total_vfs = MLX5_TOTAL_VPORTS(mdev); | ||
1920 | + int vport; | ||
1921 | + | ||
1922 | + if (!MLX5_CAP_GEN(mdev, vport_group_manager)) | ||
1923 | + return; | ||
1924 | + | ||
1925 | + for (vport = 1; vport < total_vfs; vport++) | ||
1926 | + mlx5_eswitch_unregister_vport_rep(esw, vport); | ||
1927 | +} | ||
1928 | + | ||
1929 | void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev) | ||
1930 | { | ||
1931 | struct mlx5e_priv *priv = netdev_priv(netdev); | ||
1932 | @@ -3983,6 +3995,7 @@ static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv) | ||
1933 | return err; | ||
1934 | } | ||
1935 | |||
1936 | + mlx5e_register_vport_rep(mdev); | ||
1937 | return 0; | ||
1938 | } | ||
1939 | |||
1940 | @@ -3994,6 +4007,7 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv) | ||
1941 | if (!netif_device_present(netdev)) | ||
1942 | return; | ||
1943 | |||
1944 | + mlx5e_unregister_vport_rep(mdev); | ||
1945 | mlx5e_detach_netdev(mdev, netdev); | ||
1946 | mlx5e_destroy_mdev_resources(mdev); | ||
1947 | } | ||
1948 | @@ -4012,8 +4026,6 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) | ||
1949 | if (err) | ||
1950 | return NULL; | ||
1951 | |||
1952 | - mlx5e_register_vport_rep(mdev); | ||
1953 | - | ||
1954 | if (MLX5_CAP_GEN(mdev, vport_group_manager)) | ||
1955 | ppriv = &esw->offloads.vport_reps[0]; | ||
1956 | |||
1957 | @@ -4065,13 +4077,7 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv) | ||
1958 | |||
1959 | static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv) | ||
1960 | { | ||
1961 | - struct mlx5_eswitch *esw = mdev->priv.eswitch; | ||
1962 | - int total_vfs = MLX5_TOTAL_VPORTS(mdev); | ||
1963 | struct mlx5e_priv *priv = vpriv; | ||
1964 | - int vport; | ||
1965 | - | ||
1966 | - for (vport = 1; vport < total_vfs; vport++) | ||
1967 | - mlx5_eswitch_unregister_vport_rep(esw, vport); | ||
1968 | |||
1969 | unregister_netdev(priv->netdev); | ||
1970 | mlx5e_detach(mdev, vpriv); | ||
1971 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | ||
1972 | index e7b2158bb48a..796bdf06122c 100644 | ||
1973 | --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | ||
1974 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | ||
1975 | @@ -92,19 +92,18 @@ static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n) | ||
1976 | static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq, | ||
1977 | struct mlx5e_cq *cq, u32 cqcc) | ||
1978 | { | ||
1979 | - u16 wqe_cnt_step; | ||
1980 | - | ||
1981 | cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt; | ||
1982 | cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum; | ||
1983 | cq->title.op_own &= 0xf0; | ||
1984 | cq->title.op_own |= 0x01 & (cqcc >> cq->wq.log_sz); | ||
1985 | cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter); | ||
1986 | |||
1987 | - wqe_cnt_step = | ||
1988 | - rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ? | ||
1989 | - mpwrq_get_cqe_consumed_strides(&cq->title) : 1; | ||
1990 | - cq->decmprs_wqe_counter = | ||
1991 | - (cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1; | ||
1992 | + if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) | ||
1993 | + cq->decmprs_wqe_counter += | ||
1994 | + mpwrq_get_cqe_consumed_strides(&cq->title); | ||
1995 | + else | ||
1996 | + cq->decmprs_wqe_counter = | ||
1997 | + (cq->decmprs_wqe_counter + 1) & rq->wq.sz_m1; | ||
1998 | } | ||
1999 | |||
2000 | static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq, | ||
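
The fix above stops masking the striding-RQ counter with the queue mask: the consumed-strides increment can exceed the WQE count, and the counter appears intended to wrap naturally as a 16-bit value instead. Only the linked-list case keeps the (n + 1) & sz_m1 update. An illustrative sketch of the two cases:

#include <stdio.h>

int main(void)
{
    unsigned short counter = 15;
    unsigned sz_m1 = 15;                /* 16-entry linked-list queue */
    int striding = 0;
    int strides = 3;                    /* mpwrq consumed strides */

    if (striding)
        counter += strides;             /* no queue mask; u16 wraps on its own */
    else
        counter = (counter + 1) & sz_m1;

    printf("decmprs_wqe_counter = %u\n", counter); /* linked-list: wraps to 0 */
    return 0;
}
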
2001 | diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | ||
2002 | index e83072da6272..690563099313 100644 | ||
2003 | --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | ||
2004 | +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | ||
2005 | @@ -500,30 +500,40 @@ static int | ||
2006 | mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, | ||
2007 | struct mlxsw_sp_prefix_usage *req_prefix_usage) | ||
2008 | { | ||
2009 | - struct mlxsw_sp_lpm_tree *lpm_tree; | ||
2010 | + struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree; | ||
2011 | + struct mlxsw_sp_lpm_tree *new_tree; | ||
2012 | + int err; | ||
2013 | |||
2014 | - if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, | ||
2015 | - &vr->lpm_tree->prefix_usage)) | ||
2016 | + if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage)) | ||
2017 | return 0; | ||
2018 | |||
2019 | - lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage, | ||
2020 | + new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage, | ||
2021 | vr->proto, false); | ||
2022 | - if (IS_ERR(lpm_tree)) { | ||
2023 | + if (IS_ERR(new_tree)) { | ||
2024 | /* We failed to get a tree according to the required | ||
2025 | * prefix usage. However, the current tree might be still good | ||
2026 | * for us if our requirement is subset of the prefixes used | ||
2027 | * in the tree. | ||
2028 | */ | ||
2029 | if (mlxsw_sp_prefix_usage_subset(req_prefix_usage, | ||
2030 | - &vr->lpm_tree->prefix_usage)) | ||
2031 | + &lpm_tree->prefix_usage)) | ||
2032 | return 0; | ||
2033 | - return PTR_ERR(lpm_tree); | ||
2034 | + return PTR_ERR(new_tree); | ||
2035 | } | ||
2036 | |||
2037 | - mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr); | ||
2038 | - mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree); | ||
2039 | + /* Prevent packet loss by overwriting existing binding */ | ||
2040 | + vr->lpm_tree = new_tree; | ||
2041 | + err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr); | ||
2042 | + if (err) | ||
2043 | + goto err_tree_bind; | ||
2044 | + mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); | ||
2045 | + | ||
2046 | + return 0; | ||
2047 | + | ||
2048 | +err_tree_bind: | ||
2049 | vr->lpm_tree = lpm_tree; | ||
2050 | - return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr); | ||
2051 | + mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree); | ||
2052 | + return err; | ||
2053 | } | ||
2054 | |||
2055 | static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, | ||
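
The reordering above is a make-before-break rebind: the new LPM tree is bound while traffic can still resolve against a valid binding, the old tree is released only after the bind succeeds, and on failure the old pointer is restored. A stubbed sketch of the control flow (helper names stand in for the mlxsw calls):

#include <stdio.h>

struct tree { const char *name; };

static int bind_tree(struct tree *t) { printf("bind %s\n", t->name); return 0; }
static void put_tree(struct tree *t) { printf("put %s\n", t->name); }

int main(void)
{
    struct tree old_tree = { "old" }, new_tree = { "new" };
    struct tree *bound = &old_tree;
    int err;

    bound = &new_tree;                  /* overwrite the existing binding */
    err = bind_tree(bound);
    if (err) {
        bound = &old_tree;              /* roll back to the old binding */
        put_tree(&new_tree);
        return err;
    }
    put_tree(&old_tree);                /* release old only after success */
    return 0;
}
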
2056 | diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c | ||
2057 | index 8b4822ad27cb..3c1f89ab0110 100644 | ||
2058 | --- a/drivers/net/geneve.c | ||
2059 | +++ b/drivers/net/geneve.c | ||
2060 | @@ -1039,16 +1039,22 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) | ||
2061 | { | ||
2062 | struct geneve_dev *geneve = netdev_priv(dev); | ||
2063 | struct ip_tunnel_info *info = NULL; | ||
2064 | + int err; | ||
2065 | |||
2066 | if (geneve->collect_md) | ||
2067 | info = skb_tunnel_info(skb); | ||
2068 | |||
2069 | + rcu_read_lock(); | ||
2070 | #if IS_ENABLED(CONFIG_IPV6) | ||
2071 | if ((info && ip_tunnel_info_af(info) == AF_INET6) || | ||
2072 | (!info && geneve->remote.sa.sa_family == AF_INET6)) | ||
2073 | - return geneve6_xmit_skb(skb, dev, info); | ||
2074 | + err = geneve6_xmit_skb(skb, dev, info); | ||
2075 | + else | ||
2076 | #endif | ||
2077 | - return geneve_xmit_skb(skb, dev, info); | ||
2078 | + err = geneve_xmit_skb(skb, dev, info); | ||
2079 | + rcu_read_unlock(); | ||
2080 | + | ||
2081 | + return err; | ||
2082 | } | ||
2083 | |||
2084 | static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict) | ||
2085 | diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c | ||
2086 | index f424b867f73e..201ffa5fe4f7 100644 | ||
2087 | --- a/drivers/net/phy/phy.c | ||
2088 | +++ b/drivers/net/phy/phy.c | ||
2089 | @@ -611,14 +611,18 @@ void phy_start_machine(struct phy_device *phydev) | ||
2090 | * phy_trigger_machine - trigger the state machine to run | ||
2091 | * | ||
2092 | * @phydev: the phy_device struct | ||
2093 | + * @sync: indicate whether we should wait for the workqueue cancelation | ||
2094 | * | ||
2095 | * Description: There has been a change in state which requires that the | ||
2096 | * state machine runs. | ||
2097 | */ | ||
2098 | |||
2099 | -static void phy_trigger_machine(struct phy_device *phydev) | ||
2100 | +static void phy_trigger_machine(struct phy_device *phydev, bool sync) | ||
2101 | { | ||
2102 | - cancel_delayed_work_sync(&phydev->state_queue); | ||
2103 | + if (sync) | ||
2104 | + cancel_delayed_work_sync(&phydev->state_queue); | ||
2105 | + else | ||
2106 | + cancel_delayed_work(&phydev->state_queue); | ||
2107 | queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0); | ||
2108 | } | ||
2109 | |||
2110 | @@ -655,7 +659,7 @@ static void phy_error(struct phy_device *phydev) | ||
2111 | phydev->state = PHY_HALTED; | ||
2112 | mutex_unlock(&phydev->lock); | ||
2113 | |||
2114 | - phy_trigger_machine(phydev); | ||
2115 | + phy_trigger_machine(phydev, false); | ||
2116 | } | ||
2117 | |||
2118 | /** | ||
2119 | @@ -817,7 +821,7 @@ void phy_change(struct work_struct *work) | ||
2120 | } | ||
2121 | |||
2122 | /* reschedule state queue work to run as soon as possible */ | ||
2123 | - phy_trigger_machine(phydev); | ||
2124 | + phy_trigger_machine(phydev, true); | ||
2125 | return; | ||
2126 | |||
2127 | ignore: | ||
2128 | @@ -907,7 +911,7 @@ void phy_start(struct phy_device *phydev) | ||
2129 | if (do_resume) | ||
2130 | phy_resume(phydev); | ||
2131 | |||
2132 | - phy_trigger_machine(phydev); | ||
2133 | + phy_trigger_machine(phydev, true); | ||
2134 | } | ||
2135 | EXPORT_SYMBOL(phy_start); | ||
2136 | |||
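
The new sync flag separates callers that can safely wait for the queued state-machine work to be cancelled from those that cannot; the patch passes false only from phy_error(), which can run in contexts where blocking on that cancellation is unsafe. A trivial restatement of the two paths, with strings standing in for the workqueue calls:

#include <stdbool.h>
#include <stdio.h>

static void phy_trigger(bool sync)
{
    if (sync)
        puts("cancel_delayed_work_sync()"); /* may sleep until cancelled */
    else
        puts("cancel_delayed_work()");      /* non-blocking */
    puts("queue_delayed_work(..., 0)");
}

int main(void)
{
    phy_trigger(false); /* phy_error(): must not block */
    phy_trigger(true);  /* phy_start() / phy_change() */
    return 0;
}
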
2137 | diff --git a/drivers/net/tun.c b/drivers/net/tun.c | ||
2138 | index b31aca8146bb..a931b73393c8 100644 | ||
2139 | --- a/drivers/net/tun.c | ||
2140 | +++ b/drivers/net/tun.c | ||
2141 | @@ -819,7 +819,18 @@ static void tun_net_uninit(struct net_device *dev) | ||
2142 | /* Net device open. */ | ||
2143 | static int tun_net_open(struct net_device *dev) | ||
2144 | { | ||
2145 | + struct tun_struct *tun = netdev_priv(dev); | ||
2146 | + int i; | ||
2147 | + | ||
2148 | netif_tx_start_all_queues(dev); | ||
2149 | + | ||
2150 | + for (i = 0; i < tun->numqueues; i++) { | ||
2151 | + struct tun_file *tfile; | ||
2152 | + | ||
2153 | + tfile = rtnl_dereference(tun->tfiles[i]); | ||
2154 | + tfile->socket.sk->sk_write_space(tfile->socket.sk); | ||
2155 | + } | ||
2156 | + | ||
2157 | return 0; | ||
2158 | } | ||
2159 | |||
2160 | @@ -1116,9 +1127,10 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait) | ||
2161 | if (!skb_array_empty(&tfile->tx_array)) | ||
2162 | mask |= POLLIN | POLLRDNORM; | ||
2163 | |||
2164 | - if (sock_writeable(sk) || | ||
2165 | - (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && | ||
2166 | - sock_writeable(sk))) | ||
2167 | + if (tun->dev->flags & IFF_UP && | ||
2168 | + (sock_writeable(sk) || | ||
2169 | + (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && | ||
2170 | + sock_writeable(sk)))) | ||
2171 | mask |= POLLOUT | POLLWRNORM; | ||
2172 | |||
2173 | if (tun->dev->reg_state != NETREG_REGISTERED) | ||
2174 | diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c | ||
2175 | index 95cf1d844781..bc744acabf98 100644 | ||
2176 | --- a/drivers/net/vrf.c | ||
2177 | +++ b/drivers/net/vrf.c | ||
2178 | @@ -346,6 +346,7 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev) | ||
2179 | |||
2180 | static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) | ||
2181 | { | ||
2182 | + int len = skb->len; | ||
2183 | netdev_tx_t ret = is_ip_tx_frame(skb, dev); | ||
2184 | |||
2185 | if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { | ||
2186 | @@ -353,7 +354,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) | ||
2187 | |||
2188 | u64_stats_update_begin(&dstats->syncp); | ||
2189 | dstats->tx_pkts++; | ||
2190 | - dstats->tx_bytes += skb->len; | ||
2191 | + dstats->tx_bytes += len; | ||
2192 | u64_stats_update_end(&dstats->syncp); | ||
2193 | } else { | ||
2194 | this_cpu_inc(dev->dstats->tx_drps); | ||
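
Sampling skb->len before is_ip_tx_frame() matters because the transmit path may consume and free the skb, making the later read a use-after-free. The same pattern, reduced to a stand-in buffer type:

#include <stdio.h>
#include <stdlib.h>

struct buf { int len; };

/* Stand-in for is_ip_tx_frame(): consumes and frees the buffer. */
static int xmit(struct buf *b) { free(b); return 0; }

int main(void)
{
    struct buf *skb = malloc(sizeof(*skb));
    if (!skb)
        return 1;
    skb->len = 1500;

    int len = skb->len;         /* sample the length before the handoff */
    if (xmit(skb) == 0)
        printf("tx_bytes += %d\n", len); /* skb->len here would be a UAF */
    return 0;
}
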
2195 | diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c | ||
2196 | index d4f495b41bd4..3c4c2cf6d444 100644 | ||
2197 | --- a/drivers/net/vxlan.c | ||
2198 | +++ b/drivers/net/vxlan.c | ||
2199 | @@ -1942,7 +1942,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | ||
2200 | const struct iphdr *old_iph; | ||
2201 | union vxlan_addr *dst; | ||
2202 | union vxlan_addr remote_ip, local_ip; | ||
2203 | - union vxlan_addr *src; | ||
2204 | struct vxlan_metadata _md; | ||
2205 | struct vxlan_metadata *md = &_md; | ||
2206 | __be16 src_port = 0, dst_port; | ||
2207 | @@ -1956,11 +1955,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | ||
2208 | |||
2209 | info = skb_tunnel_info(skb); | ||
2210 | |||
2211 | + rcu_read_lock(); | ||
2212 | if (rdst) { | ||
2213 | dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; | ||
2214 | vni = rdst->remote_vni; | ||
2215 | dst = &rdst->remote_ip; | ||
2216 | - src = &vxlan->cfg.saddr; | ||
2217 | + local_ip = vxlan->cfg.saddr; | ||
2218 | dst_cache = &rdst->dst_cache; | ||
2219 | } else { | ||
2220 | if (!info) { | ||
2221 | @@ -1979,7 +1979,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | ||
2222 | local_ip.sin6.sin6_addr = info->key.u.ipv6.src; | ||
2223 | } | ||
2224 | dst = &remote_ip; | ||
2225 | - src = &local_ip; | ||
2226 | dst_cache = &info->dst_cache; | ||
2227 | } | ||
2228 | |||
2229 | @@ -1987,7 +1986,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | ||
2230 | if (did_rsc) { | ||
2231 | /* short-circuited back to local bridge */ | ||
2232 | vxlan_encap_bypass(skb, vxlan, vxlan); | ||
2233 | - return; | ||
2234 | + goto out_unlock; | ||
2235 | } | ||
2236 | goto drop; | ||
2237 | } | ||
2238 | @@ -2028,7 +2027,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | ||
2239 | rt = vxlan_get_route(vxlan, skb, | ||
2240 | rdst ? rdst->remote_ifindex : 0, tos, | ||
2241 | dst->sin.sin_addr.s_addr, | ||
2242 | - &src->sin.sin_addr.s_addr, | ||
2243 | + &local_ip.sin.sin_addr.s_addr, | ||
2244 | dst_cache, info); | ||
2245 | if (IS_ERR(rt)) { | ||
2246 | netdev_dbg(dev, "no route to %pI4\n", | ||
2247 | @@ -2056,7 +2055,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | ||
2248 | if (!dst_vxlan) | ||
2249 | goto tx_error; | ||
2250 | vxlan_encap_bypass(skb, vxlan, dst_vxlan); | ||
2251 | - return; | ||
2252 | + goto out_unlock; | ||
2253 | } | ||
2254 | |||
2255 | if (!info) | ||
2256 | @@ -2071,7 +2070,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | ||
2257 | if (err < 0) | ||
2258 | goto xmit_tx_error; | ||
2259 | |||
2260 | - udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr, | ||
2261 | + udp_tunnel_xmit_skb(rt, sk, skb, local_ip.sin.sin_addr.s_addr, | ||
2262 | dst->sin.sin_addr.s_addr, tos, ttl, df, | ||
2263 | src_port, dst_port, xnet, !udp_sum); | ||
2264 | #if IS_ENABLED(CONFIG_IPV6) | ||
2265 | @@ -2087,7 +2086,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | ||
2266 | ndst = vxlan6_get_route(vxlan, skb, | ||
2267 | rdst ? rdst->remote_ifindex : 0, tos, | ||
2268 | label, &dst->sin6.sin6_addr, | ||
2269 | - &src->sin6.sin6_addr, | ||
2270 | + &local_ip.sin6.sin6_addr, | ||
2271 | dst_cache, info); | ||
2272 | if (IS_ERR(ndst)) { | ||
2273 | netdev_dbg(dev, "no route to %pI6\n", | ||
2274 | @@ -2117,7 +2116,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | ||
2275 | if (!dst_vxlan) | ||
2276 | goto tx_error; | ||
2277 | vxlan_encap_bypass(skb, vxlan, dst_vxlan); | ||
2278 | - return; | ||
2279 | + goto out_unlock; | ||
2280 | } | ||
2281 | |||
2282 | if (!info) | ||
2283 | @@ -2131,15 +2130,16 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | ||
2284 | if (err < 0) { | ||
2285 | dst_release(ndst); | ||
2286 | dev->stats.tx_errors++; | ||
2287 | - return; | ||
2288 | + goto out_unlock; | ||
2289 | } | ||
2290 | udp_tunnel6_xmit_skb(ndst, sk, skb, dev, | ||
2291 | - &src->sin6.sin6_addr, | ||
2292 | + &local_ip.sin6.sin6_addr, | ||
2293 | &dst->sin6.sin6_addr, tos, ttl, | ||
2294 | label, src_port, dst_port, !udp_sum); | ||
2295 | #endif | ||
2296 | } | ||
2297 | - | ||
2298 | +out_unlock: | ||
2299 | + rcu_read_unlock(); | ||
2300 | return; | ||
2301 | |||
2302 | drop: | ||
2303 | @@ -2155,6 +2155,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | ||
2304 | dev->stats.tx_errors++; | ||
2305 | tx_free: | ||
2306 | dev_kfree_skb(skb); | ||
2307 | + rcu_read_unlock(); | ||
2308 | } | ||
2309 | |||
2310 | /* Transmit local packets over Vxlan | ||
2311 | @@ -2637,7 +2638,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) | ||
2312 | |||
2313 | if (data[IFLA_VXLAN_ID]) { | ||
2314 | __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]); | ||
2315 | - if (id >= VXLAN_VID_MASK) | ||
2316 | + if (id >= VXLAN_N_VID) | ||
2317 | return -ERANGE; | ||
2318 | } | ||
2319 | |||
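
The one-line VXLAN change fixes an off-by-one: VNIs are 24-bit, so the valid range is 0 .. VXLAN_N_VID - 1, and the old test against VXLAN_VID_MASK rejected the maximum VNI. A check of both predicates, assuming the 4.9-era definitions (VXLAN_VID_MASK == VXLAN_N_VID - 1):

#include <stdio.h>

#define VXLAN_N_VID    (1u << 24)
#define VXLAN_VID_MASK (VXLAN_N_VID - 1)

int main(void)
{
    unsigned id = 0xffffff;     /* maximum valid VNI */

    printf("old check rejects: %d\n", id >= VXLAN_VID_MASK); /* 1: bug */
    printf("new check rejects: %d\n", id >= VXLAN_N_VID);    /* 0: ok  */
    return 0;
}
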
2320 | diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c | ||
2321 | index e30f05c8517f..47227820406d 100644 | ||
2322 | --- a/drivers/pci/iov.c | ||
2323 | +++ b/drivers/pci/iov.c | ||
2324 | @@ -306,13 +306,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn) | ||
2325 | return rc; | ||
2326 | } | ||
2327 | |||
2328 | - pci_iov_set_numvfs(dev, nr_virtfn); | ||
2329 | - iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE; | ||
2330 | - pci_cfg_access_lock(dev); | ||
2331 | - pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); | ||
2332 | - msleep(100); | ||
2333 | - pci_cfg_access_unlock(dev); | ||
2334 | - | ||
2335 | iov->initial_VFs = initial; | ||
2336 | if (nr_virtfn < initial) | ||
2337 | initial = nr_virtfn; | ||
2338 | @@ -323,6 +316,13 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn) | ||
2339 | goto err_pcibios; | ||
2340 | } | ||
2341 | |||
2342 | + pci_iov_set_numvfs(dev, nr_virtfn); | ||
2343 | + iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE; | ||
2344 | + pci_cfg_access_lock(dev); | ||
2345 | + pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); | ||
2346 | + msleep(100); | ||
2347 | + pci_cfg_access_unlock(dev); | ||
2348 | + | ||
2349 | for (i = 0; i < initial; i++) { | ||
2350 | rc = pci_iov_add_virtfn(dev, i, 0); | ||
2351 | if (rc) | ||
2352 | @@ -554,21 +554,61 @@ void pci_iov_release(struct pci_dev *dev) | ||
2353 | } | ||
2354 | |||
2355 | /** | ||
2356 | - * pci_iov_resource_bar - get position of the SR-IOV BAR | ||
2357 | + * pci_iov_update_resource - update a VF BAR | ||
2358 | * @dev: the PCI device | ||
2359 | * @resno: the resource number | ||
2360 | * | ||
2361 | - * Returns position of the BAR encapsulated in the SR-IOV capability. | ||
2362 | + * Update a VF BAR in the SR-IOV capability of a PF. | ||
2363 | */ | ||
2364 | -int pci_iov_resource_bar(struct pci_dev *dev, int resno) | ||
2365 | +void pci_iov_update_resource(struct pci_dev *dev, int resno) | ||
2366 | { | ||
2367 | - if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END) | ||
2368 | - return 0; | ||
2369 | + struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL; | ||
2370 | + struct resource *res = dev->resource + resno; | ||
2371 | + int vf_bar = resno - PCI_IOV_RESOURCES; | ||
2372 | + struct pci_bus_region region; | ||
2373 | + u16 cmd; | ||
2374 | + u32 new; | ||
2375 | + int reg; | ||
2376 | + | ||
2377 | + /* | ||
2378 | + * The generic pci_restore_bars() path calls this for all devices, | ||
2379 | + * including VFs and non-SR-IOV devices. If this is not a PF, we | ||
2380 | + * have nothing to do. | ||
2381 | + */ | ||
2382 | + if (!iov) | ||
2383 | + return; | ||
2384 | + | ||
2385 | + pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd); | ||
2386 | + if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) { | ||
2387 | + dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n", | ||
2388 | + vf_bar, res); | ||
2389 | + return; | ||
2390 | + } | ||
2391 | + | ||
2392 | + /* | ||
2393 | + * Ignore unimplemented BARs, unused resource slots for 64-bit | ||
2394 | + * BARs, and non-movable resources, e.g., those described via | ||
2395 | + * Enhanced Allocation. | ||
2396 | + */ | ||
2397 | + if (!res->flags) | ||
2398 | + return; | ||
2399 | + | ||
2400 | + if (res->flags & IORESOURCE_UNSET) | ||
2401 | + return; | ||
2402 | + | ||
2403 | + if (res->flags & IORESOURCE_PCI_FIXED) | ||
2404 | + return; | ||
2405 | |||
2406 | - BUG_ON(!dev->is_physfn); | ||
2407 | + pcibios_resource_to_bus(dev->bus, ®ion, res); | ||
2408 | + new = region.start; | ||
2409 | + new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK; | ||
2410 | |||
2411 | - return dev->sriov->pos + PCI_SRIOV_BAR + | ||
2412 | - 4 * (resno - PCI_IOV_RESOURCES); | ||
2413 | + reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar; | ||
2414 | + pci_write_config_dword(dev, reg, new); | ||
2415 | + if (res->flags & IORESOURCE_MEM_64) { | ||
2416 | + new = region.start >> 16 >> 16; | ||
2417 | + pci_write_config_dword(dev, reg + 4, new); | ||
2418 | + } | ||
2419 | } | ||
2420 | |||
2421 | resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev, | ||
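
For a 64-bit VF BAR, the new helper writes the bus address as two dwords; the ">> 16 >> 16" idiom sidesteps undefined behaviour if the address type is ever only 32 bits wide, where a single shift by 32 would be UB. A sketch of the split with an illustrative address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t start = 0x2040000000ULL;   /* example bus address */
    uint32_t lo = (uint32_t)start;      /* written to the BAR dword */
    uint32_t hi = start >> 16 >> 16;    /* upper dword, UB-safe shift */

    printf("BAR   = %#010x\n", lo);     /* 0x40000000 */
    printf("BAR+4 = %#010x\n", hi);     /* 0x00000020 */
    return 0;
}
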
2422 | diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c | ||
2423 | index eda6a7cf0e54..6922964e3dff 100644 | ||
2424 | --- a/drivers/pci/pci.c | ||
2425 | +++ b/drivers/pci/pci.c | ||
2426 | @@ -564,10 +564,6 @@ static void pci_restore_bars(struct pci_dev *dev) | ||
2427 | { | ||
2428 | int i; | ||
2429 | |||
2430 | - /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */ | ||
2431 | - if (dev->is_virtfn) | ||
2432 | - return; | ||
2433 | - | ||
2434 | for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) | ||
2435 | pci_update_resource(dev, i); | ||
2436 | } | ||
2437 | @@ -4835,36 +4831,6 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags) | ||
2438 | } | ||
2439 | EXPORT_SYMBOL(pci_select_bars); | ||
2440 | |||
2441 | -/** | ||
2442 | - * pci_resource_bar - get position of the BAR associated with a resource | ||
2443 | - * @dev: the PCI device | ||
2444 | - * @resno: the resource number | ||
2445 | - * @type: the BAR type to be filled in | ||
2446 | - * | ||
2447 | - * Returns BAR position in config space, or 0 if the BAR is invalid. | ||
2448 | - */ | ||
2449 | -int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) | ||
2450 | -{ | ||
2451 | - int reg; | ||
2452 | - | ||
2453 | - if (resno < PCI_ROM_RESOURCE) { | ||
2454 | - *type = pci_bar_unknown; | ||
2455 | - return PCI_BASE_ADDRESS_0 + 4 * resno; | ||
2456 | - } else if (resno == PCI_ROM_RESOURCE) { | ||
2457 | - *type = pci_bar_mem32; | ||
2458 | - return dev->rom_base_reg; | ||
2459 | - } else if (resno < PCI_BRIDGE_RESOURCES) { | ||
2460 | - /* device specific resource */ | ||
2461 | - *type = pci_bar_unknown; | ||
2462 | - reg = pci_iov_resource_bar(dev, resno); | ||
2463 | - if (reg) | ||
2464 | - return reg; | ||
2465 | - } | ||
2466 | - | ||
2467 | - dev_err(&dev->dev, "BAR %d: invalid resource\n", resno); | ||
2468 | - return 0; | ||
2469 | -} | ||
2470 | - | ||
2471 | /* Some architectures require additional programming to enable VGA */ | ||
2472 | static arch_set_vga_state_t arch_set_vga_state; | ||
2473 | |||
2474 | diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h | ||
2475 | index 451856210e18..a5d37f6a9fb5 100644 | ||
2476 | --- a/drivers/pci/pci.h | ||
2477 | +++ b/drivers/pci/pci.h | ||
2478 | @@ -245,7 +245,6 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl, | ||
2479 | int pci_setup_device(struct pci_dev *dev); | ||
2480 | int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | ||
2481 | struct resource *res, unsigned int reg); | ||
2482 | -int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type); | ||
2483 | void pci_configure_ari(struct pci_dev *dev); | ||
2484 | void __pci_bus_size_bridges(struct pci_bus *bus, | ||
2485 | struct list_head *realloc_head); | ||
2486 | @@ -289,7 +288,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev) | ||
2487 | #ifdef CONFIG_PCI_IOV | ||
2488 | int pci_iov_init(struct pci_dev *dev); | ||
2489 | void pci_iov_release(struct pci_dev *dev); | ||
2490 | -int pci_iov_resource_bar(struct pci_dev *dev, int resno); | ||
2491 | +void pci_iov_update_resource(struct pci_dev *dev, int resno); | ||
2492 | resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno); | ||
2493 | void pci_restore_iov_state(struct pci_dev *dev); | ||
2494 | int pci_iov_bus_range(struct pci_bus *bus); | ||
2495 | @@ -303,10 +302,6 @@ static inline void pci_iov_release(struct pci_dev *dev) | ||
2496 | |||
2497 | { | ||
2498 | } | ||
2499 | -static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno) | ||
2500 | -{ | ||
2501 | - return 0; | ||
2502 | -} | ||
2503 | static inline void pci_restore_iov_state(struct pci_dev *dev) | ||
2504 | { | ||
2505 | } | ||
2506 | diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c | ||
2507 | index 300770cdc084..d266d800f246 100644 | ||
2508 | --- a/drivers/pci/probe.c | ||
2509 | +++ b/drivers/pci/probe.c | ||
2510 | @@ -227,7 +227,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | ||
2511 | mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK; | ||
2512 | } | ||
2513 | } else { | ||
2514 | - res->flags |= (l & IORESOURCE_ROM_ENABLE); | ||
2515 | + if (l & PCI_ROM_ADDRESS_ENABLE) | ||
2516 | + res->flags |= IORESOURCE_ROM_ENABLE; | ||
2517 | l64 = l & PCI_ROM_ADDRESS_MASK; | ||
2518 | sz64 = sz & PCI_ROM_ADDRESS_MASK; | ||
2519 | mask64 = (u32)PCI_ROM_ADDRESS_MASK; | ||
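The one-line probe.c change stops aliasing the config-space bit with the resource flag. Both happen to be bit 0 in the 4.9 headers, which is the only reason `res->flags |= (l & IORESOURCE_ROM_ENABLE)` ever worked; the explicit test keeps the two bit namespaces independent. A self-contained sketch, with the constant values copied from the 4.9 headers:

    #include <stdint.h>

    #define PCI_ROM_ADDRESS_ENABLE 0x01       /* bit in the ROM BAR register */
    #define IORESOURCE_ROM_ENABLE  (1 << 0)   /* bit in struct resource flags */

    static unsigned long rom_flags_from_bar(uint32_t l)
    {
            unsigned long flags = 0;

            if (l & PCI_ROM_ADDRESS_ENABLE)   /* translate, never alias */
                    flags |= IORESOURCE_ROM_ENABLE;
            return flags;
    }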
2520 | diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c | ||
2521 | index 06663d391b39..b6edb187d160 100644 | ||
2522 | --- a/drivers/pci/rom.c | ||
2523 | +++ b/drivers/pci/rom.c | ||
2524 | @@ -35,6 +35,11 @@ int pci_enable_rom(struct pci_dev *pdev) | ||
2525 | if (res->flags & IORESOURCE_ROM_SHADOW) | ||
2526 | return 0; | ||
2527 | |||
2528 | + /* | ||
2529 | + * Ideally pci_update_resource() would update the ROM BAR address, | ||
2530 | + * and we would only set the enable bit here. But apparently some | ||
2531 | + * devices have buggy ROM BARs that read as zero when disabled. | ||
2532 | + */ | ||
2533 | pcibios_resource_to_bus(pdev->bus, ®ion, res); | ||
2534 | pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); | ||
2535 | rom_addr &= ~PCI_ROM_ADDRESS_MASK; | ||
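The new comment explains why pci_enable_rom() rewrites the whole BAR instead of only flipping the enable bit: a disabled ROM BAR can read back as zero on some devices, so the cached resource address must be written along with the bit. A sketch of that read-modify-write, assuming hypothetical cfg_read32()/cfg_write32() in place of pci_read/write_config_dword():

    #include <stdint.h>

    #define PCI_ROM_ADDRESS_MASK   0xfffff800u
    #define PCI_ROM_ADDRESS_ENABLE 0x01

    uint32_t cfg_read32(int reg);             /* hypothetical accessors, */
    void cfg_write32(int reg, uint32_t val);  /* declarations only       */

    static void enable_rom_sketch(int rom_reg, uint32_t rom_start)
    {
            uint32_t rom_addr = cfg_read32(rom_reg);

            rom_addr &= ~PCI_ROM_ADDRESS_MASK;               /* keep low bits */
            rom_addr |= rom_start | PCI_ROM_ADDRESS_ENABLE;  /* address + enable */
            cfg_write32(rom_reg, rom_addr);
    }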
2536 | diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c | ||
2537 | index 9526e341988b..4bc589ee78d0 100644 | ||
2538 | --- a/drivers/pci/setup-res.c | ||
2539 | +++ b/drivers/pci/setup-res.c | ||
2540 | @@ -25,21 +25,18 @@ | ||
2541 | #include <linux/slab.h> | ||
2542 | #include "pci.h" | ||
2543 | |||
2544 | - | ||
2545 | -void pci_update_resource(struct pci_dev *dev, int resno) | ||
2546 | +static void pci_std_update_resource(struct pci_dev *dev, int resno) | ||
2547 | { | ||
2548 | struct pci_bus_region region; | ||
2549 | bool disable; | ||
2550 | u16 cmd; | ||
2551 | u32 new, check, mask; | ||
2552 | int reg; | ||
2553 | - enum pci_bar_type type; | ||
2554 | struct resource *res = dev->resource + resno; | ||
2555 | |||
2556 | - if (dev->is_virtfn) { | ||
2557 | - dev_warn(&dev->dev, "can't update VF BAR%d\n", resno); | ||
2558 | + /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */ | ||
2559 | + if (dev->is_virtfn) | ||
2560 | return; | ||
2561 | - } | ||
2562 | |||
2563 | /* | ||
2564 | * Ignore resources for unimplemented BARs and unused resource slots | ||
2565 | @@ -60,21 +57,34 @@ void pci_update_resource(struct pci_dev *dev, int resno) | ||
2566 | return; | ||
2567 | |||
2568 | pcibios_resource_to_bus(dev->bus, ®ion, res); | ||
2569 | + new = region.start; | ||
2570 | |||
2571 | - new = region.start | (res->flags & PCI_REGION_FLAG_MASK); | ||
2572 | - if (res->flags & IORESOURCE_IO) | ||
2573 | + if (res->flags & IORESOURCE_IO) { | ||
2574 | mask = (u32)PCI_BASE_ADDRESS_IO_MASK; | ||
2575 | - else | ||
2576 | + new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK; | ||
2577 | + } else if (resno == PCI_ROM_RESOURCE) { | ||
2578 | + mask = (u32)PCI_ROM_ADDRESS_MASK; | ||
2579 | + } else { | ||
2580 | mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; | ||
2581 | + new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK; | ||
2582 | + } | ||
2583 | |||
2584 | - reg = pci_resource_bar(dev, resno, &type); | ||
2585 | - if (!reg) | ||
2586 | - return; | ||
2587 | - if (type != pci_bar_unknown) { | ||
2588 | + if (resno < PCI_ROM_RESOURCE) { | ||
2589 | + reg = PCI_BASE_ADDRESS_0 + 4 * resno; | ||
2590 | + } else if (resno == PCI_ROM_RESOURCE) { | ||
2591 | + | ||
2592 | + /* | ||
2593 | + * Apparently some Matrox devices have ROM BARs that read | ||
2594 | + * as zero when disabled, so don't update ROM BARs unless | ||
2595 | + * they're enabled. See https://lkml.org/lkml/2005/8/30/138. | ||
2596 | + */ | ||
2597 | if (!(res->flags & IORESOURCE_ROM_ENABLE)) | ||
2598 | return; | ||
2599 | + | ||
2600 | + reg = dev->rom_base_reg; | ||
2601 | new |= PCI_ROM_ADDRESS_ENABLE; | ||
2602 | - } | ||
2603 | + } else | ||
2604 | + return; | ||
2605 | |||
2606 | /* | ||
2607 | * We can't update a 64-bit BAR atomically, so when possible, | ||
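The rewritten body above replaces the old blanket `region.start | (res->flags & PCI_REGION_FLAG_MASK)` with per-type handling: I/O BARs keep their two low attribute bits, memory BARs their four, and a ROM BAR contributes nothing here because only its enable bit is OR-ed in later. A compact sketch of the mask selection, constants per the 4.9 headers:

    #include <stdint.h>

    #define PCI_BASE_ADDRESS_IO_MASK  (~0x03u)   /* low 2 bits: attributes */
    #define PCI_BASE_ADDRESS_MEM_MASK (~0x0fu)   /* low 4 bits: attributes */

    static uint32_t bar_word(uint32_t start, unsigned long flags,
                             int is_io, int is_rom)
    {
            uint32_t new = start;

            if (is_io)
                    new |= flags & ~PCI_BASE_ADDRESS_IO_MASK;
            else if (!is_rom)
                    new |= flags & ~PCI_BASE_ADDRESS_MEM_MASK;
            /* ROM BARs: only PCI_ROM_ADDRESS_ENABLE gets OR-ed in, and
             * only when the resource is actually enabled */
            return new;
    }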
2608 | @@ -110,6 +120,16 @@ void pci_update_resource(struct pci_dev *dev, int resno) | ||
2609 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
2610 | } | ||
2611 | |||
2612 | +void pci_update_resource(struct pci_dev *dev, int resno) | ||
2613 | +{ | ||
2614 | + if (resno <= PCI_ROM_RESOURCE) | ||
2615 | + pci_std_update_resource(dev, resno); | ||
2616 | +#ifdef CONFIG_PCI_IOV | ||
2617 | + else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) | ||
2618 | + pci_iov_update_resource(dev, resno); | ||
2619 | +#endif | ||
2620 | +} | ||
2621 | + | ||
2622 | int pci_claim_resource(struct pci_dev *dev, int resource) | ||
2623 | { | ||
2624 | struct resource *res = &dev->resource[resource]; | ||
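pci_update_resource() is now a thin dispatcher keyed on the resource number. A sketch with enum values matching the 4.9 resource layout (0-5 standard BARs, 6 ROM, 7-12 VF BARs under CONFIG_PCI_IOV); the helper bodies are stubs:

    enum { ROM = 6, IOV_FIRST = 7, IOV_LAST = 12 };

    static void std_update(int resno) { (void)resno; /* write std/ROM BAR */ }
    static void iov_update(int resno) { (void)resno; /* write VF BAR */ }

    static void update_resource_sketch(int resno)
    {
            if (resno <= ROM)
                    std_update(resno);   /* BAR0..BAR5 and the ROM BAR */
            else if (resno >= IOV_FIRST && resno <= IOV_LAST)
                    iov_update(resno);   /* VF BARs in the SR-IOV capability */
            /* bridge windows and the rest are not BARs: nothing to write */
    }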
2625 | diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c | ||
2626 | index ed92fb09fc8e..76b802cf2f0b 100644 | ||
2627 | --- a/drivers/s390/crypto/ap_bus.c | ||
2628 | +++ b/drivers/s390/crypto/ap_bus.c | ||
2629 | @@ -1712,6 +1712,9 @@ static void ap_scan_bus(struct work_struct *unused) | ||
2630 | ap_dev->queue_depth = queue_depth; | ||
2631 | ap_dev->raw_hwtype = device_type; | ||
2632 | ap_dev->device_type = device_type; | ||
2633 | + /* CEX6 toleration: map to CEX5 */ | ||
2634 | + if (device_type == AP_DEVICE_TYPE_CEX6) | ||
2635 | + ap_dev->device_type = AP_DEVICE_TYPE_CEX5; | ||
2636 | ap_dev->functions = device_functions; | ||
2637 | spin_lock_init(&ap_dev->lock); | ||
2638 | INIT_LIST_HEAD(&ap_dev->pendingq); | ||
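The ap_bus hunks implement a toleration policy: a card type newer than the driver knows (CEX6, type 12) is driven as the closest known type (CEX5), while raw_hwtype preserves what the hardware actually reported. A minimal sketch of the pattern:

    #define AP_DEVICE_TYPE_CEX5 11
    #define AP_DEVICE_TYPE_CEX6 12

    struct ap_dev_sketch {
            int raw_hwtype;      /* what the hardware reported */
            int device_type;     /* what the driver treats it as */
    };

    static void set_device_type(struct ap_dev_sketch *ap, int hwtype)
    {
            ap->raw_hwtype = hwtype;
            ap->device_type = hwtype;
            if (hwtype == AP_DEVICE_TYPE_CEX6)   /* tolerate as CEX5 */
                    ap->device_type = AP_DEVICE_TYPE_CEX5;
    }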
2639 | diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h | ||
2640 | index d7fdf5c024d7..fd66d2c450d5 100644 | ||
2641 | --- a/drivers/s390/crypto/ap_bus.h | ||
2642 | +++ b/drivers/s390/crypto/ap_bus.h | ||
2643 | @@ -105,6 +105,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr) | ||
2644 | #define AP_DEVICE_TYPE_CEX3C 9 | ||
2645 | #define AP_DEVICE_TYPE_CEX4 10 | ||
2646 | #define AP_DEVICE_TYPE_CEX5 11 | ||
2647 | +#define AP_DEVICE_TYPE_CEX6 12 | ||
2648 | |||
2649 | /* | ||
2650 | * Known function facilities | ||
2651 | diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | ||
2652 | index 91dfd58b175d..c4fe95a25621 100644 | ||
2653 | --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | ||
2654 | +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | ||
2655 | @@ -22,7 +22,7 @@ | ||
2656 | * | ||
2657 | ****************************************************************************/ | ||
2658 | |||
2659 | -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
2660 | +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
2661 | |||
2662 | #include <linux/module.h> | ||
2663 | #include <linux/kernel.h> | ||
2664 | @@ -82,7 +82,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd, | ||
2665 | } | ||
2666 | } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { | ||
2667 | if (se_cmd->data_direction == DMA_TO_DEVICE) { | ||
2668 | - /* residual data from an overflow write */ | ||
2669 | + /* residual data from an overflow write */ | ||
2670 | rsp->flags = SRP_RSP_FLAG_DOOVER; | ||
2671 | rsp->data_out_res_cnt = cpu_to_be32(residual_count); | ||
2672 | } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { | ||
2673 | @@ -102,7 +102,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd, | ||
2674 | * and the function returns TRUE. | ||
2675 | * | ||
2676 | * EXECUTION ENVIRONMENT: | ||
2677 | - * Interrupt or Process environment | ||
2678 | + * Interrupt or Process environment | ||
2679 | */ | ||
2680 | static bool connection_broken(struct scsi_info *vscsi) | ||
2681 | { | ||
2682 | @@ -325,7 +325,7 @@ static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask, | ||
2683 | } | ||
2684 | |||
2685 | /** | ||
2686 | - * ibmvscsis_send_init_message() - send initialize message to the client | ||
2687 | + * ibmvscsis_send_init_message() - send initialize message to the client | ||
2688 | * @vscsi: Pointer to our adapter structure | ||
2689 | * @format: Which Init Message format to send | ||
2690 | * | ||
2691 | @@ -383,13 +383,13 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format) | ||
2692 | vscsi->cmd_q.base_addr); | ||
2693 | if (crq) { | ||
2694 | *format = (uint)(crq->format); | ||
2695 | - rc = ERROR; | ||
2696 | + rc = ERROR; | ||
2697 | crq->valid = INVALIDATE_CMD_RESP_EL; | ||
2698 | dma_rmb(); | ||
2699 | } | ||
2700 | } else { | ||
2701 | *format = (uint)(crq->format); | ||
2702 | - rc = ERROR; | ||
2703 | + rc = ERROR; | ||
2704 | crq->valid = INVALIDATE_CMD_RESP_EL; | ||
2705 | dma_rmb(); | ||
2706 | } | ||
2707 | @@ -398,166 +398,6 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format) | ||
2708 | } | ||
2709 | |||
2710 | /** | ||
2711 | - * ibmvscsis_establish_new_q() - Establish new CRQ queue | ||
2712 | - * @vscsi: Pointer to our adapter structure | ||
2713 | - * @new_state: New state being established after resetting the queue | ||
2714 | - * | ||
2715 | - * Must be called with interrupt lock held. | ||
2716 | - */ | ||
2717 | -static long ibmvscsis_establish_new_q(struct scsi_info *vscsi, uint new_state) | ||
2718 | -{ | ||
2719 | - long rc = ADAPT_SUCCESS; | ||
2720 | - uint format; | ||
2721 | - | ||
2722 | - vscsi->flags &= PRESERVE_FLAG_FIELDS; | ||
2723 | - vscsi->rsp_q_timer.timer_pops = 0; | ||
2724 | - vscsi->debit = 0; | ||
2725 | - vscsi->credit = 0; | ||
2726 | - | ||
2727 | - rc = vio_enable_interrupts(vscsi->dma_dev); | ||
2728 | - if (rc) { | ||
2729 | - pr_warn("reset_queue: failed to enable interrupts, rc %ld\n", | ||
2730 | - rc); | ||
2731 | - return rc; | ||
2732 | - } | ||
2733 | - | ||
2734 | - rc = ibmvscsis_check_init_msg(vscsi, &format); | ||
2735 | - if (rc) { | ||
2736 | - dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n", | ||
2737 | - rc); | ||
2738 | - return rc; | ||
2739 | - } | ||
2740 | - | ||
2741 | - if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) { | ||
2742 | - rc = ibmvscsis_send_init_message(vscsi, INIT_MSG); | ||
2743 | - switch (rc) { | ||
2744 | - case H_SUCCESS: | ||
2745 | - case H_DROPPED: | ||
2746 | - case H_CLOSED: | ||
2747 | - rc = ADAPT_SUCCESS; | ||
2748 | - break; | ||
2749 | - | ||
2750 | - case H_PARAMETER: | ||
2751 | - case H_HARDWARE: | ||
2752 | - break; | ||
2753 | - | ||
2754 | - default: | ||
2755 | - vscsi->state = UNDEFINED; | ||
2756 | - rc = H_HARDWARE; | ||
2757 | - break; | ||
2758 | - } | ||
2759 | - } | ||
2760 | - | ||
2761 | - return rc; | ||
2762 | -} | ||
2763 | - | ||
2764 | -/** | ||
2765 | - * ibmvscsis_reset_queue() - Reset CRQ Queue | ||
2766 | - * @vscsi: Pointer to our adapter structure | ||
2767 | - * @new_state: New state to establish after resetting the queue | ||
2768 | - * | ||
2769 | - * This function calls h_free_q and then calls h_reg_q and does all | ||
2770 | - * of the bookkeeping to get us back to where we can communicate. | ||
2771 | - * | ||
2772 | - * Actually, we don't always call h_free_crq. A problem was discovered | ||
2773 | - * where one partition would close and reopen his queue, which would | ||
2774 | - * cause his partner to get a transport event, which would cause him to | ||
2775 | - * close and reopen his queue, which would cause the original partition | ||
2776 | - * to get a transport event, etc., etc. To prevent this, we don't | ||
2777 | - * actually close our queue if the client initiated the reset, (i.e. | ||
2778 | - * either we got a transport event or we have detected that the client's | ||
2779 | - * queue is gone) | ||
2780 | - * | ||
2781 | - * EXECUTION ENVIRONMENT: | ||
2782 | - * Process environment, called with interrupt lock held | ||
2783 | - */ | ||
2784 | -static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state) | ||
2785 | -{ | ||
2786 | - int bytes; | ||
2787 | - long rc = ADAPT_SUCCESS; | ||
2788 | - | ||
2789 | - pr_debug("reset_queue: flags 0x%x\n", vscsi->flags); | ||
2790 | - | ||
2791 | - /* don't reset, the client did it for us */ | ||
2792 | - if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) { | ||
2793 | - vscsi->flags &= PRESERVE_FLAG_FIELDS; | ||
2794 | - vscsi->rsp_q_timer.timer_pops = 0; | ||
2795 | - vscsi->debit = 0; | ||
2796 | - vscsi->credit = 0; | ||
2797 | - vscsi->state = new_state; | ||
2798 | - vio_enable_interrupts(vscsi->dma_dev); | ||
2799 | - } else { | ||
2800 | - rc = ibmvscsis_free_command_q(vscsi); | ||
2801 | - if (rc == ADAPT_SUCCESS) { | ||
2802 | - vscsi->state = new_state; | ||
2803 | - | ||
2804 | - bytes = vscsi->cmd_q.size * PAGE_SIZE; | ||
2805 | - rc = h_reg_crq(vscsi->dds.unit_id, | ||
2806 | - vscsi->cmd_q.crq_token, bytes); | ||
2807 | - if (rc == H_CLOSED || rc == H_SUCCESS) { | ||
2808 | - rc = ibmvscsis_establish_new_q(vscsi, | ||
2809 | - new_state); | ||
2810 | - } | ||
2811 | - | ||
2812 | - if (rc != ADAPT_SUCCESS) { | ||
2813 | - pr_debug("reset_queue: reg_crq rc %ld\n", rc); | ||
2814 | - | ||
2815 | - vscsi->state = ERR_DISCONNECTED; | ||
2816 | - vscsi->flags |= RESPONSE_Q_DOWN; | ||
2817 | - ibmvscsis_free_command_q(vscsi); | ||
2818 | - } | ||
2819 | - } else { | ||
2820 | - vscsi->state = ERR_DISCONNECTED; | ||
2821 | - vscsi->flags |= RESPONSE_Q_DOWN; | ||
2822 | - } | ||
2823 | - } | ||
2824 | -} | ||
2825 | - | ||
2826 | -/** | ||
2827 | - * ibmvscsis_free_cmd_resources() - Free command resources | ||
2828 | - * @vscsi: Pointer to our adapter structure | ||
2829 | - * @cmd: Command which is no longer in use | ||
2830 | - * | ||
2831 | - * Must be called with interrupt lock held. | ||
2832 | - */ | ||
2833 | -static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi, | ||
2834 | - struct ibmvscsis_cmd *cmd) | ||
2835 | -{ | ||
2836 | - struct iu_entry *iue = cmd->iue; | ||
2837 | - | ||
2838 | - switch (cmd->type) { | ||
2839 | - case TASK_MANAGEMENT: | ||
2840 | - case SCSI_CDB: | ||
2841 | - /* | ||
2842 | - * When the queue goes down this value is cleared, so it | ||
2843 | - * cannot be cleared in this general purpose function. | ||
2844 | - */ | ||
2845 | - if (vscsi->debit) | ||
2846 | - vscsi->debit -= 1; | ||
2847 | - break; | ||
2848 | - case ADAPTER_MAD: | ||
2849 | - vscsi->flags &= ~PROCESSING_MAD; | ||
2850 | - break; | ||
2851 | - case UNSET_TYPE: | ||
2852 | - break; | ||
2853 | - default: | ||
2854 | - dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n", | ||
2855 | - cmd->type); | ||
2856 | - break; | ||
2857 | - } | ||
2858 | - | ||
2859 | - cmd->iue = NULL; | ||
2860 | - list_add_tail(&cmd->list, &vscsi->free_cmd); | ||
2861 | - srp_iu_put(iue); | ||
2862 | - | ||
2863 | - if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) && | ||
2864 | - list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) { | ||
2865 | - vscsi->flags &= ~WAIT_FOR_IDLE; | ||
2866 | - complete(&vscsi->wait_idle); | ||
2867 | - } | ||
2868 | -} | ||
2869 | - | ||
2870 | -/** | ||
2871 | * ibmvscsis_disconnect() - Helper function to disconnect | ||
2872 | * @work: Pointer to work_struct, gives access to our adapter structure | ||
2873 | * | ||
2874 | @@ -576,7 +416,6 @@ static void ibmvscsis_disconnect(struct work_struct *work) | ||
2875 | proc_work); | ||
2876 | u16 new_state; | ||
2877 | bool wait_idle = false; | ||
2878 | - long rc = ADAPT_SUCCESS; | ||
2879 | |||
2880 | spin_lock_bh(&vscsi->intr_lock); | ||
2881 | new_state = vscsi->new_state; | ||
2882 | @@ -590,7 +429,7 @@ static void ibmvscsis_disconnect(struct work_struct *work) | ||
2883 | * should transition to the new state | ||
2884 | */ | ||
2885 | switch (vscsi->state) { | ||
2886 | - /* Should never be called while in this state. */ | ||
2887 | + /* Should never be called while in this state. */ | ||
2888 | case NO_QUEUE: | ||
2889 | /* | ||
2890 | * Can never transition from this state; | ||
2891 | @@ -629,30 +468,24 @@ static void ibmvscsis_disconnect(struct work_struct *work) | ||
2892 | vscsi->state = new_state; | ||
2893 | break; | ||
2894 | |||
2895 | - /* | ||
2896 | - * If this is a transition into an error state. | ||
2897 | - * a client is attempting to establish a connection | ||
2898 | - * and has violated the RPA protocol. | ||
2899 | - * There can be nothing pending on the adapter although | ||
2900 | - * there can be requests in the command queue. | ||
2901 | - */ | ||
2902 | case WAIT_ENABLED: | ||
2903 | - case PART_UP_WAIT_ENAB: | ||
2904 | switch (new_state) { | ||
2905 | - case ERR_DISCONNECT: | ||
2906 | - vscsi->flags |= RESPONSE_Q_DOWN; | ||
2907 | + case UNCONFIGURING: | ||
2908 | vscsi->state = new_state; | ||
2909 | + vscsi->flags |= RESPONSE_Q_DOWN; | ||
2910 | vscsi->flags &= ~(SCHEDULE_DISCONNECT | | ||
2911 | DISCONNECT_SCHEDULED); | ||
2912 | - ibmvscsis_free_command_q(vscsi); | ||
2913 | - break; | ||
2914 | - case ERR_DISCONNECT_RECONNECT: | ||
2915 | - ibmvscsis_reset_queue(vscsi, WAIT_ENABLED); | ||
2916 | + dma_rmb(); | ||
2917 | + if (vscsi->flags & CFG_SLEEPING) { | ||
2918 | + vscsi->flags &= ~CFG_SLEEPING; | ||
2919 | + complete(&vscsi->unconfig); | ||
2920 | + } | ||
2921 | break; | ||
2922 | |||
2923 | /* should never happen */ | ||
2924 | + case ERR_DISCONNECT: | ||
2925 | + case ERR_DISCONNECT_RECONNECT: | ||
2926 | case WAIT_IDLE: | ||
2927 | - rc = ERROR; | ||
2928 | dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n", | ||
2929 | vscsi->state); | ||
2930 | break; | ||
2931 | @@ -661,6 +494,13 @@ static void ibmvscsis_disconnect(struct work_struct *work) | ||
2932 | |||
2933 | case WAIT_IDLE: | ||
2934 | switch (new_state) { | ||
2935 | + case UNCONFIGURING: | ||
2936 | + vscsi->flags |= RESPONSE_Q_DOWN; | ||
2937 | + vscsi->state = new_state; | ||
2938 | + vscsi->flags &= ~(SCHEDULE_DISCONNECT | | ||
2939 | + DISCONNECT_SCHEDULED); | ||
2940 | + ibmvscsis_free_command_q(vscsi); | ||
2941 | + break; | ||
2942 | case ERR_DISCONNECT: | ||
2943 | case ERR_DISCONNECT_RECONNECT: | ||
2944 | vscsi->state = new_state; | ||
2945 | @@ -765,45 +605,348 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state, | ||
2946 | else | ||
2947 | state = vscsi->state; | ||
2948 | |||
2949 | - switch (state) { | ||
2950 | - case NO_QUEUE: | ||
2951 | - case UNCONFIGURING: | ||
2952 | - break; | ||
2953 | + switch (state) { | ||
2954 | + case NO_QUEUE: | ||
2955 | + case UNCONFIGURING: | ||
2956 | + break; | ||
2957 | + | ||
2958 | + case ERR_DISCONNECTED: | ||
2959 | + case ERR_DISCONNECT: | ||
2960 | + case UNDEFINED: | ||
2961 | + if (new_state == UNCONFIGURING) | ||
2962 | + vscsi->new_state = new_state; | ||
2963 | + break; | ||
2964 | + | ||
2965 | + case ERR_DISCONNECT_RECONNECT: | ||
2966 | + switch (new_state) { | ||
2967 | + case UNCONFIGURING: | ||
2968 | + case ERR_DISCONNECT: | ||
2969 | + vscsi->new_state = new_state; | ||
2970 | + break; | ||
2971 | + default: | ||
2972 | + break; | ||
2973 | + } | ||
2974 | + break; | ||
2975 | + | ||
2976 | + case WAIT_ENABLED: | ||
2977 | + case WAIT_IDLE: | ||
2978 | + case WAIT_CONNECTION: | ||
2979 | + case CONNECTED: | ||
2980 | + case SRP_PROCESSING: | ||
2981 | + vscsi->new_state = new_state; | ||
2982 | + break; | ||
2983 | + | ||
2984 | + default: | ||
2985 | + break; | ||
2986 | + } | ||
2987 | + } | ||
2988 | + | ||
2989 | + pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n", | ||
2990 | + vscsi->flags, vscsi->new_state); | ||
2991 | +} | ||
2992 | + | ||
2993 | +/** | ||
2994 | + * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message | ||
2995 | + * @vscsi: Pointer to our adapter structure | ||
2996 | + * | ||
2997 | + * Must be called with interrupt lock held. | ||
2998 | + */ | ||
2999 | +static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi) | ||
3000 | +{ | ||
3001 | + long rc = ADAPT_SUCCESS; | ||
3002 | + | ||
3003 | + switch (vscsi->state) { | ||
3004 | + case NO_QUEUE: | ||
3005 | + case ERR_DISCONNECT: | ||
3006 | + case ERR_DISCONNECT_RECONNECT: | ||
3007 | + case ERR_DISCONNECTED: | ||
3008 | + case UNCONFIGURING: | ||
3009 | + case UNDEFINED: | ||
3010 | + rc = ERROR; | ||
3011 | + break; | ||
3012 | + | ||
3013 | + case WAIT_CONNECTION: | ||
3014 | + vscsi->state = CONNECTED; | ||
3015 | + break; | ||
3016 | + | ||
3017 | + case WAIT_IDLE: | ||
3018 | + case SRP_PROCESSING: | ||
3019 | + case CONNECTED: | ||
3020 | + case WAIT_ENABLED: | ||
3021 | + default: | ||
3022 | + rc = ERROR; | ||
3023 | + dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n", | ||
3024 | + vscsi->state); | ||
3025 | + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); | ||
3026 | + break; | ||
3027 | + } | ||
3028 | + | ||
3029 | + return rc; | ||
3030 | +} | ||
3031 | + | ||
3032 | +/** | ||
3033 | + * ibmvscsis_handle_init_msg() - Respond to an Init Message | ||
3034 | + * @vscsi: Pointer to our adapter structure | ||
3035 | + * | ||
3036 | + * Must be called with interrupt lock held. | ||
3037 | + */ | ||
3038 | +static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi) | ||
3039 | +{ | ||
3040 | + long rc = ADAPT_SUCCESS; | ||
3041 | + | ||
3042 | + switch (vscsi->state) { | ||
3043 | + case WAIT_CONNECTION: | ||
3044 | + rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG); | ||
3045 | + switch (rc) { | ||
3046 | + case H_SUCCESS: | ||
3047 | + vscsi->state = CONNECTED; | ||
3048 | + break; | ||
3049 | + | ||
3050 | + case H_PARAMETER: | ||
3051 | + dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", | ||
3052 | + rc); | ||
3053 | + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); | ||
3054 | + break; | ||
3055 | + | ||
3056 | + case H_DROPPED: | ||
3057 | + dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", | ||
3058 | + rc); | ||
3059 | + rc = ERROR; | ||
3060 | + ibmvscsis_post_disconnect(vscsi, | ||
3061 | + ERR_DISCONNECT_RECONNECT, 0); | ||
3062 | + break; | ||
3063 | + | ||
3064 | + case H_CLOSED: | ||
3065 | + pr_warn("init_msg: failed to send, rc %ld\n", rc); | ||
3066 | + rc = 0; | ||
3067 | + break; | ||
3068 | + } | ||
3069 | + break; | ||
3070 | + | ||
3071 | + case UNDEFINED: | ||
3072 | + rc = ERROR; | ||
3073 | + break; | ||
3074 | + | ||
3075 | + case UNCONFIGURING: | ||
3076 | + break; | ||
3077 | + | ||
3078 | + case WAIT_ENABLED: | ||
3079 | + case CONNECTED: | ||
3080 | + case SRP_PROCESSING: | ||
3081 | + case WAIT_IDLE: | ||
3082 | + case NO_QUEUE: | ||
3083 | + case ERR_DISCONNECT: | ||
3084 | + case ERR_DISCONNECT_RECONNECT: | ||
3085 | + case ERR_DISCONNECTED: | ||
3086 | + default: | ||
3087 | + rc = ERROR; | ||
3088 | + dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n", | ||
3089 | + vscsi->state); | ||
3090 | + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); | ||
3091 | + break; | ||
3092 | + } | ||
3093 | + | ||
3094 | + return rc; | ||
3095 | +} | ||
3096 | + | ||
3097 | +/** | ||
3098 | + * ibmvscsis_init_msg() - Respond to an init message | ||
3099 | + * @vscsi: Pointer to our adapter structure | ||
3100 | + * @crq: Pointer to CRQ element containing the Init Message | ||
3101 | + * | ||
3102 | + * EXECUTION ENVIRONMENT: | ||
3103 | + * Interrupt, interrupt lock held | ||
3104 | + */ | ||
3105 | +static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq) | ||
3106 | +{ | ||
3107 | + long rc = ADAPT_SUCCESS; | ||
3108 | + | ||
3109 | + pr_debug("init_msg: state 0x%hx\n", vscsi->state); | ||
3110 | + | ||
3111 | + rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO, | ||
3112 | + (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0, | ||
3113 | + 0); | ||
3114 | + if (rc == H_SUCCESS) { | ||
3115 | + vscsi->client_data.partition_number = | ||
3116 | + be64_to_cpu(*(u64 *)vscsi->map_buf); | ||
3117 | + pr_debug("init_msg, part num %d\n", | ||
3118 | + vscsi->client_data.partition_number); | ||
3119 | + } else { | ||
3120 | + pr_debug("init_msg h_vioctl rc %ld\n", rc); | ||
3121 | + rc = ADAPT_SUCCESS; | ||
3122 | + } | ||
3123 | + | ||
3124 | + if (crq->format == INIT_MSG) { | ||
3125 | + rc = ibmvscsis_handle_init_msg(vscsi); | ||
3126 | + } else if (crq->format == INIT_COMPLETE_MSG) { | ||
3127 | + rc = ibmvscsis_handle_init_compl_msg(vscsi); | ||
3128 | + } else { | ||
3129 | + rc = ERROR; | ||
3130 | + dev_err(&vscsi->dev, "init_msg: invalid format %d\n", | ||
3131 | + (uint)crq->format); | ||
3132 | + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); | ||
3133 | + } | ||
3134 | + | ||
3135 | + return rc; | ||
3136 | +} | ||
3137 | + | ||
3138 | +/** | ||
3139 | + * ibmvscsis_establish_new_q() - Establish new CRQ queue | ||
3140 | + * @vscsi: Pointer to our adapter structure | ||
3141 | + * | ||
3142 | + * Must be called with interrupt lock held. | ||
3143 | + */ | ||
3144 | +static long ibmvscsis_establish_new_q(struct scsi_info *vscsi) | ||
3145 | +{ | ||
3146 | + long rc = ADAPT_SUCCESS; | ||
3147 | + uint format; | ||
3148 | + | ||
3149 | + vscsi->flags &= PRESERVE_FLAG_FIELDS; | ||
3150 | + vscsi->rsp_q_timer.timer_pops = 0; | ||
3151 | + vscsi->debit = 0; | ||
3152 | + vscsi->credit = 0; | ||
3153 | + | ||
3154 | + rc = vio_enable_interrupts(vscsi->dma_dev); | ||
3155 | + if (rc) { | ||
3156 | + pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n", | ||
3157 | + rc); | ||
3158 | + return rc; | ||
3159 | + } | ||
3160 | + | ||
3161 | + rc = ibmvscsis_check_init_msg(vscsi, &format); | ||
3162 | + if (rc) { | ||
3163 | + dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n", | ||
3164 | + rc); | ||
3165 | + return rc; | ||
3166 | + } | ||
3167 | + | ||
3168 | + if (format == UNUSED_FORMAT) { | ||
3169 | + rc = ibmvscsis_send_init_message(vscsi, INIT_MSG); | ||
3170 | + switch (rc) { | ||
3171 | + case H_SUCCESS: | ||
3172 | + case H_DROPPED: | ||
3173 | + case H_CLOSED: | ||
3174 | + rc = ADAPT_SUCCESS; | ||
3175 | + break; | ||
3176 | + | ||
3177 | + case H_PARAMETER: | ||
3178 | + case H_HARDWARE: | ||
3179 | + break; | ||
3180 | + | ||
3181 | + default: | ||
3182 | + vscsi->state = UNDEFINED; | ||
3183 | + rc = H_HARDWARE; | ||
3184 | + break; | ||
3185 | + } | ||
3186 | + } else if (format == INIT_MSG) { | ||
3187 | + rc = ibmvscsis_handle_init_msg(vscsi); | ||
3188 | + } | ||
3189 | + | ||
3190 | + return rc; | ||
3191 | +} | ||
3192 | + | ||
3193 | +/** | ||
3194 | + * ibmvscsis_reset_queue() - Reset CRQ Queue | ||
3195 | + * @vscsi: Pointer to our adapter structure | ||
3196 | + * | ||
3197 | + * This function calls h_free_q and then calls h_reg_q and does all | ||
3198 | + * of the bookkeeping to get us back to where we can communicate. | ||
3199 | + * | ||
3200 | + * Actually, we don't always call h_free_crq. A problem was discovered | ||
3201 | + * where one partition would close and reopen his queue, which would | ||
3202 | + * cause his partner to get a transport event, which would cause him to | ||
3203 | + * close and reopen his queue, which would cause the original partition | ||
3204 | + * to get a transport event, etc., etc. To prevent this, we don't | ||
3205 | + * actually close our queue if the client initiated the reset, (i.e. | ||
3206 | + * either we got a transport event or we have detected that the client's | ||
3207 | + * queue is gone) | ||
3208 | + * | ||
3209 | + * EXECUTION ENVIRONMENT: | ||
3210 | + * Process environment, called with interrupt lock held | ||
3211 | + */ | ||
3212 | +static void ibmvscsis_reset_queue(struct scsi_info *vscsi) | ||
3213 | +{ | ||
3214 | + int bytes; | ||
3215 | + long rc = ADAPT_SUCCESS; | ||
3216 | + | ||
3217 | + pr_debug("reset_queue: flags 0x%x\n", vscsi->flags); | ||
3218 | + | ||
3219 | + /* don't reset, the client did it for us */ | ||
3220 | + if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) { | ||
3221 | + vscsi->flags &= PRESERVE_FLAG_FIELDS; | ||
3222 | + vscsi->rsp_q_timer.timer_pops = 0; | ||
3223 | + vscsi->debit = 0; | ||
3224 | + vscsi->credit = 0; | ||
3225 | + vscsi->state = WAIT_CONNECTION; | ||
3226 | + vio_enable_interrupts(vscsi->dma_dev); | ||
3227 | + } else { | ||
3228 | + rc = ibmvscsis_free_command_q(vscsi); | ||
3229 | + if (rc == ADAPT_SUCCESS) { | ||
3230 | + vscsi->state = WAIT_CONNECTION; | ||
3231 | + | ||
3232 | + bytes = vscsi->cmd_q.size * PAGE_SIZE; | ||
3233 | + rc = h_reg_crq(vscsi->dds.unit_id, | ||
3234 | + vscsi->cmd_q.crq_token, bytes); | ||
3235 | + if (rc == H_CLOSED || rc == H_SUCCESS) { | ||
3236 | + rc = ibmvscsis_establish_new_q(vscsi); | ||
3237 | + } | ||
3238 | |||
3239 | - case ERR_DISCONNECTED: | ||
3240 | - case ERR_DISCONNECT: | ||
3241 | - case UNDEFINED: | ||
3242 | - if (new_state == UNCONFIGURING) | ||
3243 | - vscsi->new_state = new_state; | ||
3244 | - break; | ||
3245 | + if (rc != ADAPT_SUCCESS) { | ||
3246 | + pr_debug("reset_queue: reg_crq rc %ld\n", rc); | ||
3247 | |||
3248 | - case ERR_DISCONNECT_RECONNECT: | ||
3249 | - switch (new_state) { | ||
3250 | - case UNCONFIGURING: | ||
3251 | - case ERR_DISCONNECT: | ||
3252 | - vscsi->new_state = new_state; | ||
3253 | - break; | ||
3254 | - default: | ||
3255 | - break; | ||
3256 | + vscsi->state = ERR_DISCONNECTED; | ||
3257 | + vscsi->flags |= RESPONSE_Q_DOWN; | ||
3258 | + ibmvscsis_free_command_q(vscsi); | ||
3259 | } | ||
3260 | - break; | ||
3261 | + } else { | ||
3262 | + vscsi->state = ERR_DISCONNECTED; | ||
3263 | + vscsi->flags |= RESPONSE_Q_DOWN; | ||
3264 | + } | ||
3265 | + } | ||
3266 | +} | ||
3267 | |||
3268 | - case WAIT_ENABLED: | ||
3269 | - case PART_UP_WAIT_ENAB: | ||
3270 | - case WAIT_IDLE: | ||
3271 | - case WAIT_CONNECTION: | ||
3272 | - case CONNECTED: | ||
3273 | - case SRP_PROCESSING: | ||
3274 | - vscsi->new_state = new_state; | ||
3275 | - break; | ||
3276 | +/** | ||
3277 | + * ibmvscsis_free_cmd_resources() - Free command resources | ||
3278 | + * @vscsi: Pointer to our adapter structure | ||
3279 | + * @cmd: Command which is no longer in use | ||
3280 | + * | ||
3281 | + * Must be called with interrupt lock held. | ||
3282 | + */ | ||
3283 | +static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi, | ||
3284 | + struct ibmvscsis_cmd *cmd) | ||
3285 | +{ | ||
3286 | + struct iu_entry *iue = cmd->iue; | ||
3287 | |||
3288 | - default: | ||
3289 | - break; | ||
3290 | - } | ||
3291 | + switch (cmd->type) { | ||
3292 | + case TASK_MANAGEMENT: | ||
3293 | + case SCSI_CDB: | ||
3294 | + /* | ||
3295 | + * When the queue goes down this value is cleared, so it | ||
3296 | + * cannot be cleared in this general purpose function. | ||
3297 | + */ | ||
3298 | + if (vscsi->debit) | ||
3299 | + vscsi->debit -= 1; | ||
3300 | + break; | ||
3301 | + case ADAPTER_MAD: | ||
3302 | + vscsi->flags &= ~PROCESSING_MAD; | ||
3303 | + break; | ||
3304 | + case UNSET_TYPE: | ||
3305 | + break; | ||
3306 | + default: | ||
3307 | + dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n", | ||
3308 | + cmd->type); | ||
3309 | + break; | ||
3310 | } | ||
3311 | |||
3312 | - pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n", | ||
3313 | - vscsi->flags, vscsi->new_state); | ||
3314 | + cmd->iue = NULL; | ||
3315 | + list_add_tail(&cmd->list, &vscsi->free_cmd); | ||
3316 | + srp_iu_put(iue); | ||
3317 | + | ||
3318 | + if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) && | ||
3319 | + list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) { | ||
3320 | + vscsi->flags &= ~WAIT_FOR_IDLE; | ||
3321 | + complete(&vscsi->wait_idle); | ||
3322 | + } | ||
3323 | } | ||
3324 | |||
3325 | /** | ||
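This large hunk mostly relocates functions; the functional change is the removal of the PART_UP_WAIT_ENAB state. Init messages are now answered only from WAIT_CONNECTION, since (per the probe and enable hunks further down) the CRQ is no longer registered with the hypervisor before the target port is enabled. A paraphrased sketch of the surviving init-message transition, with illustrative enum values rather than the driver's real ones:

    enum vscsi_state { WAIT_CONNECTION, CONNECTED, UNDEFINED,
                       UNCONFIGURING, ERR_DISCONNECT_RECONNECT };

    /* roughly what ibmvscsis_handle_init_msg() reduces to */
    static enum vscsi_state on_init_msg(enum vscsi_state state)
    {
            switch (state) {
            case WAIT_CONNECTION:
                    /* send INIT_COMPLETE_MSG; on H_SUCCESS: */
                    return CONNECTED;
            case UNDEFINED:
            case UNCONFIGURING:
                    return state;                    /* ignore */
            default:
                    return ERR_DISCONNECT_RECONNECT; /* protocol violation */
            }
    }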
3326 | @@ -864,10 +1007,6 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi, | ||
3327 | TRANS_EVENT)); | ||
3328 | break; | ||
3329 | |||
3330 | - case PART_UP_WAIT_ENAB: | ||
3331 | - vscsi->state = WAIT_ENABLED; | ||
3332 | - break; | ||
3333 | - | ||
3334 | case SRP_PROCESSING: | ||
3335 | if ((vscsi->debit > 0) || | ||
3336 | !list_empty(&vscsi->schedule_q) || | ||
3337 | @@ -896,7 +1035,7 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi, | ||
3338 | } | ||
3339 | } | ||
3340 | |||
3341 | - rc = vscsi->flags & SCHEDULE_DISCONNECT; | ||
3342 | + rc = vscsi->flags & SCHEDULE_DISCONNECT; | ||
3343 | |||
3344 | pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n", | ||
3345 | vscsi->flags, vscsi->state, rc); | ||
3346 | @@ -1067,16 +1206,28 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi) | ||
3347 | free_qs = true; | ||
3348 | |||
3349 | switch (vscsi->state) { | ||
3350 | + case UNCONFIGURING: | ||
3351 | + ibmvscsis_free_command_q(vscsi); | ||
3352 | + dma_rmb(); | ||
3353 | + isync(); | ||
3354 | + if (vscsi->flags & CFG_SLEEPING) { | ||
3355 | + vscsi->flags &= ~CFG_SLEEPING; | ||
3356 | + complete(&vscsi->unconfig); | ||
3357 | + } | ||
3358 | + break; | ||
3359 | case ERR_DISCONNECT_RECONNECT: | ||
3360 | - ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION); | ||
3361 | + ibmvscsis_reset_queue(vscsi); | ||
3362 | pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags); | ||
3363 | break; | ||
3364 | |||
3365 | case ERR_DISCONNECT: | ||
3366 | ibmvscsis_free_command_q(vscsi); | ||
3367 | - vscsi->flags &= ~DISCONNECT_SCHEDULED; | ||
3368 | + vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED); | ||
3369 | vscsi->flags |= RESPONSE_Q_DOWN; | ||
3370 | - vscsi->state = ERR_DISCONNECTED; | ||
3371 | + if (vscsi->tport.enabled) | ||
3372 | + vscsi->state = ERR_DISCONNECTED; | ||
3373 | + else | ||
3374 | + vscsi->state = WAIT_ENABLED; | ||
3375 | pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n", | ||
3376 | vscsi->flags, vscsi->state); | ||
3377 | break; | ||
3378 | @@ -1221,7 +1372,7 @@ static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi, | ||
3379 | * @iue: Information Unit containing the Adapter Info MAD request | ||
3380 | * | ||
3381 | * EXECUTION ENVIRONMENT: | ||
3382 | - * Interrupt adpater lock is held | ||
3383 | + * Interrupt adapter lock is held | ||
3384 | */ | ||
3385 | static long ibmvscsis_adapter_info(struct scsi_info *vscsi, | ||
3386 | struct iu_entry *iue) | ||
3387 | @@ -1621,8 +1772,8 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi) | ||
3388 | be64_to_cpu(msg_hi), | ||
3389 | be64_to_cpu(cmd->rsp.tag)); | ||
3390 | |||
3391 | - pr_debug("send_messages: tag 0x%llx, rc %ld\n", | ||
3392 | - be64_to_cpu(cmd->rsp.tag), rc); | ||
3393 | + pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n", | ||
3394 | + cmd, be64_to_cpu(cmd->rsp.tag), rc); | ||
3395 | |||
3396 | /* if all ok free up the command element resources */ | ||
3397 | if (rc == H_SUCCESS) { | ||
3398 | @@ -1692,7 +1843,7 @@ static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi, | ||
3399 | * @crq: Pointer to the CRQ entry containing the MAD request | ||
3400 | * | ||
3401 | * EXECUTION ENVIRONMENT: | ||
3402 | - * Interrupt called with adapter lock held | ||
3403 | + * Interrupt, called with adapter lock held | ||
3404 | */ | ||
3405 | static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq) | ||
3406 | { | ||
3407 | @@ -1746,14 +1897,7 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq) | ||
3408 | |||
3409 | pr_debug("mad: type %d\n", be32_to_cpu(mad->type)); | ||
3410 | |||
3411 | - if (be16_to_cpu(mad->length) < 0) { | ||
3412 | - dev_err(&vscsi->dev, "mad: length is < 0\n"); | ||
3413 | - ibmvscsis_post_disconnect(vscsi, | ||
3414 | - ERR_DISCONNECT_RECONNECT, 0); | ||
3415 | - rc = SRP_VIOLATION; | ||
3416 | - } else { | ||
3417 | - rc = ibmvscsis_process_mad(vscsi, iue); | ||
3418 | - } | ||
3419 | + rc = ibmvscsis_process_mad(vscsi, iue); | ||
3420 | |||
3421 | pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status), | ||
3422 | rc); | ||
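The removed branch could never fire: mad->length is a big-endian u16 and be16_to_cpu() yields an unsigned value, so `< 0` is always false (after promotion to int the value is in 0..65535). A two-line demonstration:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t length = 0xffff;    /* largest possible wire value */

            assert(!(length < 0));       /* unsigned: never negative */
            return 0;
    }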
3423 | @@ -1865,7 +2009,7 @@ static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi, | ||
3424 | break; | ||
3425 | case H_PERMISSION: | ||
3426 | if (connection_broken(vscsi)) | ||
3427 | - flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED; | ||
3428 | + flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED; | ||
3429 | dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n", | ||
3430 | rc); | ||
3431 | ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, | ||
3432 | @@ -2090,248 +2234,98 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq) | ||
3433 | break; | ||
3434 | |||
3435 | case SRP_TSK_MGMT: | ||
3436 | - tsk = &vio_iu(iue)->srp.tsk_mgmt; | ||
3437 | - pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag, | ||
3438 | - tsk->tag); | ||
3439 | - cmd->rsp.tag = tsk->tag; | ||
3440 | - vscsi->debit += 1; | ||
3441 | - cmd->type = TASK_MANAGEMENT; | ||
3442 | - list_add_tail(&cmd->list, &vscsi->schedule_q); | ||
3443 | - queue_work(vscsi->work_q, &cmd->work); | ||
3444 | - break; | ||
3445 | - | ||
3446 | - case SRP_CMD: | ||
3447 | - pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag, | ||
3448 | - srp->tag); | ||
3449 | - cmd->rsp.tag = srp->tag; | ||
3450 | - vscsi->debit += 1; | ||
3451 | - cmd->type = SCSI_CDB; | ||
3452 | - /* | ||
3453 | - * We want to keep track of work waiting for | ||
3454 | - * the workqueue. | ||
3455 | - */ | ||
3456 | - list_add_tail(&cmd->list, &vscsi->schedule_q); | ||
3457 | - queue_work(vscsi->work_q, &cmd->work); | ||
3458 | - break; | ||
3459 | - | ||
3460 | - case SRP_I_LOGOUT: | ||
3461 | - rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq); | ||
3462 | - break; | ||
3463 | - | ||
3464 | - case SRP_CRED_RSP: | ||
3465 | - case SRP_AER_RSP: | ||
3466 | - default: | ||
3467 | - ibmvscsis_free_cmd_resources(vscsi, cmd); | ||
3468 | - dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n", | ||
3469 | - (uint)srp->opcode); | ||
3470 | - ibmvscsis_post_disconnect(vscsi, | ||
3471 | - ERR_DISCONNECT_RECONNECT, 0); | ||
3472 | - break; | ||
3473 | - } | ||
3474 | - } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) { | ||
3475 | - rc = ibmvscsis_srp_login(vscsi, cmd, crq); | ||
3476 | - } else { | ||
3477 | - ibmvscsis_free_cmd_resources(vscsi, cmd); | ||
3478 | - dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n", | ||
3479 | - vscsi->state); | ||
3480 | - ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); | ||
3481 | - } | ||
3482 | -} | ||
3483 | - | ||
3484 | -/** | ||
3485 | - * ibmvscsis_ping_response() - Respond to a ping request | ||
3486 | - * @vscsi: Pointer to our adapter structure | ||
3487 | - * | ||
3488 | - * Let the client know that the server is alive and waiting on | ||
3489 | - * its native I/O stack. | ||
3490 | - * If any type of error occurs from the call to queue a ping | ||
3491 | - * response then the client is either not accepting or receiving | ||
3492 | - * interrupts. Disconnect with an error. | ||
3493 | - * | ||
3494 | - * EXECUTION ENVIRONMENT: | ||
3495 | - * Interrupt, interrupt lock held | ||
3496 | - */ | ||
3497 | -static long ibmvscsis_ping_response(struct scsi_info *vscsi) | ||
3498 | -{ | ||
3499 | - struct viosrp_crq *crq; | ||
3500 | - u64 buffer[2] = { 0, 0 }; | ||
3501 | - long rc; | ||
3502 | - | ||
3503 | - crq = (struct viosrp_crq *)&buffer; | ||
3504 | - crq->valid = VALID_CMD_RESP_EL; | ||
3505 | - crq->format = (u8)MESSAGE_IN_CRQ; | ||
3506 | - crq->status = PING_RESPONSE; | ||
3507 | - | ||
3508 | - rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]), | ||
3509 | - cpu_to_be64(buffer[MSG_LOW])); | ||
3510 | - | ||
3511 | - switch (rc) { | ||
3512 | - case H_SUCCESS: | ||
3513 | - break; | ||
3514 | - case H_CLOSED: | ||
3515 | - vscsi->flags |= CLIENT_FAILED; | ||
3516 | - case H_DROPPED: | ||
3517 | - vscsi->flags |= RESPONSE_Q_DOWN; | ||
3518 | - case H_REMOTE_PARM: | ||
3519 | - dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n", | ||
3520 | - rc); | ||
3521 | - ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); | ||
3522 | - break; | ||
3523 | - default: | ||
3524 | - dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n", | ||
3525 | - rc); | ||
3526 | - ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); | ||
3527 | - break; | ||
3528 | - } | ||
3529 | - | ||
3530 | - return rc; | ||
3531 | -} | ||
3532 | - | ||
3533 | -/** | ||
3534 | - * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message | ||
3535 | - * @vscsi: Pointer to our adapter structure | ||
3536 | - * | ||
3537 | - * Must be called with interrupt lock held. | ||
3538 | - */ | ||
3539 | -static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi) | ||
3540 | -{ | ||
3541 | - long rc = ADAPT_SUCCESS; | ||
3542 | - | ||
3543 | - switch (vscsi->state) { | ||
3544 | - case NO_QUEUE: | ||
3545 | - case ERR_DISCONNECT: | ||
3546 | - case ERR_DISCONNECT_RECONNECT: | ||
3547 | - case ERR_DISCONNECTED: | ||
3548 | - case UNCONFIGURING: | ||
3549 | - case UNDEFINED: | ||
3550 | - rc = ERROR; | ||
3551 | - break; | ||
3552 | - | ||
3553 | - case WAIT_CONNECTION: | ||
3554 | - vscsi->state = CONNECTED; | ||
3555 | - break; | ||
3556 | - | ||
3557 | - case WAIT_IDLE: | ||
3558 | - case SRP_PROCESSING: | ||
3559 | - case CONNECTED: | ||
3560 | - case WAIT_ENABLED: | ||
3561 | - case PART_UP_WAIT_ENAB: | ||
3562 | - default: | ||
3563 | - rc = ERROR; | ||
3564 | - dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n", | ||
3565 | - vscsi->state); | ||
3566 | - ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); | ||
3567 | - break; | ||
3568 | - } | ||
3569 | - | ||
3570 | - return rc; | ||
3571 | -} | ||
3572 | - | ||
3573 | -/** | ||
3574 | - * ibmvscsis_handle_init_msg() - Respond to an Init Message | ||
3575 | - * @vscsi: Pointer to our adapter structure | ||
3576 | - * | ||
3577 | - * Must be called with interrupt lock held. | ||
3578 | - */ | ||
3579 | -static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi) | ||
3580 | -{ | ||
3581 | - long rc = ADAPT_SUCCESS; | ||
3582 | - | ||
3583 | - switch (vscsi->state) { | ||
3584 | - case WAIT_ENABLED: | ||
3585 | - vscsi->state = PART_UP_WAIT_ENAB; | ||
3586 | - break; | ||
3587 | + tsk = &vio_iu(iue)->srp.tsk_mgmt; | ||
3588 | + pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag, | ||
3589 | + tsk->tag); | ||
3590 | + cmd->rsp.tag = tsk->tag; | ||
3591 | + vscsi->debit += 1; | ||
3592 | + cmd->type = TASK_MANAGEMENT; | ||
3593 | + list_add_tail(&cmd->list, &vscsi->schedule_q); | ||
3594 | + queue_work(vscsi->work_q, &cmd->work); | ||
3595 | + break; | ||
3596 | |||
3597 | - case WAIT_CONNECTION: | ||
3598 | - rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG); | ||
3599 | - switch (rc) { | ||
3600 | - case H_SUCCESS: | ||
3601 | - vscsi->state = CONNECTED; | ||
3602 | + case SRP_CMD: | ||
3603 | + pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag, | ||
3604 | + srp->tag); | ||
3605 | + cmd->rsp.tag = srp->tag; | ||
3606 | + vscsi->debit += 1; | ||
3607 | + cmd->type = SCSI_CDB; | ||
3608 | + /* | ||
3609 | + * We want to keep track of work waiting for | ||
3610 | + * the workqueue. | ||
3611 | + */ | ||
3612 | + list_add_tail(&cmd->list, &vscsi->schedule_q); | ||
3613 | + queue_work(vscsi->work_q, &cmd->work); | ||
3614 | break; | ||
3615 | |||
3616 | - case H_PARAMETER: | ||
3617 | - dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", | ||
3618 | - rc); | ||
3619 | - ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); | ||
3620 | + case SRP_I_LOGOUT: | ||
3621 | + rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq); | ||
3622 | break; | ||
3623 | |||
3624 | - case H_DROPPED: | ||
3625 | - dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", | ||
3626 | - rc); | ||
3627 | - rc = ERROR; | ||
3628 | + case SRP_CRED_RSP: | ||
3629 | + case SRP_AER_RSP: | ||
3630 | + default: | ||
3631 | + ibmvscsis_free_cmd_resources(vscsi, cmd); | ||
3632 | + dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n", | ||
3633 | + (uint)srp->opcode); | ||
3634 | ibmvscsis_post_disconnect(vscsi, | ||
3635 | ERR_DISCONNECT_RECONNECT, 0); | ||
3636 | break; | ||
3637 | - | ||
3638 | - case H_CLOSED: | ||
3639 | - pr_warn("init_msg: failed to send, rc %ld\n", rc); | ||
3640 | - rc = 0; | ||
3641 | - break; | ||
3642 | } | ||
3643 | - break; | ||
3644 | - | ||
3645 | - case UNDEFINED: | ||
3646 | - rc = ERROR; | ||
3647 | - break; | ||
3648 | - | ||
3649 | - case UNCONFIGURING: | ||
3650 | - break; | ||
3651 | - | ||
3652 | - case PART_UP_WAIT_ENAB: | ||
3653 | - case CONNECTED: | ||
3654 | - case SRP_PROCESSING: | ||
3655 | - case WAIT_IDLE: | ||
3656 | - case NO_QUEUE: | ||
3657 | - case ERR_DISCONNECT: | ||
3658 | - case ERR_DISCONNECT_RECONNECT: | ||
3659 | - case ERR_DISCONNECTED: | ||
3660 | - default: | ||
3661 | - rc = ERROR; | ||
3662 | - dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n", | ||
3663 | + } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) { | ||
3664 | + rc = ibmvscsis_srp_login(vscsi, cmd, crq); | ||
3665 | + } else { | ||
3666 | + ibmvscsis_free_cmd_resources(vscsi, cmd); | ||
3667 | + dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n", | ||
3668 | vscsi->state); | ||
3669 | ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); | ||
3670 | - break; | ||
3671 | } | ||
3672 | - | ||
3673 | - return rc; | ||
3674 | } | ||
3675 | |||
3676 | /** | ||
3677 | - * ibmvscsis_init_msg() - Respond to an init message | ||
3678 | + * ibmvscsis_ping_response() - Respond to a ping request | ||
3679 | * @vscsi: Pointer to our adapter structure | ||
3680 | - * @crq: Pointer to CRQ element containing the Init Message | ||
3681 | + * | ||
3682 | + * Let the client know that the server is alive and waiting on | ||
3683 | + * its native I/O stack. | ||
3684 | + * If any type of error occurs from the call to queue a ping | ||
3685 | + * response then the client is either not accepting or receiving | ||
3686 | + * interrupts. Disconnect with an error. | ||
3687 | * | ||
3688 | * EXECUTION ENVIRONMENT: | ||
3689 | * Interrupt, interrupt lock held | ||
3690 | */ | ||
3691 | -static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq) | ||
3692 | +static long ibmvscsis_ping_response(struct scsi_info *vscsi) | ||
3693 | { | ||
3694 | - long rc = ADAPT_SUCCESS; | ||
3695 | + struct viosrp_crq *crq; | ||
3696 | + u64 buffer[2] = { 0, 0 }; | ||
3697 | + long rc; | ||
3698 | |||
3699 | - pr_debug("init_msg: state 0x%hx\n", vscsi->state); | ||
3700 | + crq = (struct viosrp_crq *)&buffer; | ||
3701 | + crq->valid = VALID_CMD_RESP_EL; | ||
3702 | + crq->format = (u8)MESSAGE_IN_CRQ; | ||
3703 | + crq->status = PING_RESPONSE; | ||
3704 | |||
3705 | - rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO, | ||
3706 | - (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0, | ||
3707 | - 0); | ||
3708 | - if (rc == H_SUCCESS) { | ||
3709 | - vscsi->client_data.partition_number = | ||
3710 | - be64_to_cpu(*(u64 *)vscsi->map_buf); | ||
3711 | - pr_debug("init_msg, part num %d\n", | ||
3712 | - vscsi->client_data.partition_number); | ||
3713 | - } else { | ||
3714 | - pr_debug("init_msg h_vioctl rc %ld\n", rc); | ||
3715 | - rc = ADAPT_SUCCESS; | ||
3716 | - } | ||
3717 | + rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]), | ||
3718 | + cpu_to_be64(buffer[MSG_LOW])); | ||
3719 | |||
3720 | - if (crq->format == INIT_MSG) { | ||
3721 | - rc = ibmvscsis_handle_init_msg(vscsi); | ||
3722 | - } else if (crq->format == INIT_COMPLETE_MSG) { | ||
3723 | - rc = ibmvscsis_handle_init_compl_msg(vscsi); | ||
3724 | - } else { | ||
3725 | - rc = ERROR; | ||
3726 | - dev_err(&vscsi->dev, "init_msg: invalid format %d\n", | ||
3727 | - (uint)crq->format); | ||
3728 | + switch (rc) { | ||
3729 | + case H_SUCCESS: | ||
3730 | + break; | ||
3731 | + case H_CLOSED: | ||
3732 | + vscsi->flags |= CLIENT_FAILED; | ||
3733 | + case H_DROPPED: | ||
3734 | + vscsi->flags |= RESPONSE_Q_DOWN; | ||
3735 | + case H_REMOTE_PARM: | ||
3736 | + dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n", | ||
3737 | + rc); | ||
3738 | ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); | ||
3739 | + break; | ||
3740 | + default: | ||
3741 | + dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n", | ||
3742 | + rc); | ||
3743 | + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); | ||
3744 | + break; | ||
3745 | } | ||
3746 | |||
3747 | return rc; | ||
3748 | @@ -2392,7 +2386,7 @@ static long ibmvscsis_parse_command(struct scsi_info *vscsi, | ||
3749 | break; | ||
3750 | |||
3751 | case VALID_TRANS_EVENT: | ||
3752 | - rc = ibmvscsis_trans_event(vscsi, crq); | ||
3753 | + rc = ibmvscsis_trans_event(vscsi, crq); | ||
3754 | break; | ||
3755 | |||
3756 | case VALID_INIT_MSG: | ||
3757 | @@ -2523,7 +2517,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi, | ||
3758 | dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n", | ||
3759 | srp->tag); | ||
3760 | goto fail; | ||
3761 | - return; | ||
3762 | } | ||
3763 | |||
3764 | cmd->rsp.sol_not = srp->sol_not; | ||
3765 | @@ -2560,6 +2553,10 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi, | ||
3766 | data_len, attr, dir, 0); | ||
3767 | if (rc) { | ||
3768 | dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc); | ||
3769 | + spin_lock_bh(&vscsi->intr_lock); | ||
3770 | + list_del(&cmd->list); | ||
3771 | + ibmvscsis_free_cmd_resources(vscsi, cmd); | ||
3772 | + spin_unlock_bh(&vscsi->intr_lock); | ||
3773 | goto fail; | ||
3774 | } | ||
3775 | return; | ||
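This hunk and the next fix the same leak: when target_submit_cmd() or target_submit_tmr() fails, the command is already on schedule_q and must be unlinked under the interrupt lock before its resources are released, or the queue would keep a dangling entry. A user-space analogue of the unlink-under-the-same-lock rule, with pthreads standing in for spin_lock_bh():

    #include <pthread.h>

    struct node { struct node *prev, *next; };

    static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

    static void undo_enqueue(struct node *n)
    {
            pthread_mutex_lock(&q_lock);    /* same lock as the enqueue */
            n->prev->next = n->next;        /* list_del() by hand */
            n->next->prev = n->prev;
            pthread_mutex_unlock(&q_lock);
            /* only now is it safe to release what n belongs to */
    }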
3776 | @@ -2639,6 +2636,9 @@ static void ibmvscsis_parse_task(struct scsi_info *vscsi, | ||
3777 | if (rc) { | ||
3778 | dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n", | ||
3779 | rc); | ||
3780 | + spin_lock_bh(&vscsi->intr_lock); | ||
3781 | + list_del(&cmd->list); | ||
3782 | + spin_unlock_bh(&vscsi->intr_lock); | ||
3783 | cmd->se_cmd.se_tmr_req->response = | ||
3784 | TMR_FUNCTION_REJECTED; | ||
3785 | } | ||
3786 | @@ -2787,36 +2787,6 @@ static irqreturn_t ibmvscsis_interrupt(int dummy, void *data) | ||
3787 | } | ||
3788 | |||
3789 | /** | ||
3790 | - * ibmvscsis_check_q() - Helper function to Check Init Message Valid | ||
3791 | - * @vscsi: Pointer to our adapter structure | ||
3792 | - * | ||
3793 | - * Checks if an initialize message was queued by the initiator | ||
3794 | - * while the timing window was open. This function is called from | ||
3795 | - * probe after the CRQ is created and interrupts are enabled. | ||
3796 | - * It would only be used by adapters who wait for some event before | ||
3797 | - * completing the init handshake with the client. For ibmvscsi, this | ||
3798 | - * event is waiting for the port to be enabled. | ||
3799 | - * | ||
3800 | - * EXECUTION ENVIRONMENT: | ||
3801 | - * Process level only, interrupt lock held | ||
3802 | - */ | ||
3803 | -static long ibmvscsis_check_q(struct scsi_info *vscsi) | ||
3804 | -{ | ||
3805 | - uint format; | ||
3806 | - long rc; | ||
3807 | - | ||
3808 | - rc = ibmvscsis_check_init_msg(vscsi, &format); | ||
3809 | - if (rc) | ||
3810 | - ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); | ||
3811 | - else if (format == UNUSED_FORMAT) | ||
3812 | - vscsi->state = WAIT_ENABLED; | ||
3813 | - else | ||
3814 | - vscsi->state = PART_UP_WAIT_ENAB; | ||
3815 | - | ||
3816 | - return rc; | ||
3817 | -} | ||
3818 | - | ||
3819 | -/** | ||
3820 | * ibmvscsis_enable_change_state() - Set new state based on enabled status | ||
3821 | * @vscsi: Pointer to our adapter structure | ||
3822 | * | ||
3823 | @@ -2827,77 +2797,19 @@ static long ibmvscsis_check_q(struct scsi_info *vscsi) | ||
3824 | */ | ||
3825 | static long ibmvscsis_enable_change_state(struct scsi_info *vscsi) | ||
3826 | { | ||
3827 | + int bytes; | ||
3828 | long rc = ADAPT_SUCCESS; | ||
3829 | |||
3830 | -handle_state_change: | ||
3831 | - switch (vscsi->state) { | ||
3832 | - case WAIT_ENABLED: | ||
3833 | - rc = ibmvscsis_send_init_message(vscsi, INIT_MSG); | ||
3834 | - switch (rc) { | ||
3835 | - case H_SUCCESS: | ||
3836 | - case H_DROPPED: | ||
3837 | - case H_CLOSED: | ||
3838 | - vscsi->state = WAIT_CONNECTION; | ||
3839 | - rc = ADAPT_SUCCESS; | ||
3840 | - break; | ||
3841 | - | ||
3842 | - case H_PARAMETER: | ||
3843 | - break; | ||
3844 | - | ||
3845 | - case H_HARDWARE: | ||
3846 | - break; | ||
3847 | - | ||
3848 | - default: | ||
3849 | - vscsi->state = UNDEFINED; | ||
3850 | - rc = H_HARDWARE; | ||
3851 | - break; | ||
3852 | - } | ||
3853 | - break; | ||
3854 | - case PART_UP_WAIT_ENAB: | ||
3855 | - rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG); | ||
3856 | - switch (rc) { | ||
3857 | - case H_SUCCESS: | ||
3858 | - vscsi->state = CONNECTED; | ||
3859 | - rc = ADAPT_SUCCESS; | ||
3860 | - break; | ||
3861 | - | ||
3862 | - case H_DROPPED: | ||
3863 | - case H_CLOSED: | ||
3864 | - vscsi->state = WAIT_ENABLED; | ||
3865 | - goto handle_state_change; | ||
3866 | - | ||
3867 | - case H_PARAMETER: | ||
3868 | - break; | ||
3869 | - | ||
3870 | - case H_HARDWARE: | ||
3871 | - break; | ||
3872 | - | ||
3873 | - default: | ||
3874 | - rc = H_HARDWARE; | ||
3875 | - break; | ||
3876 | - } | ||
3877 | - break; | ||
3878 | - | ||
3879 | - case WAIT_CONNECTION: | ||
3880 | - case WAIT_IDLE: | ||
3881 | - case SRP_PROCESSING: | ||
3882 | - case CONNECTED: | ||
3883 | - rc = ADAPT_SUCCESS; | ||
3884 | - break; | ||
3885 | - /* should not be able to get here */ | ||
3886 | - case UNCONFIGURING: | ||
3887 | - rc = ERROR; | ||
3888 | - vscsi->state = UNDEFINED; | ||
3889 | - break; | ||
3890 | + bytes = vscsi->cmd_q.size * PAGE_SIZE; | ||
3891 | + rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes); | ||
3892 | + if (rc == H_CLOSED || rc == H_SUCCESS) { | ||
3893 | + vscsi->state = WAIT_CONNECTION; | ||
3894 | + rc = ibmvscsis_establish_new_q(vscsi); | ||
3895 | + } | ||
3896 | |||
3897 | - /* driver should never allow this to happen */ | ||
3898 | - case ERR_DISCONNECT: | ||
3899 | - case ERR_DISCONNECT_RECONNECT: | ||
3900 | - default: | ||
3901 | - dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n", | ||
3902 | - vscsi->state); | ||
3903 | - rc = ADAPT_SUCCESS; | ||
3904 | - break; | ||
3905 | + if (rc != ADAPT_SUCCESS) { | ||
3906 | + vscsi->state = ERR_DISCONNECTED; | ||
3907 | + vscsi->flags |= RESPONSE_Q_DOWN; | ||
3908 | } | ||
3909 | |||
3910 | return rc; | ||
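ibmvscsis_enable_change_state() collapses from a two-state goto loop into a straight line: register the CRQ, then run the shared bring-up in ibmvscsis_establish_new_q(); any failure lands in a single error state. A sketch with stubbed calls and illustrative return codes (H_SUCCESS really is 0; the H_CLOSED value here is for shape only):

    enum { H_SUCCESS = 0, H_CLOSED = 2 };            /* illustrative */
    enum state { WAIT_CONNECTION, ERR_DISCONNECTED };

    long h_reg_crq_stub(void);                       /* hypothetical stubs, */
    long establish_new_q_stub(void);                 /* declarations only   */

    static long enable_sketch(enum state *state)
    {
            long rc = h_reg_crq_stub();              /* register queue first */

            if (rc == H_SUCCESS || rc == H_CLOSED) {
                    *state = WAIT_CONNECTION;
                    rc = establish_new_q_stub();     /* common bring-up */
            }
            if (rc != H_SUCCESS)
                    *state = ERR_DISCONNECTED;       /* single failure state */
            return rc;
    }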
3911 | @@ -2917,7 +2829,6 @@ static long ibmvscsis_enable_change_state(struct scsi_info *vscsi) | ||
3912 | */ | ||
3913 | static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds) | ||
3914 | { | ||
3915 | - long rc = 0; | ||
3916 | int pages; | ||
3917 | struct vio_dev *vdev = vscsi->dma_dev; | ||
3918 | |||
3919 | @@ -2941,22 +2852,7 @@ static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds) | ||
3920 | return -ENOMEM; | ||
3921 | } | ||
3922 | |||
3923 | - rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE); | ||
3924 | - if (rc) { | ||
3925 | - if (rc == H_CLOSED) { | ||
3926 | - vscsi->state = WAIT_ENABLED; | ||
3927 | - rc = 0; | ||
3928 | - } else { | ||
3929 | - dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token, | ||
3930 | - PAGE_SIZE, DMA_BIDIRECTIONAL); | ||
3931 | - free_page((unsigned long)vscsi->cmd_q.base_addr); | ||
3932 | - rc = -ENODEV; | ||
3933 | - } | ||
3934 | - } else { | ||
3935 | - vscsi->state = WAIT_ENABLED; | ||
3936 | - } | ||
3937 | - | ||
3938 | - return rc; | ||
3939 | + return 0; | ||
3940 | } | ||
3941 | |||
3942 | /** | ||
3943 | @@ -3271,7 +3167,7 @@ static void ibmvscsis_handle_crq(unsigned long data) | ||
3944 | /* | ||
3945 | * if we are in a path where we are waiting for all pending commands | ||
3946 | * to complete because we received a transport event and anything in | ||
3947 | - * the command queue is for a new connection, do nothing | ||
3948 | + * the command queue is for a new connection, do nothing | ||
3949 | */ | ||
3950 | if (TARGET_STOP(vscsi)) { | ||
3951 | vio_enable_interrupts(vscsi->dma_dev); | ||
3952 | @@ -3315,7 +3211,7 @@ static void ibmvscsis_handle_crq(unsigned long data) | ||
3953 | * everything but transport events on the queue | ||
3954 | * | ||
3955 | * need to decrement the queue index so we can | ||
3956 | - * look at the elment again | ||
3957 | + * look at the element again | ||
3958 | */ | ||
3959 | if (vscsi->cmd_q.index) | ||
3960 | vscsi->cmd_q.index -= 1; | ||
3961 | @@ -3379,7 +3275,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev, | ||
3962 | INIT_LIST_HEAD(&vscsi->waiting_rsp); | ||
3963 | INIT_LIST_HEAD(&vscsi->active_q); | ||
3964 | |||
3965 | - snprintf(vscsi->tport.tport_name, 256, "%s", dev_name(&vdev->dev)); | ||
3966 | + snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s", | ||
3967 | + dev_name(&vdev->dev)); | ||
3968 | |||
3969 | pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name); | ||
3970 | |||
3971 | @@ -3394,6 +3291,9 @@ static int ibmvscsis_probe(struct vio_dev *vdev, | ||
3972 | strncat(vscsi->eye, vdev->name, MAX_EYE); | ||
3973 | |||
3974 | vscsi->dds.unit_id = vdev->unit_address; | ||
3975 | + strncpy(vscsi->dds.partition_name, partition_name, | ||
3976 | + sizeof(vscsi->dds.partition_name)); | ||
3977 | + vscsi->dds.partition_num = partition_number; | ||
3978 | |||
3979 | spin_lock_bh(&ibmvscsis_dev_lock); | ||
3980 | list_add_tail(&vscsi->list, &ibmvscsis_dev_list); | ||
3981 | @@ -3470,6 +3370,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev, | ||
3982 | (unsigned long)vscsi); | ||
3983 | |||
3984 | init_completion(&vscsi->wait_idle); | ||
3985 | + init_completion(&vscsi->unconfig); | ||
3986 | |||
3987 | snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev)); | ||
3988 | vscsi->work_q = create_workqueue(wq_name); | ||
3989 | @@ -3486,31 +3387,12 @@ static int ibmvscsis_probe(struct vio_dev *vdev, | ||
3990 | goto destroy_WQ; | ||
3991 | } | ||
3992 | |||
3993 | - spin_lock_bh(&vscsi->intr_lock); | ||
3994 | - vio_enable_interrupts(vdev); | ||
3995 | - if (rc) { | ||
3996 | - dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc); | ||
3997 | - rc = -ENODEV; | ||
3998 | - spin_unlock_bh(&vscsi->intr_lock); | ||
3999 | - goto free_irq; | ||
4000 | - } | ||
4001 | - | ||
4002 | - if (ibmvscsis_check_q(vscsi)) { | ||
4003 | - rc = ERROR; | ||
4004 | - dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc); | ||
4005 | - spin_unlock_bh(&vscsi->intr_lock); | ||
4006 | - goto disable_interrupt; | ||
4007 | - } | ||
4008 | - spin_unlock_bh(&vscsi->intr_lock); | ||
4009 | + vscsi->state = WAIT_ENABLED; | ||
4010 | |||
4011 | dev_set_drvdata(&vdev->dev, vscsi); | ||
4012 | |||
4013 | return 0; | ||
4014 | |||
4015 | -disable_interrupt: | ||
4016 | - vio_disable_interrupts(vdev); | ||
4017 | -free_irq: | ||
4018 | - free_irq(vdev->irq, vscsi); | ||
4019 | destroy_WQ: | ||
4020 | destroy_workqueue(vscsi->work_q); | ||
4021 | unmap_buf: | ||
4022 | @@ -3544,10 +3426,11 @@ static int ibmvscsis_remove(struct vio_dev *vdev) | ||
4023 | |||
4024 | pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev)); | ||
4025 | |||
4026 | - /* | ||
4027 | - * TBD: Need to handle if there are commands on the waiting_rsp q | ||
4028 | - * Actually, can there still be cmds outstanding to tcm? | ||
4029 | - */ | ||
4030 | + spin_lock_bh(&vscsi->intr_lock); | ||
4031 | + ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0); | ||
4032 | + vscsi->flags |= CFG_SLEEPING; | ||
4033 | + spin_unlock_bh(&vscsi->intr_lock); | ||
4034 | + wait_for_completion(&vscsi->unconfig); | ||
4035 | |||
4036 | vio_disable_interrupts(vdev); | ||
4037 | free_irq(vdev->irq, vscsi); | ||
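The remove path above no longer leaves the old "TBD" question open: it posts an UNCONFIGURING disconnect under the adapter lock, flags the config thread with CFG_SLEEPING, and blocks on the new unconfig completion until the disconnect machinery has quiesced the adapter. A minimal userspace sketch of that handshake, with pthreads standing in for the kernel's struct completion (the names mirror the driver, but nothing here is the driver's code):

#include <pthread.h>
#include <stdio.h>

/* Userspace analog of the kernel's struct completion. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct completion unconfig;

static void *disconnect_handler(void *arg)
{
	/* ... quiesce the adapter, flush outstanding commands ... */
	complete(&unconfig);		/* wake the sleeping remove() */
	return NULL;
}

int main(void)
{
	pthread_t t;

	init_completion(&unconfig);
	pthread_create(&t, NULL, disconnect_handler, NULL);
	wait_for_completion(&unconfig);	/* remove() blocks here */
	pthread_join(t, NULL);
	printf("teardown may proceed\n");
	return 0;
}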
4038 | @@ -3556,7 +3439,6 @@ static int ibmvscsis_remove(struct vio_dev *vdev) | ||
4039 | DMA_BIDIRECTIONAL); | ||
4040 | kfree(vscsi->map_buf); | ||
4041 | tasklet_kill(&vscsi->work_task); | ||
4042 | - ibmvscsis_unregister_command_q(vscsi); | ||
4043 | ibmvscsis_destroy_command_q(vscsi); | ||
4044 | ibmvscsis_freetimer(vscsi); | ||
4045 | ibmvscsis_free_cmds(vscsi); | ||
4046 | @@ -3610,7 +3492,7 @@ static int ibmvscsis_get_system_info(void) | ||
4047 | |||
4048 | num = of_get_property(rootdn, "ibm,partition-no", NULL); | ||
4049 | if (num) | ||
4050 | - partition_number = *num; | ||
4051 | + partition_number = of_read_number(num, 1); | ||
4052 | |||
4053 | of_node_put(rootdn); | ||
4054 | |||
4055 | @@ -3904,18 +3786,22 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item, | ||
4056 | } | ||
4057 | |||
4058 | if (tmp) { | ||
4059 | - tport->enabled = true; | ||
4060 | spin_lock_bh(&vscsi->intr_lock); | ||
4061 | + tport->enabled = true; | ||
4062 | lrc = ibmvscsis_enable_change_state(vscsi); | ||
4063 | if (lrc) | ||
4064 | pr_err("enable_change_state failed, rc %ld state %d\n", | ||
4065 | lrc, vscsi->state); | ||
4066 | spin_unlock_bh(&vscsi->intr_lock); | ||
4067 | } else { | ||
4068 | + spin_lock_bh(&vscsi->intr_lock); | ||
4069 | tport->enabled = false; | ||
4070 | + /* This simulates the server going down */ | ||
4071 | + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); | ||
4072 | + spin_unlock_bh(&vscsi->intr_lock); | ||
4073 | } | ||
4074 | |||
4075 | - pr_debug("tpg_enable_store, state %d\n", vscsi->state); | ||
4076 | + pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state); | ||
4077 | |||
4078 | return count; | ||
4079 | } | ||
4080 | @@ -3985,10 +3871,10 @@ static struct attribute *ibmvscsis_dev_attrs[] = { | ||
4081 | ATTRIBUTE_GROUPS(ibmvscsis_dev); | ||
4082 | |||
4083 | static struct class ibmvscsis_class = { | ||
4084 | - .name = "ibmvscsis", | ||
4085 | - .dev_release = ibmvscsis_dev_release, | ||
4086 | - .class_attrs = ibmvscsis_class_attrs, | ||
4087 | - .dev_groups = ibmvscsis_dev_groups, | ||
4088 | + .name = "ibmvscsis", | ||
4089 | + .dev_release = ibmvscsis_dev_release, | ||
4090 | + .class_attrs = ibmvscsis_class_attrs, | ||
4091 | + .dev_groups = ibmvscsis_dev_groups, | ||
4092 | }; | ||
4093 | |||
4094 | static struct vio_device_id ibmvscsis_device_table[] = { | ||
4095 | diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h | ||
4096 | index 981a0c992b6c..98b0ca79a5c5 100644 | ||
4097 | --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h | ||
4098 | +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h | ||
4099 | @@ -204,8 +204,6 @@ struct scsi_info { | ||
4100 | struct list_head waiting_rsp; | ||
4101 | #define NO_QUEUE 0x00 | ||
4102 | #define WAIT_ENABLED 0X01 | ||
4103 | - /* driver has received an initialize command */ | ||
4104 | -#define PART_UP_WAIT_ENAB 0x02 | ||
4105 | #define WAIT_CONNECTION 0x04 | ||
4106 | /* have established a connection */ | ||
4107 | #define CONNECTED 0x08 | ||
4108 | @@ -259,6 +257,8 @@ struct scsi_info { | ||
4109 | #define SCHEDULE_DISCONNECT 0x00400 | ||
4110 | /* disconnect handler is scheduled */ | ||
4111 | #define DISCONNECT_SCHEDULED 0x00800 | ||
4112 | + /* remove function is sleeping */ | ||
4113 | +#define CFG_SLEEPING 0x01000 | ||
4114 | u32 flags; | ||
4115 | /* adapter lock */ | ||
4116 | spinlock_t intr_lock; | ||
4117 | @@ -287,6 +287,7 @@ struct scsi_info { | ||
4118 | |||
4119 | struct workqueue_struct *work_q; | ||
4120 | struct completion wait_idle; | ||
4121 | + struct completion unconfig; | ||
4122 | struct device dev; | ||
4123 | struct vio_dev *dma_dev; | ||
4124 | struct srp_target target; | ||
4125 | diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c | ||
4126 | index 4d09bd495a88..6e3e63675e56 100644 | ||
4127 | --- a/drivers/tty/serial/8250/8250_pci.c | ||
4128 | +++ b/drivers/tty/serial/8250/8250_pci.c | ||
4129 | @@ -52,6 +52,7 @@ struct serial_private { | ||
4130 | struct pci_dev *dev; | ||
4131 | unsigned int nr; | ||
4132 | struct pci_serial_quirk *quirk; | ||
4133 | + const struct pciserial_board *board; | ||
4134 | int line[0]; | ||
4135 | }; | ||
4136 | |||
4137 | @@ -3871,6 +3872,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) | ||
4138 | } | ||
4139 | } | ||
4140 | priv->nr = i; | ||
4141 | + priv->board = board; | ||
4142 | return priv; | ||
4143 | |||
4144 | err_deinit: | ||
4145 | @@ -3881,7 +3883,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) | ||
4146 | } | ||
4147 | EXPORT_SYMBOL_GPL(pciserial_init_ports); | ||
4148 | |||
4149 | -void pciserial_remove_ports(struct serial_private *priv) | ||
4150 | +void pciserial_detach_ports(struct serial_private *priv) | ||
4151 | { | ||
4152 | struct pci_serial_quirk *quirk; | ||
4153 | int i; | ||
4154 | @@ -3895,7 +3897,11 @@ void pciserial_remove_ports(struct serial_private *priv) | ||
4155 | quirk = find_quirk(priv->dev); | ||
4156 | if (quirk->exit) | ||
4157 | quirk->exit(priv->dev); | ||
4158 | +} | ||
4159 | |||
4160 | +void pciserial_remove_ports(struct serial_private *priv) | ||
4161 | +{ | ||
4162 | + pciserial_detach_ports(priv); | ||
4163 | kfree(priv); | ||
4164 | } | ||
4165 | EXPORT_SYMBOL_GPL(pciserial_remove_ports); | ||
4166 | @@ -5590,7 +5596,7 @@ static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev, | ||
4167 | return PCI_ERS_RESULT_DISCONNECT; | ||
4168 | |||
4169 | if (priv) | ||
4170 | - pciserial_suspend_ports(priv); | ||
4171 | + pciserial_detach_ports(priv); | ||
4172 | |||
4173 | pci_disable_device(dev); | ||
4174 | |||
4175 | @@ -5615,9 +5621,18 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev) | ||
4176 | static void serial8250_io_resume(struct pci_dev *dev) | ||
4177 | { | ||
4178 | struct serial_private *priv = pci_get_drvdata(dev); | ||
4179 | + const struct pciserial_board *board; | ||
4180 | |||
4181 | - if (priv) | ||
4182 | - pciserial_resume_ports(priv); | ||
4183 | + if (!priv) | ||
4184 | + return; | ||
4185 | + | ||
4186 | + board = priv->board; | ||
4187 | + kfree(priv); | ||
4188 | + priv = pciserial_init_ports(dev, board); | ||
4189 | + | ||
4190 | + if (!IS_ERR(priv)) { | ||
4191 | + pci_set_drvdata(dev, priv); | ||
4192 | + } | ||
4193 | } | ||
4194 | |||
4195 | static const struct pci_error_handlers serial8250_err_handler = { | ||
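The serial changes split pciserial_remove_ports() into a pciserial_detach_ports() that tears ports down without freeing priv, so the error-detected path can detach while the newly saved priv->board survives for serial8250_io_resume() to rebuild from. A hedged sketch of the same detach/remove/reinit split for a generic resource (all names invented for illustration):

#include <stdlib.h>

struct board_desc { int nports; };

struct private_data {
	const struct board_desc *board;	/* kept so resume can re-init */
	/* ... per-port state ... */
};

/* Tear down hardware state but keep the descriptor alive. */
static void detach_ports(struct private_data *priv)
{
	(void)priv;	/* ... unregister ports, run quirk exit hooks ... */
}

/* Full removal = detach plus freeing the descriptor. */
static void remove_ports(struct private_data *priv)
{
	detach_ports(priv);
	free(priv);
}

/* After a successful slot reset, rebuild from the saved board. */
static struct private_data *resume_ports(struct private_data *priv)
{
	const struct board_desc *board = priv->board;

	free(priv);			/* old state is stale after reset */
	priv = malloc(sizeof(*priv));	/* stands in for pciserial_init_ports() */
	if (priv)
		priv->board = board;
	return priv;
}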
4196 | diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c | ||
4197 | index 45bc997d0711..a95b3e75f750 100644 | ||
4198 | --- a/drivers/usb/gadget/udc/atmel_usba_udc.c | ||
4199 | +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c | ||
4200 | @@ -1978,7 +1978,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev, | ||
4201 | dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret); | ||
4202 | goto err; | ||
4203 | } | ||
4204 | - ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index); | ||
4205 | + sprintf(ep->name, "ep%d", ep->index); | ||
4206 | + ep->ep.name = ep->name; | ||
4207 | |||
4208 | ep->ep_regs = udc->regs + USBA_EPT_BASE(i); | ||
4209 | ep->dma_regs = udc->regs + USBA_DMA_BASE(i); | ||
4210 | diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h | ||
4211 | index 3e1c9d589dfa..b03b2ebfc53a 100644 | ||
4212 | --- a/drivers/usb/gadget/udc/atmel_usba_udc.h | ||
4213 | +++ b/drivers/usb/gadget/udc/atmel_usba_udc.h | ||
4214 | @@ -280,6 +280,7 @@ struct usba_ep { | ||
4215 | void __iomem *ep_regs; | ||
4216 | void __iomem *dma_regs; | ||
4217 | void __iomem *fifo; | ||
4218 | + char name[8]; | ||
4219 | struct usb_ep ep; | ||
4220 | struct usba_udc *udc; | ||
4221 | |||
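The UDC fix drops kasprintf() for the endpoint name, which could fail and whose result had to be freed, in favor of formatting into the char name[8] embedded in struct usba_ep, so the string's lifetime matches the endpoint's. A standalone illustration of the embedded-buffer idea (the struct is reduced to the relevant fields):

#include <stdio.h>

struct ep {
	int index;
	char name[8];		/* fits "ep" + a small index + NUL */
	const char *label;	/* points into name, never dangles */
};

static void ep_init(struct ep *e, int index)
{
	e->index = index;
	/* snprintf cannot overflow the embedded buffer */
	snprintf(e->name, sizeof(e->name), "ep%d", e->index);
	e->label = e->name;
}

int main(void)
{
	struct ep e;

	ep_init(&e, 3);
	printf("%s\n", e.label);	/* prints "ep3" */
	return 0;
}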
4222 | diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c | ||
4223 | index 80378ddadc5c..c8823578a1b2 100644 | ||
4224 | --- a/drivers/vfio/vfio_iommu_spapr_tce.c | ||
4225 | +++ b/drivers/vfio/vfio_iommu_spapr_tce.c | ||
4226 | @@ -31,49 +31,49 @@ | ||
4227 | static void tce_iommu_detach_group(void *iommu_data, | ||
4228 | struct iommu_group *iommu_group); | ||
4229 | |||
4230 | -static long try_increment_locked_vm(long npages) | ||
4231 | +static long try_increment_locked_vm(struct mm_struct *mm, long npages) | ||
4232 | { | ||
4233 | long ret = 0, locked, lock_limit; | ||
4234 | |||
4235 | - if (!current || !current->mm) | ||
4236 | - return -ESRCH; /* process exited */ | ||
4237 | + if (WARN_ON_ONCE(!mm)) | ||
4238 | + return -EPERM; | ||
4239 | |||
4240 | if (!npages) | ||
4241 | return 0; | ||
4242 | |||
4243 | - down_write(¤t->mm->mmap_sem); | ||
4244 | - locked = current->mm->locked_vm + npages; | ||
4245 | + down_write(&mm->mmap_sem); | ||
4246 | + locked = mm->locked_vm + npages; | ||
4247 | lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; | ||
4248 | if (locked > lock_limit && !capable(CAP_IPC_LOCK)) | ||
4249 | ret = -ENOMEM; | ||
4250 | else | ||
4251 | - current->mm->locked_vm += npages; | ||
4252 | + mm->locked_vm += npages; | ||
4253 | |||
4254 | pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid, | ||
4255 | npages << PAGE_SHIFT, | ||
4256 | - current->mm->locked_vm << PAGE_SHIFT, | ||
4257 | + mm->locked_vm << PAGE_SHIFT, | ||
4258 | rlimit(RLIMIT_MEMLOCK), | ||
4259 | ret ? " - exceeded" : ""); | ||
4260 | |||
4261 | - up_write(¤t->mm->mmap_sem); | ||
4262 | + up_write(&mm->mmap_sem); | ||
4263 | |||
4264 | return ret; | ||
4265 | } | ||
4266 | |||
4267 | -static void decrement_locked_vm(long npages) | ||
4268 | +static void decrement_locked_vm(struct mm_struct *mm, long npages) | ||
4269 | { | ||
4270 | - if (!current || !current->mm || !npages) | ||
4271 | - return; /* process exited */ | ||
4272 | + if (!mm || !npages) | ||
4273 | + return; | ||
4274 | |||
4275 | - down_write(¤t->mm->mmap_sem); | ||
4276 | - if (WARN_ON_ONCE(npages > current->mm->locked_vm)) | ||
4277 | - npages = current->mm->locked_vm; | ||
4278 | - current->mm->locked_vm -= npages; | ||
4279 | + down_write(&mm->mmap_sem); | ||
4280 | + if (WARN_ON_ONCE(npages > mm->locked_vm)) | ||
4281 | + npages = mm->locked_vm; | ||
4282 | + mm->locked_vm -= npages; | ||
4283 | pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid, | ||
4284 | npages << PAGE_SHIFT, | ||
4285 | - current->mm->locked_vm << PAGE_SHIFT, | ||
4286 | + mm->locked_vm << PAGE_SHIFT, | ||
4287 | rlimit(RLIMIT_MEMLOCK)); | ||
4288 | - up_write(¤t->mm->mmap_sem); | ||
4289 | + up_write(&mm->mmap_sem); | ||
4290 | } | ||
4291 | |||
4292 | /* | ||
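Both locked-vm helpers above now take the mm to charge as an explicit argument rather than assuming current->mm, so the container can keep accounting against its owning process even when entered from another context. The accounting itself is plain "charge against a limit, refuse on overflow" bookkeeping; a userspace sketch under that reading (plain longs stand in for mm->locked_vm and the RLIMIT_MEMLOCK check):

#include <errno.h>

struct mm_account {
	long locked_pages;	/* stands in for mm->locked_vm */
	long limit_pages;	/* stands in for RLIMIT_MEMLOCK >> PAGE_SHIFT */
};

static int try_increment_locked(struct mm_account *mm, long npages)
{
	if (!npages)
		return 0;
	if (mm->locked_pages + npages > mm->limit_pages)
		return -ENOMEM;	/* would exceed the memlock limit */
	mm->locked_pages += npages;
	return 0;
}

static void decrement_locked(struct mm_account *mm, long npages)
{
	if (npages > mm->locked_pages)	/* WARN_ON_ONCE() in the kernel */
		npages = mm->locked_pages;
	mm->locked_pages -= npages;
}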
4293 | @@ -89,6 +89,15 @@ struct tce_iommu_group { | ||
4294 | }; | ||
4295 | |||
4296 | /* | ||
4297 | + * A container needs to remember which preregistered region it has | ||
4298 | + * referenced to do proper cleanup at the userspace process exit. | ||
4299 | + */ | ||
4300 | +struct tce_iommu_prereg { | ||
4301 | + struct list_head next; | ||
4302 | + struct mm_iommu_table_group_mem_t *mem; | ||
4303 | +}; | ||
4304 | + | ||
4305 | +/* | ||
4306 | * The container descriptor supports only a single group per container. | ||
4307 | * Required by the API as the container is not supplied with the IOMMU group | ||
4308 | * at the moment of initialization. | ||
4309 | @@ -97,24 +106,68 @@ struct tce_container { | ||
4310 | struct mutex lock; | ||
4311 | bool enabled; | ||
4312 | bool v2; | ||
4313 | + bool def_window_pending; | ||
4314 | unsigned long locked_pages; | ||
4315 | + struct mm_struct *mm; | ||
4316 | struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES]; | ||
4317 | struct list_head group_list; | ||
4318 | + struct list_head prereg_list; | ||
4319 | }; | ||
4320 | |||
4321 | +static long tce_iommu_mm_set(struct tce_container *container) | ||
4322 | +{ | ||
4323 | + if (container->mm) { | ||
4324 | + if (container->mm == current->mm) | ||
4325 | + return 0; | ||
4326 | + return -EPERM; | ||
4327 | + } | ||
4328 | + BUG_ON(!current->mm); | ||
4329 | + container->mm = current->mm; | ||
4330 | + atomic_inc(&container->mm->mm_count); | ||
4331 | + | ||
4332 | + return 0; | ||
4333 | +} | ||
4334 | + | ||
4335 | +static long tce_iommu_prereg_free(struct tce_container *container, | ||
4336 | + struct tce_iommu_prereg *tcemem) | ||
4337 | +{ | ||
4338 | + long ret; | ||
4339 | + | ||
4340 | + ret = mm_iommu_put(container->mm, tcemem->mem); | ||
4341 | + if (ret) | ||
4342 | + return ret; | ||
4343 | + | ||
4344 | + list_del(&tcemem->next); | ||
4345 | + kfree(tcemem); | ||
4346 | + | ||
4347 | + return 0; | ||
4348 | +} | ||
4349 | + | ||
4350 | static long tce_iommu_unregister_pages(struct tce_container *container, | ||
4351 | __u64 vaddr, __u64 size) | ||
4352 | { | ||
4353 | struct mm_iommu_table_group_mem_t *mem; | ||
4354 | + struct tce_iommu_prereg *tcemem; | ||
4355 | + bool found = false; | ||
4356 | |||
4357 | if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK)) | ||
4358 | return -EINVAL; | ||
4359 | |||
4360 | - mem = mm_iommu_find(vaddr, size >> PAGE_SHIFT); | ||
4361 | + mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT); | ||
4362 | if (!mem) | ||
4363 | return -ENOENT; | ||
4364 | |||
4365 | - return mm_iommu_put(mem); | ||
4366 | + list_for_each_entry(tcemem, &container->prereg_list, next) { | ||
4367 | + if (tcemem->mem == mem) { | ||
4368 | + found = true; | ||
4369 | + break; | ||
4370 | + } | ||
4371 | + } | ||
4372 | + | ||
4373 | + if (!found) | ||
4374 | + return -ENOENT; | ||
4375 | + | ||
4376 | + return tce_iommu_prereg_free(container, tcemem); | ||
4377 | } | ||
4378 | |||
4379 | static long tce_iommu_register_pages(struct tce_container *container, | ||
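tce_iommu_mm_set() latches the first caller's mm into the container, takes a reference so it outlives the process, and rejects any later caller with a different mm; that is what makes both the per-mm accounting and the later per-ioctl sanity check possible. A minimal userspace analog of latch-once ownership, with the thread id playing the role of the mm (illustrative only):

#include <errno.h>
#include <pthread.h>

static struct container {
	pthread_mutex_t lock;
	pthread_t owner;
	int owned;
} c = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* First caller becomes the owner; a different caller gets -EPERM. */
static int container_owner_set(void)
{
	int ret = 0;

	pthread_mutex_lock(&c.lock);
	if (c.owned) {
		ret = pthread_equal(c.owner, pthread_self()) ? 0 : -EPERM;
	} else {
		c.owner = pthread_self();	/* kernel: also pins the mm */
		c.owned = 1;
	}
	pthread_mutex_unlock(&c.lock);
	return ret;
}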
4380 | @@ -122,22 +175,36 @@ static long tce_iommu_register_pages(struct tce_container *container, | ||
4381 | { | ||
4382 | long ret = 0; | ||
4383 | struct mm_iommu_table_group_mem_t *mem = NULL; | ||
4384 | + struct tce_iommu_prereg *tcemem; | ||
4385 | unsigned long entries = size >> PAGE_SHIFT; | ||
4386 | |||
4387 | if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) || | ||
4388 | ((vaddr + size) < vaddr)) | ||
4389 | return -EINVAL; | ||
4390 | |||
4391 | - ret = mm_iommu_get(vaddr, entries, &mem); | ||
4392 | + mem = mm_iommu_find(container->mm, vaddr, entries); | ||
4393 | + if (mem) { | ||
4394 | + list_for_each_entry(tcemem, &container->prereg_list, next) { | ||
4395 | + if (tcemem->mem == mem) | ||
4396 | + return -EBUSY; | ||
4397 | + } | ||
4398 | + } | ||
4399 | + | ||
4400 | + ret = mm_iommu_get(container->mm, vaddr, entries, &mem); | ||
4401 | if (ret) | ||
4402 | return ret; | ||
4403 | |||
4404 | + tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL); | ||
4405 | + tcemem->mem = mem; | ||
4406 | + list_add(&tcemem->next, &container->prereg_list); | ||
4407 | + | ||
4408 | container->enabled = true; | ||
4409 | |||
4410 | return 0; | ||
4411 | } | ||
4412 | |||
4413 | -static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl) | ||
4414 | +static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl, | ||
4415 | + struct mm_struct *mm) | ||
4416 | { | ||
4417 | unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) * | ||
4418 | tbl->it_size, PAGE_SIZE); | ||
4419 | @@ -146,13 +213,13 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl) | ||
4420 | |||
4421 | BUG_ON(tbl->it_userspace); | ||
4422 | |||
4423 | - ret = try_increment_locked_vm(cb >> PAGE_SHIFT); | ||
4424 | + ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT); | ||
4425 | if (ret) | ||
4426 | return ret; | ||
4427 | |||
4428 | uas = vzalloc(cb); | ||
4429 | if (!uas) { | ||
4430 | - decrement_locked_vm(cb >> PAGE_SHIFT); | ||
4431 | + decrement_locked_vm(mm, cb >> PAGE_SHIFT); | ||
4432 | return -ENOMEM; | ||
4433 | } | ||
4434 | tbl->it_userspace = uas; | ||
4435 | @@ -160,7 +227,8 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl) | ||
4436 | return 0; | ||
4437 | } | ||
4438 | |||
4439 | -static void tce_iommu_userspace_view_free(struct iommu_table *tbl) | ||
4440 | +static void tce_iommu_userspace_view_free(struct iommu_table *tbl, | ||
4441 | + struct mm_struct *mm) | ||
4442 | { | ||
4443 | unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) * | ||
4444 | tbl->it_size, PAGE_SIZE); | ||
4445 | @@ -170,7 +238,7 @@ static void tce_iommu_userspace_view_free(struct iommu_table *tbl) | ||
4446 | |||
4447 | vfree(tbl->it_userspace); | ||
4448 | tbl->it_userspace = NULL; | ||
4449 | - decrement_locked_vm(cb >> PAGE_SHIFT); | ||
4450 | + decrement_locked_vm(mm, cb >> PAGE_SHIFT); | ||
4451 | } | ||
4452 | |||
4453 | static bool tce_page_is_contained(struct page *page, unsigned page_shift) | ||
4454 | @@ -230,9 +298,6 @@ static int tce_iommu_enable(struct tce_container *container) | ||
4455 | struct iommu_table_group *table_group; | ||
4456 | struct tce_iommu_group *tcegrp; | ||
4457 | |||
4458 | - if (!current->mm) | ||
4459 | - return -ESRCH; /* process exited */ | ||
4460 | - | ||
4461 | if (container->enabled) | ||
4462 | return -EBUSY; | ||
4463 | |||
4464 | @@ -277,8 +342,12 @@ static int tce_iommu_enable(struct tce_container *container) | ||
4465 | if (!table_group->tce32_size) | ||
4466 | return -EPERM; | ||
4467 | |||
4468 | + ret = tce_iommu_mm_set(container); | ||
4469 | + if (ret) | ||
4470 | + return ret; | ||
4471 | + | ||
4472 | locked = table_group->tce32_size >> PAGE_SHIFT; | ||
4473 | - ret = try_increment_locked_vm(locked); | ||
4474 | + ret = try_increment_locked_vm(container->mm, locked); | ||
4475 | if (ret) | ||
4476 | return ret; | ||
4477 | |||
4478 | @@ -296,10 +365,8 @@ static void tce_iommu_disable(struct tce_container *container) | ||
4479 | |||
4480 | container->enabled = false; | ||
4481 | |||
4482 | - if (!current->mm) | ||
4483 | - return; | ||
4484 | - | ||
4485 | - decrement_locked_vm(container->locked_pages); | ||
4486 | + BUG_ON(!container->mm); | ||
4487 | + decrement_locked_vm(container->mm, container->locked_pages); | ||
4488 | } | ||
4489 | |||
4490 | static void *tce_iommu_open(unsigned long arg) | ||
4491 | @@ -317,6 +384,7 @@ static void *tce_iommu_open(unsigned long arg) | ||
4492 | |||
4493 | mutex_init(&container->lock); | ||
4494 | INIT_LIST_HEAD_RCU(&container->group_list); | ||
4495 | + INIT_LIST_HEAD_RCU(&container->prereg_list); | ||
4496 | |||
4497 | container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU; | ||
4498 | |||
4499 | @@ -326,7 +394,8 @@ static void *tce_iommu_open(unsigned long arg) | ||
4500 | static int tce_iommu_clear(struct tce_container *container, | ||
4501 | struct iommu_table *tbl, | ||
4502 | unsigned long entry, unsigned long pages); | ||
4503 | -static void tce_iommu_free_table(struct iommu_table *tbl); | ||
4504 | +static void tce_iommu_free_table(struct tce_container *container, | ||
4505 | + struct iommu_table *tbl); | ||
4506 | |||
4507 | static void tce_iommu_release(void *iommu_data) | ||
4508 | { | ||
4509 | @@ -351,10 +420,20 @@ static void tce_iommu_release(void *iommu_data) | ||
4510 | continue; | ||
4511 | |||
4512 | tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); | ||
4513 | - tce_iommu_free_table(tbl); | ||
4514 | + tce_iommu_free_table(container, tbl); | ||
4515 | + } | ||
4516 | + | ||
4517 | + while (!list_empty(&container->prereg_list)) { | ||
4518 | + struct tce_iommu_prereg *tcemem; | ||
4519 | + | ||
4520 | + tcemem = list_first_entry(&container->prereg_list, | ||
4521 | + struct tce_iommu_prereg, next); | ||
4522 | + WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem)); | ||
4523 | } | ||
4524 | |||
4525 | tce_iommu_disable(container); | ||
4526 | + if (container->mm) | ||
4527 | + mmdrop(container->mm); | ||
4528 | mutex_destroy(&container->lock); | ||
4529 | |||
4530 | kfree(container); | ||
4531 | @@ -369,13 +448,14 @@ static void tce_iommu_unuse_page(struct tce_container *container, | ||
4532 | put_page(page); | ||
4533 | } | ||
4534 | |||
4535 | -static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size, | ||
4536 | +static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container, | ||
4537 | + unsigned long tce, unsigned long size, | ||
4538 | unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem) | ||
4539 | { | ||
4540 | long ret = 0; | ||
4541 | struct mm_iommu_table_group_mem_t *mem; | ||
4542 | |||
4543 | - mem = mm_iommu_lookup(tce, size); | ||
4544 | + mem = mm_iommu_lookup(container->mm, tce, size); | ||
4545 | if (!mem) | ||
4546 | return -EINVAL; | ||
4547 | |||
4548 | @@ -388,18 +468,18 @@ static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size, | ||
4549 | return 0; | ||
4550 | } | ||
4551 | |||
4552 | -static void tce_iommu_unuse_page_v2(struct iommu_table *tbl, | ||
4553 | - unsigned long entry) | ||
4554 | +static void tce_iommu_unuse_page_v2(struct tce_container *container, | ||
4555 | + struct iommu_table *tbl, unsigned long entry) | ||
4556 | { | ||
4557 | struct mm_iommu_table_group_mem_t *mem = NULL; | ||
4558 | int ret; | ||
4559 | unsigned long hpa = 0; | ||
4560 | unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry); | ||
4561 | |||
4562 | - if (!pua || !current || !current->mm) | ||
4563 | + if (!pua) | ||
4564 | return; | ||
4565 | |||
4566 | - ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl), | ||
4567 | + ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl), | ||
4568 | &hpa, &mem); | ||
4569 | if (ret) | ||
4570 | pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n", | ||
4571 | @@ -429,7 +509,7 @@ static int tce_iommu_clear(struct tce_container *container, | ||
4572 | continue; | ||
4573 | |||
4574 | if (container->v2) { | ||
4575 | - tce_iommu_unuse_page_v2(tbl, entry); | ||
4576 | + tce_iommu_unuse_page_v2(container, tbl, entry); | ||
4577 | continue; | ||
4578 | } | ||
4579 | |||
4580 | @@ -509,13 +589,19 @@ static long tce_iommu_build_v2(struct tce_container *container, | ||
4581 | unsigned long hpa; | ||
4582 | enum dma_data_direction dirtmp; | ||
4583 | |||
4584 | + if (!tbl->it_userspace) { | ||
4585 | + ret = tce_iommu_userspace_view_alloc(tbl, container->mm); | ||
4586 | + if (ret) | ||
4587 | + return ret; | ||
4588 | + } | ||
4589 | + | ||
4590 | for (i = 0; i < pages; ++i) { | ||
4591 | struct mm_iommu_table_group_mem_t *mem = NULL; | ||
4592 | unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, | ||
4593 | entry + i); | ||
4594 | |||
4595 | - ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl), | ||
4596 | - &hpa, &mem); | ||
4597 | + ret = tce_iommu_prereg_ua_to_hpa(container, | ||
4598 | + tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem); | ||
4599 | if (ret) | ||
4600 | break; | ||
4601 | |||
4602 | @@ -536,7 +622,7 @@ static long tce_iommu_build_v2(struct tce_container *container, | ||
4603 | ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp); | ||
4604 | if (ret) { | ||
4605 | /* dirtmp cannot be DMA_NONE here */ | ||
4606 | - tce_iommu_unuse_page_v2(tbl, entry + i); | ||
4607 | + tce_iommu_unuse_page_v2(container, tbl, entry + i); | ||
4608 | pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n", | ||
4609 | __func__, entry << tbl->it_page_shift, | ||
4610 | tce, ret); | ||
4611 | @@ -544,7 +630,7 @@ static long tce_iommu_build_v2(struct tce_container *container, | ||
4612 | } | ||
4613 | |||
4614 | if (dirtmp != DMA_NONE) | ||
4615 | - tce_iommu_unuse_page_v2(tbl, entry + i); | ||
4616 | + tce_iommu_unuse_page_v2(container, tbl, entry + i); | ||
4617 | |||
4618 | *pua = tce; | ||
4619 | |||
4620 | @@ -572,7 +658,7 @@ static long tce_iommu_create_table(struct tce_container *container, | ||
4621 | if (!table_size) | ||
4622 | return -EINVAL; | ||
4623 | |||
4624 | - ret = try_increment_locked_vm(table_size >> PAGE_SHIFT); | ||
4625 | + ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT); | ||
4626 | if (ret) | ||
4627 | return ret; | ||
4628 | |||
4629 | @@ -582,25 +668,17 @@ static long tce_iommu_create_table(struct tce_container *container, | ||
4630 | WARN_ON(!ret && !(*ptbl)->it_ops->free); | ||
4631 | WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size)); | ||
4632 | |||
4633 | - if (!ret && container->v2) { | ||
4634 | - ret = tce_iommu_userspace_view_alloc(*ptbl); | ||
4635 | - if (ret) | ||
4636 | - (*ptbl)->it_ops->free(*ptbl); | ||
4637 | - } | ||
4638 | - | ||
4639 | - if (ret) | ||
4640 | - decrement_locked_vm(table_size >> PAGE_SHIFT); | ||
4641 | - | ||
4642 | return ret; | ||
4643 | } | ||
4644 | |||
4645 | -static void tce_iommu_free_table(struct iommu_table *tbl) | ||
4646 | +static void tce_iommu_free_table(struct tce_container *container, | ||
4647 | + struct iommu_table *tbl) | ||
4648 | { | ||
4649 | unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT; | ||
4650 | |||
4651 | - tce_iommu_userspace_view_free(tbl); | ||
4652 | + tce_iommu_userspace_view_free(tbl, container->mm); | ||
4653 | tbl->it_ops->free(tbl); | ||
4654 | - decrement_locked_vm(pages); | ||
4655 | + decrement_locked_vm(container->mm, pages); | ||
4656 | } | ||
4657 | |||
4658 | static long tce_iommu_create_window(struct tce_container *container, | ||
4659 | @@ -663,7 +741,7 @@ static long tce_iommu_create_window(struct tce_container *container, | ||
4660 | table_group = iommu_group_get_iommudata(tcegrp->grp); | ||
4661 | table_group->ops->unset_window(table_group, num); | ||
4662 | } | ||
4663 | - tce_iommu_free_table(tbl); | ||
4664 | + tce_iommu_free_table(container, tbl); | ||
4665 | |||
4666 | return ret; | ||
4667 | } | ||
4668 | @@ -701,12 +779,41 @@ static long tce_iommu_remove_window(struct tce_container *container, | ||
4669 | |||
4670 | /* Free table */ | ||
4671 | tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); | ||
4672 | - tce_iommu_free_table(tbl); | ||
4673 | + tce_iommu_free_table(container, tbl); | ||
4674 | container->tables[num] = NULL; | ||
4675 | |||
4676 | return 0; | ||
4677 | } | ||
4678 | |||
4679 | +static long tce_iommu_create_default_window(struct tce_container *container) | ||
4680 | +{ | ||
4681 | + long ret; | ||
4682 | + __u64 start_addr = 0; | ||
4683 | + struct tce_iommu_group *tcegrp; | ||
4684 | + struct iommu_table_group *table_group; | ||
4685 | + | ||
4686 | + if (!container->def_window_pending) | ||
4687 | + return 0; | ||
4688 | + | ||
4689 | + if (!tce_groups_attached(container)) | ||
4690 | + return -ENODEV; | ||
4691 | + | ||
4692 | + tcegrp = list_first_entry(&container->group_list, | ||
4693 | + struct tce_iommu_group, next); | ||
4694 | + table_group = iommu_group_get_iommudata(tcegrp->grp); | ||
4695 | + if (!table_group) | ||
4696 | + return -ENODEV; | ||
4697 | + | ||
4698 | + ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K, | ||
4699 | + table_group->tce32_size, 1, &start_addr); | ||
4700 | + WARN_ON_ONCE(!ret && start_addr); | ||
4701 | + | ||
4702 | + if (!ret) | ||
4703 | + container->def_window_pending = false; | ||
4704 | + | ||
4705 | + return ret; | ||
4706 | +} | ||
4707 | + | ||
4708 | static long tce_iommu_ioctl(void *iommu_data, | ||
4709 | unsigned int cmd, unsigned long arg) | ||
4710 | { | ||
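tce_iommu_create_default_window() makes default-window creation lazy: attaching a group merely sets def_window_pending (see the attach_group hunk further down), and the first operation that actually needs a window, a map, unmap, or explicit create, materializes it. A compact sketch of the pending-flag, create-on-first-use pattern (names are placeholders):

#include <stdbool.h>

struct container {
	bool def_window_pending;
	void *window;		/* NULL until materialized */
};

static int create_window(struct container *c)
{
	static int dummy;

	c->window = &dummy;	/* stands in for the real table setup */
	return 0;
}

static int create_default_window(struct container *c)
{
	int ret;

	if (!c->def_window_pending)
		return 0;	/* already created, or never requested */
	ret = create_window(c);
	if (!ret)
		c->def_window_pending = false;
	return ret;
}

/* Every entry point that needs a window calls this first. */
static int do_map(struct container *c)
{
	int ret = create_default_window(c);

	if (ret)
		return ret;
	/* ... proceed with the mapping against c->window ... */
	return 0;
}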
4711 | @@ -727,7 +834,17 @@ static long tce_iommu_ioctl(void *iommu_data, | ||
4712 | } | ||
4713 | |||
4714 | return (ret < 0) ? 0 : ret; | ||
4715 | + } | ||
4716 | + | ||
4717 | + /* | ||
4718 | + * Sanity check to prevent one userspace from manipulating | ||
4719 | + * another userspace mm. | ||
4720 | + */ | ||
4721 | + BUG_ON(!container); | ||
4722 | + if (container->mm && container->mm != current->mm) | ||
4723 | + return -EPERM; | ||
4724 | |||
4725 | + switch (cmd) { | ||
4726 | case VFIO_IOMMU_SPAPR_TCE_GET_INFO: { | ||
4727 | struct vfio_iommu_spapr_tce_info info; | ||
4728 | struct tce_iommu_group *tcegrp; | ||
4729 | @@ -797,6 +914,10 @@ static long tce_iommu_ioctl(void *iommu_data, | ||
4730 | VFIO_DMA_MAP_FLAG_WRITE)) | ||
4731 | return -EINVAL; | ||
4732 | |||
4733 | + ret = tce_iommu_create_default_window(container); | ||
4734 | + if (ret) | ||
4735 | + return ret; | ||
4736 | + | ||
4737 | num = tce_iommu_find_table(container, param.iova, &tbl); | ||
4738 | if (num < 0) | ||
4739 | return -ENXIO; | ||
4740 | @@ -860,6 +981,10 @@ static long tce_iommu_ioctl(void *iommu_data, | ||
4741 | if (param.flags) | ||
4742 | return -EINVAL; | ||
4743 | |||
4744 | + ret = tce_iommu_create_default_window(container); | ||
4745 | + if (ret) | ||
4746 | + return ret; | ||
4747 | + | ||
4748 | num = tce_iommu_find_table(container, param.iova, &tbl); | ||
4749 | if (num < 0) | ||
4750 | return -ENXIO; | ||
4751 | @@ -888,6 +1013,10 @@ static long tce_iommu_ioctl(void *iommu_data, | ||
4752 | minsz = offsetofend(struct vfio_iommu_spapr_register_memory, | ||
4753 | size); | ||
4754 | |||
4755 | + ret = tce_iommu_mm_set(container); | ||
4756 | + if (ret) | ||
4757 | + return ret; | ||
4758 | + | ||
4759 | if (copy_from_user(¶m, (void __user *)arg, minsz)) | ||
4760 | return -EFAULT; | ||
4761 | |||
4762 | @@ -911,6 +1040,9 @@ static long tce_iommu_ioctl(void *iommu_data, | ||
4763 | if (!container->v2) | ||
4764 | break; | ||
4765 | |||
4766 | + if (!container->mm) | ||
4767 | + return -EPERM; | ||
4768 | + | ||
4769 | minsz = offsetofend(struct vfio_iommu_spapr_register_memory, | ||
4770 | size); | ||
4771 | |||
4772 | @@ -969,6 +1101,10 @@ static long tce_iommu_ioctl(void *iommu_data, | ||
4773 | if (!container->v2) | ||
4774 | break; | ||
4775 | |||
4776 | + ret = tce_iommu_mm_set(container); | ||
4777 | + if (ret) | ||
4778 | + return ret; | ||
4779 | + | ||
4780 | if (!tce_groups_attached(container)) | ||
4781 | return -ENXIO; | ||
4782 | |||
4783 | @@ -986,6 +1122,10 @@ static long tce_iommu_ioctl(void *iommu_data, | ||
4784 | |||
4785 | mutex_lock(&container->lock); | ||
4786 | |||
4787 | + ret = tce_iommu_create_default_window(container); | ||
4788 | + if (ret) | ||
4789 | + return ret; | ||
4790 | + | ||
4791 | ret = tce_iommu_create_window(container, create.page_shift, | ||
4792 | create.window_size, create.levels, | ||
4793 | &create.start_addr); | ||
4794 | @@ -1003,6 +1143,10 @@ static long tce_iommu_ioctl(void *iommu_data, | ||
4795 | if (!container->v2) | ||
4796 | break; | ||
4797 | |||
4798 | + ret = tce_iommu_mm_set(container); | ||
4799 | + if (ret) | ||
4800 | + return ret; | ||
4801 | + | ||
4802 | if (!tce_groups_attached(container)) | ||
4803 | return -ENXIO; | ||
4804 | |||
4805 | @@ -1018,6 +1162,11 @@ static long tce_iommu_ioctl(void *iommu_data, | ||
4806 | if (remove.flags) | ||
4807 | return -EINVAL; | ||
4808 | |||
4809 | + if (container->def_window_pending && !remove.start_addr) { | ||
4810 | + container->def_window_pending = false; | ||
4811 | + return 0; | ||
4812 | + } | ||
4813 | + | ||
4814 | mutex_lock(&container->lock); | ||
4815 | |||
4816 | ret = tce_iommu_remove_window(container, remove.start_addr); | ||
4817 | @@ -1043,7 +1192,7 @@ static void tce_iommu_release_ownership(struct tce_container *container, | ||
4818 | continue; | ||
4819 | |||
4820 | tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); | ||
4821 | - tce_iommu_userspace_view_free(tbl); | ||
4822 | + tce_iommu_userspace_view_free(tbl, container->mm); | ||
4823 | if (tbl->it_map) | ||
4824 | iommu_release_ownership(tbl); | ||
4825 | |||
4826 | @@ -1062,10 +1211,7 @@ static int tce_iommu_take_ownership(struct tce_container *container, | ||
4827 | if (!tbl || !tbl->it_map) | ||
4828 | continue; | ||
4829 | |||
4830 | - rc = tce_iommu_userspace_view_alloc(tbl); | ||
4831 | - if (!rc) | ||
4832 | - rc = iommu_take_ownership(tbl); | ||
4833 | - | ||
4834 | + rc = iommu_take_ownership(tbl); | ||
4835 | if (rc) { | ||
4836 | for (j = 0; j < i; ++j) | ||
4837 | iommu_release_ownership( | ||
4838 | @@ -1100,9 +1246,6 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container, | ||
4839 | static long tce_iommu_take_ownership_ddw(struct tce_container *container, | ||
4840 | struct iommu_table_group *table_group) | ||
4841 | { | ||
4842 | - long i, ret = 0; | ||
4843 | - struct iommu_table *tbl = NULL; | ||
4844 | - | ||
4845 | if (!table_group->ops->create_table || !table_group->ops->set_window || | ||
4846 | !table_group->ops->release_ownership) { | ||
4847 | WARN_ON_ONCE(1); | ||
4848 | @@ -1111,47 +1254,7 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container, | ||
4849 | |||
4850 | table_group->ops->take_ownership(table_group); | ||
4851 | |||
4852 | - /* | ||
4853 | - * If it the first group attached, check if there is | ||
4854 | - * a default DMA window and create one if none as | ||
4855 | - * the userspace expects it to exist. | ||
4856 | - */ | ||
4857 | - if (!tce_groups_attached(container) && !container->tables[0]) { | ||
4858 | - ret = tce_iommu_create_table(container, | ||
4859 | - table_group, | ||
4860 | - 0, /* window number */ | ||
4861 | - IOMMU_PAGE_SHIFT_4K, | ||
4862 | - table_group->tce32_size, | ||
4863 | - 1, /* default levels */ | ||
4864 | - &tbl); | ||
4865 | - if (ret) | ||
4866 | - goto release_exit; | ||
4867 | - else | ||
4868 | - container->tables[0] = tbl; | ||
4869 | - } | ||
4870 | - | ||
4871 | - /* Set all windows to the new group */ | ||
4872 | - for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { | ||
4873 | - tbl = container->tables[i]; | ||
4874 | - | ||
4875 | - if (!tbl) | ||
4876 | - continue; | ||
4877 | - | ||
4878 | - /* Set the default window to a new group */ | ||
4879 | - ret = table_group->ops->set_window(table_group, i, tbl); | ||
4880 | - if (ret) | ||
4881 | - goto release_exit; | ||
4882 | - } | ||
4883 | - | ||
4884 | return 0; | ||
4885 | - | ||
4886 | -release_exit: | ||
4887 | - for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) | ||
4888 | - table_group->ops->unset_window(table_group, i); | ||
4889 | - | ||
4890 | - table_group->ops->release_ownership(table_group); | ||
4891 | - | ||
4892 | - return ret; | ||
4893 | } | ||
4894 | |||
4895 | static int tce_iommu_attach_group(void *iommu_data, | ||
4896 | @@ -1203,10 +1306,13 @@ static int tce_iommu_attach_group(void *iommu_data, | ||
4897 | } | ||
4898 | |||
4899 | if (!table_group->ops || !table_group->ops->take_ownership || | ||
4900 | - !table_group->ops->release_ownership) | ||
4901 | + !table_group->ops->release_ownership) { | ||
4902 | ret = tce_iommu_take_ownership(container, table_group); | ||
4903 | - else | ||
4904 | + } else { | ||
4905 | ret = tce_iommu_take_ownership_ddw(container, table_group); | ||
4906 | + if (!tce_groups_attached(container) && !container->tables[0]) | ||
4907 | + container->def_window_pending = true; | ||
4908 | + } | ||
4909 | |||
4910 | if (!ret) { | ||
4911 | tcegrp->grp = iommu_group; | ||
4912 | diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h | ||
4913 | index 6aaf425cebc3..a13b031dc6b8 100644 | ||
4914 | --- a/include/linux/bpf_verifier.h | ||
4915 | +++ b/include/linux/bpf_verifier.h | ||
4916 | @@ -18,19 +18,12 @@ | ||
4917 | |||
4918 | struct bpf_reg_state { | ||
4919 | enum bpf_reg_type type; | ||
4920 | - /* | ||
4921 | - * Used to determine if any memory access using this register will | ||
4922 | - * result in a bad access. | ||
4923 | - */ | ||
4924 | - s64 min_value; | ||
4925 | - u64 max_value; | ||
4926 | union { | ||
4927 | /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ | ||
4928 | s64 imm; | ||
4929 | |||
4930 | /* valid when type == PTR_TO_PACKET* */ | ||
4931 | struct { | ||
4932 | - u32 id; | ||
4933 | u16 off; | ||
4934 | u16 range; | ||
4935 | }; | ||
4936 | @@ -40,6 +33,13 @@ struct bpf_reg_state { | ||
4937 | */ | ||
4938 | struct bpf_map *map_ptr; | ||
4939 | }; | ||
4940 | + u32 id; | ||
4941 | + /* Used to determine if any memory access using this register will | ||
4942 | + * result in a bad access. These two fields must be last. | ||
4943 | + * See states_equal() | ||
4944 | + */ | ||
4945 | + s64 min_value; | ||
4946 | + u64 max_value; | ||
4947 | }; | ||
4948 | |||
4949 | enum bpf_stack_slot_type { | ||
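The struct reorder is deliberate: id, min_value and max_value now sit at the end so states_equal() (see the verifier.c hunk further down) can compare everything up to and including id with one memcmp() over a struct prefix. A standalone demonstration of that offsetofend() prefix-compare trick; the macro is reproduced here because it is kernel-internal, and the struct is a reduced stand-in, not the real bpf_reg_state:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* Kernel helper: offset of the first byte *after* the given member. */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct reg_state {
	int type;
	long imm;
	unsigned int id;
	/* fields below are deliberately excluded from the fast compare */
	long long min_value;
	unsigned long long max_value;
};

int main(void)
{
	struct reg_state a, b;

	/* zero first so padding bytes compare equal, as the verifier's
	 * register state does */
	memset(&a, 0, sizeof(a));
	memset(&b, 0, sizeof(b));
	a.type = b.type = 1;
	a.id = b.id = 7;
	a.min_value = -5;	/* differs, but sits past the compared prefix */

	if (!memcmp(&a, &b, offsetofend(struct reg_state, id)))
		printf("equal up to and including id\n");
	return 0;
}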
4950 | diff --git a/include/linux/dccp.h b/include/linux/dccp.h | ||
4951 | index 61d042bbbf60..68449293c4b6 100644 | ||
4952 | --- a/include/linux/dccp.h | ||
4953 | +++ b/include/linux/dccp.h | ||
4954 | @@ -163,6 +163,7 @@ struct dccp_request_sock { | ||
4955 | __u64 dreq_isr; | ||
4956 | __u64 dreq_gsr; | ||
4957 | __be32 dreq_service; | ||
4958 | + spinlock_t dreq_lock; | ||
4959 | struct list_head dreq_featneg; | ||
4960 | __u32 dreq_timestamp_echo; | ||
4961 | __u32 dreq_timestamp_time; | ||
4962 | diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h | ||
4963 | index 192eef2fd766..d596a076da11 100644 | ||
4964 | --- a/include/linux/hyperv.h | ||
4965 | +++ b/include/linux/hyperv.h | ||
4966 | @@ -1548,31 +1548,23 @@ static inline struct vmpacket_descriptor * | ||
4967 | get_next_pkt_raw(struct vmbus_channel *channel) | ||
4968 | { | ||
4969 | struct hv_ring_buffer_info *ring_info = &channel->inbound; | ||
4970 | - u32 read_loc = ring_info->priv_read_index; | ||
4971 | + u32 priv_read_loc = ring_info->priv_read_index; | ||
4972 | void *ring_buffer = hv_get_ring_buffer(ring_info); | ||
4973 | - struct vmpacket_descriptor *cur_desc; | ||
4974 | - u32 packetlen; | ||
4975 | u32 dsize = ring_info->ring_datasize; | ||
4976 | - u32 delta = read_loc - ring_info->ring_buffer->read_index; | ||
4977 | + /* | ||
4978 | + * delta is the difference between what is available to read and | ||
4979 | + * what was already consumed in place. We commit the read index after | ||
4980 | + * the whole batch is processed. | ||
4981 | + */ | ||
4982 | + u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ? | ||
4983 | + priv_read_loc - ring_info->ring_buffer->read_index : | ||
4984 | + (dsize - ring_info->ring_buffer->read_index) + priv_read_loc; | ||
4985 | u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta); | ||
4986 | |||
4987 | if (bytes_avail_toread < sizeof(struct vmpacket_descriptor)) | ||
4988 | return NULL; | ||
4989 | |||
4990 | - if ((read_loc + sizeof(*cur_desc)) > dsize) | ||
4991 | - return NULL; | ||
4992 | - | ||
4993 | - cur_desc = ring_buffer + read_loc; | ||
4994 | - packetlen = cur_desc->len8 << 3; | ||
4995 | - | ||
4996 | - /* | ||
4997 | - * If the packet under consideration is wrapping around, | ||
4998 | - * return failure. | ||
4999 | - */ | ||
5000 | - if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1)) | ||
5001 | - return NULL; | ||
5002 | - | ||
5003 | - return cur_desc; | ||
5004 | + return ring_buffer + priv_read_loc; | ||
5005 | } | ||
5006 | |||
5007 | /* | ||
5008 | @@ -1584,16 +1576,14 @@ static inline void put_pkt_raw(struct vmbus_channel *channel, | ||
5009 | struct vmpacket_descriptor *desc) | ||
5010 | { | ||
5011 | struct hv_ring_buffer_info *ring_info = &channel->inbound; | ||
5012 | - u32 read_loc = ring_info->priv_read_index; | ||
5013 | u32 packetlen = desc->len8 << 3; | ||
5014 | u32 dsize = ring_info->ring_datasize; | ||
5015 | |||
5016 | - if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize) | ||
5017 | - BUG(); | ||
5018 | /* | ||
5019 | * Include the packet trailer. | ||
5020 | */ | ||
5021 | ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER; | ||
5022 | + ring_info->priv_read_index %= dsize; | ||
5023 | } | ||
5024 | |||
5025 | /* | ||
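The rewritten get_next_pkt_raw() stops rejecting packets that wrap past the end of the ring buffer: delta now has an explicit wrap case, and put_pkt_raw() advances the private read index modulo the data size. The index arithmetic is easy to check in isolation; a self-contained model of both operations:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Bytes consumed in place but not yet committed, wrap-aware. */
static uint32_t batch_delta(uint32_t priv_read, uint32_t committed_read,
			    uint32_t dsize)
{
	return priv_read >= committed_read ?
		priv_read - committed_read :
		(dsize - committed_read) + priv_read;
}

int main(void)
{
	uint32_t dsize = 4096;

	/* no wrap: private index ahead of the committed one by 100 bytes */
	assert(batch_delta(600, 500, dsize) == 100);

	/* wrap: private index passed the end and started over */
	assert(batch_delta(50, 4000, dsize) == (4096 - 4000) + 50);

	/* put_pkt_raw()'s advance: add packet + trailer, then wrap */
	uint32_t priv_read = 4000, packetlen = 120, trailer = 8;
	priv_read = (priv_read + packetlen + trailer) % dsize;
	printf("new priv_read_index = %u\n", priv_read);	/* 32 */
	return 0;
}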
5026 | diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h | ||
5027 | index d08c63f3dd6f..0c5d5dd61b6a 100644 | ||
5028 | --- a/include/uapi/linux/packet_diag.h | ||
5029 | +++ b/include/uapi/linux/packet_diag.h | ||
5030 | @@ -64,7 +64,7 @@ struct packet_diag_mclist { | ||
5031 | __u32 pdmc_count; | ||
5032 | __u16 pdmc_type; | ||
5033 | __u16 pdmc_alen; | ||
5034 | - __u8 pdmc_addr[MAX_ADDR_LEN]; | ||
5035 | + __u8 pdmc_addr[32]; /* MAX_ADDR_LEN */ | ||
5036 | }; | ||
5037 | |||
5038 | struct packet_diag_ring { | ||
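packet_diag.h hardcodes 32 because MAX_ADDR_LEN comes from netdevice.h, which a UAPI header must not pull in; the comment preserves the relationship. One common way to keep such a duplicated constant honest is a build-time assertion; a hedged userspace equivalent with C11 _Static_assert (the struct is a reduced copy limited to the fields shown in the hunk, and the MAX_ADDR_LEN define is pasted in for the example):

#include <stdint.h>

#define MAX_ADDR_LEN 32		/* normally from the kernel's netdevice.h */

struct packet_diag_mclist {
	uint32_t pdmc_count;
	uint16_t pdmc_type;
	uint16_t pdmc_alen;
	uint8_t  pdmc_addr[32];	/* MAX_ADDR_LEN */
};

/* If the kernel-side constant ever grows, this fails to compile. */
_Static_assert(sizeof(((struct packet_diag_mclist *)0)->pdmc_addr)
	       == MAX_ADDR_LEN, "pdmc_addr out of sync with MAX_ADDR_LEN");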
5039 | diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c | ||
5040 | index 8199821f54cf..85d1c9423ccb 100644 | ||
5041 | --- a/kernel/bpf/verifier.c | ||
5042 | +++ b/kernel/bpf/verifier.c | ||
5043 | @@ -212,9 +212,10 @@ static void print_verifier_state(struct bpf_verifier_state *state) | ||
5044 | else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || | ||
5045 | t == PTR_TO_MAP_VALUE_OR_NULL || | ||
5046 | t == PTR_TO_MAP_VALUE_ADJ) | ||
5047 | - verbose("(ks=%d,vs=%d)", | ||
5048 | + verbose("(ks=%d,vs=%d,id=%u)", | ||
5049 | reg->map_ptr->key_size, | ||
5050 | - reg->map_ptr->value_size); | ||
5051 | + reg->map_ptr->value_size, | ||
5052 | + reg->id); | ||
5053 | if (reg->min_value != BPF_REGISTER_MIN_RANGE) | ||
5054 | verbose(",min_value=%lld", | ||
5055 | (long long)reg->min_value); | ||
5056 | @@ -443,13 +444,19 @@ static void init_reg_state(struct bpf_reg_state *regs) | ||
5057 | regs[BPF_REG_1].type = PTR_TO_CTX; | ||
5058 | } | ||
5059 | |||
5060 | -static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) | ||
5061 | +static void __mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) | ||
5062 | { | ||
5063 | - BUG_ON(regno >= MAX_BPF_REG); | ||
5064 | regs[regno].type = UNKNOWN_VALUE; | ||
5065 | + regs[regno].id = 0; | ||
5066 | regs[regno].imm = 0; | ||
5067 | } | ||
5068 | |||
5069 | +static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) | ||
5070 | +{ | ||
5071 | + BUG_ON(regno >= MAX_BPF_REG); | ||
5072 | + __mark_reg_unknown_value(regs, regno); | ||
5073 | +} | ||
5074 | + | ||
5075 | static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno) | ||
5076 | { | ||
5077 | regs[regno].min_value = BPF_REGISTER_MIN_RANGE; | ||
5078 | @@ -1252,6 +1259,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id) | ||
5079 | return -EINVAL; | ||
5080 | } | ||
5081 | regs[BPF_REG_0].map_ptr = meta.map_ptr; | ||
5082 | + regs[BPF_REG_0].id = ++env->id_gen; | ||
5083 | } else { | ||
5084 | verbose("unknown return type %d of func %d\n", | ||
5085 | fn->ret_type, func_id); | ||
5086 | @@ -1668,8 +1676,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) | ||
5087 | insn->src_reg); | ||
5088 | return -EACCES; | ||
5089 | } | ||
5090 | - regs[insn->dst_reg].type = UNKNOWN_VALUE; | ||
5091 | - regs[insn->dst_reg].map_ptr = NULL; | ||
5092 | + mark_reg_unknown_value(regs, insn->dst_reg); | ||
5093 | } | ||
5094 | } else { | ||
5095 | /* case: R = imm | ||
5096 | @@ -1931,6 +1938,43 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, | ||
5097 | check_reg_overflow(true_reg); | ||
5098 | } | ||
5099 | |||
5100 | +static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, | ||
5101 | + enum bpf_reg_type type) | ||
5102 | +{ | ||
5103 | + struct bpf_reg_state *reg = ®s[regno]; | ||
5104 | + | ||
5105 | + if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { | ||
5106 | + reg->type = type; | ||
5107 | + /* We don't need the id from this point onwards; we reset it | ||
5108 | + * so that state pruning has a better chance | ||
5109 | + * to take effect. | ||
5110 | + */ | ||
5111 | + reg->id = 0; | ||
5112 | + if (type == UNKNOWN_VALUE) | ||
5113 | + __mark_reg_unknown_value(regs, regno); | ||
5114 | + } | ||
5115 | +} | ||
5116 | + | ||
5117 | +/* The logic is similar to find_good_pkt_pointers(), both could eventually | ||
5118 | + * be folded together at some point. | ||
5119 | + */ | ||
5120 | +static void mark_map_regs(struct bpf_verifier_state *state, u32 regno, | ||
5121 | + enum bpf_reg_type type) | ||
5122 | +{ | ||
5123 | + struct bpf_reg_state *regs = state->regs; | ||
5124 | + u32 id = regs[regno].id; | ||
5125 | + int i; | ||
5126 | + | ||
5127 | + for (i = 0; i < MAX_BPF_REG; i++) | ||
5128 | + mark_map_reg(regs, i, id, type); | ||
5129 | + | ||
5130 | + for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { | ||
5131 | + if (state->stack_slot_type[i] != STACK_SPILL) | ||
5132 | + continue; | ||
5133 | + mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, type); | ||
5134 | + } | ||
5135 | +} | ||
5136 | + | ||
5137 | static int check_cond_jmp_op(struct bpf_verifier_env *env, | ||
5138 | struct bpf_insn *insn, int *insn_idx) | ||
5139 | { | ||
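mark_map_regs() is the core of the verifier fix: when a conditional proves a map-lookup pointer NULL or non-NULL, every register and spilled stack slot carrying the same id is retyped at once, not just dst_reg. The per-register rule is small enough to model standalone (reduced register state; the enum values are placeholders, not the kernel's):

#include <stdio.h>

enum reg_type { UNKNOWN_VALUE, PTR_TO_MAP_VALUE, PTR_TO_MAP_VALUE_OR_NULL };

struct reg { enum reg_type type; unsigned int id; };

#define MAX_REG 4

/* Retype every copy of the checked pointer, identified by its id. */
static void mark_map_regs(struct reg *regs, unsigned int id,
			  enum reg_type new_type)
{
	for (int i = 0; i < MAX_REG; i++) {
		if (regs[i].type == PTR_TO_MAP_VALUE_OR_NULL &&
		    regs[i].id == id) {
			regs[i].type = new_type;
			regs[i].id = 0;	/* helps state pruning match states */
		}
	}
}

int main(void)
{
	struct reg regs[MAX_REG] = {
		{ PTR_TO_MAP_VALUE_OR_NULL, 7 },	/* r0 = map_lookup() */
		{ PTR_TO_MAP_VALUE_OR_NULL, 7 },	/* r1 = r0 (same id)  */
		{ PTR_TO_MAP_VALUE_OR_NULL, 9 },	/* unrelated lookup   */
		{ UNKNOWN_VALUE, 0 },
	};

	/* fallthrough branch of "if (r0 != NULL)": both copies become safe */
	mark_map_regs(regs, 7, PTR_TO_MAP_VALUE);
	for (int i = 0; i < MAX_REG; i++)
		printf("r%d: type=%d id=%u\n", i, regs[i].type, regs[i].id);
	return 0;
}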
5140 | @@ -2018,18 +2062,13 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, | ||
5141 | if (BPF_SRC(insn->code) == BPF_K && | ||
5142 | insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && | ||
5143 | dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { | ||
5144 | - if (opcode == BPF_JEQ) { | ||
5145 | - /* next fallthrough insn can access memory via | ||
5146 | - * this register | ||
5147 | - */ | ||
5148 | - regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; | ||
5149 | - /* branch targer cannot access it, since reg == 0 */ | ||
5150 | - mark_reg_unknown_value(other_branch->regs, | ||
5151 | - insn->dst_reg); | ||
5152 | - } else { | ||
5153 | - other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; | ||
5154 | - mark_reg_unknown_value(regs, insn->dst_reg); | ||
5155 | - } | ||
5156 | + /* Mark all identical map registers in each branch as either | ||
5157 | + * safe or unknown depending on the R == 0 or R != 0 conditional. | ||
5158 | + */ | ||
5159 | + mark_map_regs(this_branch, insn->dst_reg, | ||
5160 | + opcode == BPF_JEQ ? PTR_TO_MAP_VALUE : UNKNOWN_VALUE); | ||
5161 | + mark_map_regs(other_branch, insn->dst_reg, | ||
5162 | + opcode == BPF_JEQ ? UNKNOWN_VALUE : PTR_TO_MAP_VALUE); | ||
5163 | } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && | ||
5164 | dst_reg->type == PTR_TO_PACKET && | ||
5165 | regs[insn->src_reg].type == PTR_TO_PACKET_END) { | ||
5166 | @@ -2469,7 +2508,7 @@ static bool states_equal(struct bpf_verifier_env *env, | ||
5167 | * we didn't do a variable access into a map then we are a-ok. | ||
5168 | */ | ||
5169 | if (!varlen_map_access && | ||
5170 | - rold->type == rcur->type && rold->imm == rcur->imm) | ||
5171 | + memcmp(rold, rcur, offsetofend(struct bpf_reg_state, id)) == 0) | ||
5172 | continue; | ||
5173 | |||
5174 | /* If we didn't map access then again we don't care about the | ||
5175 | diff --git a/kernel/futex.c b/kernel/futex.c | ||
5176 | index 38b68c2735c5..4c6b6e697b73 100644 | ||
5177 | --- a/kernel/futex.c | ||
5178 | +++ b/kernel/futex.c | ||
5179 | @@ -2813,7 +2813,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, | ||
5180 | { | ||
5181 | struct hrtimer_sleeper timeout, *to = NULL; | ||
5182 | struct rt_mutex_waiter rt_waiter; | ||
5183 | - struct rt_mutex *pi_mutex = NULL; | ||
5184 | struct futex_hash_bucket *hb; | ||
5185 | union futex_key key2 = FUTEX_KEY_INIT; | ||
5186 | struct futex_q q = futex_q_init; | ||
5187 | @@ -2897,6 +2896,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, | ||
5188 | if (q.pi_state && (q.pi_state->owner != current)) { | ||
5189 | spin_lock(q.lock_ptr); | ||
5190 | ret = fixup_pi_state_owner(uaddr2, &q, current); | ||
5191 | + if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) | ||
5192 | + rt_mutex_unlock(&q.pi_state->pi_mutex); | ||
5193 | /* | ||
5194 | * Drop the reference to the pi state which | ||
5195 | * the requeue_pi() code acquired for us. | ||
5196 | @@ -2905,6 +2906,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, | ||
5197 | spin_unlock(q.lock_ptr); | ||
5198 | } | ||
5199 | } else { | ||
5200 | + struct rt_mutex *pi_mutex; | ||
5201 | + | ||
5202 | /* | ||
5203 | * We have been woken up by futex_unlock_pi(), a timeout, or a | ||
5204 | * signal. futex_unlock_pi() will not destroy the lock_ptr nor | ||
5205 | @@ -2928,18 +2931,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, | ||
5206 | if (res) | ||
5207 | ret = (res < 0) ? res : 0; | ||
5208 | |||
5209 | + /* | ||
5210 | + * If fixup_pi_state_owner() faulted and was unable to handle | ||
5211 | + * the fault, unlock the rt_mutex and return the fault to | ||
5212 | + * userspace. | ||
5213 | + */ | ||
5214 | + if (ret && rt_mutex_owner(pi_mutex) == current) | ||
5215 | + rt_mutex_unlock(pi_mutex); | ||
5216 | + | ||
5217 | /* Unqueue and drop the lock. */ | ||
5218 | unqueue_me_pi(&q); | ||
5219 | } | ||
5220 | |||
5221 | - /* | ||
5222 | - * If fixup_pi_state_owner() faulted and was unable to handle the | ||
5223 | - * fault, unlock the rt_mutex and return the fault to userspace. | ||
5224 | - */ | ||
5225 | - if (ret == -EFAULT) { | ||
5226 | - if (pi_mutex && rt_mutex_owner(pi_mutex) == current) | ||
5227 | - rt_mutex_unlock(pi_mutex); | ||
5228 | - } else if (ret == -EINTR) { | ||
5229 | + if (ret == -EINTR) { | ||
5230 | /* | ||
5231 | * We've already been requeued, but cannot restart by calling | ||
5232 | * futex_lock_pi() directly. We could restart this syscall, but | ||
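Both futex hunks apply one rule: after fixup_pi_state_owner() fails, drop the rt_mutex only if the current task actually owns it, replacing the old end-of-function special case that fired only for -EFAULT. "Unlock only what you own" generalizes; a loose userspace sketch with an explicitly tracked owner (the kernel performs this check under the proper locks, which the sketch elides):

#include <errno.h>
#include <pthread.h>

struct owned_mutex {
	pthread_mutex_t m;
	pthread_t owner;
	int locked;
};

static int owned_unlock(struct owned_mutex *om)
{
	/* never unlock a mutex another task may hold */
	if (!om->locked || !pthread_equal(om->owner, pthread_self()))
		return -EPERM;
	om->locked = 0;
	return pthread_mutex_unlock(&om->m);
}

/* Error path mirroring the futex fix: drop the lock only if we own it. */
static int cleanup_on_fault(struct owned_mutex *om, int ret)
{
	if (ret)
		owned_unlock(om);	/* no-op unless we are the owner */
	return ret;
}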
5233 | diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c | ||
5234 | index 1591f6b3539f..2bef4ab94003 100644 | ||
5235 | --- a/kernel/locking/rwsem-spinlock.c | ||
5236 | +++ b/kernel/locking/rwsem-spinlock.c | ||
5237 | @@ -216,10 +216,8 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state) | ||
5238 | */ | ||
5239 | if (sem->count == 0) | ||
5240 | break; | ||
5241 | - if (signal_pending_state(state, current)) { | ||
5242 | - ret = -EINTR; | ||
5243 | - goto out; | ||
5244 | - } | ||
5245 | + if (signal_pending_state(state, current)) | ||
5246 | + goto out_nolock; | ||
5247 | set_task_state(tsk, state); | ||
5248 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
5249 | schedule(); | ||
5250 | @@ -227,12 +225,19 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state) | ||
5251 | } | ||
5252 | /* got the lock */ | ||
5253 | sem->count = -1; | ||
5254 | -out: | ||
5255 | list_del(&waiter.list); | ||
5256 | |||
5257 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
5258 | |||
5259 | return ret; | ||
5260 | + | ||
5261 | +out_nolock: | ||
5262 | + list_del(&waiter.list); | ||
5263 | + if (!list_empty(&sem->wait_list)) | ||
5264 | + __rwsem_do_wake(sem, 1); | ||
5265 | + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
5266 | + | ||
5267 | + return -EINTR; | ||
5268 | } | ||
5269 | |||
5270 | void __sched __down_write(struct rw_semaphore *sem) | ||
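The new out_nolock path fixes a lost wakeup: a writer that aborts on a signal removes itself from the wait list and, if other waiters remain, wakes the next one, because the wakeup it absorbed would otherwise vanish. The same pass-the-wakeup-on rule applies to condition-variable waiters that can bail out; a small pthread sketch (a counting construct, not a faithful rwsem):

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>

struct sem {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int count;
};

/* Each sem_post() issues one cond_signal(), so an aborting waiter must
 * re-signal: it may have consumed a wakeup meant for another waiter. */
static int sem_wait_cancelable(struct sem *s, atomic_int *cancelled)
{
	pthread_mutex_lock(&s->lock);
	for (;;) {
		if (atomic_load(cancelled)) {
			pthread_cond_signal(&s->cond);	/* pass wakeup on */
			pthread_mutex_unlock(&s->lock);
			return -EINTR;
		}
		if (s->count > 0)
			break;
		pthread_cond_wait(&s->cond, &s->lock);
	}
	s->count--;
	pthread_mutex_unlock(&s->lock);
	return 0;
}

static void sem_post(struct sem *s)
{
	pthread_mutex_lock(&s->lock);
	s->count++;
	pthread_cond_signal(&s->cond);	/* wakes exactly one waiter */
	pthread_mutex_unlock(&s->lock);
}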
5271 | diff --git a/mm/slab.c b/mm/slab.c | ||
5272 | index bd878f051a3b..1f82d16a0518 100644 | ||
5273 | --- a/mm/slab.c | ||
5274 | +++ b/mm/slab.c | ||
5275 | @@ -2332,7 +2332,7 @@ static int drain_freelist(struct kmem_cache *cache, | ||
5276 | return nr_freed; | ||
5277 | } | ||
5278 | |||
5279 | -int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate) | ||
5280 | +int __kmem_cache_shrink(struct kmem_cache *cachep) | ||
5281 | { | ||
5282 | int ret = 0; | ||
5283 | int node; | ||
5284 | @@ -2352,7 +2352,7 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate) | ||
5285 | |||
5286 | int __kmem_cache_shutdown(struct kmem_cache *cachep) | ||
5287 | { | ||
5288 | - return __kmem_cache_shrink(cachep, false); | ||
5289 | + return __kmem_cache_shrink(cachep); | ||
5290 | } | ||
5291 | |||
5292 | void __kmem_cache_release(struct kmem_cache *cachep) | ||
5293 | diff --git a/mm/slab.h b/mm/slab.h | ||
5294 | index bc05fdc3edce..ceb7d70cdb76 100644 | ||
5295 | --- a/mm/slab.h | ||
5296 | +++ b/mm/slab.h | ||
5297 | @@ -146,7 +146,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size, | ||
5298 | |||
5299 | int __kmem_cache_shutdown(struct kmem_cache *); | ||
5300 | void __kmem_cache_release(struct kmem_cache *); | ||
5301 | -int __kmem_cache_shrink(struct kmem_cache *, bool); | ||
5302 | +int __kmem_cache_shrink(struct kmem_cache *); | ||
5303 | void slab_kmem_cache_release(struct kmem_cache *); | ||
5304 | |||
5305 | struct seq_file; | ||
5306 | diff --git a/mm/slab_common.c b/mm/slab_common.c | ||
5307 | index 329b03843863..5d2f24fbafc5 100644 | ||
5308 | --- a/mm/slab_common.c | ||
5309 | +++ b/mm/slab_common.c | ||
5310 | @@ -573,6 +573,29 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg) | ||
5311 | get_online_cpus(); | ||
5312 | get_online_mems(); | ||
5313 | |||
5314 | +#ifdef CONFIG_SLUB | ||
5315 | + /* | ||
5316 | + * In case of SLUB, we need to disable empty slab caching to | ||
5317 | + * avoid pinning the offline memory cgroup by freeable kmem | ||
5318 | + * pages charged to it. SLAB doesn't need this, as it | ||
5319 | + * periodically purges unused slabs. | ||
5320 | + */ | ||
5321 | + mutex_lock(&slab_mutex); | ||
5322 | + list_for_each_entry(s, &slab_caches, list) { | ||
5323 | + c = is_root_cache(s) ? cache_from_memcg_idx(s, idx) : NULL; | ||
5324 | + if (c) { | ||
5325 | + c->cpu_partial = 0; | ||
5326 | + c->min_partial = 0; | ||
5327 | + } | ||
5328 | + } | ||
5329 | + mutex_unlock(&slab_mutex); | ||
5330 | + /* | ||
5331 | + * kmem_cache->cpu_partial is checked locklessly (see | ||
5332 | + * put_cpu_partial()). Make sure the change is visible. | ||
5333 | + */ | ||
5334 | + synchronize_sched(); | ||
5335 | +#endif | ||
5336 | + | ||
5337 | mutex_lock(&slab_mutex); | ||
5338 | list_for_each_entry(s, &slab_caches, list) { | ||
5339 | if (!is_root_cache(s)) | ||
5340 | @@ -584,7 +607,7 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg) | ||
5341 | if (!c) | ||
5342 | continue; | ||
5343 | |||
5344 | - __kmem_cache_shrink(c, true); | ||
5345 | + __kmem_cache_shrink(c); | ||
5346 | arr->entries[idx] = NULL; | ||
5347 | } | ||
5348 | mutex_unlock(&slab_mutex); | ||
5349 | @@ -755,7 +778,7 @@ int kmem_cache_shrink(struct kmem_cache *cachep) | ||
5350 | get_online_cpus(); | ||
5351 | get_online_mems(); | ||
5352 | kasan_cache_shrink(cachep); | ||
5353 | - ret = __kmem_cache_shrink(cachep, false); | ||
5354 | + ret = __kmem_cache_shrink(cachep); | ||
5355 | put_online_mems(); | ||
5356 | put_online_cpus(); | ||
5357 | return ret; | ||
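The block moved into memcg_deactivate_kmem_caches() zeroes cpu_partial and min_partial for all child caches first and then issues a single synchronize_sched(), since put_cpu_partial() reads cpu_partial locklessly and the writer must guarantee the new value is observed before freeing what it protected. A loose userspace analog of the lockless-read/ordered-write shape, with C11 release/acquire standing in for the grace period (the analogy is approximate: synchronize_sched() waits for in-flight readers rather than merely ordering a store):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int cpu_partial = 8;

/* Hot path: checked locklessly, like put_cpu_partial() in SLUB. */
static int may_cache_partial_slab(void)
{
	return atomic_load_explicit(&cpu_partial, memory_order_acquire) > 0;
}

/* Deactivation path: publish the new limit before relying on it. */
static void deactivate(void)
{
	atomic_store_explicit(&cpu_partial, 0, memory_order_release);
	/* kernel: synchronize_sched() additionally waits out readers
	 * that sampled the old value */
}

int main(void)
{
	deactivate();
	printf("caching allowed: %d\n", may_cache_partial_slab());
	return 0;
}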
5358 | diff --git a/mm/slob.c b/mm/slob.c | ||
5359 | index 5ec158054ffe..eac04d4357ec 100644 | ||
5360 | --- a/mm/slob.c | ||
5361 | +++ b/mm/slob.c | ||
5362 | @@ -634,7 +634,7 @@ void __kmem_cache_release(struct kmem_cache *c) | ||
5363 | { | ||
5364 | } | ||
5365 | |||
5366 | -int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate) | ||
5367 | +int __kmem_cache_shrink(struct kmem_cache *d) | ||
5368 | { | ||
5369 | return 0; | ||
5370 | } | ||
5371 | diff --git a/mm/slub.c b/mm/slub.c | ||
5372 | index 7aa0e97af928..58c7526f8de2 100644 | ||
5373 | --- a/mm/slub.c | ||
5374 | +++ b/mm/slub.c | ||
5375 | @@ -3887,7 +3887,7 @@ EXPORT_SYMBOL(kfree); | ||
5376 | * being allocated from last increasing the chance that the last objects | ||
5377 | * are freed in them. | ||
5378 | */ | ||
5379 | -int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate) | ||
5380 | +int __kmem_cache_shrink(struct kmem_cache *s) | ||
5381 | { | ||
5382 | int node; | ||
5383 | int i; | ||
5384 | @@ -3899,21 +3899,6 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate) | ||
5385 | unsigned long flags; | ||
5386 | int ret = 0; | ||
5387 | |||
5388 | - if (deactivate) { | ||
5389 | - /* | ||
5390 | - * Disable empty slabs caching. Used to avoid pinning offline | ||
5391 | - * memory cgroups by kmem pages that can be freed. | ||
5392 | - */ | ||
5393 | - s->cpu_partial = 0; | ||
5394 | - s->min_partial = 0; | ||
5395 | - | ||
5396 | - /* | ||
5397 | - * s->cpu_partial is checked locklessly (see put_cpu_partial), | ||
5398 | - * so we have to make sure the change is visible. | ||
5399 | - */ | ||
5400 | - synchronize_sched(); | ||
5401 | - } | ||
5402 | - | ||
5403 | flush_all(s); | ||
5404 | for_each_kmem_cache_node(s, node, n) { | ||
5405 | INIT_LIST_HEAD(&discard); | ||
5406 | @@ -3970,7 +3955,7 @@ static int slab_mem_going_offline_callback(void *arg) | ||
5407 | |||
5408 | mutex_lock(&slab_mutex); | ||
5409 | list_for_each_entry(s, &slab_caches, list) | ||
5410 | - __kmem_cache_shrink(s, false); | ||
5411 | + __kmem_cache_shrink(s); | ||
5412 | mutex_unlock(&slab_mutex); | ||
5413 | |||
5414 | return 0; | ||
5415 | diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c | ||
5416 | index 7cb41aee4c82..8498e3503605 100644 | ||
5417 | --- a/net/bridge/br_forward.c | ||
5418 | +++ b/net/bridge/br_forward.c | ||
5419 | @@ -186,8 +186,9 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb, | ||
5420 | /* Do not flood unicast traffic to ports that turn it off */ | ||
5421 | if (pkt_type == BR_PKT_UNICAST && !(p->flags & BR_FLOOD)) | ||
5422 | continue; | ||
5423 | + /* Do not flood if mc off, except for traffic we originate */ | ||
5424 | if (pkt_type == BR_PKT_MULTICAST && | ||
5425 | - !(p->flags & BR_MCAST_FLOOD)) | ||
5426 | + !(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev) | ||
5427 | continue; | ||
5428 | |||
5429 | /* Do not flood to ports that enable proxy ARP */ | ||
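
The br_forward.c change carves one exception out of the multicast flood policy: a port with BR_MCAST_FLOOD cleared still receives multicast that the bridge device itself originated (skb->dev == br->dev). The decision reduces to a two-input predicate; a throwaway model with an invented flag value:

    #include <stdbool.h>
    #include <stdio.h>

    #define BR_MCAST_FLOOD  0x2     /* value made up for this sketch */

    /* flood multicast to this port? policy flag OR locally originated */
    static bool flood_mcast(unsigned long port_flags, bool from_bridge_dev)
    {
            return (port_flags & BR_MCAST_FLOOD) || from_bridge_dev;
    }

    int main(void)
    {
            printf("flag off, remote: %d\n", flood_mcast(0, false));  /* 0 */
            printf("flag off, local:  %d\n", flood_mcast(0, true));   /* 1 */
            printf("flag on,  remote: %d\n",
                   flood_mcast(BR_MCAST_FLOOD, false));               /* 1 */
            return 0;
    }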
5430 | diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c | ||
5431 | index 855b72fbe1da..267b46af407f 100644 | ||
5432 | --- a/net/bridge/br_input.c | ||
5433 | +++ b/net/bridge/br_input.c | ||
5434 | @@ -29,6 +29,7 @@ EXPORT_SYMBOL(br_should_route_hook); | ||
5435 | static int | ||
5436 | br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb) | ||
5437 | { | ||
5438 | + br_drop_fake_rtable(skb); | ||
5439 | return netif_receive_skb(skb); | ||
5440 | } | ||
5441 | |||
5442 | diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c | ||
5443 | index 7fbdbae58e65..aa1df1a10dd7 100644 | ||
5444 | --- a/net/bridge/br_netfilter_hooks.c | ||
5445 | +++ b/net/bridge/br_netfilter_hooks.c | ||
5446 | @@ -521,21 +521,6 @@ static unsigned int br_nf_pre_routing(void *priv, | ||
5447 | } | ||
5448 | |||
5449 | |||
5450 | -/* PF_BRIDGE/LOCAL_IN ************************************************/ | ||
5451 | -/* The packet is locally destined, which requires a real | ||
5452 | - * dst_entry, so detach the fake one. On the way up, the | ||
5453 | - * packet would pass through PRE_ROUTING again (which already | ||
5454 | - * took place when the packet entered the bridge), but we | ||
5455 | - * register an IPv4 PRE_ROUTING 'sabotage' hook that will | ||
5456 | - * prevent this from happening. */ | ||
5457 | -static unsigned int br_nf_local_in(void *priv, | ||
5458 | - struct sk_buff *skb, | ||
5459 | - const struct nf_hook_state *state) | ||
5460 | -{ | ||
5461 | - br_drop_fake_rtable(skb); | ||
5462 | - return NF_ACCEPT; | ||
5463 | -} | ||
5464 | - | ||
5465 | /* PF_BRIDGE/FORWARD *************************************************/ | ||
5466 | static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) | ||
5467 | { | ||
5468 | @@ -906,12 +891,6 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = { | ||
5469 | .priority = NF_BR_PRI_BRNF, | ||
5470 | }, | ||
5471 | { | ||
5472 | - .hook = br_nf_local_in, | ||
5473 | - .pf = NFPROTO_BRIDGE, | ||
5474 | - .hooknum = NF_BR_LOCAL_IN, | ||
5475 | - .priority = NF_BR_PRI_BRNF, | ||
5476 | - }, | ||
5477 | - { | ||
5478 | .hook = br_nf_forward_ip, | ||
5479 | .pf = NFPROTO_BRIDGE, | ||
5480 | .hooknum = NF_BR_FORWARD, | ||
5481 | diff --git a/net/core/dev.c b/net/core/dev.c | ||
5482 | index 60b0a6049e72..2e04fd188081 100644 | ||
5483 | --- a/net/core/dev.c | ||
5484 | +++ b/net/core/dev.c | ||
5485 | @@ -1697,27 +1697,54 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue); | ||
5486 | static struct static_key netstamp_needed __read_mostly; | ||
5487 | #ifdef HAVE_JUMP_LABEL | ||
5488 | static atomic_t netstamp_needed_deferred; | ||
5489 | +static atomic_t netstamp_wanted; | ||
5490 | static void netstamp_clear(struct work_struct *work) | ||
5491 | { | ||
5492 | int deferred = atomic_xchg(&netstamp_needed_deferred, 0); | ||
5493 | + int wanted; | ||
5494 | |||
5495 | - while (deferred--) | ||
5496 | - static_key_slow_dec(&netstamp_needed); | ||
5497 | + wanted = atomic_add_return(deferred, &netstamp_wanted); | ||
5498 | + if (wanted > 0) | ||
5499 | + static_key_enable(&netstamp_needed); | ||
5500 | + else | ||
5501 | + static_key_disable(&netstamp_needed); | ||
5502 | } | ||
5503 | static DECLARE_WORK(netstamp_work, netstamp_clear); | ||
5504 | #endif | ||
5505 | |||
5506 | void net_enable_timestamp(void) | ||
5507 | { | ||
5508 | +#ifdef HAVE_JUMP_LABEL | ||
5509 | + int wanted; | ||
5510 | + | ||
5511 | + while (1) { | ||
5512 | + wanted = atomic_read(&netstamp_wanted); | ||
5513 | + if (wanted <= 0) | ||
5514 | + break; | ||
5515 | + if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted) | ||
5516 | + return; | ||
5517 | + } | ||
5518 | + atomic_inc(&netstamp_needed_deferred); | ||
5519 | + schedule_work(&netstamp_work); | ||
5520 | +#else | ||
5521 | static_key_slow_inc(&netstamp_needed); | ||
5522 | +#endif | ||
5523 | } | ||
5524 | EXPORT_SYMBOL(net_enable_timestamp); | ||
5525 | |||
5526 | void net_disable_timestamp(void) | ||
5527 | { | ||
5528 | #ifdef HAVE_JUMP_LABEL | ||
5529 | - /* net_disable_timestamp() can be called from non process context */ | ||
5530 | - atomic_inc(&netstamp_needed_deferred); | ||
5531 | + int wanted; | ||
5532 | + | ||
5533 | + while (1) { | ||
5534 | + wanted = atomic_read(&netstamp_wanted); | ||
5535 | + if (wanted <= 1) | ||
5536 | + break; | ||
5537 | + if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted) | ||
5538 | + return; | ||
5539 | + } | ||
5540 | + atomic_dec(&netstamp_needed_deferred); | ||
5541 | schedule_work(&netstamp_work); | ||
5542 | #else | ||
5543 | static_key_slow_dec(&netstamp_needed); | ||
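
The net/core/dev.c rework replaces the bare deferred counter with netstamp_wanted, manipulated by a cmpxchg loop: callers increment it directly while it is positive, and only the 0 <-> 1 transitions are pushed to the workqueue, because flipping a static key can sleep while net_enable_timestamp()/net_disable_timestamp() may be called from atomic context. A compilable userspace sketch of the "increment unless zero" loop, with illustrative names:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int wanted;

    /* Fast path: increment only while the count is already positive.
     * Returns false when the caller must defer the 0 -> 1 transition
     * (the kernel schedules netstamp_work for it). */
    static bool want_timestamp(void)
    {
            int old = atomic_load(&wanted);

            while (old > 0) {
                    /* on failure the CAS reloads 'old' and we retry */
                    if (atomic_compare_exchange_weak(&wanted, &old, old + 1))
                            return true;
            }
            return false;
    }

    int main(void)
    {
            printf("from zero: fast=%d\n", want_timestamp());  /* defer  */
            atomic_store(&wanted, 1);
            printf("from one:  fast=%d\n", want_timestamp());  /* 1 -> 2 */
            return 0;
    }

net_disable_timestamp() uses the mirror-image loop, decrementing unless the count would reach zero, so only the final decrement is deferred.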
5544 | diff --git a/net/core/skbuff.c b/net/core/skbuff.c | ||
5545 | index 1e3e0087245b..f0f462c0573d 100644 | ||
5546 | --- a/net/core/skbuff.c | ||
5547 | +++ b/net/core/skbuff.c | ||
5548 | @@ -3814,13 +3814,14 @@ void skb_complete_tx_timestamp(struct sk_buff *skb, | ||
5549 | if (!skb_may_tx_timestamp(sk, false)) | ||
5550 | return; | ||
5551 | |||
5552 | - /* take a reference to prevent skb_orphan() from freeing the socket */ | ||
5553 | - sock_hold(sk); | ||
5554 | - | ||
5555 | - *skb_hwtstamps(skb) = *hwtstamps; | ||
5556 | - __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); | ||
5557 | - | ||
5558 | - sock_put(sk); | ||
5559 | + /* Take a reference to prevent skb_orphan() from freeing the socket, | ||
5560 | + * but only if the socket refcount is not zero. | ||
5561 | + */ | ||
5562 | + if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) { | ||
5563 | + *skb_hwtstamps(skb) = *hwtstamps; | ||
5564 | + __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); | ||
5565 | + sock_put(sk); | ||
5566 | + } | ||
5567 | } | ||
5568 | EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); | ||
5569 | |||
5570 | @@ -3871,7 +3872,7 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) | ||
5571 | { | ||
5572 | struct sock *sk = skb->sk; | ||
5573 | struct sock_exterr_skb *serr; | ||
5574 | - int err; | ||
5575 | + int err = 1; | ||
5576 | |||
5577 | skb->wifi_acked_valid = 1; | ||
5578 | skb->wifi_acked = acked; | ||
5579 | @@ -3881,14 +3882,15 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) | ||
5580 | serr->ee.ee_errno = ENOMSG; | ||
5581 | serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; | ||
5582 | |||
5583 | - /* take a reference to prevent skb_orphan() from freeing the socket */ | ||
5584 | - sock_hold(sk); | ||
5585 | - | ||
5586 | - err = sock_queue_err_skb(sk, skb); | ||
5587 | + /* Take a reference to prevent skb_orphan() from freeing the socket, | ||
5588 | + * but only if the socket refcount is not zero. | ||
5589 | + */ | ||
5590 | + if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) { | ||
5591 | + err = sock_queue_err_skb(sk, skb); | ||
5592 | + sock_put(sk); | ||
5593 | + } | ||
5594 | if (err) | ||
5595 | kfree_skb(skb); | ||
5596 | - | ||
5597 | - sock_put(sk); | ||
5598 | } | ||
5599 | EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); | ||
5600 | |||
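
Both skbuff.c hunks swap an unconditional sock_hold() for atomic_inc_not_zero(&sk->sk_refcnt): the timestamp completion can race with the last reference going away, so a reference is taken only if the refcount has not already hit zero, and the work is skipped otherwise. A self-contained userspace model of that "tryget", with hypothetical names:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct obj {
            atomic_int refcnt;
    };

    /* Take a reference only if the object is not already dying;
     * the kernel spelling is atomic_inc_not_zero(&sk->sk_refcnt). */
    static bool obj_tryget(struct obj *o)
    {
            int old = atomic_load(&o->refcnt);

            while (old != 0) {
                    if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
                            return true;    /* reference taken */
            }
            return false;                   /* refcount was zero: hands off */
    }

    int main(void)
    {
            struct obj live  = { .refcnt = 1 };
            struct obj dying = { .refcnt = 0 };

            printf("live:  %d\n", obj_tryget(&live));   /* 1 */
            printf("dying: %d\n", obj_tryget(&dying));  /* 0 */
            return 0;
    }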
5601 | diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c | ||
5602 | index f053198e730c..5e3a7302f774 100644 | ||
5603 | --- a/net/dccp/ccids/ccid2.c | ||
5604 | +++ b/net/dccp/ccids/ccid2.c | ||
5605 | @@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk) | ||
5606 | for (i = 0; i < hc->tx_seqbufc; i++) | ||
5607 | kfree(hc->tx_seqbuf[i]); | ||
5608 | hc->tx_seqbufc = 0; | ||
5609 | + dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks); | ||
5610 | } | ||
5611 | |||
5612 | static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | ||
5613 | diff --git a/net/dccp/input.c b/net/dccp/input.c | ||
5614 | index 8fedc2d49770..4a05d7876850 100644 | ||
5615 | --- a/net/dccp/input.c | ||
5616 | +++ b/net/dccp/input.c | ||
5617 | @@ -577,6 +577,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | ||
5618 | struct dccp_sock *dp = dccp_sk(sk); | ||
5619 | struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); | ||
5620 | const int old_state = sk->sk_state; | ||
5621 | + bool acceptable; | ||
5622 | int queued = 0; | ||
5623 | |||
5624 | /* | ||
5625 | @@ -603,8 +604,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | ||
5626 | */ | ||
5627 | if (sk->sk_state == DCCP_LISTEN) { | ||
5628 | if (dh->dccph_type == DCCP_PKT_REQUEST) { | ||
5629 | - if (inet_csk(sk)->icsk_af_ops->conn_request(sk, | ||
5630 | - skb) < 0) | ||
5631 | + /* It is possible that we process SYN packets from backlog, | ||
5632 | + * so we need to make sure to disable BH right there. | ||
5633 | + */ | ||
5634 | + local_bh_disable(); | ||
5635 | + acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0; | ||
5636 | + local_bh_enable(); | ||
5637 | + if (!acceptable) | ||
5638 | return 1; | ||
5639 | consume_skb(skb); | ||
5640 | return 0; | ||
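
dccp_rcv_state_process() may now be invoked in process context while draining the socket backlog, so the conn_request() call is bracketed by local_bh_disable()/local_bh_enable(); the same bracketing is added to tcp_rcv_state_process() further down. There is no userspace softirq, but blocking signals around a non-reentrant call is a loose analogy for the shape of the fix; purely illustrative:

    #include <signal.h>
    #include <stdio.h>

    static int conn_request(void)
    {
            return 0;       /* stand-in for icsk_af_ops->conn_request() */
    }

    int main(void)
    {
            sigset_t all, old;
            int acceptable;

            sigfillset(&all);
            sigprocmask(SIG_BLOCK, &all, &old);    /* ~ local_bh_disable() */
            acceptable = conn_request() >= 0;
            sigprocmask(SIG_SETMASK, &old, NULL);  /* ~ local_bh_enable()  */

            printf("acceptable=%d\n", acceptable);
            return !acceptable;
    }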
5641 | diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c | ||
5642 | index edbe59d203ef..86b0933ecd45 100644 | ||
5643 | --- a/net/dccp/ipv4.c | ||
5644 | +++ b/net/dccp/ipv4.c | ||
5645 | @@ -289,7 +289,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) | ||
5646 | |||
5647 | switch (type) { | ||
5648 | case ICMP_REDIRECT: | ||
5649 | - dccp_do_redirect(skb, sk); | ||
5650 | + if (!sock_owned_by_user(sk)) | ||
5651 | + dccp_do_redirect(skb, sk); | ||
5652 | goto out; | ||
5653 | case ICMP_SOURCE_QUENCH: | ||
5654 | /* Just silently ignore these. */ | ||
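
This dccp_v4_err() hunk, and the matching dccp_v6_err(), tcp_v4_err() and tcp_v6_err() hunks below, add the same guard: a redirect handler running in softirq context must not rewrite the socket's cached route while a process context owns the socket lock, so it simply skips the update. A toy model of the guard, with stand-in fields:

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_sock {
            bool owned_by_user;   /* stands in for sock_owned_by_user() */
            int  route_id;        /* stands in for the cached dst       */
    };

    static void handle_redirect(struct fake_sock *sk, int new_route)
    {
            if (sk->owned_by_user)
                    return;       /* process context active: leave dst alone */
            sk->route_id = new_route;
    }

    int main(void)
    {
            struct fake_sock sk = { .owned_by_user = true, .route_id = 1 };

            handle_redirect(&sk, 2);
            printf("while owned: route %d\n", sk.route_id);  /* still 1 */
            sk.owned_by_user = false;
            handle_redirect(&sk, 2);
            printf("after:       route %d\n", sk.route_id);  /* now 2   */
            return 0;
    }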
5655 | diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c | ||
5656 | index 7506c03a7db9..237d62c493e3 100644 | ||
5657 | --- a/net/dccp/ipv6.c | ||
5658 | +++ b/net/dccp/ipv6.c | ||
5659 | @@ -122,10 +122,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | ||
5660 | np = inet6_sk(sk); | ||
5661 | |||
5662 | if (type == NDISC_REDIRECT) { | ||
5663 | - struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); | ||
5664 | + if (!sock_owned_by_user(sk)) { | ||
5665 | + struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); | ||
5666 | |||
5667 | - if (dst) | ||
5668 | - dst->ops->redirect(dst, sk, skb); | ||
5669 | + if (dst) | ||
5670 | + dst->ops->redirect(dst, sk, skb); | ||
5671 | + } | ||
5672 | goto out; | ||
5673 | } | ||
5674 | |||
5675 | diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c | ||
5676 | index 53eddf99e4f6..39e7e2bca8db 100644 | ||
5677 | --- a/net/dccp/minisocks.c | ||
5678 | +++ b/net/dccp/minisocks.c | ||
5679 | @@ -122,6 +122,7 @@ struct sock *dccp_create_openreq_child(const struct sock *sk, | ||
5680 | /* It is still raw copy of parent, so invalidate | ||
5681 | * destructor and make plain sk_free() */ | ||
5682 | newsk->sk_destruct = NULL; | ||
5683 | + bh_unlock_sock(newsk); | ||
5684 | sk_free(newsk); | ||
5685 | return NULL; | ||
5686 | } | ||
5687 | @@ -145,6 +146,13 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, | ||
5688 | struct dccp_request_sock *dreq = dccp_rsk(req); | ||
5689 | bool own_req; | ||
5690 | |||
5691 | + /* TCP/DCCP listeners became lockless. | ||
5692 | + * DCCP stores complex state in its request_sock, so we need | ||
5693 | + * a protection for them, now this code runs without being protected | ||
5694 | + * by the parent (listener) lock. | ||
5695 | + */ | ||
5696 | + spin_lock_bh(&dreq->dreq_lock); | ||
5697 | + | ||
5698 | /* Check for retransmitted REQUEST */ | ||
5699 | if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) { | ||
5700 | |||
5701 | @@ -159,7 +167,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, | ||
5702 | inet_rtx_syn_ack(sk, req); | ||
5703 | } | ||
5704 | /* Network Duplicate, discard packet */ | ||
5705 | - return NULL; | ||
5706 | + goto out; | ||
5707 | } | ||
5708 | |||
5709 | DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR; | ||
5710 | @@ -185,20 +193,20 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, | ||
5711 | |||
5712 | child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, | ||
5713 | req, &own_req); | ||
5714 | - if (!child) | ||
5715 | - goto listen_overflow; | ||
5716 | - | ||
5717 | - return inet_csk_complete_hashdance(sk, child, req, own_req); | ||
5718 | + if (child) { | ||
5719 | + child = inet_csk_complete_hashdance(sk, child, req, own_req); | ||
5720 | + goto out; | ||
5721 | + } | ||
5722 | |||
5723 | -listen_overflow: | ||
5724 | - dccp_pr_debug("listen_overflow!\n"); | ||
5725 | DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; | ||
5726 | drop: | ||
5727 | if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET) | ||
5728 | req->rsk_ops->send_reset(sk, skb); | ||
5729 | |||
5730 | inet_csk_reqsk_queue_drop(sk, req); | ||
5731 | - return NULL; | ||
5732 | +out: | ||
5733 | + spin_unlock_bh(&dreq->dreq_lock); | ||
5734 | + return child; | ||
5735 | } | ||
5736 | |||
5737 | EXPORT_SYMBOL_GPL(dccp_check_req); | ||
5738 | @@ -249,6 +257,7 @@ int dccp_reqsk_init(struct request_sock *req, | ||
5739 | { | ||
5740 | struct dccp_request_sock *dreq = dccp_rsk(req); | ||
5741 | |||
5742 | + spin_lock_init(&dreq->dreq_lock); | ||
5743 | inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport; | ||
5744 | inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport); | ||
5745 | inet_rsk(req)->acked = 0; | ||
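
The minisocks.c change gives each DCCP request socket its own spinlock (dreq_lock, initialized in dccp_reqsk_init() above) because listeners became lockless and dccp_check_req() can now run concurrently for the same request; every early return in that function accordingly becomes a goto to the unlock. A userspace sketch of per-request locking, using a pthread spinlock in place of dreq_lock (illustrative names; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    struct request {
            pthread_spinlock_t lock;   /* plays the role of dreq_lock */
            int retransmits;           /* "complex state" per request */
    };

    static void request_init(struct request *req)
    {
            pthread_spin_init(&req->lock, PTHREAD_PROCESS_PRIVATE);
            req->retransmits = 0;
    }

    /* may run concurrently for the same request, so take the lock */
    static void handle_retransmit(struct request *req)
    {
            pthread_spin_lock(&req->lock);
            req->retransmits++;
            pthread_spin_unlock(&req->lock);
    }

    int main(void)
    {
            struct request req;

            request_init(&req);
            handle_retransmit(&req);
            printf("retransmits: %d\n", req.retransmits);
            return 0;
    }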
5746 | diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c | ||
5747 | index 215143246e4b..971b9471d427 100644 | ||
5748 | --- a/net/ipv4/af_inet.c | ||
5749 | +++ b/net/ipv4/af_inet.c | ||
5750 | @@ -1460,8 +1460,10 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff) | ||
5751 | int proto = iph->protocol; | ||
5752 | int err = -ENOSYS; | ||
5753 | |||
5754 | - if (skb->encapsulation) | ||
5755 | + if (skb->encapsulation) { | ||
5756 | + skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP)); | ||
5757 | skb_set_inner_network_header(skb, nhoff); | ||
5758 | + } | ||
5759 | |||
5760 | csum_replace2(&iph->check, iph->tot_len, newlen); | ||
5761 | iph->tot_len = newlen; | ||
5762 | diff --git a/net/ipv4/route.c b/net/ipv4/route.c | ||
5763 | index d851cae27dac..17e6fbf30448 100644 | ||
5764 | --- a/net/ipv4/route.c | ||
5765 | +++ b/net/ipv4/route.c | ||
5766 | @@ -1968,6 +1968,7 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, | ||
5767 | { | ||
5768 | int res; | ||
5769 | |||
5770 | + tos &= IPTOS_RT_MASK; | ||
5771 | rcu_read_lock(); | ||
5772 | |||
5773 | /* Multicast recognition logic is moved from route cache to here. | ||
5774 | diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c | ||
5775 | index c71d49ce0c93..ce42ded59958 100644 | ||
5776 | --- a/net/ipv4/tcp_input.c | ||
5777 | +++ b/net/ipv4/tcp_input.c | ||
5778 | @@ -5916,9 +5916,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) | ||
5779 | if (th->syn) { | ||
5780 | if (th->fin) | ||
5781 | goto discard; | ||
5782 | - if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) | ||
5783 | - return 1; | ||
5784 | + /* It is possible that we process SYN packets from backlog, | ||
5785 | + * so we need to make sure to disable BH right there. | ||
5786 | + */ | ||
5787 | + local_bh_disable(); | ||
5788 | + acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0; | ||
5789 | + local_bh_enable(); | ||
5790 | |||
5791 | + if (!acceptable) | ||
5792 | + return 1; | ||
5793 | consume_skb(skb); | ||
5794 | return 0; | ||
5795 | } | ||
5796 | diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c | ||
5797 | index 2259114c7242..6988566dc72f 100644 | ||
5798 | --- a/net/ipv4/tcp_ipv4.c | ||
5799 | +++ b/net/ipv4/tcp_ipv4.c | ||
5800 | @@ -269,10 +269,13 @@ EXPORT_SYMBOL(tcp_v4_connect); | ||
5801 | */ | ||
5802 | void tcp_v4_mtu_reduced(struct sock *sk) | ||
5803 | { | ||
5804 | - struct dst_entry *dst; | ||
5805 | struct inet_sock *inet = inet_sk(sk); | ||
5806 | - u32 mtu = tcp_sk(sk)->mtu_info; | ||
5807 | + struct dst_entry *dst; | ||
5808 | + u32 mtu; | ||
5809 | |||
5810 | + if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) | ||
5811 | + return; | ||
5812 | + mtu = tcp_sk(sk)->mtu_info; | ||
5813 | dst = inet_csk_update_pmtu(sk, mtu); | ||
5814 | if (!dst) | ||
5815 | return; | ||
5816 | @@ -418,7 +421,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | ||
5817 | |||
5818 | switch (type) { | ||
5819 | case ICMP_REDIRECT: | ||
5820 | - do_redirect(icmp_skb, sk); | ||
5821 | + if (!sock_owned_by_user(sk)) | ||
5822 | + do_redirect(icmp_skb, sk); | ||
5823 | goto out; | ||
5824 | case ICMP_SOURCE_QUENCH: | ||
5825 | /* Just silently ignore these. */ | ||
5826 | diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c | ||
5827 | index 3ea1cf804748..b1e65b3b4361 100644 | ||
5828 | --- a/net/ipv4/tcp_timer.c | ||
5829 | +++ b/net/ipv4/tcp_timer.c | ||
5830 | @@ -249,7 +249,8 @@ void tcp_delack_timer_handler(struct sock *sk) | ||
5831 | |||
5832 | sk_mem_reclaim_partial(sk); | ||
5833 | |||
5834 | - if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) | ||
5835 | + if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || | ||
5836 | + !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) | ||
5837 | goto out; | ||
5838 | |||
5839 | if (time_after(icsk->icsk_ack.timeout, jiffies)) { | ||
5840 | @@ -552,7 +553,8 @@ void tcp_write_timer_handler(struct sock *sk) | ||
5841 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
5842 | int event; | ||
5843 | |||
5844 | - if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending) | ||
5845 | + if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || | ||
5846 | + !icsk->icsk_pending) | ||
5847 | goto out; | ||
5848 | |||
5849 | if (time_after(icsk->icsk_timeout, jiffies)) { | ||
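
Both timer handlers above switch from a single equality test to a set-membership test: TCPF_CLOSE and TCPF_LISTEN are by definition (1 << TCP_CLOSE) and (1 << TCP_LISTEN), so "(1 << sk->sk_state) & mask" checks several states in one AND. A standalone illustration with invented state numbers:

    #include <stdio.h>

    /* invented values; in the kernel TCPF_x == (1 << TCP_x) */
    enum { ST_ESTABLISHED = 1, ST_CLOSE = 7, ST_LISTEN = 10 };
    #define STATEF(s)  (1 << (s))

    static int timer_should_bail(int state)
    {
            return !!((1 << state) & (STATEF(ST_CLOSE) | STATEF(ST_LISTEN)));
    }

    int main(void)
    {
            printf("close:       %d\n", timer_should_bail(ST_CLOSE));       /* 1 */
            printf("listen:      %d\n", timer_should_bail(ST_LISTEN));      /* 1 */
            printf("established: %d\n", timer_should_bail(ST_ESTABLISHED)); /* 0 */
            return 0;
    }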
5850 | diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c | ||
5851 | index ef5485204522..8c88a37392d0 100644 | ||
5852 | --- a/net/ipv6/ip6_fib.c | ||
5853 | +++ b/net/ipv6/ip6_fib.c | ||
5854 | @@ -908,6 +908,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, | ||
5855 | ins = &rt->dst.rt6_next; | ||
5856 | iter = *ins; | ||
5857 | while (iter) { | ||
5858 | + if (iter->rt6i_metric > rt->rt6i_metric) | ||
5859 | + break; | ||
5860 | if (rt6_qualify_for_ecmp(iter)) { | ||
5861 | *ins = iter->dst.rt6_next; | ||
5862 | fib6_purge_rt(iter, fn, info->nl_net); | ||
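
The ip6_fib.c hunk relies on fib6 siblings being kept sorted by metric: once the scan meets an entry with a strictly larger metric it can stop, instead of walking on and purging routes that can no longer be equal-cost with the one being inserted. The classic sorted-insert shape, reduced to a toy integer list:

    #include <stdio.h>

    struct rt { int metric; struct rt *next; };

    /* insert keeping the list sorted; stop at the first larger metric */
    static void rt_insert(struct rt **ins, struct rt *rt)
    {
            while (*ins && (*ins)->metric <= rt->metric)
                    ins = &(*ins)->next;
            rt->next = *ins;
            *ins = rt;
    }

    int main(void)
    {
            struct rt a = { 100, 0 }, b = { 256, 0 }, c = { 50, 0 };
            struct rt *head = 0;

            rt_insert(&head, &a);
            rt_insert(&head, &b);
            rt_insert(&head, &c);
            for (struct rt *p = head; p; p = p->next)
                    printf("%d ", p->metric);
            printf("\n");                /* 50 100 256 */
            return 0;
    }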
5863 | diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c | ||
5864 | index fc7b4017ba24..33b04ec2744a 100644 | ||
5865 | --- a/net/ipv6/ip6_offload.c | ||
5866 | +++ b/net/ipv6/ip6_offload.c | ||
5867 | @@ -294,8 +294,10 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff) | ||
5868 | struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff); | ||
5869 | int err = -ENOSYS; | ||
5870 | |||
5871 | - if (skb->encapsulation) | ||
5872 | + if (skb->encapsulation) { | ||
5873 | + skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6)); | ||
5874 | skb_set_inner_network_header(skb, nhoff); | ||
5875 | + } | ||
5876 | |||
5877 | iph->payload_len = htons(skb->len - nhoff - sizeof(*iph)); | ||
5878 | |||
5879 | diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c | ||
5880 | index 9a87bfb2ec16..e27b8fdba5d2 100644 | ||
5881 | --- a/net/ipv6/ip6_output.c | ||
5882 | +++ b/net/ipv6/ip6_output.c | ||
5883 | @@ -757,13 +757,14 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, | ||
5884 | * Fragment the datagram. | ||
5885 | */ | ||
5886 | |||
5887 | - *prevhdr = NEXTHDR_FRAGMENT; | ||
5888 | troom = rt->dst.dev->needed_tailroom; | ||
5889 | |||
5890 | /* | ||
5891 | * Keep copying data until we run out. | ||
5892 | */ | ||
5893 | while (left > 0) { | ||
5894 | + u8 *fragnexthdr_offset; | ||
5895 | + | ||
5896 | len = left; | ||
5897 | /* IF: it doesn't fit, use 'mtu' - the data space left */ | ||
5898 | if (len > mtu) | ||
5899 | @@ -808,6 +809,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, | ||
5900 | */ | ||
5901 | skb_copy_from_linear_data(skb, skb_network_header(frag), hlen); | ||
5902 | |||
5903 | + fragnexthdr_offset = skb_network_header(frag); | ||
5904 | + fragnexthdr_offset += prevhdr - skb_network_header(skb); | ||
5905 | + *fragnexthdr_offset = NEXTHDR_FRAGMENT; | ||
5906 | + | ||
5907 | /* | ||
5908 | * Build fragment header. | ||
5909 | */ | ||
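
The ip6_fragment() fix stops patching NEXTHDR_FRAGMENT into the original skb up front, which corrupted the headers that still had to be copied into every fragment, and instead rewrites the byte inside each fragment after the copy, locating it by reusing the original offset, prevhdr - skb_network_header(skb), in the fragment's buffer. The offset-relocation arithmetic in isolation, with made-up buffers:

    #include <stdio.h>
    #include <string.h>

    #define NEXTHDR_FRAGMENT 44

    int main(void)
    {
            /* made-up 'packet': byte 5 is the Next Header field (6 = TCP) */
            unsigned char orig[16] = { 0x60, 0, 0, 0, 0, 6 };
            unsigned char frag[16];
            unsigned char *prevhdr = &orig[5];   /* found in the original */

            memcpy(frag, orig, sizeof(orig));    /* headers copied per fragment */

            /* relocate the pointer: same offset, fragment's buffer */
            unsigned char *fragnexthdr_offset = frag + (prevhdr - orig);
            *fragnexthdr_offset = NEXTHDR_FRAGMENT;

            printf("orig nexthdr %d, frag nexthdr %d\n", orig[5], frag[5]);
            return 0;
    }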
5910 | diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c | ||
5911 | index c299c1e2bbf0..66c2b4b41793 100644 | ||
5912 | --- a/net/ipv6/ip6_vti.c | ||
5913 | +++ b/net/ipv6/ip6_vti.c | ||
5914 | @@ -691,6 +691,10 @@ vti6_parm_to_user(struct ip6_tnl_parm2 *u, const struct __ip6_tnl_parm *p) | ||
5915 | u->link = p->link; | ||
5916 | u->i_key = p->i_key; | ||
5917 | u->o_key = p->o_key; | ||
5918 | + if (u->i_key) | ||
5919 | + u->i_flags |= GRE_KEY; | ||
5920 | + if (u->o_key) | ||
5921 | + u->o_flags |= GRE_KEY; | ||
5922 | u->proto = p->proto; | ||
5923 | |||
5924 | memcpy(u->name, p->name, sizeof(u->name)); | ||
5925 | diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c | ||
5926 | index 9948b5ce52da..986d4ca38832 100644 | ||
5927 | --- a/net/ipv6/netfilter/nf_conntrack_reasm.c | ||
5928 | +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | ||
5929 | @@ -589,6 +589,7 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) | ||
5930 | hdr = ipv6_hdr(skb); | ||
5931 | fhdr = (struct frag_hdr *)skb_transport_header(skb); | ||
5932 | |||
5933 | + skb_orphan(skb); | ||
5934 | fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, | ||
5935 | skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); | ||
5936 | if (fq == NULL) { | ||
5937 | diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c | ||
5938 | index 667396536feb..b2e61a0e8d0a 100644 | ||
5939 | --- a/net/ipv6/tcp_ipv6.c | ||
5940 | +++ b/net/ipv6/tcp_ipv6.c | ||
5941 | @@ -375,10 +375,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | ||
5942 | np = inet6_sk(sk); | ||
5943 | |||
5944 | if (type == NDISC_REDIRECT) { | ||
5945 | - struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); | ||
5946 | + if (!sock_owned_by_user(sk)) { | ||
5947 | + struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); | ||
5948 | |||
5949 | - if (dst) | ||
5950 | - dst->ops->redirect(dst, sk, skb); | ||
5951 | + if (dst) | ||
5952 | + dst->ops->redirect(dst, sk, skb); | ||
5953 | + } | ||
5954 | goto out; | ||
5955 | } | ||
5956 | |||
5957 | diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c | ||
5958 | index c0f0750639bd..ff750bb334fa 100644 | ||
5959 | --- a/net/l2tp/l2tp_ip.c | ||
5960 | +++ b/net/l2tp/l2tp_ip.c | ||
5961 | @@ -388,7 +388,7 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb) | ||
5962 | drop: | ||
5963 | IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS); | ||
5964 | kfree_skb(skb); | ||
5965 | - return -1; | ||
5966 | + return 0; | ||
5967 | } | ||
5968 | |||
5969 | /* Userspace will call sendmsg() on the tunnel socket to send L2TP | ||
5970 | diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c | ||
5971 | index 5b77377e5a15..1309e2c34764 100644 | ||
5972 | --- a/net/mpls/af_mpls.c | ||
5973 | +++ b/net/mpls/af_mpls.c | ||
5974 | @@ -956,7 +956,8 @@ static void mpls_ifdown(struct net_device *dev, int event) | ||
5975 | /* fall through */ | ||
5976 | case NETDEV_CHANGE: | ||
5977 | nh->nh_flags |= RTNH_F_LINKDOWN; | ||
5978 | - ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1; | ||
5979 | + if (event != NETDEV_UNREGISTER) | ||
5980 | + ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1; | ||
5981 | break; | ||
5982 | } | ||
5983 | if (event == NETDEV_UNREGISTER) | ||
5984 | @@ -1696,6 +1697,7 @@ static void mpls_net_exit(struct net *net) | ||
5985 | for (index = 0; index < platform_labels; index++) { | ||
5986 | struct mpls_route *rt = rtnl_dereference(platform_label[index]); | ||
5987 | RCU_INIT_POINTER(platform_label[index], NULL); | ||
5988 | + mpls_notify_route(net, index, rt, NULL, NULL); | ||
5989 | mpls_rt_free(rt); | ||
5990 | } | ||
5991 | rtnl_unlock(); | ||
5992 | diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c | ||
5993 | index eab210bb1ef0..48386bff8b4e 100644 | ||
5994 | --- a/net/openvswitch/conntrack.c | ||
5995 | +++ b/net/openvswitch/conntrack.c | ||
5996 | @@ -367,7 +367,6 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key, | ||
5997 | } else if (key->eth.type == htons(ETH_P_IPV6)) { | ||
5998 | enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; | ||
5999 | |||
6000 | - skb_orphan(skb); | ||
6001 | memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); | ||
6002 | err = nf_ct_frag6_gather(net, skb, user); | ||
6003 | if (err) { | ||
6004 | diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c | ||
6005 | index 34de326b4f09..f2b04a77258d 100644 | ||
6006 | --- a/net/packet/af_packet.c | ||
6007 | +++ b/net/packet/af_packet.c | ||
6008 | @@ -3140,7 +3140,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, | ||
6009 | int addr_len) | ||
6010 | { | ||
6011 | struct sock *sk = sock->sk; | ||
6012 | - char name[15]; | ||
6013 | + char name[sizeof(uaddr->sa_data) + 1]; | ||
6014 | |||
6015 | /* | ||
6016 | * Check legality | ||
6017 | @@ -3148,7 +3148,11 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, | ||
6018 | |||
6019 | if (addr_len != sizeof(struct sockaddr)) | ||
6020 | return -EINVAL; | ||
6021 | - strlcpy(name, uaddr->sa_data, sizeof(name)); | ||
6022 | + /* uaddr->sa_data comes from the userspace, it's not guaranteed to be | ||
6023 | + * zero-terminated. | ||
6024 | + */ | ||
6025 | + memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); | ||
6026 | + name[sizeof(uaddr->sa_data)] = 0; | ||
6027 | |||
6028 | return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); | ||
6029 | } | ||
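
The af_packet fix treats sa_data as what it is: a fixed 14-byte field from userspace with no guaranteed NUL terminator, so strlcpy(), which runs strlen() on its source, could read past it. The replacement copies the whole field into a buffer one byte larger and terminates explicitly; the same idiom in plain userspace C:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    int main(void)
    {
            struct sockaddr sa;
            char name[sizeof(sa.sa_data) + 1];

            /* fill sa_data completely, with no terminating NUL */
            memset(sa.sa_data, 'x', sizeof(sa.sa_data));

            memcpy(name, sa.sa_data, sizeof(sa.sa_data));
            name[sizeof(sa.sa_data)] = '\0';   /* terminate ourselves */

            printf("len=%zu name=%s\n", strlen(name), name);
            return 0;
    }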
6030 | diff --git a/net/sched/act_api.c b/net/sched/act_api.c | ||
6031 | index c6c2a93cc2a2..c651cfce9be6 100644 | ||
6032 | --- a/net/sched/act_api.c | ||
6033 | +++ b/net/sched/act_api.c | ||
6034 | @@ -820,10 +820,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, | ||
6035 | goto out_module_put; | ||
6036 | |||
6037 | err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops); | ||
6038 | - if (err < 0) | ||
6039 | + if (err <= 0) | ||
6040 | goto out_module_put; | ||
6041 | - if (err == 0) | ||
6042 | - goto noflush_out; | ||
6043 | |||
6044 | nla_nest_end(skb, nest); | ||
6045 | |||
6046 | @@ -840,7 +838,6 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, | ||
6047 | out_module_put: | ||
6048 | module_put(ops->owner); | ||
6049 | err_out: | ||
6050 | -noflush_out: | ||
6051 | kfree_skb(skb); | ||
6052 | return err; | ||
6053 | } | ||
6054 | diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c | ||
6055 | index eae07a2e774d..1191179c0341 100644 | ||
6056 | --- a/net/sched/act_connmark.c | ||
6057 | +++ b/net/sched/act_connmark.c | ||
6058 | @@ -113,6 +113,9 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, | ||
6059 | if (ret < 0) | ||
6060 | return ret; | ||
6061 | |||
6062 | + if (!tb[TCA_CONNMARK_PARMS]) | ||
6063 | + return -EINVAL; | ||
6064 | + | ||
6065 | parm = nla_data(tb[TCA_CONNMARK_PARMS]); | ||
6066 | |||
6067 | if (!tcf_hash_check(tn, parm->index, a, bind)) { | ||
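
tcf_connmark_init() parsed its attribute table and then dereferenced tb[TCA_CONNMARK_PARMS] unconditionally, but nla_parse() leaves absent attributes as NULL entries, so a netlink message without that attribute was a NULL dereference; the added check returns -EINVAL instead. A trivial model of the rule "check the slot before nla_data()", with the table stubbed out:

    #include <stdio.h>

    #define TCA_CONNMARK_PARMS 1
    #define TCA_CONNMARK_MAX   1

    int main(void)
    {
            /* stands in for the tb[] that nla_parse() fills;
             * absent attributes stay NULL */
            const void *tb[TCA_CONNMARK_MAX + 1] = { 0 };

            if (!tb[TCA_CONNMARK_PARMS]) {
                    fprintf(stderr, "missing TCA_CONNMARK_PARMS\n");
                    return 1;              /* the kernel returns -EINVAL */
            }
            printf("parms present\n");     /* only then touch nla_data() */
            return 0;
    }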
6068 | diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c | ||
6069 | index e7d96381c908..f85313d60a4d 100644 | ||
6070 | --- a/net/sched/act_skbmod.c | ||
6071 | +++ b/net/sched/act_skbmod.c | ||
6072 | @@ -228,7 +228,6 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, | ||
6073 | |||
6074 | return skb->len; | ||
6075 | nla_put_failure: | ||
6076 | - rcu_read_unlock(); | ||
6077 | nlmsg_trim(skb, b); | ||
6078 | return -1; | ||
6079 | } | ||
6080 | diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c | ||
6081 | index 41adf362936d..b5c279b22680 100644 | ||
6082 | --- a/net/strparser/strparser.c | ||
6083 | +++ b/net/strparser/strparser.c | ||
6084 | @@ -504,6 +504,7 @@ static int __init strp_mod_init(void) | ||
6085 | |||
6086 | static void __exit strp_mod_exit(void) | ||
6087 | { | ||
6088 | + destroy_workqueue(strp_wq); | ||
6089 | } | ||
6090 | module_init(strp_mod_init); | ||
6091 | module_exit(strp_mod_exit); |
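
strp_mod_init() allocates strp_wq, so the previously empty strp_mod_exit() leaked the workqueue on every module unload; the fix pairs the allocation with destroy_workqueue(). The general init/exit pairing, as a hypothetical kernel-style module skeleton (4.9-era APIs; builds only in-tree, not standalone):

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;

    static int __init example_init(void)
    {
            example_wq = create_singlethread_workqueue("example_wq");
            if (!example_wq)
                    return -ENOMEM;
            return 0;
    }

    static void __exit example_exit(void)
    {
            destroy_workqueue(example_wq);  /* the call the fix adds */
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");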