Magellan Linux

Contents of /trunk/kernel-alx-legacy/patches-4.9/0398-4.9.299-all-fixes.patch



Revision 3700
Mon Oct 24 14:08:14 2022 UTC by niro
File size: 25812 bytes
-linux-4.9.299
1 diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
2 index 481b6a9c25d5a..16ddfd6bd6a1a 100644
3 --- a/Documentation/virtual/kvm/mmu.txt
4 +++ b/Documentation/virtual/kvm/mmu.txt
5 @@ -152,8 +152,8 @@ Shadow pages contain the following information:
6 shadow pages) so role.quadrant takes values in the range 0..3. Each
7 quadrant maps 1GB virtual address space.
8 role.access:
9 - Inherited guest access permissions in the form uwx. Note execute
10 - permission is positive, not negative.
11 + Inherited guest access permissions from the parent ptes in the form uwx.
12 + Note execute permission is positive, not negative.
13 role.invalid:
14 The page is invalid and should not be used. It is a root page that is
15 currently pinned (by a cpu hardware register pointing to it); once it is
16 diff --git a/Makefile b/Makefile
17 index b0f683f18df71..99d37c23495ef 100644
18 --- a/Makefile
19 +++ b/Makefile
20 @@ -1,6 +1,6 @@
21 VERSION = 4
22 PATCHLEVEL = 9
23 -SUBLEVEL = 298
24 +SUBLEVEL = 299
25 EXTRAVERSION =
26 NAME = Roaring Lionus
27
28 diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
29 index bb8f39fe3a225..8df8cdd093e98 100644
30 --- a/arch/arm/Kconfig.debug
31 +++ b/arch/arm/Kconfig.debug
32 @@ -15,30 +15,42 @@ config ARM_PTDUMP
33 kernel.
34 If in doubt, say "N"
35
36 -# RMK wants arm kernels compiled with frame pointers or stack unwinding.
37 -# If you know what you are doing and are willing to live without stack
38 -# traces, you can get a slightly smaller kernel by setting this option to
39 -# n, but then RMK will have to kill you ;).
40 -config FRAME_POINTER
41 - bool
42 - depends on !THUMB2_KERNEL
43 - default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER
44 +choice
45 + prompt "Choose kernel unwinder"
46 + default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER
47 + default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER
48 + help
49 + This determines which method will be used for unwinding kernel stack
50 + traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
51 + livepatch, lockdep, and more.
52 +
53 +config UNWINDER_FRAME_POINTER
54 + bool "Frame pointer unwinder"
55 + depends on !THUMB2_KERNEL && !CC_IS_CLANG
56 + select ARCH_WANT_FRAME_POINTERS
57 + select FRAME_POINTER
58 help
59 - If you say N here, the resulting kernel will be slightly smaller and
60 - faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled,
61 - when a problem occurs with the kernel, the information that is
62 - reported is severely limited.
63 + This option enables the frame pointer unwinder for unwinding
64 + kernel stack traces.
65
66 -config ARM_UNWIND
67 - bool "Enable stack unwinding support (EXPERIMENTAL)"
68 +config UNWINDER_ARM
69 + bool "ARM EABI stack unwinder"
70 depends on AEABI
71 - default y
72 + select ARM_UNWIND
73 help
74 This option enables stack unwinding support in the kernel
75 using the information automatically generated by the
76 compiler. The resulting kernel image is slightly bigger but
77 the performance is not affected. Currently, this feature
78 - only works with EABI compilers. If unsure say Y.
79 + only works with EABI compilers.
80 +
81 +endchoice
82 +
83 +config ARM_UNWIND
84 + bool
85 +
86 +config FRAME_POINTER
87 + bool
88
89 config OLD_MCOUNT
90 bool
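
The Kconfig.debug hunk above turns the implicit FRAME_POINTER/ARM_UNWIND defaults into an explicit "Choose kernel unwinder" choice between UNWINDER_FRAME_POINTER and UNWINDER_ARM. As a rough idea of what the frame-pointer method relies on, here is a minimal standalone C sketch (invented types and addresses, not kernel code; the real ARM frame record and the EABI unwind tables behind UNWINDER_ARM differ in detail) that walks a chain of frame records to emit a trace:

#include <stdio.h>

/* Assumed frame layout: each frame stores the caller's frame pointer and a
 * return address back to back, which is what frame-pointer unwinding needs. */
struct frame_record {
    struct frame_record *prev_fp;  /* caller's frame pointer */
    unsigned long return_addr;     /* where control returns to */
};

static void walk_stack(const struct frame_record *fp)
{
    while (fp) {                   /* one trace entry per frame */
        printf("  [<%lx>]\n", fp->return_addr);
        fp = fp->prev_fp;
    }
}

int main(void)
{
    /* A hand-built three-frame chain standing in for a real stack. */
    struct frame_record f0 = { NULL, 0xc0001000 };
    struct frame_record f1 = { &f0, 0xc0002000 };
    struct frame_record f2 = { &f1, 0xc0003000 };

    walk_stack(&f2);
    return 0;
}

The EABI unwinder recovers the same information from compiler-generated unwind tables instead of a dedicated frame-pointer register, which is why the help text above notes a slightly bigger image with no runtime performance cost.
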
91 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
92 index e03225e707b26..d92c7758efad1 100644
93 --- a/arch/x86/kvm/paging_tmpl.h
94 +++ b/arch/x86/kvm/paging_tmpl.h
95 @@ -100,8 +100,8 @@ struct guest_walker {
96 gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
97 pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
98 bool pte_writable[PT_MAX_FULL_LEVELS];
99 - unsigned pt_access;
100 - unsigned pte_access;
101 + unsigned int pt_access[PT_MAX_FULL_LEVELS];
102 + unsigned int pte_access;
103 gfn_t gfn;
104 struct x86_exception fault;
105 };
106 @@ -285,9 +285,11 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
107 pt_element_t pte;
108 pt_element_t __user *uninitialized_var(ptep_user);
109 gfn_t table_gfn;
110 - unsigned index, pt_access, pte_access, accessed_dirty, pte_pkey;
111 + u64 pt_access, pte_access;
112 + unsigned index, accessed_dirty, pte_pkey;
113 gpa_t pte_gpa;
114 int offset;
115 + u64 walk_nx_mask = 0;
116 const int write_fault = access & PFERR_WRITE_MASK;
117 const int user_fault = access & PFERR_USER_MASK;
118 const int fetch_fault = access & PFERR_FETCH_MASK;
119 @@ -301,6 +303,7 @@ retry_walk:
120 pte = mmu->get_cr3(vcpu);
121
122 #if PTTYPE == 64
123 + walk_nx_mask = 1ULL << PT64_NX_SHIFT;
124 if (walker->level == PT32E_ROOT_LEVEL) {
125 pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
126 trace_kvm_mmu_paging_element(pte, walker->level);
127 @@ -312,15 +315,14 @@ retry_walk:
128 walker->max_level = walker->level;
129 ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));
130
131 - accessed_dirty = PT_GUEST_ACCESSED_MASK;
132 - pt_access = pte_access = ACC_ALL;
133 + pte_access = ~0;
134 ++walker->level;
135
136 do {
137 gfn_t real_gfn;
138 unsigned long host_addr;
139
140 - pt_access &= pte_access;
141 + pt_access = pte_access;
142 --walker->level;
143
144 index = PT_INDEX(addr, walker->level);
145 @@ -363,6 +365,12 @@ retry_walk:
146
147 trace_kvm_mmu_paging_element(pte, walker->level);
148
149 + /*
150 + * Inverting the NX it lets us AND it like other
151 + * permission bits.
152 + */
153 + pte_access = pt_access & (pte ^ walk_nx_mask);
154 +
155 if (unlikely(!FNAME(is_present_gpte)(pte)))
156 goto error;
157
158 @@ -371,14 +379,18 @@ retry_walk:
159 goto error;
160 }
161
162 - accessed_dirty &= pte;
163 - pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
164 -
165 walker->ptes[walker->level - 1] = pte;
166 +
167 + /* Convert to ACC_*_MASK flags for struct guest_walker. */
168 + walker->pt_access[walker->level - 1] = FNAME(gpte_access)(vcpu, pt_access ^ walk_nx_mask);
169 } while (!is_last_gpte(mmu, walker->level, pte));
170
171 pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
172 - errcode = permission_fault(vcpu, mmu, pte_access, pte_pkey, access);
173 + accessed_dirty = pte_access & PT_GUEST_ACCESSED_MASK;
174 +
175 + /* Convert to ACC_*_MASK flags for struct guest_walker. */
176 + walker->pte_access = FNAME(gpte_access)(vcpu, pte_access ^ walk_nx_mask);
177 + errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
178 if (unlikely(errcode))
179 goto error;
180
181 @@ -395,7 +407,7 @@ retry_walk:
182 walker->gfn = real_gpa >> PAGE_SHIFT;
183
184 if (!write_fault)
185 - FNAME(protect_clean_gpte)(&pte_access, pte);
186 + FNAME(protect_clean_gpte)(&walker->pte_access, pte);
187 else
188 /*
189 * On a write fault, fold the dirty bit into accessed_dirty.
190 @@ -413,10 +425,9 @@ retry_walk:
191 goto retry_walk;
192 }
193
194 - walker->pt_access = pt_access;
195 - walker->pte_access = pte_access;
196 pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
197 - __func__, (u64)pte, pte_access, pt_access);
198 + __func__, (u64)pte, walker->pte_access,
199 + walker->pt_access[walker->level - 1]);
200 return 1;
201
202 error:
203 @@ -444,7 +455,7 @@ error:
204 */
205 if (!(errcode & PFERR_RSVD_MASK)) {
206 vcpu->arch.exit_qualification &= 0x187;
207 - vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3;
208 + vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
209 }
210 #endif
211 walker->fault.address = addr;
212 @@ -578,7 +589,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
213 {
214 struct kvm_mmu_page *sp = NULL;
215 struct kvm_shadow_walk_iterator it;
216 - unsigned direct_access, access = gw->pt_access;
217 + unsigned int direct_access, access;
218 int top_level, ret;
219 gfn_t gfn, base_gfn;
220
221 @@ -610,6 +621,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
222 sp = NULL;
223 if (!is_shadow_present_pte(*it.sptep)) {
224 table_gfn = gw->table_gfn[it.level - 2];
225 + access = gw->pt_access[it.level - 2];
226 sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
227 false, access);
228 }
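
The paging_tmpl.h hunks above record a per-level pt_access[] and lean on the trick described in the new comment: once the NX bit is inverted, a set bit uniformly means "allowed", so the effective permissions of a whole walk are just the AND of every level. A minimal standalone C model of that idea (simplified bit layout, not KVM code):

#include <stdint.h>
#include <stdio.h>

#define PTE_WRITE  (1ull << 1)
#define PTE_USER   (1ull << 2)
#define PTE_NX     (1ull << 63)   /* set means "no execute" */
#define NX_MASK    PTE_NX         /* stand-in for walk_nx_mask */

int main(void)
{
    /* Two levels of a made-up walk: the upper level allows execute,
     * the leaf forbids it. */
    uint64_t pde = PTE_WRITE | PTE_USER;            /* NX clear */
    uint64_t pte = PTE_WRITE | PTE_USER | PTE_NX;   /* NX set   */

    /* Invert NX so "1" means "permitted" for every bit... */
    uint64_t access = ~0ull;
    access &= pde ^ NX_MASK;
    access &= pte ^ NX_MASK;

    /* ...then a single AND across levels yields the effective rights;
     * the stricter level wins for execute. */
    printf("write=%d user=%d exec=%d\n",
           !!(access & PTE_WRITE), !!(access & PTE_USER),
           !!(access & NX_MASK));
    return 0;
}

This is why the walker can start from pte_access = ~0 and simply AND in each NX-inverted entry as it descends.
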
229 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
230 index c4f155663ca9a..14cd0a742e794 100644
231 --- a/drivers/gpu/drm/i915/i915_drv.h
232 +++ b/drivers/gpu/drm/i915/i915_drv.h
233 @@ -1763,6 +1763,8 @@ struct drm_i915_private {
234
235 struct intel_uncore uncore;
236
237 + struct mutex tlb_invalidate_lock;
238 +
239 struct i915_virtual_gpu vgpu;
240
241 struct intel_gvt gvt;
242 @@ -2211,7 +2213,8 @@ struct drm_i915_gem_object {
243 * rendering and so a non-zero seqno), and is not set if it is on
244 * inactive (ready to be unbound) list.
245 */
246 -#define I915_BO_ACTIVE_SHIFT 0
247 +#define I915_BO_WAS_BOUND_BIT 0
248 +#define I915_BO_ACTIVE_SHIFT 1
249 #define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
250 #define __I915_BO_ACTIVE(bo) \
251 ((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
252 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
253 index 3fb4f9acacba0..9265ac5774c25 100644
254 --- a/drivers/gpu/drm/i915/i915_gem.c
255 +++ b/drivers/gpu/drm/i915/i915_gem.c
256 @@ -2185,6 +2185,67 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
257 kfree(obj->pages);
258 }
259
260 +static int
261 +__intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
262 + i915_reg_t reg,
263 + const u32 mask,
264 + const u32 value,
265 + const unsigned int timeout_us,
266 + const unsigned int timeout_ms)
267 +{
268 +#define done ((I915_READ_FW(reg) & mask) == value)
269 + int ret = wait_for_us(done, timeout_us);
270 + if (ret)
271 + ret = wait_for(done, timeout_ms);
272 + return ret;
273 +#undef done
274 +}
275 +
276 +static void invalidate_tlbs(struct drm_i915_private *dev_priv)
277 +{
278 + static const i915_reg_t gen8_regs[] = {
279 + [RCS] = GEN8_RTCR,
280 + [VCS] = GEN8_M1TCR,
281 + [VCS2] = GEN8_M2TCR,
282 + [VECS] = GEN8_VTCR,
283 + [BCS] = GEN8_BTCR,
284 + };
285 + struct intel_engine_cs *engine;
286 +
287 + if (INTEL_GEN(dev_priv) < 8)
288 + return;
289 +
290 + assert_rpm_wakelock_held(dev_priv);
291 +
292 + mutex_lock(&dev_priv->tlb_invalidate_lock);
293 + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
294 +
295 + for_each_engine(engine, dev_priv) {
296 + /*
297 + * HW architecture suggest typical invalidation time at 40us,
298 + * with pessimistic cases up to 100us and a recommendation to
299 + * cap at 1ms. We go a bit higher just in case.
300 + */
301 + const unsigned int timeout_us = 100;
302 + const unsigned int timeout_ms = 4;
303 + const enum intel_engine_id id = engine->id;
304 +
305 + if (WARN_ON_ONCE(id >= ARRAY_SIZE(gen8_regs) ||
306 + !i915_mmio_reg_offset(gen8_regs[id])))
307 + continue;
308 +
309 + I915_WRITE_FW(gen8_regs[id], 1);
310 + if (__intel_wait_for_register_fw(dev_priv,
311 + gen8_regs[id], 1, 0,
312 + timeout_us, timeout_ms))
313 + DRM_ERROR_RATELIMITED("%s TLB invalidation did not complete in %ums!\n",
314 + engine->name, timeout_ms);
315 + }
316 +
317 + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
318 + mutex_unlock(&dev_priv->tlb_invalidate_lock);
319 +}
320 +
321 int
322 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
323 {
324 @@ -2215,6 +2276,15 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
325 obj->mapping = NULL;
326 }
327
328 + if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
329 + struct drm_i915_private *i915 = to_i915(obj->base.dev);
330 +
331 + if (intel_runtime_pm_get_if_in_use(i915)) {
332 + invalidate_tlbs(i915);
333 + intel_runtime_pm_put(i915);
334 + }
335 + }
336 +
337 ops->put_pages(obj);
338 obj->pages = NULL;
339
340 @@ -4627,6 +4697,8 @@ i915_gem_load_init(struct drm_device *dev)
341
342 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
343
344 + mutex_init(&dev_priv->tlb_invalidate_lock);
345 +
346 spin_lock_init(&dev_priv->fb_tracking.lock);
347 }
348
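
__intel_wait_for_register_fw() above polls the invalidation register with a short busy-wait first (the comment puts typical completion around 40us) and only then falls back to a longer, millisecond-scale wait. A standalone userspace approximation of that two-stage wait, with read_status() and the timeout values standing in for the MMIO read and the i915 wait_for()/wait_for_us() macros:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static bool read_status(void)   /* stand-in for I915_READ_FW(reg) */
{
    static int reads;
    return ++reads > 3;         /* pretend the busy bit clears after a few reads */
}

static long elapsed_us(const struct timespec *start)
{
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);
    return (now.tv_sec - start->tv_sec) * 1000000L +
           (now.tv_nsec - start->tv_nsec) / 1000L;
}

static int wait_for_done(long fast_us, long slow_ms)
{
    struct timespec start;

    clock_gettime(CLOCK_MONOTONIC, &start);
    while (elapsed_us(&start) < fast_us)            /* fast path: busy poll */
        if (read_status())
            return 0;
    while (elapsed_us(&start) < slow_ms * 1000L) {  /* slow path: sleep and re-check */
        if (read_status())
            return 0;
        usleep(1000);
    }
    return -1;   /* timed out, like the DRM_ERROR_RATELIMITED case */
}

int main(void)
{
    printf("wait result: %d\n", wait_for_done(100, 4));
    return 0;
}
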
349 diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
350 index 16f56f14f4d06..edaff73b7aa9d 100644
351 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
352 +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
353 @@ -3685,6 +3685,10 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
354 return ret;
355
356 vma->flags |= bind_flags;
357 +
358 + if (vma->obj)
359 + set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
360 +
361 return 0;
362 }
363
364 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
365 index 5468e69bf520a..1ff1e33df2c71 100644
366 --- a/drivers/gpu/drm/i915/i915_reg.h
367 +++ b/drivers/gpu/drm/i915/i915_reg.h
368 @@ -1698,6 +1698,12 @@ enum skl_disp_power_wells {
369 #define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
370 #define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
371
372 +#define GEN8_RTCR _MMIO(0x4260)
373 +#define GEN8_M1TCR _MMIO(0x4264)
374 +#define GEN8_M2TCR _MMIO(0x4268)
375 +#define GEN8_BTCR _MMIO(0x426c)
376 +#define GEN8_VTCR _MMIO(0x4270)
377 +
378 #if 0
379 #define PRB0_TAIL _MMIO(0x2030)
380 #define PRB0_HEAD _MMIO(0x2034)
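
Taken together, the i915 hunks set I915_BO_WAS_BOUND_BIT when a VMA is bound into the GTT and only perform the per-engine TLB invalidation (through GEN8_RTCR and friends) when the pages of a previously bound object are released. The gist of that gating, as a simplified standalone sketch with invented names rather than the driver's real structures and locking:

#include <stdbool.h>
#include <stdio.h>

struct object {
    bool was_bound;   /* set on bind, cleared when pages are released */
};

static void invalidate_tlbs(void)
{
    puts("invalidating per-engine TLBs");   /* stands in for the MMIO writes */
}

static void bind(struct object *obj)
{
    obj->was_bound = true;   /* the GPU may now hold TLB entries for it */
}

static void put_pages(struct object *obj)
{
    if (obj->was_bound) {    /* flush only objects that were ever bound */
        obj->was_bound = false;
        invalidate_tlbs();
    }
    puts("pages released");
}

int main(void)
{
    struct object a = { false }, b = { false };

    bind(&a);
    put_pages(&a);   /* flushes: the object had GTT bindings */
    put_pages(&b);   /* no flush: the object was never bound */
    return 0;
}

Keeping the flag per object avoids paying the invalidation cost for objects that never had a GPU mapping, while the mutex and runtime-PM reference in the real code keep the MMIO sequence serialised and the device awake.
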
381 diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
382 index 280b5ffea5922..3a373711f5ad9 100644
383 --- a/drivers/media/firewire/firedtv-avc.c
384 +++ b/drivers/media/firewire/firedtv-avc.c
385 @@ -1169,7 +1169,11 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
386 read_pos += program_info_length;
387 write_pos += program_info_length;
388 }
389 - while (read_pos < length) {
390 + while (read_pos + 4 < length) {
391 + if (write_pos + 4 >= sizeof(c->operand) - 4) {
392 + ret = -EINVAL;
393 + goto out;
394 + }
395 c->operand[write_pos++] = msg[read_pos++];
396 c->operand[write_pos++] = msg[read_pos++];
397 c->operand[write_pos++] = msg[read_pos++];
398 @@ -1181,13 +1185,17 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
399 c->operand[write_pos++] = es_info_length >> 8;
400 c->operand[write_pos++] = es_info_length & 0xff;
401 if (es_info_length > 0) {
402 + if (read_pos >= length) {
403 + ret = -EINVAL;
404 + goto out;
405 + }
406 pmt_cmd_id = msg[read_pos++];
407 if (pmt_cmd_id != 1 && pmt_cmd_id != 4)
408 dev_err(fdtv->device, "invalid pmt_cmd_id %d "
409 "at stream level\n", pmt_cmd_id);
410
411 - if (es_info_length > sizeof(c->operand) - 4 -
412 - write_pos) {
413 + if (es_info_length > sizeof(c->operand) - 4 - write_pos ||
414 + es_info_length > length - read_pos) {
415 ret = -EINVAL;
416 goto out;
417 }
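
The avc_ca_pmt() changes above make every copy from the untrusted PMT message check both the bytes remaining in the source and the space left in the fixed-size operand buffer before writing. The same guard, condensed into a standalone helper (OPERAND_SIZE and the function name are illustrative, not the driver's API):

#include <errno.h>
#include <stddef.h>
#include <string.h>

#define OPERAND_SIZE 509   /* stand-in for sizeof(c->operand) */

/* Copy "chunk" descriptor bytes, refusing over-reads and over-writes. */
static int copy_descriptors(unsigned char *dst, size_t write_pos,
                            const unsigned char *src, size_t read_pos,
                            size_t src_len, size_t chunk)
{
    if (read_pos > src_len || chunk > src_len - read_pos)
        return -EINVAL;   /* source message is too short */
    if (write_pos + 4 > OPERAND_SIZE || chunk > OPERAND_SIZE - 4 - write_pos)
        return -EINVAL;   /* operand buffer would overflow */

    memcpy(dst + write_pos, src + read_pos, chunk);
    return 0;
}

A caller that asks for more than what remains on either side gets -EINVAL back instead of corrupting memory, which is the failure mode the missing checks previously allowed.
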
418 diff --git a/drivers/media/firewire/firedtv-ci.c b/drivers/media/firewire/firedtv-ci.c
419 index edbb30fdd9d95..93fb4b7312afc 100644
420 --- a/drivers/media/firewire/firedtv-ci.c
421 +++ b/drivers/media/firewire/firedtv-ci.c
422 @@ -138,6 +138,8 @@ static int fdtv_ca_pmt(struct firedtv *fdtv, void *arg)
423 } else {
424 data_length = msg->msg[3];
425 }
426 + if (data_length > sizeof(msg->msg) - data_pos)
427 + return -EINVAL;
428
429 return avc_ca_pmt(fdtv, &msg->msg[data_pos], data_length);
430 }
431 diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
432 index e3596855a7031..a27865b94416b 100644
433 --- a/drivers/staging/android/ion/ion-ioctl.c
434 +++ b/drivers/staging/android/ion/ion-ioctl.c
435 @@ -30,6 +30,69 @@ union ion_ioctl_arg {
436 struct ion_heap_query query;
437 };
438
439 +/* Must hold the client lock */
440 +static void user_ion_handle_get(struct ion_handle *handle)
441 +{
442 + if (handle->user_ref_count++ == 0)
443 + kref_get(&handle->ref);
444 +}
445 +
446 +/* Must hold the client lock */
447 +static struct ion_handle *user_ion_handle_get_check_overflow(
448 + struct ion_handle *handle)
449 +{
450 + if (handle->user_ref_count + 1 == 0)
451 + return ERR_PTR(-EOVERFLOW);
452 + user_ion_handle_get(handle);
453 + return handle;
454 +}
455 +
456 +/* passes a kref to the user ref count.
457 + * We know we're holding a kref to the object before and
458 + * after this call, so no need to reverify handle.
459 + */
460 +static struct ion_handle *pass_to_user(struct ion_handle *handle)
461 +{
462 + struct ion_client *client = handle->client;
463 + struct ion_handle *ret;
464 +
465 + mutex_lock(&client->lock);
466 + ret = user_ion_handle_get_check_overflow(handle);
467 + ion_handle_put_nolock(handle);
468 + mutex_unlock(&client->lock);
469 + return ret;
470 +}
471 +
472 +/* Must hold the client lock */
473 +static int user_ion_handle_put_nolock(struct ion_handle *handle)
474 +{
475 + int ret;
476 +
477 + if (--handle->user_ref_count == 0)
478 + ret = ion_handle_put_nolock(handle);
479 +
480 + return ret;
481 +}
482 +
483 +static void user_ion_free_nolock(struct ion_client *client,
484 + struct ion_handle *handle)
485 +{
486 + bool valid_handle;
487 +
488 + WARN_ON(client != handle->client);
489 +
490 + valid_handle = ion_handle_validate(client, handle);
491 + if (!valid_handle) {
492 + WARN(1, "%s: invalid handle passed to free.\n", __func__);
493 + return;
494 + }
495 + if (handle->user_ref_count == 0) {
496 + WARN(1, "%s: User does not have access!\n", __func__);
497 + return;
498 + }
499 + user_ion_handle_put_nolock(handle);
500 +}
501 +
502 static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
503 {
504 int ret = 0;
505 @@ -96,16 +159,15 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
506 {
507 struct ion_handle *handle;
508
509 - handle = ion_alloc(client, data.allocation.len,
510 - data.allocation.align,
511 - data.allocation.heap_id_mask,
512 - data.allocation.flags);
513 + handle = __ion_alloc(client, data.allocation.len,
514 + data.allocation.align,
515 + data.allocation.heap_id_mask,
516 + data.allocation.flags, true);
517 if (IS_ERR(handle))
518 return PTR_ERR(handle);
519 -
520 data.allocation.handle = handle->id;
521 -
522 cleanup_handle = handle;
523 + pass_to_user(handle);
524 break;
525 }
526 case ION_IOC_FREE:
527 @@ -118,7 +180,7 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
528 mutex_unlock(&client->lock);
529 return PTR_ERR(handle);
530 }
531 - ion_free_nolock(client, handle);
532 + user_ion_free_nolock(client, handle);
533 ion_handle_put_nolock(handle);
534 mutex_unlock(&client->lock);
535 break;
536 @@ -146,10 +208,16 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
537 struct ion_handle *handle;
538
539 handle = ion_import_dma_buf_fd(client, data.fd.fd);
540 - if (IS_ERR(handle))
541 + if (IS_ERR(handle)) {
542 ret = PTR_ERR(handle);
543 - else
544 + } else {
545 data.handle.handle = handle->id;
546 + handle = pass_to_user(handle);
547 + if (IS_ERR(handle)) {
548 + ret = PTR_ERR(handle);
549 + data.handle.handle = 0;
550 + }
551 + }
552 break;
553 }
554 case ION_IOC_SYNC:
555 @@ -174,10 +242,16 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
556
557 if (dir & _IOC_READ) {
558 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
559 - if (cleanup_handle)
560 - ion_free(client, cleanup_handle);
561 + if (cleanup_handle) {
562 + mutex_lock(&client->lock);
563 + user_ion_free_nolock(client, cleanup_handle);
564 + ion_handle_put_nolock(cleanup_handle);
565 + mutex_unlock(&client->lock);
566 + }
567 return -EFAULT;
568 }
569 }
570 + if (cleanup_handle)
571 + ion_handle_put(cleanup_handle);
572 return ret;
573 }
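
The ion-ioctl.c additions track, per handle, how many references userspace actually owns (user_ref_count) separately from the kernel's kref, so a hostile or buggy ION_IOC_FREE can only drop references that were handed out through the ioctl interface. A toy standalone model of that two-level counting (plain integers instead of struct kref, invented names):

#include <stdio.h>

struct handle {
    unsigned int kref;             /* kernel reference count */
    unsigned int user_ref_count;   /* references owned by userspace */
};

static void user_get(struct handle *h)
{
    if (h->user_ref_count++ == 0)  /* first user ref pins one kref */
        h->kref++;
}

static int user_put(struct handle *h)
{
    if (h->user_ref_count == 0)
        return -1;                 /* userspace never owned this reference */
    if (--h->user_ref_count == 0)
        h->kref--;                 /* last user ref releases its kref */
    return 0;
}

int main(void)
{
    struct handle h = { .kref = 1, .user_ref_count = 0 };   /* kernel's own ref */

    user_get(&h);   /* e.g. ION_IOC_ALLOC handing the handle to userspace */
    user_put(&h);   /* the matching ION_IOC_FREE */
    printf("kref=%u after balanced free\n", h.kref);
    printf("extra free rejected: %d\n", user_put(&h));
    return 0;
}

In the real driver the reject path is the WARN in user_ion_free_nolock(): a free for a handle the user never owned leaves the kernel's references untouched.
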
574 diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
575 index aac9b38b8c25c..b272f2ab87e8f 100644
576 --- a/drivers/staging/android/ion/ion.c
577 +++ b/drivers/staging/android/ion/ion.c
578 @@ -363,8 +363,8 @@ struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
579 return ERR_PTR(-EINVAL);
580 }
581
582 -static bool ion_handle_validate(struct ion_client *client,
583 - struct ion_handle *handle)
584 +bool ion_handle_validate(struct ion_client *client,
585 + struct ion_handle *handle)
586 {
587 WARN_ON(!mutex_is_locked(&client->lock));
588 return idr_find(&client->idr, handle->id) == handle;
589 @@ -401,9 +401,9 @@ static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
590 return 0;
591 }
592
593 -struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
594 - size_t align, unsigned int heap_id_mask,
595 - unsigned int flags)
596 +struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
597 + size_t align, unsigned int heap_id_mask,
598 + unsigned int flags, bool grab_handle)
599 {
600 struct ion_handle *handle;
601 struct ion_device *dev = client->dev;
602 @@ -453,6 +453,8 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
603 return handle;
604
605 mutex_lock(&client->lock);
606 + if (grab_handle)
607 + ion_handle_get(handle);
608 ret = ion_handle_add(client, handle);
609 mutex_unlock(&client->lock);
610 if (ret) {
611 @@ -462,6 +464,13 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
612
613 return handle;
614 }
615 +
616 +struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
617 + size_t align, unsigned int heap_id_mask,
618 + unsigned int flags)
619 +{
620 + return __ion_alloc(client, len, align, heap_id_mask, flags, false);
621 +}
622 EXPORT_SYMBOL(ion_alloc);
623
624 void ion_free_nolock(struct ion_client *client,
625 diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
626 index 93dafb4586e43..cfa50dfb46edc 100644
627 --- a/drivers/staging/android/ion/ion.h
628 +++ b/drivers/staging/android/ion/ion.h
629 @@ -109,6 +109,10 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
630 size_t align, unsigned int heap_id_mask,
631 unsigned int flags);
632
633 +struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
634 + size_t align, unsigned int heap_id_mask,
635 + unsigned int flags, bool grab_handle);
636 +
637 /**
638 * ion_free - free a handle
639 * @client: the client
640 diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
641 index 760e41885448a..e1dd25eab1dbd 100644
642 --- a/drivers/staging/android/ion/ion_priv.h
643 +++ b/drivers/staging/android/ion/ion_priv.h
644 @@ -149,6 +149,7 @@ struct ion_client {
645 */
646 struct ion_handle {
647 struct kref ref;
648 + unsigned int user_ref_count;
649 struct ion_client *client;
650 struct ion_buffer *buffer;
651 struct rb_node node;
652 @@ -459,6 +460,9 @@ int ion_sync_for_device(struct ion_client *client, int fd);
653 struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
654 int id);
655
656 +bool ion_handle_validate(struct ion_client *client,
657 + struct ion_handle *handle);
658 +
659 void ion_free_nolock(struct ion_client *client, struct ion_handle *handle);
660
661 int ion_handle_put_nolock(struct ion_handle *handle);
662 diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
663 index 3ee60c5332179..2fb4633897084 100644
664 --- a/fs/nfs/nfs4client.c
665 +++ b/fs/nfs/nfs4client.c
666 @@ -177,8 +177,11 @@ void nfs40_shutdown_client(struct nfs_client *clp)
667
668 struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
669 {
670 - int err;
671 + char buf[INET6_ADDRSTRLEN + 1];
672 + const char *ip_addr = cl_init->ip_addr;
673 struct nfs_client *clp = nfs_alloc_client(cl_init);
674 + int err;
675 +
676 if (IS_ERR(clp))
677 return clp;
678
679 @@ -202,6 +205,44 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
680 #if IS_ENABLED(CONFIG_NFS_V4_1)
681 init_waitqueue_head(&clp->cl_lock_waitq);
682 #endif
683 +
684 + if (cl_init->minorversion != 0)
685 + __set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
686 + __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
687 + __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
688 +
689 + /*
690 + * Set up the connection to the server before we add add to the
691 + * global list.
692 + */
693 + err = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_GSS_KRB5I);
694 + if (err == -EINVAL)
695 + err = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_UNIX);
696 + if (err < 0)
697 + goto error;
698 +
699 + /* If no clientaddr= option was specified, find a usable cb address */
700 + if (ip_addr == NULL) {
701 + struct sockaddr_storage cb_addr;
702 + struct sockaddr *sap = (struct sockaddr *)&cb_addr;
703 +
704 + err = rpc_localaddr(clp->cl_rpcclient, sap, sizeof(cb_addr));
705 + if (err < 0)
706 + goto error;
707 + err = rpc_ntop(sap, buf, sizeof(buf));
708 + if (err < 0)
709 + goto error;
710 + ip_addr = (const char *)buf;
711 + }
712 + strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
713 +
714 + err = nfs_idmap_new(clp);
715 + if (err < 0) {
716 + dprintk("%s: failed to create idmapper. Error = %d\n",
717 + __func__, err);
718 + goto error;
719 + }
720 + __set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
721 return clp;
722
723 error:
724 @@ -354,8 +395,6 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
725 struct nfs_client *nfs4_init_client(struct nfs_client *clp,
726 const struct nfs_client_initdata *cl_init)
727 {
728 - char buf[INET6_ADDRSTRLEN + 1];
729 - const char *ip_addr = cl_init->ip_addr;
730 struct nfs_client *old;
731 int error;
732
733 @@ -365,43 +404,6 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
734 return clp;
735 }
736
737 - /* Check NFS protocol revision and initialize RPC op vector */
738 - clp->rpc_ops = &nfs_v4_clientops;
739 -
740 - if (clp->cl_minorversion != 0)
741 - __set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
742 - __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
743 - __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
744 -
745 - error = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_GSS_KRB5I);
746 - if (error == -EINVAL)
747 - error = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_UNIX);
748 - if (error < 0)
749 - goto error;
750 -
751 - /* If no clientaddr= option was specified, find a usable cb address */
752 - if (ip_addr == NULL) {
753 - struct sockaddr_storage cb_addr;
754 - struct sockaddr *sap = (struct sockaddr *)&cb_addr;
755 -
756 - error = rpc_localaddr(clp->cl_rpcclient, sap, sizeof(cb_addr));
757 - if (error < 0)
758 - goto error;
759 - error = rpc_ntop(sap, buf, sizeof(buf));
760 - if (error < 0)
761 - goto error;
762 - ip_addr = (const char *)buf;
763 - }
764 - strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
765 -
766 - error = nfs_idmap_new(clp);
767 - if (error < 0) {
768 - dprintk("%s: failed to create idmapper. Error = %d\n",
769 - __func__, error);
770 - goto error;
771 - }
772 - __set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
773 -
774 error = nfs4_init_client_minor_version(clp);
775 if (error < 0)
776 goto error;
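
The nfs4client.c change moves RPC transport creation, callback-address discovery, and idmapper setup from nfs4_init_client() into nfs4_alloc_client(), so (as the new comment says) the connection to the server is established before the client is added to the global list where other mounts can find it. The underlying pattern, finish initialising before publishing, as a small standalone sketch with invented names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct client {
    char addr[64];
    int transport_ready;   /* nonzero only once setup has completed */
    struct client *next;
};

static struct client *clients;   /* shared list that concurrent mounts search */
static pthread_mutex_t clients_lock = PTHREAD_MUTEX_INITIALIZER;

static struct client *alloc_client(const char *addr)
{
    struct client *clp = calloc(1, sizeof(*clp));

    if (!clp)
        return NULL;

    /* Do all the expensive setup before anyone else can find clp... */
    snprintf(clp->addr, sizeof(clp->addr), "%s", addr);
    clp->transport_ready = 1;

    /* ...and only then publish it on the shared list. */
    pthread_mutex_lock(&clients_lock);
    clp->next = clients;
    clients = clp;
    pthread_mutex_unlock(&clients_lock);
    return clp;
}

int main(void)
{
    struct client *clp = alloc_client("192.0.2.1");

    printf("%s ready=%d\n", clp->addr, clp->transport_ready);
    free(clp);
    return 0;
}

Any lookup that finds this client gets one whose transport already works, which is the property the reordering provides for NFSv4 client setup.
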
777 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
778 index bc5ff3a53d4a6..e7addfcd302f4 100644
779 --- a/lib/Kconfig.debug
780 +++ b/lib/Kconfig.debug
781 @@ -1091,7 +1091,7 @@ config LOCKDEP
782 bool
783 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
784 select STACKTRACE
785 - select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE
786 + select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !SCORE
787 select KALLSYMS
788 select KALLSYMS_ALL
789
790 @@ -1670,7 +1670,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
791 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
792 depends on !X86_64
793 select STACKTRACE
794 - select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE
795 + select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !SCORE
796 help
797 Provide stacktrace filter for fault-injection capabilities
798
799 @@ -1679,7 +1679,7 @@ config LATENCYTOP
800 depends on DEBUG_KERNEL
801 depends on STACKTRACE_SUPPORT
802 depends on PROC_FS
803 - select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
804 + select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC
805 select KALLSYMS
806 select KALLSYMS_ALL
807 select STACKTRACE