Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0240-5.4.141-all-fixes.patch

Revision 3635
Mon Oct 24 12:34:12 2022 UTC by niro
File size: 47522 bytes
-sync kernel patches
1 diff --git a/Documentation/virt/kvm/mmu.txt b/Documentation/virt/kvm/mmu.txt
2 index ec072c6bc03f8..da1ac6a6398f6 100644
3 --- a/Documentation/virt/kvm/mmu.txt
4 +++ b/Documentation/virt/kvm/mmu.txt
5 @@ -152,8 +152,8 @@ Shadow pages contain the following information:
6 shadow pages) so role.quadrant takes values in the range 0..3. Each
7 quadrant maps 1GB virtual address space.
8 role.access:
9 - Inherited guest access permissions in the form uwx. Note execute
10 - permission is positive, not negative.
11 + Inherited guest access permissions from the parent ptes in the form uwx.
12 + Note execute permission is positive, not negative.
13 role.invalid:
14 The page is invalid and should not be used. It is a root page that is
15 currently pinned (by a cpu hardware register pointing to it); once it is
16 diff --git a/Makefile b/Makefile
17 index 1cb8f72d4dcea..2bfa11d0aab36 100644
18 --- a/Makefile
19 +++ b/Makefile
20 @@ -1,7 +1,7 @@
21 # SPDX-License-Identifier: GPL-2.0
22 VERSION = 5
23 PATCHLEVEL = 4
24 -SUBLEVEL = 140
25 +SUBLEVEL = 141
26 EXTRAVERSION =
27 NAME = Kleptomaniac Octopus
28
29 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
30 index a20fc1ba607f3..d4a8ad6c6a4bb 100644
31 --- a/arch/x86/kvm/paging_tmpl.h
32 +++ b/arch/x86/kvm/paging_tmpl.h
33 @@ -90,8 +90,8 @@ struct guest_walker {
34 gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
35 pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
36 bool pte_writable[PT_MAX_FULL_LEVELS];
37 - unsigned pt_access;
38 - unsigned pte_access;
39 + unsigned int pt_access[PT_MAX_FULL_LEVELS];
40 + unsigned int pte_access;
41 gfn_t gfn;
42 struct x86_exception fault;
43 };
44 @@ -406,13 +406,15 @@ retry_walk:
45 }
46
47 walker->ptes[walker->level - 1] = pte;
48 +
49 + /* Convert to ACC_*_MASK flags for struct guest_walker. */
50 + walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
51 } while (!is_last_gpte(mmu, walker->level, pte));
52
53 pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
54 accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;
55
56 /* Convert to ACC_*_MASK flags for struct guest_walker. */
57 - walker->pt_access = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
58 walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
59 errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
60 if (unlikely(errcode))
61 @@ -451,7 +453,8 @@ retry_walk:
62 }
63
64 pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
65 - __func__, (u64)pte, walker->pte_access, walker->pt_access);
66 + __func__, (u64)pte, walker->pte_access,
67 + walker->pt_access[walker->level - 1]);
68 return 1;
69
70 error:
71 @@ -620,7 +623,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
72 {
73 struct kvm_mmu_page *sp = NULL;
74 struct kvm_shadow_walk_iterator it;
75 - unsigned direct_access, access = gw->pt_access;
76 + unsigned int direct_access, access;
77 int top_level, ret;
78 gfn_t gfn, base_gfn;
79
80 @@ -652,6 +655,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
81 sp = NULL;
82 if (!is_shadow_present_pte(*it.sptep)) {
83 table_gfn = gw->table_gfn[it.level - 2];
84 + access = gw->pt_access[it.level - 2];
85 sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
86 false, access);
87 }
88 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
89 index 7341d22ed04f1..2a958dcc80f21 100644
90 --- a/arch/x86/kvm/svm.c
91 +++ b/arch/x86/kvm/svm.c
92 @@ -1783,7 +1783,7 @@ static void __sev_asid_free(int asid)
93
94 for_each_possible_cpu(cpu) {
95 sd = per_cpu(svm_data, cpu);
96 - sd->sev_vmcbs[pos] = NULL;
97 + sd->sev_vmcbs[asid] = NULL;
98 }
99 }
100
101 diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
102 index 3d6a6306cec77..639dc8d45e603 100644
103 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c
104 +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
105 @@ -635,10 +635,8 @@ static __poll_t v4l2_m2m_poll_for_data(struct file *file,
106 * If the last buffer was dequeued from the capture queue,
107 * return immediately. DQBUF will return -EPIPE.
108 */
109 - if (dst_q->last_buffer_dequeued) {
110 - spin_unlock_irqrestore(&dst_q->done_lock, flags);
111 - return EPOLLIN | EPOLLRDNORM;
112 - }
113 + if (dst_q->last_buffer_dequeued)
114 + rc |= EPOLLIN | EPOLLRDNORM;
115 }
116 spin_unlock_irqrestore(&dst_q->done_lock, flags);
117
118 diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
119 index 0de52e70abcca..53dbf3e28f1ef 100644
120 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
121 +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
122 @@ -1191,9 +1191,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
123 }
124
125 dev_info(dev,
126 - "Xilinx EmacLite at 0x%08X mapped to 0x%08X, irq=%d\n",
127 - (unsigned int __force)ndev->mem_start,
128 - (unsigned int __force)lp->base_addr, ndev->irq);
129 + "Xilinx EmacLite at 0x%08X mapped to 0x%p, irq=%d\n",
130 + (unsigned int __force)ndev->mem_start, lp->base_addr, ndev->irq);
131 return 0;
132
133 error:
134 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
135 index 61824bbb55887..b7e2b4a0f3c66 100644
136 --- a/drivers/net/ppp/ppp_generic.c
137 +++ b/drivers/net/ppp/ppp_generic.c
138 @@ -283,7 +283,7 @@ static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
139 static int ppp_connect_channel(struct channel *pch, int unit);
140 static int ppp_disconnect_channel(struct channel *pch);
141 static void ppp_destroy_channel(struct channel *pch);
142 -static int unit_get(struct idr *p, void *ptr);
143 +static int unit_get(struct idr *p, void *ptr, int min);
144 static int unit_set(struct idr *p, void *ptr, int n);
145 static void unit_put(struct idr *p, int n);
146 static void *unit_find(struct idr *p, int n);
147 @@ -959,9 +959,20 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
148 mutex_lock(&pn->all_ppp_mutex);
149
150 if (unit < 0) {
151 - ret = unit_get(&pn->units_idr, ppp);
152 + ret = unit_get(&pn->units_idr, ppp, 0);
153 if (ret < 0)
154 goto err;
155 + if (!ifname_is_set) {
156 + while (1) {
157 + snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ret);
158 + if (!__dev_get_by_name(ppp->ppp_net, ppp->dev->name))
159 + break;
160 + unit_put(&pn->units_idr, ret);
161 + ret = unit_get(&pn->units_idr, ppp, ret + 1);
162 + if (ret < 0)
163 + goto err;
164 + }
165 + }
166 } else {
167 /* Caller asked for a specific unit number. Fail with -EEXIST
168 * if unavailable. For backward compatibility, return -EEXIST
169 @@ -3294,9 +3305,9 @@ static int unit_set(struct idr *p, void *ptr, int n)
170 }
171
172 /* get new free unit number and associate pointer with it */
173 -static int unit_get(struct idr *p, void *ptr)
174 +static int unit_get(struct idr *p, void *ptr, int min)
175 {
176 - return idr_alloc(p, ptr, 0, 0, GFP_KERNEL);
177 + return idr_alloc(p, ptr, min, 0, GFP_KERNEL);
178 }
179
180 /* put unit number back to a pool */
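The ppp_generic hunks above fix automatic "ppp<unit>" naming: if a user rename left the default name of a freshly allocated unit occupied, registration could fail on the name conflict instead of trying the next unit. The new min argument is passed straight through to idr_alloc(); a minimal reader's sketch of the semantics the retry loop relies on (not part of the patch itself):

	/*
	 * unit_get() is now a thin wrapper over idr_alloc(), whose 'start'
	 * argument makes it return the smallest unused ID >= start (an 'end'
	 * of 0 means no upper bound). Retrying with ret + 1 after a name
	 * collision therefore always makes forward progress instead of
	 * re-fetching the same unit number forever.
	 */
	static int unit_get(struct idr *p, void *ptr, int min)
	{
		return idr_alloc(p, ptr, min, 0, GFP_KERNEL);
	}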
181 diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
182 index 4b5069f88d786..3a54455d9ddff 100644
183 --- a/drivers/tee/optee/call.c
184 +++ b/drivers/tee/optee/call.c
185 @@ -181,7 +181,7 @@ static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
186 struct optee_msg_arg *ma;
187
188 shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
189 - TEE_SHM_MAPPED);
190 + TEE_SHM_MAPPED | TEE_SHM_PRIV);
191 if (IS_ERR(shm))
192 return shm;
193
194 diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
195 index 432dd38921dd9..4bb4c8f28cbd7 100644
196 --- a/drivers/tee/optee/core.c
197 +++ b/drivers/tee/optee/core.c
198 @@ -254,7 +254,8 @@ static void optee_release(struct tee_context *ctx)
199 if (!ctxdata)
200 return;
201
202 - shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED);
203 + shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg),
204 + TEE_SHM_MAPPED | TEE_SHM_PRIV);
205 if (!IS_ERR(shm)) {
206 arg = tee_shm_get_va(shm, 0);
207 /*
208 diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
209 index b4ade54d1f280..aecf62016e7b8 100644
210 --- a/drivers/tee/optee/rpc.c
211 +++ b/drivers/tee/optee/rpc.c
212 @@ -220,7 +220,7 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
213 shm = cmd_alloc_suppl(ctx, sz);
214 break;
215 case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
216 - shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED);
217 + shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
218 break;
219 default:
220 arg->ret = TEEC_ERROR_BAD_PARAMETERS;
221 @@ -405,7 +405,8 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
222
223 switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
224 case OPTEE_SMC_RPC_FUNC_ALLOC:
225 - shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED);
226 + shm = tee_shm_alloc(ctx, param->a1,
227 + TEE_SHM_MAPPED | TEE_SHM_PRIV);
228 if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
229 reg_pair_from_64(&param->a1, &param->a2, pa);
230 reg_pair_from_64(&param->a4, &param->a5,
231 diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c
232 index da06ce9b9313e..c41a9a501a6e9 100644
233 --- a/drivers/tee/optee/shm_pool.c
234 +++ b/drivers/tee/optee/shm_pool.c
235 @@ -27,7 +27,11 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
236 shm->paddr = page_to_phys(page);
237 shm->size = PAGE_SIZE << order;
238
239 - if (shm->flags & TEE_SHM_DMA_BUF) {
240 + /*
241 + * Shared memory private to the OP-TEE driver doesn't need
242 + * to be registered with OP-TEE.
243 + */
244 + if (!(shm->flags & TEE_SHM_PRIV)) {
245 unsigned int nr_pages = 1 << order, i;
246 struct page **pages;
247
248 @@ -60,7 +64,7 @@ err:
249 static void pool_op_free(struct tee_shm_pool_mgr *poolm,
250 struct tee_shm *shm)
251 {
252 - if (shm->flags & TEE_SHM_DMA_BUF)
253 + if (!(shm->flags & TEE_SHM_PRIV))
254 optee_shm_unregister(shm->ctx, shm);
255
256 free_pages((unsigned long)shm->kaddr, get_order(shm->size));
257 diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
258 index 1b4b4a1ba91d9..d6491e973fa4c 100644
259 --- a/drivers/tee/tee_shm.c
260 +++ b/drivers/tee/tee_shm.c
261 @@ -117,7 +117,7 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
262 return ERR_PTR(-EINVAL);
263 }
264
265 - if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
266 + if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_PRIV))) {
267 dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
268 return ERR_PTR(-EINVAL);
269 }
270 @@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(tee_shm_priv_alloc);
271 */
272 struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
273 {
274 - return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
275 + return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED);
276 }
277 EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
278
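The OP-TEE hunks above (call.c, core.c, rpc.c, shm_pool.c, tee_shm.c) are one logical change: message and RPC buffers that only travel between the driver and OP-TEE are tagged with the new TEE_SHM_PRIV flag, the pool now registers every non-private allocation with the secure world (previously only TEE_SHM_DMA_BUF ones), and tee_shm_alloc_kernel_buf() drops TEE_SHM_DMA_BUF so kernel-client buffers gain registration without implying a dma-buf handle. Condensed, the resulting convention is (a sketch using names from the patch, not additional code in it):

	/* Driver-private argument/RPC buffer: kernel-mapped, never handed to
	 * user space, skipped by pool_op_alloc()'s registration path. */
	shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);

	/* Buffer allocated on behalf of a kernel client: plain mapped memory,
	 * now registered with OP-TEE, with no dma-buf export implied. */
	shm = tee_shm_alloc_kernel_buf(ctx, size);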
279 diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
280 index 03b444f753aa2..4f28122f1bb83 100644
281 --- a/drivers/usb/dwc3/ep0.c
282 +++ b/drivers/usb/dwc3/ep0.c
283 @@ -197,7 +197,7 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
284 int ret;
285
286 spin_lock_irqsave(&dwc->lock, flags);
287 - if (!dep->endpoint.desc) {
288 + if (!dep->endpoint.desc || !dwc->pullups_connected) {
289 dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
290 dep->name);
291 ret = -ESHUTDOWN;
292 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
293 index 9cf66636b19d5..8a3752fcf7b46 100644
294 --- a/drivers/usb/dwc3/gadget.c
295 +++ b/drivers/usb/dwc3/gadget.c
296 @@ -746,8 +746,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
297
298 trace_dwc3_gadget_ep_disable(dep);
299
300 - dwc3_remove_requests(dwc, dep);
301 -
302 /* make sure HW endpoint isn't stalled */
303 if (dep->flags & DWC3_EP_STALL)
304 __dwc3_gadget_ep_set_halt(dep, 0, false);
305 @@ -756,16 +754,18 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
306 reg &= ~DWC3_DALEPENA_EP(dep->number);
307 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
308
309 - dep->stream_capable = false;
310 - dep->type = 0;
311 - dep->flags = 0;
312 -
313 /* Clear out the ep descriptors for non-ep0 */
314 if (dep->number > 1) {
315 dep->endpoint.comp_desc = NULL;
316 dep->endpoint.desc = NULL;
317 }
318
319 + dwc3_remove_requests(dwc, dep);
320 +
321 + dep->stream_capable = false;
322 + dep->type = 0;
323 + dep->flags = 0;
324 +
325 return 0;
326 }
327
328 @@ -1511,7 +1511,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
329 {
330 struct dwc3 *dwc = dep->dwc;
331
332 - if (!dep->endpoint.desc) {
333 + if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
334 dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
335 dep->name);
336 return -ESHUTDOWN;
337 @@ -1931,6 +1931,21 @@ static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
338 return 0;
339 }
340
341 +static void dwc3_stop_active_transfers(struct dwc3 *dwc)
342 +{
343 + u32 epnum;
344 +
345 + for (epnum = 2; epnum < dwc->num_eps; epnum++) {
346 + struct dwc3_ep *dep;
347 +
348 + dep = dwc->eps[epnum];
349 + if (!dep)
350 + continue;
351 +
352 + dwc3_remove_requests(dwc, dep);
353 + }
354 +}
355 +
356 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
357 {
358 u32 reg;
359 @@ -1976,6 +1991,10 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
360 return 0;
361 }
362
363 +static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
364 +static void __dwc3_gadget_stop(struct dwc3 *dwc);
365 +static int __dwc3_gadget_start(struct dwc3 *dwc);
366 +
367 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
368 {
369 struct dwc3 *dwc = gadget_to_dwc(g);
370 @@ -1999,9 +2018,73 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
371 }
372 }
373
374 + /*
375 + * Avoid issuing a runtime resume if the device is already in the
376 + * suspended state during gadget disconnect. DWC3 gadget was already
377 + * halted/stopped during runtime suspend.
378 + */
379 + if (!is_on) {
380 + pm_runtime_barrier(dwc->dev);
381 + if (pm_runtime_suspended(dwc->dev))
382 + return 0;
383 + }
384 +
385 + /*
386 + * Check the return value for successful resume, or error. For a
387 + * successful resume, the DWC3 runtime PM resume routine will handle
388 + * the run stop sequence, so avoid duplicate operations here.
389 + */
390 + ret = pm_runtime_get_sync(dwc->dev);
391 + if (!ret || ret < 0) {
392 + pm_runtime_put(dwc->dev);
393 + return 0;
394 + }
395 +
396 + /*
397 + * Synchronize and disable any further event handling while controller
398 + * is being enabled/disabled.
399 + */
400 + disable_irq(dwc->irq_gadget);
401 +
402 spin_lock_irqsave(&dwc->lock, flags);
403 +
404 + if (!is_on) {
405 + u32 count;
406 +
407 + dwc->connected = false;
408 + /*
409 + * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
410 + * Section 4.1.8 Table 4-7, it states that for a device-initiated
411 + * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
412 + * command for any active transfers" before clearing the RunStop
413 + * bit.
414 + */
415 + dwc3_stop_active_transfers(dwc);
416 + __dwc3_gadget_stop(dwc);
417 +
418 + /*
419 + * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
420 + * Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the
421 + * "software needs to acknowledge the events that are generated
422 + * (by writing to GEVNTCOUNTn) while it is waiting for this bit
423 + * to be set to '1'."
424 + */
425 + count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
426 + count &= DWC3_GEVNTCOUNT_MASK;
427 + if (count > 0) {
428 + dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
429 + dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
430 + dwc->ev_buf->length;
431 + }
432 + } else {
433 + __dwc3_gadget_start(dwc);
434 + }
435 +
436 ret = dwc3_gadget_run_stop(dwc, is_on, false);
437 spin_unlock_irqrestore(&dwc->lock, flags);
438 + enable_irq(dwc->irq_gadget);
439 +
440 + pm_runtime_put(dwc->dev);
441
442 return ret;
443 }
444 @@ -2174,10 +2257,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
445 }
446
447 dwc->gadget_driver = driver;
448 -
449 - if (pm_runtime_active(dwc->dev))
450 - __dwc3_gadget_start(dwc);
451 -
452 spin_unlock_irqrestore(&dwc->lock, flags);
453
454 return 0;
455 @@ -2203,13 +2282,6 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
456 unsigned long flags;
457
458 spin_lock_irqsave(&dwc->lock, flags);
459 -
460 - if (pm_runtime_suspended(dwc->dev))
461 - goto out;
462 -
463 - __dwc3_gadget_stop(dwc);
464 -
465 -out:
466 dwc->gadget_driver = NULL;
467 spin_unlock_irqrestore(&dwc->lock, flags);
468
469 @@ -2995,8 +3067,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
470 {
471 u32 reg;
472
473 - dwc->connected = true;
474 -
475 /*
476 * Ideally, dwc3_reset_gadget() would trigger the function
477 * drivers to stop any active transfers through ep disable.
478 @@ -3038,6 +3108,14 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
479 }
480
481 dwc3_reset_gadget(dwc);
482 + /*
483 + * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
484 + * Section 4.1.2 Table 4-2, it states that during a USB reset, the SW
485 + * needs to ensure that it sends "a DEPENDXFER command for any active
486 + * transfers."
487 + */
488 + dwc3_stop_active_transfers(dwc);
489 + dwc->connected = true;
490
491 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
492 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
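The reworked pullup path above depends on a runtime-PM return convention that its in-line comment only alludes to; spelled out as a reader's annotation (not part of the patch):

	/*
	 * pm_runtime_get_sync() returns a negative errno on failure, 0 when it
	 * actually resumed the device (so the dwc3 runtime-resume callback has
	 * already performed the run/stop sequence), and 1 when the device was
	 * already active. Only the "already active" case falls through to the
	 * manual stop-transfers/run-stop sequence.
	 */
	ret = pm_runtime_get_sync(dwc->dev);
	if (!ret || ret < 0) {
		pm_runtime_put(dwc->dev);
		return 0;
	}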
493 diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
494 index 66713c2537653..774ccaa5aceea 100644
495 --- a/drivers/usb/host/ehci-pci.c
496 +++ b/drivers/usb/host/ehci-pci.c
497 @@ -298,6 +298,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
498 if (pdev->vendor == PCI_VENDOR_ID_STMICRO
499 && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST)
500 ; /* ConneXT has no sbrn register */
501 + else if (pdev->vendor == PCI_VENDOR_ID_HUAWEI
502 + && pdev->device == 0xa239)
503 + ; /* HUAWEI Kunpeng920 USB EHCI has no sbrn register */
504 else
505 pci_read_config_byte(pdev, 0x60, &ehci->sbrn);
506
507 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
508 index 7960359dbc700..cd77c0621a555 100644
509 --- a/fs/btrfs/ctree.h
510 +++ b/fs/btrfs/ctree.h
511 @@ -504,11 +504,6 @@ enum {
512 * (device replace, resize, device add/delete, balance)
513 */
514 BTRFS_FS_EXCL_OP,
515 - /*
516 - * To info transaction_kthread we need an immediate commit so it
517 - * doesn't need to wait for commit_interval
518 - */
519 - BTRFS_FS_NEED_ASYNC_COMMIT,
520 /*
521 * Indicate that balance has been set up from the ioctl and is in the
522 * main phase. The fs_info::balance_ctl is initialized.
523 @@ -832,7 +827,10 @@ struct btrfs_fs_info {
524 */
525 struct ulist *qgroup_ulist;
526
527 - /* protect user change for quota operations */
528 + /*
529 + * Protect user change for quota operations. If a transaction is needed,
530 + * it must be started before locking this lock.
531 + */
532 struct mutex qgroup_ioctl_lock;
533
534 /* list of dirty qgroups to be written at next commit */
535 @@ -945,6 +943,8 @@ enum {
536 BTRFS_ROOT_DEAD_TREE,
537 /* The root has a log tree. Used only for subvolume roots. */
538 BTRFS_ROOT_HAS_LOG_TREE,
539 + /* Qgroup flushing is in progress */
540 + BTRFS_ROOT_QGROUP_FLUSHING,
541 };
542
543 /*
544 @@ -1097,6 +1097,7 @@ struct btrfs_root {
545 spinlock_t qgroup_meta_rsv_lock;
546 u64 qgroup_meta_rsv_pertrans;
547 u64 qgroup_meta_rsv_prealloc;
548 + wait_queue_head_t qgroup_flush_wait;
549
550 /* Number of active swapfiles */
551 atomic_t nr_swapfiles;
552 diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
553 index db9f2c58eb4af..f4f531c4aa960 100644
554 --- a/fs/btrfs/delalloc-space.c
555 +++ b/fs/btrfs/delalloc-space.c
556 @@ -151,7 +151,7 @@ int btrfs_check_data_free_space(struct inode *inode,
557 return ret;
558
559 /* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
560 - ret = btrfs_qgroup_reserve_data(inode, reserved, start, len);
561 + ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), reserved, start, len);
562 if (ret < 0)
563 btrfs_free_reserved_data_space_noquota(inode, start, len);
564 else
565 diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
566 index 3dccbbe4a6585..e96890475bac7 100644
567 --- a/fs/btrfs/delayed-inode.c
568 +++ b/fs/btrfs/delayed-inode.c
569 @@ -627,7 +627,8 @@ static int btrfs_delayed_inode_reserve_metadata(
570 */
571 if (!src_rsv || (!trans->bytes_reserved &&
572 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
573 - ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
574 + ret = btrfs_qgroup_reserve_meta(root, num_bytes,
575 + BTRFS_QGROUP_RSV_META_PREALLOC, true);
576 if (ret < 0)
577 return ret;
578 ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
579 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
580 index 1d28333bb798c..dacd67dca43fe 100644
581 --- a/fs/btrfs/disk-io.c
582 +++ b/fs/btrfs/disk-io.c
583 @@ -1154,6 +1154,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
584 mutex_init(&root->log_mutex);
585 mutex_init(&root->ordered_extent_mutex);
586 mutex_init(&root->delalloc_mutex);
587 + init_waitqueue_head(&root->qgroup_flush_wait);
588 init_waitqueue_head(&root->log_writer_wait);
589 init_waitqueue_head(&root->log_commit_wait[0]);
590 init_waitqueue_head(&root->log_commit_wait[1]);
591 @@ -1747,8 +1748,7 @@ static int transaction_kthread(void *arg)
592 }
593
594 now = ktime_get_seconds();
595 - if (cur->state < TRANS_STATE_BLOCKED &&
596 - !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
597 + if (cur->state < TRANS_STATE_COMMIT_START &&
598 (now < cur->start_time ||
599 now - cur->start_time < fs_info->commit_interval)) {
600 spin_unlock(&fs_info->trans_lock);
601 diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
602 index f6308a7b761db..400b0717b9d44 100644
603 --- a/fs/btrfs/file.c
604 +++ b/fs/btrfs/file.c
605 @@ -3149,7 +3149,7 @@ reserve_space:
606 &cached_state);
607 if (ret)
608 goto out;
609 - ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
610 + ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
611 alloc_start, bytes_to_reserve);
612 if (ret) {
613 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
614 @@ -3322,8 +3322,9 @@ static long btrfs_fallocate(struct file *file, int mode,
615 free_extent_map(em);
616 break;
617 }
618 - ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
619 - cur_offset, last_byte - cur_offset);
620 + ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
621 + &data_reserved, cur_offset,
622 + last_byte - cur_offset);
623 if (ret < 0) {
624 cur_offset = last_byte;
625 free_extent_map(em);
626 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
627 index 8959d011aafa8..b044b1d910dec 100644
628 --- a/fs/btrfs/inode.c
629 +++ b/fs/btrfs/inode.c
630 @@ -6375,7 +6375,7 @@ static int btrfs_dirty_inode(struct inode *inode)
631 return PTR_ERR(trans);
632
633 ret = btrfs_update_inode(trans, root, inode);
634 - if (ret && ret == -ENOSPC) {
635 + if (ret && (ret == -ENOSPC || ret == -EDQUOT)) {
636 /* whoops, lets try again with the full transaction */
637 btrfs_end_transaction(trans);
638 trans = btrfs_start_transaction(root, 1);
639 diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
640 index 837bd5e29c8a0..bb034e19a2a8a 100644
641 --- a/fs/btrfs/qgroup.c
642 +++ b/fs/btrfs/qgroup.c
643 @@ -11,7 +11,6 @@
644 #include <linux/slab.h>
645 #include <linux/workqueue.h>
646 #include <linux/btrfs.h>
647 -#include <linux/sizes.h>
648
649 #include "ctree.h"
650 #include "transaction.h"
651 @@ -887,6 +886,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
652 struct btrfs_key found_key;
653 struct btrfs_qgroup *qgroup = NULL;
654 struct btrfs_trans_handle *trans = NULL;
655 + struct ulist *ulist = NULL;
656 int ret = 0;
657 int slot;
658
659 @@ -894,12 +894,27 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
660 if (fs_info->quota_root)
661 goto out;
662
663 - fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
664 - if (!fs_info->qgroup_ulist) {
665 + ulist = ulist_alloc(GFP_KERNEL);
666 + if (!ulist) {
667 ret = -ENOMEM;
668 goto out;
669 }
670
671 + /*
672 + * Unlock qgroup_ioctl_lock before starting the transaction. This is to
673 + * avoid lock acquisition inversion problems (reported by lockdep) between
674 + * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
675 + * start a transaction.
676 + * After we started the transaction lock qgroup_ioctl_lock again and
677 + * check if someone else created the quota root in the meanwhile. If so,
678 + * just return success and release the transaction handle.
679 + *
680 + * Also we don't need to worry about someone else calling
681 + * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
682 + * that function returns 0 (success) when the sysfs entries already exist.
683 + */
684 + mutex_unlock(&fs_info->qgroup_ioctl_lock);
685 +
686 /*
687 * 1 for quota root item
688 * 1 for BTRFS_QGROUP_STATUS item
689 @@ -909,12 +924,20 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
690 * would be a lot of overkill.
691 */
692 trans = btrfs_start_transaction(tree_root, 2);
693 +
694 + mutex_lock(&fs_info->qgroup_ioctl_lock);
695 if (IS_ERR(trans)) {
696 ret = PTR_ERR(trans);
697 trans = NULL;
698 goto out;
699 }
700
701 + if (fs_info->quota_root)
702 + goto out;
703 +
704 + fs_info->qgroup_ulist = ulist;
705 + ulist = NULL;
706 +
707 /*
708 * initially create the quota tree
709 */
710 @@ -1047,10 +1070,13 @@ out:
711 if (ret) {
712 ulist_free(fs_info->qgroup_ulist);
713 fs_info->qgroup_ulist = NULL;
714 - if (trans)
715 - btrfs_end_transaction(trans);
716 }
717 mutex_unlock(&fs_info->qgroup_ioctl_lock);
718 + if (ret && trans)
719 + btrfs_end_transaction(trans);
720 + else if (trans)
721 + ret = btrfs_end_transaction(trans);
722 + ulist_free(ulist);
723 return ret;
724 }
725
726 @@ -1063,19 +1089,29 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
727 mutex_lock(&fs_info->qgroup_ioctl_lock);
728 if (!fs_info->quota_root)
729 goto out;
730 + mutex_unlock(&fs_info->qgroup_ioctl_lock);
731
732 /*
733 * 1 For the root item
734 *
735 * We should also reserve enough items for the quota tree deletion in
736 * btrfs_clean_quota_tree but this is not done.
737 + *
738 + * Also, we must always start a transaction without holding the mutex
739 + * qgroup_ioctl_lock, see btrfs_quota_enable().
740 */
741 trans = btrfs_start_transaction(fs_info->tree_root, 1);
742 +
743 + mutex_lock(&fs_info->qgroup_ioctl_lock);
744 if (IS_ERR(trans)) {
745 ret = PTR_ERR(trans);
746 + trans = NULL;
747 goto out;
748 }
749
750 + if (!fs_info->quota_root)
751 + goto out;
752 +
753 clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
754 btrfs_qgroup_wait_for_completion(fs_info, false);
755 spin_lock(&fs_info->qgroup_lock);
756 @@ -1089,13 +1125,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
757 ret = btrfs_clean_quota_tree(trans, quota_root);
758 if (ret) {
759 btrfs_abort_transaction(trans, ret);
760 - goto end_trans;
761 + goto out;
762 }
763
764 ret = btrfs_del_root(trans, &quota_root->root_key);
765 if (ret) {
766 btrfs_abort_transaction(trans, ret);
767 - goto end_trans;
768 + goto out;
769 }
770
771 list_del(&quota_root->dirty_list);
772 @@ -1109,10 +1145,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
773 free_extent_buffer(quota_root->commit_root);
774 kfree(quota_root);
775
776 -end_trans:
777 - ret = btrfs_end_transaction(trans);
778 out:
779 mutex_unlock(&fs_info->qgroup_ioctl_lock);
780 + if (ret && trans)
781 + btrfs_end_transaction(trans);
782 + else if (trans)
783 + ret = btrfs_end_transaction(trans);
784 +
785 return ret;
786 }
787
788 @@ -2840,20 +2879,8 @@ out:
789 return ret;
790 }
791
792 -/*
793 - * Two limits to commit transaction in advance.
794 - *
795 - * For RATIO, it will be 1/RATIO of the remaining limit as threshold.
796 - * For SIZE, it will be in byte unit as threshold.
797 - */
798 -#define QGROUP_FREE_RATIO 32
799 -#define QGROUP_FREE_SIZE SZ_32M
800 -static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
801 - const struct btrfs_qgroup *qg, u64 num_bytes)
802 +static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
803 {
804 - u64 free;
805 - u64 threshold;
806 -
807 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
808 qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
809 return false;
810 @@ -2862,32 +2889,6 @@ static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
811 qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
812 return false;
813
814 - /*
815 - * Even if we passed the check, it's better to check if reservation
816 - * for meta_pertrans is pushing us near limit.
817 - * If there is too much pertrans reservation or it's near the limit,
818 - * let's try commit transaction to free some, using transaction_kthread
819 - */
820 - if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER |
821 - BTRFS_QGROUP_LIMIT_MAX_EXCL))) {
822 - if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
823 - free = qg->max_excl - qgroup_rsv_total(qg) - qg->excl;
824 - threshold = min_t(u64, qg->max_excl / QGROUP_FREE_RATIO,
825 - QGROUP_FREE_SIZE);
826 - } else {
827 - free = qg->max_rfer - qgroup_rsv_total(qg) - qg->rfer;
828 - threshold = min_t(u64, qg->max_rfer / QGROUP_FREE_RATIO,
829 - QGROUP_FREE_SIZE);
830 - }
831 -
832 - /*
833 - * Use transaction_kthread to commit transaction, so we no
834 - * longer need to bother nested transaction nor lock context.
835 - */
836 - if (free < threshold)
837 - btrfs_commit_transaction_locksafe(fs_info);
838 - }
839 -
840 return true;
841 }
842
843 @@ -2937,7 +2938,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
844
845 qg = unode_aux_to_qgroup(unode);
846
847 - if (enforce && !qgroup_check_limits(fs_info, qg, num_bytes)) {
848 + if (enforce && !qgroup_check_limits(qg, num_bytes)) {
849 ret = -EDQUOT;
850 goto out;
851 }
852 @@ -3411,28 +3412,150 @@ btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
853 }
854 }
855
856 +#define rbtree_iterate_from_safe(node, next, start) \
857 + for (node = start; node && ({ next = rb_next(node); 1;}); node = next)
858 +
859 +static int qgroup_unreserve_range(struct btrfs_inode *inode,
860 + struct extent_changeset *reserved, u64 start,
861 + u64 len)
862 +{
863 + struct rb_node *node;
864 + struct rb_node *next;
865 + struct ulist_node *entry = NULL;
866 + int ret = 0;
867 +
868 + node = reserved->range_changed.root.rb_node;
869 + while (node) {
870 + entry = rb_entry(node, struct ulist_node, rb_node);
871 + if (entry->val < start)
872 + node = node->rb_right;
873 + else if (entry)
874 + node = node->rb_left;
875 + else
876 + break;
877 + }
878 +
879 + /* Empty changeset */
880 + if (!entry)
881 + return 0;
882 +
883 + if (entry->val > start && rb_prev(&entry->rb_node))
884 + entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
885 + rb_node);
886 +
887 + rbtree_iterate_from_safe(node, next, &entry->rb_node) {
888 + u64 entry_start;
889 + u64 entry_end;
890 + u64 entry_len;
891 + int clear_ret;
892 +
893 + entry = rb_entry(node, struct ulist_node, rb_node);
894 + entry_start = entry->val;
895 + entry_end = entry->aux;
896 + entry_len = entry_end - entry_start + 1;
897 +
898 + if (entry_start >= start + len)
899 + break;
900 + if (entry_start + entry_len <= start)
901 + continue;
902 + /*
903 + * Now the entry is in [start, start + len), revert the
904 + * EXTENT_QGROUP_RESERVED bit.
905 + */
906 + clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
907 + entry_end, EXTENT_QGROUP_RESERVED);
908 + if (!ret && clear_ret < 0)
909 + ret = clear_ret;
910 +
911 + ulist_del(&reserved->range_changed, entry->val, entry->aux);
912 + if (likely(reserved->bytes_changed >= entry_len)) {
913 + reserved->bytes_changed -= entry_len;
914 + } else {
915 + WARN_ON(1);
916 + reserved->bytes_changed = 0;
917 + }
918 + }
919 +
920 + return ret;
921 +}
922 +
923 /*
924 - * Reserve qgroup space for range [start, start + len).
925 + * Try to free some space for qgroup.
926 *
927 - * This function will either reserve space from related qgroups or doing
928 - * nothing if the range is already reserved.
929 + * For qgroup, there are only 3 ways to free qgroup space:
930 + * - Flush nodatacow write
931 + * Any nodatacow write will free its reserved data space at run_delalloc_range().
932 + * In theory, we should only flush nodatacow inodes, but it's not yet
933 + * possible, so we need to flush the whole root.
934 *
935 - * Return 0 for successful reserve
936 - * Return <0 for error (including -EQUOT)
937 + * - Wait for ordered extents
938 + * When ordered extents are finished, their reserved metadata is finally
939 + * converted to per_trans status, which can be freed by later commit
940 + * transaction.
941 *
942 - * NOTE: this function may sleep for memory allocation.
943 - * if btrfs_qgroup_reserve_data() is called multiple times with
944 - * same @reserved, caller must ensure when error happens it's OK
945 - * to free *ALL* reserved space.
946 + * - Commit transaction
947 + * This would free the meta_per_trans space.
948 + * In theory this shouldn't provide much space, but any more qgroup space
949 + * is needed.
950 */
951 -int btrfs_qgroup_reserve_data(struct inode *inode,
952 +static int try_flush_qgroup(struct btrfs_root *root)
953 +{
954 + struct btrfs_trans_handle *trans;
955 + int ret;
956 + bool can_commit = true;
957 +
958 + /*
959 + * We don't want to run flush again and again, so if there is a running
960 + * one, we won't try to start a new flush, but exit directly.
961 + */
962 + if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
963 + wait_event(root->qgroup_flush_wait,
964 + !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
965 + return 0;
966 + }
967 +
968 + /*
969 + * If current process holds a transaction, we shouldn't flush, as we
970 + * assume all space reservation happens before a transaction handle is
971 + * held.
972 + *
973 + * But there are cases like btrfs_delayed_item_reserve_metadata() where
974 + * we try to reserve space with one transction handle already held.
975 + * In that case we can't commit transaction, but at least try to end it
976 + * and hope the started data writes can free some space.
977 + */
978 + if (current->journal_info &&
979 + current->journal_info != BTRFS_SEND_TRANS_STUB)
980 + can_commit = false;
981 +
982 + ret = btrfs_start_delalloc_snapshot(root);
983 + if (ret < 0)
984 + goto out;
985 + btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
986 +
987 + trans = btrfs_join_transaction(root);
988 + if (IS_ERR(trans)) {
989 + ret = PTR_ERR(trans);
990 + goto out;
991 + }
992 +
993 + if (can_commit)
994 + ret = btrfs_commit_transaction(trans);
995 + else
996 + ret = btrfs_end_transaction(trans);
997 +out:
998 + clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
999 + wake_up(&root->qgroup_flush_wait);
1000 + return ret;
1001 +}
1002 +
1003 +static int qgroup_reserve_data(struct btrfs_inode *inode,
1004 struct extent_changeset **reserved_ret, u64 start,
1005 u64 len)
1006 {
1007 - struct btrfs_root *root = BTRFS_I(inode)->root;
1008 - struct ulist_node *unode;
1009 - struct ulist_iterator uiter;
1010 + struct btrfs_root *root = inode->root;
1011 struct extent_changeset *reserved;
1012 + bool new_reserved = false;
1013 u64 orig_reserved;
1014 u64 to_reserve;
1015 int ret;
1016 @@ -3445,6 +3568,7 @@ int btrfs_qgroup_reserve_data(struct inode *inode,
1017 if (WARN_ON(!reserved_ret))
1018 return -EINVAL;
1019 if (!*reserved_ret) {
1020 + new_reserved = true;
1021 *reserved_ret = extent_changeset_alloc();
1022 if (!*reserved_ret)
1023 return -ENOMEM;
1024 @@ -3452,15 +3576,15 @@ int btrfs_qgroup_reserve_data(struct inode *inode,
1025 reserved = *reserved_ret;
1026 /* Record already reserved space */
1027 orig_reserved = reserved->bytes_changed;
1028 - ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
1029 + ret = set_record_extent_bits(&inode->io_tree, start,
1030 start + len -1, EXTENT_QGROUP_RESERVED, reserved);
1031
1032 /* Newly reserved space */
1033 to_reserve = reserved->bytes_changed - orig_reserved;
1034 - trace_btrfs_qgroup_reserve_data(inode, start, len,
1035 + trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
1036 to_reserve, QGROUP_RESERVE);
1037 if (ret < 0)
1038 - goto cleanup;
1039 + goto out;
1040 ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
1041 if (ret < 0)
1042 goto cleanup;
1043 @@ -3468,23 +3592,49 @@ int btrfs_qgroup_reserve_data(struct inode *inode,
1044 return ret;
1045
1046 cleanup:
1047 - /* cleanup *ALL* already reserved ranges */
1048 - ULIST_ITER_INIT(&uiter);
1049 - while ((unode = ulist_next(&reserved->range_changed, &uiter)))
1050 - clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
1051 - unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
1052 - /* Also free data bytes of already reserved one */
1053 - btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid,
1054 - orig_reserved, BTRFS_QGROUP_RSV_DATA);
1055 - extent_changeset_release(reserved);
1056 + qgroup_unreserve_range(inode, reserved, start, len);
1057 +out:
1058 + if (new_reserved) {
1059 + extent_changeset_release(reserved);
1060 + kfree(reserved);
1061 + *reserved_ret = NULL;
1062 + }
1063 return ret;
1064 }
1065
1066 +/*
1067 + * Reserve qgroup space for range [start, start + len).
1068 + *
1069 + * This function will either reserve space from related qgroups or do nothing
1070 + * if the range is already reserved.
1071 + *
1072 + * Return 0 for successful reservation
1073 + * Return <0 for error (including -EQUOT)
1074 + *
1075 + * NOTE: This function may sleep for memory allocation, dirty page flushing and
1076 + * commit transaction. So caller should not hold any dirty page locked.
1077 + */
1078 +int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
1079 + struct extent_changeset **reserved_ret, u64 start,
1080 + u64 len)
1081 +{
1082 + int ret;
1083 +
1084 + ret = qgroup_reserve_data(inode, reserved_ret, start, len);
1085 + if (ret <= 0 && ret != -EDQUOT)
1086 + return ret;
1087 +
1088 + ret = try_flush_qgroup(inode->root);
1089 + if (ret < 0)
1090 + return ret;
1091 + return qgroup_reserve_data(inode, reserved_ret, start, len);
1092 +}
1093 +
1094 /* Free ranges specified by @reserved, normally in error path */
1095 -static int qgroup_free_reserved_data(struct inode *inode,
1096 +static int qgroup_free_reserved_data(struct btrfs_inode *inode,
1097 struct extent_changeset *reserved, u64 start, u64 len)
1098 {
1099 - struct btrfs_root *root = BTRFS_I(inode)->root;
1100 + struct btrfs_root *root = inode->root;
1101 struct ulist_node *unode;
1102 struct ulist_iterator uiter;
1103 struct extent_changeset changeset;
1104 @@ -3520,8 +3670,8 @@ static int qgroup_free_reserved_data(struct inode *inode,
1105 * EXTENT_QGROUP_RESERVED, we won't double free.
1106 * So not need to rush.
1107 */
1108 - ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree,
1109 - free_start, free_start + free_len - 1,
1110 + ret = clear_record_extent_bits(&inode->io_tree, free_start,
1111 + free_start + free_len - 1,
1112 EXTENT_QGROUP_RESERVED, &changeset);
1113 if (ret < 0)
1114 goto out;
1115 @@ -3550,7 +3700,8 @@ static int __btrfs_qgroup_release_data(struct inode *inode,
1116 /* In release case, we shouldn't have @reserved */
1117 WARN_ON(!free && reserved);
1118 if (free && reserved)
1119 - return qgroup_free_reserved_data(inode, reserved, start, len);
1120 + return qgroup_free_reserved_data(BTRFS_I(inode), reserved,
1121 + start, len);
1122 extent_changeset_init(&changeset);
1123 ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
1124 start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
1125 @@ -3649,8 +3800,8 @@ static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
1126 return num_bytes;
1127 }
1128
1129 -int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
1130 - enum btrfs_qgroup_rsv_type type, bool enforce)
1131 +int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
1132 + enum btrfs_qgroup_rsv_type type, bool enforce)
1133 {
1134 struct btrfs_fs_info *fs_info = root->fs_info;
1135 int ret;
1136 @@ -3676,6 +3827,21 @@ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
1137 return ret;
1138 }
1139
1140 +int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
1141 + enum btrfs_qgroup_rsv_type type, bool enforce)
1142 +{
1143 + int ret;
1144 +
1145 + ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
1146 + if (ret <= 0 && ret != -EDQUOT)
1147 + return ret;
1148 +
1149 + ret = try_flush_qgroup(root);
1150 + if (ret < 0)
1151 + return ret;
1152 + return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
1153 +}
1154 +
1155 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
1156 {
1157 struct btrfs_fs_info *fs_info = root->fs_info;
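The qgroup rework above replaces the old heuristic (nudging transaction_kthread into an early commit whenever a qgroup came within QGROUP_FREE_RATIO/QGROUP_FREE_SIZE of its limit) with on-demand flushing at -EDQUOT time. Both reservation entry points now share one shape; condensed below as a sketch over the patch's names, with reserve() standing in for the underlying attempt:

	ret = reserve();			/* first attempt */
	if (ret <= 0 && ret != -EDQUOT)
		return ret;			/* success, or a hard error */

	/*
	 * try_flush_qgroup(): one flusher at a time per root, gated by
	 * BTRFS_ROOT_QGROUP_FLUSHING; starts delalloc writeback, waits for
	 * ordered extents, then commits the transaction (or merely ends it
	 * when the caller already holds a transaction handle).
	 */
	ret = try_flush_qgroup(root);
	if (ret < 0)
		return ret;

	return reserve();			/* single retry, then give up */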
1158 diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
1159 index b0420c4f5d0ef..0a2659685ad65 100644
1160 --- a/fs/btrfs/qgroup.h
1161 +++ b/fs/btrfs/qgroup.h
1162 @@ -344,12 +344,13 @@ int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
1163 #endif
1164
1165 /* New io_tree based accurate qgroup reserve API */
1166 -int btrfs_qgroup_reserve_data(struct inode *inode,
1167 +int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
1168 struct extent_changeset **reserved, u64 start, u64 len);
1169 int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len);
1170 int btrfs_qgroup_free_data(struct inode *inode,
1171 struct extent_changeset *reserved, u64 start, u64 len);
1172 -
1173 +int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
1174 + enum btrfs_qgroup_rsv_type type, bool enforce);
1175 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
1176 enum btrfs_qgroup_rsv_type type, bool enforce);
1177 /* Reserve metadata space for pertrans and prealloc type */
1178 diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
1179 index d8e4e0bf3fc2d..e6cb95b81787f 100644
1180 --- a/fs/btrfs/transaction.c
1181 +++ b/fs/btrfs/transaction.c
1182 @@ -27,7 +27,6 @@
1183
1184 static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
1185 [TRANS_STATE_RUNNING] = 0U,
1186 - [TRANS_STATE_BLOCKED] = __TRANS_START,
1187 [TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH),
1188 [TRANS_STATE_COMMIT_DOING] = (__TRANS_START |
1189 __TRANS_ATTACH |
1190 @@ -388,7 +387,7 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
1191
1192 static inline int is_transaction_blocked(struct btrfs_transaction *trans)
1193 {
1194 - return (trans->state >= TRANS_STATE_BLOCKED &&
1195 + return (trans->state >= TRANS_STATE_COMMIT_START &&
1196 trans->state < TRANS_STATE_UNBLOCKED &&
1197 !TRANS_ABORTED(trans));
1198 }
1199 @@ -580,7 +579,7 @@ again:
1200 INIT_LIST_HEAD(&h->new_bgs);
1201
1202 smp_mb();
1203 - if (cur_trans->state >= TRANS_STATE_BLOCKED &&
1204 + if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
1205 may_wait_transaction(fs_info, type)) {
1206 current->journal_info = h;
1207 btrfs_commit_transaction(h);
1208 @@ -797,7 +796,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
1209 struct btrfs_transaction *cur_trans = trans->transaction;
1210
1211 smp_mb();
1212 - if (cur_trans->state >= TRANS_STATE_BLOCKED ||
1213 + if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
1214 cur_trans->delayed_refs.flushing)
1215 return 1;
1216
1217 @@ -830,7 +829,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
1218 {
1219 struct btrfs_fs_info *info = trans->fs_info;
1220 struct btrfs_transaction *cur_trans = trans->transaction;
1221 - int lock = (trans->type != TRANS_JOIN_NOLOCK);
1222 int err = 0;
1223
1224 if (refcount_read(&trans->use_count) > 1) {
1225 @@ -846,13 +844,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
1226
1227 btrfs_trans_release_chunk_metadata(trans);
1228
1229 - if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
1230 - if (throttle)
1231 - return btrfs_commit_transaction(trans);
1232 - else
1233 - wake_up_process(info->transaction_kthread);
1234 - }
1235 -
1236 if (trans->type & __TRANS_FREEZABLE)
1237 sb_end_intwrite(info->sb);
1238
1239 @@ -2306,7 +2297,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
1240 */
1241 cur_trans->state = TRANS_STATE_COMPLETED;
1242 wake_up(&cur_trans->commit_wait);
1243 - clear_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);
1244
1245 spin_lock(&fs_info->trans_lock);
1246 list_del_init(&cur_trans->list);
1247 diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
1248 index 7291a2a930751..d8a7d460e436a 100644
1249 --- a/fs/btrfs/transaction.h
1250 +++ b/fs/btrfs/transaction.h
1251 @@ -13,7 +13,6 @@
1252
1253 enum btrfs_trans_state {
1254 TRANS_STATE_RUNNING,
1255 - TRANS_STATE_BLOCKED,
1256 TRANS_STATE_COMMIT_START,
1257 TRANS_STATE_COMMIT_DOING,
1258 TRANS_STATE_UNBLOCKED,
1259 @@ -208,20 +207,6 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
1260 int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
1261 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1262 int wait_for_unblock);
1263 -
1264 -/*
1265 - * Try to commit transaction asynchronously, so this is safe to call
1266 - * even holding a spinlock.
1267 - *
1268 - * It's done by informing transaction_kthread to commit transaction without
1269 - * waiting for commit interval.
1270 - */
1271 -static inline void btrfs_commit_transaction_locksafe(
1272 - struct btrfs_fs_info *fs_info)
1273 -{
1274 - set_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);
1275 - wake_up_process(fs_info->transaction_kthread);
1276 -}
1277 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
1278 int btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
1279 void btrfs_throttle(struct btrfs_fs_info *fs_info);
1280 diff --git a/fs/namespace.c b/fs/namespace.c
1281 index 76ea92994d26d..a092611d89e77 100644
1282 --- a/fs/namespace.c
1283 +++ b/fs/namespace.c
1284 @@ -1861,6 +1861,20 @@ void drop_collected_mounts(struct vfsmount *mnt)
1285 namespace_unlock();
1286 }
1287
1288 +static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
1289 +{
1290 + struct mount *child;
1291 +
1292 + list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
1293 + if (!is_subdir(child->mnt_mountpoint, dentry))
1294 + continue;
1295 +
1296 + if (child->mnt.mnt_flags & MNT_LOCKED)
1297 + return true;
1298 + }
1299 + return false;
1300 +}
1301 +
1302 /**
1303 * clone_private_mount - create a private clone of a path
1304 *
1305 @@ -1875,14 +1889,27 @@ struct vfsmount *clone_private_mount(const struct path *path)
1306 struct mount *old_mnt = real_mount(path->mnt);
1307 struct mount *new_mnt;
1308
1309 + down_read(&namespace_sem);
1310 if (IS_MNT_UNBINDABLE(old_mnt))
1311 - return ERR_PTR(-EINVAL);
1312 + goto invalid;
1313 +
1314 + if (!check_mnt(old_mnt))
1315 + goto invalid;
1316 +
1317 + if (has_locked_children(old_mnt, path->dentry))
1318 + goto invalid;
1319
1320 new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
1321 + up_read(&namespace_sem);
1322 +
1323 if (IS_ERR(new_mnt))
1324 return ERR_CAST(new_mnt);
1325
1326 return &new_mnt->mnt;
1327 +
1328 +invalid:
1329 + up_read(&namespace_sem);
1330 + return ERR_PTR(-EINVAL);
1331 }
1332 EXPORT_SYMBOL_GPL(clone_private_mount);
1333
1334 @@ -2234,19 +2261,6 @@ static int do_change_type(struct path *path, int ms_flags)
1335 return err;
1336 }
1337
1338 -static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
1339 -{
1340 - struct mount *child;
1341 - list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
1342 - if (!is_subdir(child->mnt_mountpoint, dentry))
1343 - continue;
1344 -
1345 - if (child->mnt.mnt_flags & MNT_LOCKED)
1346 - return true;
1347 - }
1348 - return false;
1349 -}
1350 -
1351 static struct mount *__do_loopback(struct path *old_path, int recurse)
1352 {
1353 struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);
1354 diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
1355 index 91677f2fa2e8b..cd15c1b7fae06 100644
1356 --- a/include/linux/tee_drv.h
1357 +++ b/include/linux/tee_drv.h
1358 @@ -26,6 +26,7 @@
1359 #define TEE_SHM_REGISTER BIT(3) /* Memory registered in secure world */
1360 #define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */
1361 #define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */
1362 +#define TEE_SHM_PRIV BIT(7) /* Memory private to TEE driver */
1363
1364 struct device;
1365 struct tee_device;
1366 diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
1367 index db2b10c718ba2..e40712abe089e 100644
1368 --- a/kernel/trace/trace_events_hist.c
1369 +++ b/kernel/trace/trace_events_hist.c
1370 @@ -66,7 +66,8 @@
1371 C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \
1372 C(INVALID_REF_KEY, "Using variable references in keys not supported"), \
1373 C(VAR_NOT_FOUND, "Couldn't find variable"), \
1374 - C(FIELD_NOT_FOUND, "Couldn't find field"),
1375 + C(FIELD_NOT_FOUND, "Couldn't find field"), \
1376 + C(INVALID_STR_OPERAND, "String type can not be an operand in expression"),
1377
1378 #undef C
1379 #define C(a, b) HIST_ERR_##a
1380 @@ -3038,6 +3039,13 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
1381 ret = PTR_ERR(operand1);
1382 goto free;
1383 }
1384 + if (operand1->flags & HIST_FIELD_FL_STRING) {
1385 + /* String type can not be the operand of unary operator. */
1386 + hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
1387 + destroy_hist_field(operand1, 0);
1388 + ret = -EINVAL;
1389 + goto free;
1390 + }
1391
1392 expr->flags |= operand1->flags &
1393 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
1394 @@ -3139,6 +3147,11 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
1395 operand1 = NULL;
1396 goto free;
1397 }
1398 + if (operand1->flags & HIST_FIELD_FL_STRING) {
1399 + hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str));
1400 + ret = -EINVAL;
1401 + goto free;
1402 + }
1403
1404 /* rest of string could be another expression e.g. b+c in a+b+c */
1405 operand_flags = 0;
1406 @@ -3148,6 +3161,11 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
1407 operand2 = NULL;
1408 goto free;
1409 }
1410 + if (operand2->flags & HIST_FIELD_FL_STRING) {
1411 + hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
1412 + ret = -EINVAL;
1413 + goto free;
1414 + }
1415
1416 ret = check_expr_operands(file->tr, operand1, operand2);
1417 if (ret)
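A note on the first trace_events_hist.c hunk: the error strings form an X-macro table, so every C(name, msg) entry except the last needs a trailing backslash; that is why the patch appends one to FIELD_NOT_FOUND before adding INVALID_STR_OPERAND. The table is expanded twice, following the pattern already used elsewhere in this file (reader's sketch):

	#undef C
	#define C(a, b)	HIST_ERR_##a	/* first expansion: enum values */

	#undef C
	#define C(a, b)	b		/* second expansion: err_text[] strings */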
1418 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1419 index a0d1561eeb532..f486e680aed1d 100644
1420 --- a/sound/pci/hda/patch_realtek.c
1421 +++ b/sound/pci/hda/patch_realtek.c
1422 @@ -8122,6 +8122,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
1423 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
1424 SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
1425 SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
1426 + SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
1427 SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
1428 SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
1429 SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),