Contents of /trunk/kernel-alx/patches-5.4/0150-5.4.51-all-fixes.patch
Parent Directory | Revision Log
Revision 3544 -
(show annotations)
(download)
Wed Jul 15 07:48:05 2020 UTC (4 years, 2 months ago) by niro
File size: 80373 byte(s)
-linux-5.4.51
1 | diff --git a/Makefile b/Makefile |
2 | index 380e398b2995..6ac83669e073 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 5 |
8 | PATCHLEVEL = 4 |
9 | -SUBLEVEL = 50 |
10 | +SUBLEVEL = 51 |
11 | EXTRAVERSION = |
12 | NAME = Kleptomaniac Octopus |
13 | |
14 | diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c |
15 | index 342e41de9d64..6a2536460026 100644 |
16 | --- a/arch/mips/kernel/traps.c |
17 | +++ b/arch/mips/kernel/traps.c |
18 | @@ -2126,6 +2126,7 @@ static void configure_status(void) |
19 | |
20 | change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, |
21 | status_set); |
22 | + back_to_back_c0_hazard(); |
23 | } |
24 | |
25 | unsigned int hwrena; |
26 | diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c |
27 | index 156a95ac5c72..2ee68d6e8bb9 100644 |
28 | --- a/arch/mips/lantiq/xway/sysctrl.c |
29 | +++ b/arch/mips/lantiq/xway/sysctrl.c |
30 | @@ -514,8 +514,8 @@ void __init ltq_soc_init(void) |
31 | clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH | |
32 | PMU_PPE_DP | PMU_PPE_TC); |
33 | clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF); |
34 | - clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY); |
35 | - clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY); |
36 | + clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY); |
37 | + clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY); |
38 | clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); |
39 | clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE); |
40 | clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); |
41 | @@ -538,8 +538,8 @@ void __init ltq_soc_init(void) |
42 | PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM | |
43 | PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 | |
44 | PMU_PPE_QSB | PMU_PPE_TOP); |
45 | - clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY); |
46 | - clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY); |
47 | + clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY); |
48 | + clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY); |
49 | clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); |
50 | clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); |
51 | clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); |
52 | diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c |
53 | index 6d321f5f101d..7184d55d87aa 100644 |
54 | --- a/arch/s390/kernel/debug.c |
55 | +++ b/arch/s390/kernel/debug.c |
56 | @@ -198,9 +198,10 @@ static debug_entry_t ***debug_areas_alloc(int pages_per_area, int nr_areas) |
57 | if (!areas) |
58 | goto fail_malloc_areas; |
59 | for (i = 0; i < nr_areas; i++) { |
60 | + /* GFP_NOWARN to avoid user triggerable WARN, we handle fails */ |
61 | areas[i] = kmalloc_array(pages_per_area, |
62 | sizeof(debug_entry_t *), |
63 | - GFP_KERNEL); |
64 | + GFP_KERNEL | __GFP_NOWARN); |
65 | if (!areas[i]) |
66 | goto fail_malloc_areas2; |
67 | for (j = 0; j < pages_per_area; j++) { |
68 | diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c |
69 | index d8cc5223b7ce..87a34b6e06a2 100644 |
70 | --- a/arch/x86/kernel/cpu/resctrl/core.c |
71 | +++ b/arch/x86/kernel/cpu/resctrl/core.c |
72 | @@ -260,6 +260,7 @@ static bool __get_mem_config_intel(struct rdt_resource *r) |
73 | r->num_closid = edx.split.cos_max + 1; |
74 | r->membw.max_delay = eax.split.max_delay + 1; |
75 | r->default_ctrl = MAX_MBA_BW; |
76 | + r->membw.mbm_width = MBM_CNTR_WIDTH; |
77 | if (ecx & MBA_IS_LINEAR) { |
78 | r->membw.delay_linear = true; |
79 | r->membw.min_bw = MAX_MBA_BW - r->membw.max_delay; |
80 | @@ -289,6 +290,7 @@ static bool __rdt_get_mem_config_amd(struct rdt_resource *r) |
81 | /* AMD does not use delay */ |
82 | r->membw.delay_linear = false; |
83 | |
84 | + r->membw.mbm_width = MBM_CNTR_WIDTH_AMD; |
85 | r->membw.min_bw = 0; |
86 | r->membw.bw_gran = 1; |
87 | /* Max value is 2048, Data width should be 4 in decimal */ |
88 | diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h |
89 | index 3dd13f3a8b23..17095435c875 100644 |
90 | --- a/arch/x86/kernel/cpu/resctrl/internal.h |
91 | +++ b/arch/x86/kernel/cpu/resctrl/internal.h |
92 | @@ -32,6 +32,7 @@ |
93 | #define CQM_LIMBOCHECK_INTERVAL 1000 |
94 | |
95 | #define MBM_CNTR_WIDTH 24 |
96 | +#define MBM_CNTR_WIDTH_AMD 44 |
97 | #define MBM_OVERFLOW_INTERVAL 1000 |
98 | #define MAX_MBA_BW 100u |
99 | #define MBA_IS_LINEAR 0x4 |
100 | @@ -368,6 +369,7 @@ struct rdt_cache { |
101 | * @min_bw: Minimum memory bandwidth percentage user can request |
102 | * @bw_gran: Granularity at which the memory bandwidth is allocated |
103 | * @delay_linear: True if memory B/W delay is in linear scale |
104 | + * @mbm_width: memory B/W monitor counter width |
105 | * @mba_sc: True if MBA software controller(mba_sc) is enabled |
106 | * @mb_map: Mapping of memory B/W percentage to memory B/W delay |
107 | */ |
108 | @@ -376,6 +378,7 @@ struct rdt_membw { |
109 | u32 min_bw; |
110 | u32 bw_gran; |
111 | u32 delay_linear; |
112 | + u32 mbm_width; |
113 | bool mba_sc; |
114 | u32 *mb_map; |
115 | }; |
116 | diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c |
117 | index 773124b0e18a..0cf4f87f6012 100644 |
118 | --- a/arch/x86/kernel/cpu/resctrl/monitor.c |
119 | +++ b/arch/x86/kernel/cpu/resctrl/monitor.c |
120 | @@ -216,8 +216,9 @@ void free_rmid(u32 rmid) |
121 | |
122 | static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr) |
123 | { |
124 | - u64 shift = 64 - MBM_CNTR_WIDTH, chunks; |
125 | + u64 shift, chunks; |
126 | |
127 | + shift = 64 - rdt_resources_all[RDT_RESOURCE_MBA].membw.mbm_width; |
128 | chunks = (cur_msr << shift) - (prev_msr << shift); |
129 | return chunks >>= shift; |
130 | } |
131 | diff --git a/crypto/af_alg.c b/crypto/af_alg.c |
132 | index 3d8e53010cda..a3b9df99af6d 100644 |
133 | --- a/crypto/af_alg.c |
134 | +++ b/crypto/af_alg.c |
135 | @@ -128,21 +128,15 @@ EXPORT_SYMBOL_GPL(af_alg_release); |
136 | void af_alg_release_parent(struct sock *sk) |
137 | { |
138 | struct alg_sock *ask = alg_sk(sk); |
139 | - unsigned int nokey = ask->nokey_refcnt; |
140 | - bool last = nokey && !ask->refcnt; |
141 | + unsigned int nokey = atomic_read(&ask->nokey_refcnt); |
142 | |
143 | sk = ask->parent; |
144 | ask = alg_sk(sk); |
145 | |
146 | - local_bh_disable(); |
147 | - bh_lock_sock(sk); |
148 | - ask->nokey_refcnt -= nokey; |
149 | - if (!last) |
150 | - last = !--ask->refcnt; |
151 | - bh_unlock_sock(sk); |
152 | - local_bh_enable(); |
153 | + if (nokey) |
154 | + atomic_dec(&ask->nokey_refcnt); |
155 | |
156 | - if (last) |
157 | + if (atomic_dec_and_test(&ask->refcnt)) |
158 | sock_put(sk); |
159 | } |
160 | EXPORT_SYMBOL_GPL(af_alg_release_parent); |
161 | @@ -187,7 +181,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) |
162 | |
163 | err = -EBUSY; |
164 | lock_sock(sk); |
165 | - if (ask->refcnt | ask->nokey_refcnt) |
166 | + if (atomic_read(&ask->refcnt)) |
167 | goto unlock; |
168 | |
169 | swap(ask->type, type); |
170 | @@ -236,7 +230,7 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, |
171 | int err = -EBUSY; |
172 | |
173 | lock_sock(sk); |
174 | - if (ask->refcnt) |
175 | + if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt)) |
176 | goto unlock; |
177 | |
178 | type = ask->type; |
179 | @@ -301,12 +295,14 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern) |
180 | if (err) |
181 | goto unlock; |
182 | |
183 | - if (nokey || !ask->refcnt++) |
184 | + if (atomic_inc_return_relaxed(&ask->refcnt) == 1) |
185 | sock_hold(sk); |
186 | - ask->nokey_refcnt += nokey; |
187 | + if (nokey) { |
188 | + atomic_inc(&ask->nokey_refcnt); |
189 | + atomic_set(&alg_sk(sk2)->nokey_refcnt, 1); |
190 | + } |
191 | alg_sk(sk2)->parent = sk; |
192 | alg_sk(sk2)->type = type; |
193 | - alg_sk(sk2)->nokey_refcnt = nokey; |
194 | |
195 | newsock->ops = type->ops; |
196 | newsock->state = SS_CONNECTED; |
197 | diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c |
198 | index eb1910b6d434..0ae000a61c7f 100644 |
199 | --- a/crypto/algif_aead.c |
200 | +++ b/crypto/algif_aead.c |
201 | @@ -384,7 +384,7 @@ static int aead_check_key(struct socket *sock) |
202 | struct alg_sock *ask = alg_sk(sk); |
203 | |
204 | lock_sock(sk); |
205 | - if (ask->refcnt) |
206 | + if (!atomic_read(&ask->nokey_refcnt)) |
207 | goto unlock_child; |
208 | |
209 | psk = ask->parent; |
210 | @@ -396,11 +396,8 @@ static int aead_check_key(struct socket *sock) |
211 | if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY) |
212 | goto unlock; |
213 | |
214 | - if (!pask->refcnt++) |
215 | - sock_hold(psk); |
216 | - |
217 | - ask->refcnt = 1; |
218 | - sock_put(psk); |
219 | + atomic_dec(&pask->nokey_refcnt); |
220 | + atomic_set(&ask->nokey_refcnt, 0); |
221 | |
222 | err = 0; |
223 | |
224 | diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c |
225 | index 178f4cd75ef1..8673ac8828e9 100644 |
226 | --- a/crypto/algif_hash.c |
227 | +++ b/crypto/algif_hash.c |
228 | @@ -301,7 +301,7 @@ static int hash_check_key(struct socket *sock) |
229 | struct alg_sock *ask = alg_sk(sk); |
230 | |
231 | lock_sock(sk); |
232 | - if (ask->refcnt) |
233 | + if (!atomic_read(&ask->nokey_refcnt)) |
234 | goto unlock_child; |
235 | |
236 | psk = ask->parent; |
237 | @@ -313,11 +313,8 @@ static int hash_check_key(struct socket *sock) |
238 | if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) |
239 | goto unlock; |
240 | |
241 | - if (!pask->refcnt++) |
242 | - sock_hold(psk); |
243 | - |
244 | - ask->refcnt = 1; |
245 | - sock_put(psk); |
246 | + atomic_dec(&pask->nokey_refcnt); |
247 | + atomic_set(&ask->nokey_refcnt, 0); |
248 | |
249 | err = 0; |
250 | |
251 | diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c |
252 | index 4c3bdffe0c3a..ec5567c87a6d 100644 |
253 | --- a/crypto/algif_skcipher.c |
254 | +++ b/crypto/algif_skcipher.c |
255 | @@ -211,7 +211,7 @@ static int skcipher_check_key(struct socket *sock) |
256 | struct alg_sock *ask = alg_sk(sk); |
257 | |
258 | lock_sock(sk); |
259 | - if (ask->refcnt) |
260 | + if (!atomic_read(&ask->nokey_refcnt)) |
261 | goto unlock_child; |
262 | |
263 | psk = ask->parent; |
264 | @@ -223,11 +223,8 @@ static int skcipher_check_key(struct socket *sock) |
265 | if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) |
266 | goto unlock; |
267 | |
268 | - if (!pask->refcnt++) |
269 | - sock_hold(psk); |
270 | - |
271 | - ask->refcnt = 1; |
272 | - sock_put(psk); |
273 | + atomic_dec(&pask->nokey_refcnt); |
274 | + atomic_set(&ask->nokey_refcnt, 0); |
275 | |
276 | err = 0; |
277 | |
278 | diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c |
279 | index 0cf2fe290230..c1de270046bf 100644 |
280 | --- a/drivers/block/virtio_blk.c |
281 | +++ b/drivers/block/virtio_blk.c |
282 | @@ -990,6 +990,7 @@ out_put_disk: |
283 | put_disk(vblk->disk); |
284 | out_free_vq: |
285 | vdev->config->del_vqs(vdev); |
286 | + kfree(vblk->vqs); |
287 | out_free_vblk: |
288 | kfree(vblk); |
289 | out_free_index: |
290 | diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c |
291 | index 87f449340202..1784530b8387 100644 |
292 | --- a/drivers/char/tpm/tpm-dev-common.c |
293 | +++ b/drivers/char/tpm/tpm-dev-common.c |
294 | @@ -189,15 +189,6 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, |
295 | goto out; |
296 | } |
297 | |
298 | - /* atomic tpm command send and result receive. We only hold the ops |
299 | - * lock during this period so that the tpm can be unregistered even if |
300 | - * the char dev is held open. |
301 | - */ |
302 | - if (tpm_try_get_ops(priv->chip)) { |
303 | - ret = -EPIPE; |
304 | - goto out; |
305 | - } |
306 | - |
307 | priv->response_length = 0; |
308 | priv->response_read = false; |
309 | *off = 0; |
310 | @@ -211,11 +202,19 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, |
311 | if (file->f_flags & O_NONBLOCK) { |
312 | priv->command_enqueued = true; |
313 | queue_work(tpm_dev_wq, &priv->async_work); |
314 | - tpm_put_ops(priv->chip); |
315 | mutex_unlock(&priv->buffer_mutex); |
316 | return size; |
317 | } |
318 | |
319 | + /* atomic tpm command send and result receive. We only hold the ops |
320 | + * lock during this period so that the tpm can be unregistered even if |
321 | + * the char dev is held open. |
322 | + */ |
323 | + if (tpm_try_get_ops(priv->chip)) { |
324 | + ret = -EPIPE; |
325 | + goto out; |
326 | + } |
327 | + |
328 | ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer, |
329 | sizeof(priv->data_buffer)); |
330 | tpm_put_ops(priv->chip); |
331 | diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c |
332 | index adc88e1dc999..cf65a47310c3 100644 |
333 | --- a/drivers/dma-buf/dma-buf.c |
334 | +++ b/drivers/dma-buf/dma-buf.c |
335 | @@ -54,37 +54,11 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) |
336 | dentry->d_name.name, ret > 0 ? name : ""); |
337 | } |
338 | |
339 | -static const struct dentry_operations dma_buf_dentry_ops = { |
340 | - .d_dname = dmabuffs_dname, |
341 | -}; |
342 | - |
343 | -static struct vfsmount *dma_buf_mnt; |
344 | - |
345 | -static int dma_buf_fs_init_context(struct fs_context *fc) |
346 | -{ |
347 | - struct pseudo_fs_context *ctx; |
348 | - |
349 | - ctx = init_pseudo(fc, DMA_BUF_MAGIC); |
350 | - if (!ctx) |
351 | - return -ENOMEM; |
352 | - ctx->dops = &dma_buf_dentry_ops; |
353 | - return 0; |
354 | -} |
355 | - |
356 | -static struct file_system_type dma_buf_fs_type = { |
357 | - .name = "dmabuf", |
358 | - .init_fs_context = dma_buf_fs_init_context, |
359 | - .kill_sb = kill_anon_super, |
360 | -}; |
361 | - |
362 | -static int dma_buf_release(struct inode *inode, struct file *file) |
363 | +static void dma_buf_release(struct dentry *dentry) |
364 | { |
365 | struct dma_buf *dmabuf; |
366 | |
367 | - if (!is_dma_buf_file(file)) |
368 | - return -EINVAL; |
369 | - |
370 | - dmabuf = file->private_data; |
371 | + dmabuf = dentry->d_fsdata; |
372 | |
373 | BUG_ON(dmabuf->vmapping_counter); |
374 | |
375 | @@ -110,9 +84,32 @@ static int dma_buf_release(struct inode *inode, struct file *file) |
376 | module_put(dmabuf->owner); |
377 | kfree(dmabuf->name); |
378 | kfree(dmabuf); |
379 | +} |
380 | + |
381 | +static const struct dentry_operations dma_buf_dentry_ops = { |
382 | + .d_dname = dmabuffs_dname, |
383 | + .d_release = dma_buf_release, |
384 | +}; |
385 | + |
386 | +static struct vfsmount *dma_buf_mnt; |
387 | + |
388 | +static int dma_buf_fs_init_context(struct fs_context *fc) |
389 | +{ |
390 | + struct pseudo_fs_context *ctx; |
391 | + |
392 | + ctx = init_pseudo(fc, DMA_BUF_MAGIC); |
393 | + if (!ctx) |
394 | + return -ENOMEM; |
395 | + ctx->dops = &dma_buf_dentry_ops; |
396 | return 0; |
397 | } |
398 | |
399 | +static struct file_system_type dma_buf_fs_type = { |
400 | + .name = "dmabuf", |
401 | + .init_fs_context = dma_buf_fs_init_context, |
402 | + .kill_sb = kill_anon_super, |
403 | +}; |
404 | + |
405 | static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) |
406 | { |
407 | struct dma_buf *dmabuf; |
408 | @@ -412,7 +409,6 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file) |
409 | } |
410 | |
411 | static const struct file_operations dma_buf_fops = { |
412 | - .release = dma_buf_release, |
413 | .mmap = dma_buf_mmap_internal, |
414 | .llseek = dma_buf_llseek, |
415 | .poll = dma_buf_poll, |
416 | diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c |
417 | index ad7d2bce91cd..125a44d5a69e 100644 |
418 | --- a/drivers/edac/amd64_edac.c |
419 | +++ b/drivers/edac/amd64_edac.c |
420 | @@ -265,6 +265,8 @@ static int get_scrub_rate(struct mem_ctl_info *mci) |
421 | |
422 | if (pvt->model == 0x60) |
423 | amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); |
424 | + else |
425 | + amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); |
426 | break; |
427 | |
428 | case 0x17: |
429 | diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig |
430 | index b248870a9806..6a6b412206ec 100644 |
431 | --- a/drivers/firmware/efi/Kconfig |
432 | +++ b/drivers/firmware/efi/Kconfig |
433 | @@ -219,3 +219,14 @@ config EFI_EARLYCON |
434 | depends on SERIAL_EARLYCON && !ARM && !IA64 |
435 | select FONT_SUPPORT |
436 | select ARCH_USE_MEMREMAP_PROT |
437 | + |
438 | +config EFI_CUSTOM_SSDT_OVERLAYS |
439 | + bool "Load custom ACPI SSDT overlay from an EFI variable" |
440 | + depends on EFI_VARS && ACPI |
441 | + default ACPI_TABLE_UPGRADE |
442 | + help |
443 | + Allow loading of an ACPI SSDT overlay from an EFI variable specified |
444 | + by a kernel command line option. |
445 | + |
446 | + See Documentation/admin-guide/acpi/ssdt-overlays.rst for more |
447 | + information. |
448 | diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c |
449 | index e3861d267d9a..c1167ef5d2b3 100644 |
450 | --- a/drivers/firmware/efi/efi.c |
451 | +++ b/drivers/firmware/efi/efi.c |
452 | @@ -217,7 +217,7 @@ static void generic_ops_unregister(void) |
453 | efivars_unregister(&generic_efivars); |
454 | } |
455 | |
456 | -#if IS_ENABLED(CONFIG_ACPI) |
457 | +#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS |
458 | #define EFIVAR_SSDT_NAME_MAX 16 |
459 | static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata; |
460 | static int __init efivar_ssdt_setup(char *str) |
461 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c |
462 | index daf687428cdb..663314f807fa 100644 |
463 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c |
464 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c |
465 | @@ -150,6 +150,7 @@ int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev) |
466 | (mode_info->atom_context->bios + data_offset); |
467 | switch (crev) { |
468 | case 11: |
469 | + case 12: |
470 | mem_channel_number = igp_info->v11.umachannelnumber; |
471 | /* channel width is 64 */ |
472 | return mem_channel_number * 64; |
473 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c |
474 | index d1d2372ab7ca..3f744e72912f 100644 |
475 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c |
476 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c |
477 | @@ -2101,7 +2101,7 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, |
478 | if (r) |
479 | return r; |
480 | |
481 | - return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000); |
482 | + return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000); |
483 | } |
484 | |
485 | static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev, |
486 | @@ -2131,7 +2131,7 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, |
487 | if (r) |
488 | return r; |
489 | |
490 | - return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000); |
491 | + return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000); |
492 | } |
493 | |
494 | static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, |
495 | diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c |
496 | index 47e7d11ca0c9..68d56a91d44b 100644 |
497 | --- a/drivers/gpu/drm/amd/display/dc/core/dc.c |
498 | +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c |
499 | @@ -2226,10 +2226,12 @@ void dc_commit_updates_for_stream(struct dc *dc, |
500 | |
501 | copy_stream_update_to_stream(dc, context, stream, stream_update); |
502 | |
503 | - if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { |
504 | - DC_ERROR("Mode validation failed for stream update!\n"); |
505 | - dc_release_state(context); |
506 | - return; |
507 | + if (update_type > UPDATE_TYPE_FAST) { |
508 | + if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { |
509 | + DC_ERROR("Mode validation failed for stream update!\n"); |
510 | + dc_release_state(context); |
511 | + return; |
512 | + } |
513 | } |
514 | |
515 | commit_planes_for_stream( |
516 | diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c |
517 | index d82ea994063f..edf7989d7a8e 100644 |
518 | --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c |
519 | +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c |
520 | @@ -2232,7 +2232,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev, |
521 | |
522 | dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL); |
523 | if (!dpu_enc) |
524 | - return ERR_PTR(ENOMEM); |
525 | + return ERR_PTR(-ENOMEM); |
526 | |
527 | rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs, |
528 | drm_enc_mode, NULL); |
529 | diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c |
530 | index 9c3bdfd20337..63b4de81686a 100644 |
531 | --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c |
532 | +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c |
533 | @@ -262,9 +262,8 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force) |
534 | struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector); |
535 | unsigned long reg; |
536 | |
537 | - if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_HPD_REG, reg, |
538 | - reg & SUN4I_HDMI_HPD_HIGH, |
539 | - 0, 500000)) { |
540 | + reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG); |
541 | + if (reg & SUN4I_HDMI_HPD_HIGH) { |
542 | cec_phys_addr_invalidate(hdmi->cec_adap); |
543 | return connector_status_disconnected; |
544 | } |
545 | diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c |
546 | index 4cf25458f0b9..740ac0a1b726 100644 |
547 | --- a/drivers/hwmon/acpi_power_meter.c |
548 | +++ b/drivers/hwmon/acpi_power_meter.c |
549 | @@ -883,7 +883,7 @@ static int acpi_power_meter_add(struct acpi_device *device) |
550 | |
551 | res = setup_attrs(resource); |
552 | if (res) |
553 | - goto exit_free; |
554 | + goto exit_free_capability; |
555 | |
556 | resource->hwmon_dev = hwmon_device_register(&device->dev); |
557 | if (IS_ERR(resource->hwmon_dev)) { |
558 | @@ -896,6 +896,8 @@ static int acpi_power_meter_add(struct acpi_device *device) |
559 | |
560 | exit_remove: |
561 | remove_attrs(resource); |
562 | +exit_free_capability: |
563 | + free_capabilities(resource); |
564 | exit_free: |
565 | kfree(resource); |
566 | exit: |
567 | diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c |
568 | index 743752a2467a..64122eb38060 100644 |
569 | --- a/drivers/hwmon/max6697.c |
570 | +++ b/drivers/hwmon/max6697.c |
571 | @@ -38,8 +38,9 @@ static const u8 MAX6697_REG_CRIT[] = { |
572 | * Map device tree / platform data register bit map to chip bit map. |
573 | * Applies to alert register and over-temperature register. |
574 | */ |
575 | -#define MAX6697_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \ |
576 | +#define MAX6697_ALERT_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \ |
577 | (((reg) & 0x01) << 6) | ((reg) & 0x80)) |
578 | +#define MAX6697_OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7)) |
579 | |
580 | #define MAX6697_REG_STAT(n) (0x44 + (n)) |
581 | |
582 | @@ -562,12 +563,12 @@ static int max6697_init_chip(struct max6697_data *data, |
583 | return ret; |
584 | |
585 | ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK, |
586 | - MAX6697_MAP_BITS(pdata->alert_mask)); |
587 | + MAX6697_ALERT_MAP_BITS(pdata->alert_mask)); |
588 | if (ret < 0) |
589 | return ret; |
590 | |
591 | ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK, |
592 | - MAX6697_MAP_BITS(pdata->over_temperature_mask)); |
593 | + MAX6697_OVERT_MAP_BITS(pdata->over_temperature_mask)); |
594 | if (ret < 0) |
595 | return ret; |
596 | |
597 | diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c |
598 | index 5ac93f41bfec..8ea850eed18f 100644 |
599 | --- a/drivers/i2c/algos/i2c-algo-pca.c |
600 | +++ b/drivers/i2c/algos/i2c-algo-pca.c |
601 | @@ -314,7 +314,8 @@ static int pca_xfer(struct i2c_adapter *i2c_adap, |
602 | DEB2("BUS ERROR - SDA Stuck low\n"); |
603 | pca_reset(adap); |
604 | goto out; |
605 | - case 0x90: /* Bus error - SCL stuck low */ |
606 | + case 0x78: /* Bus error - SCL stuck low (PCA9665) */ |
607 | + case 0x90: /* Bus error - SCL stuck low (PCA9564) */ |
608 | DEB2("BUS ERROR - SCL Stuck low\n"); |
609 | pca_reset(adap); |
610 | goto out; |
611 | diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c |
612 | index 2fd717d8dd30..71d7bae2cbca 100644 |
613 | --- a/drivers/i2c/busses/i2c-mlxcpld.c |
614 | +++ b/drivers/i2c/busses/i2c-mlxcpld.c |
615 | @@ -337,9 +337,9 @@ static int mlxcpld_i2c_wait_for_tc(struct mlxcpld_i2c_priv *priv) |
616 | if (priv->smbus_block && (val & MLXCPLD_I2C_SMBUS_BLK_BIT)) { |
617 | mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_NUM_DAT_REG, |
618 | &datalen, 1); |
619 | - if (unlikely(datalen > (I2C_SMBUS_BLOCK_MAX + 1))) { |
620 | + if (unlikely(datalen > I2C_SMBUS_BLOCK_MAX)) { |
621 | dev_err(priv->dev, "Incorrect smbus block read message len\n"); |
622 | - return -E2BIG; |
623 | + return -EPROTO; |
624 | } |
625 | } else { |
626 | datalen = priv->xfer.data_len; |
627 | diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c |
628 | index 46dd50ff7c85..11210bf7fd61 100644 |
629 | --- a/drivers/infiniband/core/counters.c |
630 | +++ b/drivers/infiniband/core/counters.c |
631 | @@ -195,7 +195,7 @@ static int __rdma_counter_unbind_qp(struct ib_qp *qp) |
632 | return ret; |
633 | } |
634 | |
635 | -static void counter_history_stat_update(const struct rdma_counter *counter) |
636 | +static void counter_history_stat_update(struct rdma_counter *counter) |
637 | { |
638 | struct ib_device *dev = counter->device; |
639 | struct rdma_port_counter *port_counter; |
640 | @@ -205,6 +205,8 @@ static void counter_history_stat_update(const struct rdma_counter *counter) |
641 | if (!port_counter->hstats) |
642 | return; |
643 | |
644 | + rdma_counter_query_stats(counter); |
645 | + |
646 | for (i = 0; i < counter->stats->num_counters; i++) |
647 | port_counter->hstats->value[i] += counter->stats->value[i]; |
648 | } |
649 | diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c |
650 | index 30ab623343d3..882204d1ef4f 100644 |
651 | --- a/drivers/irqchip/irq-gic.c |
652 | +++ b/drivers/irqchip/irq-gic.c |
653 | @@ -329,10 +329,8 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) |
654 | static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, |
655 | bool force) |
656 | { |
657 | - void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); |
658 | - unsigned int cpu, shift = (gic_irq(d) % 4) * 8; |
659 | - u32 val, mask, bit; |
660 | - unsigned long flags; |
661 | + void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d); |
662 | + unsigned int cpu; |
663 | |
664 | if (!force) |
665 | cpu = cpumask_any_and(mask_val, cpu_online_mask); |
666 | @@ -342,13 +340,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, |
667 | if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) |
668 | return -EINVAL; |
669 | |
670 | - gic_lock_irqsave(flags); |
671 | - mask = 0xff << shift; |
672 | - bit = gic_cpu_map[cpu] << shift; |
673 | - val = readl_relaxed(reg) & ~mask; |
674 | - writel_relaxed(val | bit, reg); |
675 | - gic_unlock_irqrestore(flags); |
676 | - |
677 | + writeb_relaxed(gic_cpu_map[cpu], reg); |
678 | irq_data_update_effective_affinity(d, cpumask_of(cpu)); |
679 | |
680 | return IRQ_SET_MASK_OK_DONE; |
681 | diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c |
682 | index 03267609b515..6e4f3ef2dd50 100644 |
683 | --- a/drivers/md/dm-zoned-target.c |
684 | +++ b/drivers/md/dm-zoned-target.c |
685 | @@ -790,7 +790,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv) |
686 | } |
687 | |
688 | /* Set target (no write same support) */ |
689 | - ti->max_io_len = dev->zone_nr_sectors << 9; |
690 | + ti->max_io_len = dev->zone_nr_sectors; |
691 | ti->num_flush_bios = 1; |
692 | ti->num_discard_bios = 1; |
693 | ti->num_write_zeroes_bios = 1; |
694 | diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c |
695 | index 7bcdce182ee5..e26ae298a080 100644 |
696 | --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c |
697 | +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c |
698 | @@ -1980,7 +1980,6 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, |
699 | u8 mem_type[CTXT_INGRESS + 1] = { 0 }; |
700 | struct cudbg_buffer temp_buff = { 0 }; |
701 | struct cudbg_ch_cntxt *buff; |
702 | - u64 *dst_off, *src_off; |
703 | u8 *ctx_buf; |
704 | u8 i, k; |
705 | int rc; |
706 | @@ -2049,8 +2048,11 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, |
707 | } |
708 | |
709 | for (j = 0; j < max_ctx_qid; j++) { |
710 | + __be64 *dst_off; |
711 | + u64 *src_off; |
712 | + |
713 | src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE); |
714 | - dst_off = (u64 *)buff->data; |
715 | + dst_off = (__be64 *)buff->data; |
716 | |
717 | /* The data is stored in 64-bit cpu order. Convert it |
718 | * to big endian before parsing. |
719 | diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c |
720 | index 43b0f8c57da7..375e1be6a2d8 100644 |
721 | --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c |
722 | +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c |
723 | @@ -165,6 +165,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f, |
724 | unsigned int tid, bool dip, bool sip, bool dp, |
725 | bool sp) |
726 | { |
727 | + u8 *nat_lp = (u8 *)&f->fs.nat_lport; |
728 | + u8 *nat_fp = (u8 *)&f->fs.nat_fport; |
729 | + |
730 | if (dip) { |
731 | if (f->fs.type) { |
732 | set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W, |
733 | @@ -236,8 +239,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f, |
734 | } |
735 | |
736 | set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK, |
737 | - (dp ? f->fs.nat_lport : 0) | |
738 | - (sp ? f->fs.nat_fport << 16 : 0), 1); |
739 | + (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) | |
740 | + (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0), |
741 | + 1); |
742 | } |
743 | |
744 | /* Validate filter spec against configuration done on the card. */ |
745 | @@ -656,6 +660,9 @@ int set_filter_wr(struct adapter *adapter, int fidx) |
746 | fwr->fpm = htons(f->fs.mask.fport); |
747 | |
748 | if (adapter->params.filter2_wr_support) { |
749 | + u8 *nat_lp = (u8 *)&f->fs.nat_lport; |
750 | + u8 *nat_fp = (u8 *)&f->fs.nat_fport; |
751 | + |
752 | fwr->natmode_to_ulp_type = |
753 | FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ? |
754 | ULP_MODE_TCPDDP : |
755 | @@ -663,8 +670,8 @@ int set_filter_wr(struct adapter *adapter, int fidx) |
756 | FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode); |
757 | memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip)); |
758 | memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip)); |
759 | - fwr->newlport = htons(f->fs.nat_lport); |
760 | - fwr->newfport = htons(f->fs.nat_fport); |
761 | + fwr->newlport = htons(nat_lp[1] | nat_lp[0] << 8); |
762 | + fwr->newfport = htons(nat_fp[1] | nat_fp[0] << 8); |
763 | } |
764 | |
765 | /* Mark the filter as "pending" and ship off the Filter Work Request. |
766 | @@ -832,16 +839,16 @@ static bool is_addr_all_mask(u8 *ipmask, int family) |
767 | struct in_addr *addr; |
768 | |
769 | addr = (struct in_addr *)ipmask; |
770 | - if (addr->s_addr == 0xffffffff) |
771 | + if (ntohl(addr->s_addr) == 0xffffffff) |
772 | return true; |
773 | } else if (family == AF_INET6) { |
774 | struct in6_addr *addr6; |
775 | |
776 | addr6 = (struct in6_addr *)ipmask; |
777 | - if (addr6->s6_addr32[0] == 0xffffffff && |
778 | - addr6->s6_addr32[1] == 0xffffffff && |
779 | - addr6->s6_addr32[2] == 0xffffffff && |
780 | - addr6->s6_addr32[3] == 0xffffffff) |
781 | + if (ntohl(addr6->s6_addr32[0]) == 0xffffffff && |
782 | + ntohl(addr6->s6_addr32[1]) == 0xffffffff && |
783 | + ntohl(addr6->s6_addr32[2]) == 0xffffffff && |
784 | + ntohl(addr6->s6_addr32[3]) == 0xffffffff) |
785 | return true; |
786 | } |
787 | return false; |
788 | diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |
789 | index 069a51847885..deb1c1f30107 100644 |
790 | --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |
791 | +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |
792 | @@ -2504,7 +2504,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, |
793 | |
794 | /* Clear out filter specifications */ |
795 | memset(&f->fs, 0, sizeof(struct ch_filter_specification)); |
796 | - f->fs.val.lport = cpu_to_be16(sport); |
797 | + f->fs.val.lport = be16_to_cpu(sport); |
798 | f->fs.mask.lport = ~0; |
799 | val = (u8 *)&sip; |
800 | if ((val[0] | val[1] | val[2] | val[3]) != 0) { |
801 | diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c |
802 | index e447976bdd3e..16a939f9b04d 100644 |
803 | --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c |
804 | +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c |
805 | @@ -58,10 +58,6 @@ static struct ch_tc_pedit_fields pedits[] = { |
806 | PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4), |
807 | PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8), |
808 | PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12), |
809 | - PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0), |
810 | - PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0), |
811 | - PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0), |
812 | - PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0), |
813 | }; |
814 | |
815 | static struct ch_tc_flower_entry *allocate_flower_entry(void) |
816 | @@ -156,14 +152,14 @@ static void cxgb4_process_flow_match(struct net_device *dev, |
817 | struct flow_match_ports match; |
818 | |
819 | flow_rule_match_ports(rule, &match); |
820 | - fs->val.lport = cpu_to_be16(match.key->dst); |
821 | - fs->mask.lport = cpu_to_be16(match.mask->dst); |
822 | - fs->val.fport = cpu_to_be16(match.key->src); |
823 | - fs->mask.fport = cpu_to_be16(match.mask->src); |
824 | + fs->val.lport = be16_to_cpu(match.key->dst); |
825 | + fs->mask.lport = be16_to_cpu(match.mask->dst); |
826 | + fs->val.fport = be16_to_cpu(match.key->src); |
827 | + fs->mask.fport = be16_to_cpu(match.mask->src); |
828 | |
829 | /* also initialize nat_lport/fport to same values */ |
830 | - fs->nat_lport = cpu_to_be16(match.key->dst); |
831 | - fs->nat_fport = cpu_to_be16(match.key->src); |
832 | + fs->nat_lport = fs->val.lport; |
833 | + fs->nat_fport = fs->val.fport; |
834 | } |
835 | |
836 | if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { |
837 | @@ -354,12 +350,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, |
838 | switch (offset) { |
839 | case PEDIT_TCP_SPORT_DPORT: |
840 | if (~mask & PEDIT_TCP_UDP_SPORT_MASK) |
841 | - offload_pedit(fs, cpu_to_be32(val) >> 16, |
842 | - cpu_to_be32(mask) >> 16, |
843 | - TCP_SPORT); |
844 | + fs->nat_fport = val; |
845 | else |
846 | - offload_pedit(fs, cpu_to_be32(val), |
847 | - cpu_to_be32(mask), TCP_DPORT); |
848 | + fs->nat_lport = val >> 16; |
849 | } |
850 | fs->nat_mode = NAT_MODE_ALL; |
851 | break; |
852 | @@ -367,12 +360,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, |
853 | switch (offset) { |
854 | case PEDIT_UDP_SPORT_DPORT: |
855 | if (~mask & PEDIT_TCP_UDP_SPORT_MASK) |
856 | - offload_pedit(fs, cpu_to_be32(val) >> 16, |
857 | - cpu_to_be32(mask) >> 16, |
858 | - UDP_SPORT); |
859 | + fs->nat_fport = val; |
860 | else |
861 | - offload_pedit(fs, cpu_to_be32(val), |
862 | - cpu_to_be32(mask), UDP_DPORT); |
863 | + fs->nat_lport = val >> 16; |
864 | } |
865 | fs->nat_mode = NAT_MODE_ALL; |
866 | } |
867 | diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c |
868 | index 02fc63fa7f25..b3a342561a96 100644 |
869 | --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c |
870 | +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c |
871 | @@ -47,7 +47,7 @@ static int fill_match_fields(struct adapter *adap, |
872 | bool next_header) |
873 | { |
874 | unsigned int i, j; |
875 | - u32 val, mask; |
876 | + __be32 val, mask; |
877 | int off, err; |
878 | bool found; |
879 | |
880 | @@ -216,7 +216,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) |
881 | const struct cxgb4_next_header *next; |
882 | bool found = false; |
883 | unsigned int i, j; |
884 | - u32 val, mask; |
885 | + __be32 val, mask; |
886 | int off; |
887 | |
888 | if (t->table[link_uhtid - 1].link_handle) { |
889 | @@ -230,10 +230,10 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) |
890 | |
891 | /* Try to find matches that allow jumps to next header. */ |
892 | for (i = 0; next[i].jump; i++) { |
893 | - if (next[i].offoff != cls->knode.sel->offoff || |
894 | - next[i].shift != cls->knode.sel->offshift || |
895 | - next[i].mask != cls->knode.sel->offmask || |
896 | - next[i].offset != cls->knode.sel->off) |
897 | + if (next[i].sel.offoff != cls->knode.sel->offoff || |
898 | + next[i].sel.offshift != cls->knode.sel->offshift || |
899 | + next[i].sel.offmask != cls->knode.sel->offmask || |
900 | + next[i].sel.off != cls->knode.sel->off) |
901 | continue; |
902 | |
903 | /* Found a possible candidate. Find a key that |
904 | @@ -245,9 +245,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) |
905 | val = cls->knode.sel->keys[j].val; |
906 | mask = cls->knode.sel->keys[j].mask; |
907 | |
908 | - if (next[i].match_off == off && |
909 | - next[i].match_val == val && |
910 | - next[i].match_mask == mask) { |
911 | + if (next[i].key.off == off && |
912 | + next[i].key.val == val && |
913 | + next[i].key.mask == mask) { |
914 | found = true; |
915 | break; |
916 | } |
917 | diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h |
918 | index a4b99edcc339..141085e159e5 100644 |
919 | --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h |
920 | +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h |
921 | @@ -38,12 +38,12 @@ |
922 | struct cxgb4_match_field { |
923 | int off; /* Offset from the beginning of the header to match */ |
924 | /* Fill the value/mask pair in the spec if matched */ |
925 | - int (*val)(struct ch_filter_specification *f, u32 val, u32 mask); |
926 | + int (*val)(struct ch_filter_specification *f, __be32 val, __be32 mask); |
927 | }; |
928 | |
929 | /* IPv4 match fields */ |
930 | static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f, |
931 | - u32 val, u32 mask) |
932 | + __be32 val, __be32 mask) |
933 | { |
934 | f->val.tos = (ntohl(val) >> 16) & 0x000000FF; |
935 | f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF; |
936 | @@ -52,7 +52,7 @@ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f, |
937 | } |
938 | |
939 | static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f, |
940 | - u32 val, u32 mask) |
941 | + __be32 val, __be32 mask) |
942 | { |
943 | u32 mask_val; |
944 | u8 frag_val; |
945 | @@ -74,7 +74,7 @@ static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f, |
946 | } |
947 | |
948 | static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f, |
949 | - u32 val, u32 mask) |
950 | + __be32 val, __be32 mask) |
951 | { |
952 | f->val.proto = (ntohl(val) >> 16) & 0x000000FF; |
953 | f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF; |
954 | @@ -83,7 +83,7 @@ static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f, |
955 | } |
956 | |
957 | static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f, |
958 | - u32 val, u32 mask) |
959 | + __be32 val, __be32 mask) |
960 | { |
961 | memcpy(&f->val.fip[0], &val, sizeof(u32)); |
962 | memcpy(&f->mask.fip[0], &mask, sizeof(u32)); |
963 | @@ -92,7 +92,7 @@ static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f, |
964 | } |
965 | |
966 | static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f, |
967 | - u32 val, u32 mask) |
968 | + __be32 val, __be32 mask) |
969 | { |
970 | memcpy(&f->val.lip[0], &val, sizeof(u32)); |
971 | memcpy(&f->mask.lip[0], &mask, sizeof(u32)); |
972 | @@ -111,7 +111,7 @@ static const struct cxgb4_match_field cxgb4_ipv4_fields[] = { |
973 | |
974 | /* IPv6 match fields */ |
975 | static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f, |
976 | - u32 val, u32 mask) |
977 | + __be32 val, __be32 mask) |
978 | { |
979 | f->val.tos = (ntohl(val) >> 20) & 0x000000FF; |
980 | f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF; |
981 | @@ -120,7 +120,7 @@ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f, |
982 | } |
983 | |
984 | static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f, |
985 | - u32 val, u32 mask) |
986 | + __be32 val, __be32 mask) |
987 | { |
988 | f->val.proto = (ntohl(val) >> 8) & 0x000000FF; |
989 | f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF; |
990 | @@ -129,7 +129,7 @@ static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f, |
991 | } |
992 | |
993 | static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f, |
994 | - u32 val, u32 mask) |
995 | + __be32 val, __be32 mask) |
996 | { |
997 | memcpy(&f->val.fip[0], &val, sizeof(u32)); |
998 | memcpy(&f->mask.fip[0], &mask, sizeof(u32)); |
999 | @@ -138,7 +138,7 @@ static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f, |
1000 | } |
1001 | |
1002 | static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f, |
1003 | - u32 val, u32 mask) |
1004 | + __be32 val, __be32 mask) |
1005 | { |
1006 | memcpy(&f->val.fip[4], &val, sizeof(u32)); |
1007 | memcpy(&f->mask.fip[4], &mask, sizeof(u32)); |
1008 | @@ -147,7 +147,7 @@ static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f, |
1009 | } |
1010 | |
1011 | static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f, |
1012 | - u32 val, u32 mask) |
1013 | + __be32 val, __be32 mask) |
1014 | { |
1015 | memcpy(&f->val.fip[8], &val, sizeof(u32)); |
1016 | memcpy(&f->mask.fip[8], &mask, sizeof(u32)); |
1017 | @@ -156,7 +156,7 @@ static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f, |
1018 | } |
1019 | |
1020 | static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f, |
1021 | - u32 val, u32 mask) |
1022 | + __be32 val, __be32 mask) |
1023 | { |
1024 | memcpy(&f->val.fip[12], &val, sizeof(u32)); |
1025 | memcpy(&f->mask.fip[12], &mask, sizeof(u32)); |
1026 | @@ -165,7 +165,7 @@ static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f, |
1027 | } |
1028 | |
1029 | static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f, |
1030 | - u32 val, u32 mask) |
1031 | + __be32 val, __be32 mask) |
1032 | { |
1033 | memcpy(&f->val.lip[0], &val, sizeof(u32)); |
1034 | memcpy(&f->mask.lip[0], &mask, sizeof(u32)); |
1035 | @@ -174,7 +174,7 @@ static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f, |
1036 | } |
1037 | |
1038 | static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f, |
1039 | - u32 val, u32 mask) |
1040 | + __be32 val, __be32 mask) |
1041 | { |
1042 | memcpy(&f->val.lip[4], &val, sizeof(u32)); |
1043 | memcpy(&f->mask.lip[4], &mask, sizeof(u32)); |
1044 | @@ -183,7 +183,7 @@ static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f, |
1045 | } |
1046 | |
1047 | static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f, |
1048 | - u32 val, u32 mask) |
1049 | + __be32 val, __be32 mask) |
1050 | { |
1051 | memcpy(&f->val.lip[8], &val, sizeof(u32)); |
1052 | memcpy(&f->mask.lip[8], &mask, sizeof(u32)); |
1053 | @@ -192,7 +192,7 @@ static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f, |
1054 | } |
1055 | |
1056 | static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f, |
1057 | - u32 val, u32 mask) |
1058 | + __be32 val, __be32 mask) |
1059 | { |
1060 | memcpy(&f->val.lip[12], &val, sizeof(u32)); |
1061 | memcpy(&f->mask.lip[12], &mask, sizeof(u32)); |
1062 | @@ -216,7 +216,7 @@ static const struct cxgb4_match_field cxgb4_ipv6_fields[] = { |
1063 | |
1064 | /* TCP/UDP match */ |
1065 | static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f, |
1066 | - u32 val, u32 mask) |
1067 | + __be32 val, __be32 mask) |
1068 | { |
1069 | f->val.fport = ntohl(val) >> 16; |
1070 | f->mask.fport = ntohl(mask) >> 16; |
1071 | @@ -237,19 +237,13 @@ static const struct cxgb4_match_field cxgb4_udp_fields[] = { |
1072 | }; |
1073 | |
1074 | struct cxgb4_next_header { |
1075 | - unsigned int offset; /* Offset to next header */ |
1076 | - /* offset, shift, and mask added to offset above |
1077 | + /* Offset, shift, and mask added to beginning of the header |
1078 | * to get to next header. Useful when using a header |
1079 | * field's value to jump to next header such as IHL field |
1080 | * in IPv4 header. |
1081 | */ |
1082 | - unsigned int offoff; |
1083 | - u32 shift; |
1084 | - u32 mask; |
1085 | - /* match criteria to make this jump */ |
1086 | - unsigned int match_off; |
1087 | - u32 match_val; |
1088 | - u32 match_mask; |
1089 | + struct tc_u32_sel sel; |
1090 | + struct tc_u32_key key; |
1091 | /* location of jump to make */ |
1092 | const struct cxgb4_match_field *jump; |
1093 | }; |
1094 | @@ -258,26 +252,74 @@ struct cxgb4_next_header { |
1095 | * IPv4 header. |
1096 | */ |
1097 | static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = { |
1098 | - { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF, |
1099 | - .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00, |
1100 | - .jump = cxgb4_tcp_fields }, |
1101 | - { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF, |
1102 | - .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00, |
1103 | - .jump = cxgb4_udp_fields }, |
1104 | - { .jump = NULL } |
1105 | + { |
1106 | + /* TCP Jump */ |
1107 | + .sel = { |
1108 | + .off = 0, |
1109 | + .offoff = 0, |
1110 | + .offshift = 6, |
1111 | + .offmask = cpu_to_be16(0x0f00), |
1112 | + }, |
1113 | + .key = { |
1114 | + .off = 8, |
1115 | + .val = cpu_to_be32(0x00060000), |
1116 | + .mask = cpu_to_be32(0x00ff0000), |
1117 | + }, |
1118 | + .jump = cxgb4_tcp_fields, |
1119 | + }, |
1120 | + { |
1121 | + /* UDP Jump */ |
1122 | + .sel = { |
1123 | + .off = 0, |
1124 | + .offoff = 0, |
1125 | + .offshift = 6, |
1126 | + .offmask = cpu_to_be16(0x0f00), |
1127 | + }, |
1128 | + .key = { |
1129 | + .off = 8, |
1130 | + .val = cpu_to_be32(0x00110000), |
1131 | + .mask = cpu_to_be32(0x00ff0000), |
1132 | + }, |
1133 | + .jump = cxgb4_udp_fields, |
1134 | + }, |
1135 | + { .jump = NULL }, |
1136 | }; |
1137 | |
1138 | /* Accept a rule with a jump directly past the 40 Bytes of IPv6 fixed header |
1139 | * to get to transport layer header. |
1140 | */ |
1141 | static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = { |
1142 | - { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0, |
1143 | - .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000, |
1144 | - .jump = cxgb4_tcp_fields }, |
1145 | - { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0, |
1146 | - .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000, |
1147 | - .jump = cxgb4_udp_fields }, |
1148 | - { .jump = NULL } |
1149 | + { |
1150 | + /* TCP Jump */ |
1151 | + .sel = { |
1152 | + .off = 40, |
1153 | + .offoff = 0, |
1154 | + .offshift = 0, |
1155 | + .offmask = 0, |
1156 | + }, |
1157 | + .key = { |
1158 | + .off = 4, |
1159 | + .val = cpu_to_be32(0x00000600), |
1160 | + .mask = cpu_to_be32(0x0000ff00), |
1161 | + }, |
1162 | + .jump = cxgb4_tcp_fields, |
1163 | + }, |
1164 | + { |
1165 | + /* UDP Jump */ |
1166 | + .sel = { |
1167 | + .off = 40, |
1168 | + .offoff = 0, |
1169 | + .offshift = 0, |
1170 | + .offmask = 0, |
1171 | + }, |
1172 | + .key = { |
1173 | + .off = 4, |
1174 | + .val = cpu_to_be32(0x00001100), |
1175 | + .mask = cpu_to_be32(0x0000ff00), |
1176 | + }, |
1177 | + .jump = cxgb4_udp_fields, |
1178 | + }, |
1179 | + { .jump = NULL }, |
1180 | }; |
1181 | |
1182 | struct cxgb4_link { |
1183 | diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c |
1184 | index 3a45ac8f0e01..506170fe3a8b 100644 |
1185 | --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c |
1186 | +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c |
1187 | @@ -2816,7 +2816,7 @@ static noinline int t4_systim_to_hwstamp(struct adapter *adapter, |
1188 | |
1189 | hwtstamps = skb_hwtstamps(skb); |
1190 | memset(hwtstamps, 0, sizeof(*hwtstamps)); |
1191 | - hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data))); |
1192 | + hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data)); |
1193 | |
1194 | return RX_PTP_PKT_SUC; |
1195 | } |
1196 | diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c |
1197 | index 355be77f4241..3cf4dc3433f9 100644 |
1198 | --- a/drivers/net/usb/smsc95xx.c |
1199 | +++ b/drivers/net/usb/smsc95xx.c |
1200 | @@ -1324,7 +1324,7 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) |
1201 | struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); |
1202 | |
1203 | if (pdata) { |
1204 | - cancel_delayed_work(&pdata->carrier_check); |
1205 | + cancel_delayed_work_sync(&pdata->carrier_check); |
1206 | netif_dbg(dev, ifdown, dev->net, "free pdata\n"); |
1207 | kfree(pdata); |
1208 | pdata = NULL; |
1209 | diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c |
1210 | index d4b388793f40..071b63146d4b 100644 |
1211 | --- a/drivers/nvme/host/core.c |
1212 | +++ b/drivers/nvme/host/core.c |
1213 | @@ -1088,10 +1088,16 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, |
1214 | dev_warn(ctrl->device, |
1215 | "Identify Descriptors failed (%d)\n", status); |
1216 | /* |
1217 | - * Don't treat an error as fatal, as we potentially already |
1218 | - * have a NGUID or EUI-64. |
1219 | + * Don't treat non-retryable errors as fatal, as we potentially |
1220 | + * already have a NGUID or EUI-64. If we failed with DNR set, |
1221 | + * we want to silently ignore the error as we can still |
1222 | + * identify the device, but if the status has DNR set, we want |
1223 | + * to propagate the error back specifically for the disk |
1224 | + * revalidation flow to make sure we don't abandon the |
1225 | + * device just because of a temporal retry-able error (such |
1226 | + * as path of transport errors). |
1227 | */ |
1228 | - if (status > 0 && !(status & NVME_SC_DNR)) |
1229 | + if (status > 0 && (status & NVME_SC_DNR)) |
1230 | status = 0; |
1231 | goto free_data; |
1232 | } |
1233 | diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c |
1234 | index 772eb05e57af..5433aa2f7601 100644 |
1235 | --- a/drivers/nvme/host/multipath.c |
1236 | +++ b/drivers/nvme/host/multipath.c |
1237 | @@ -3,6 +3,7 @@ |
1238 | * Copyright (c) 2017-2018 Christoph Hellwig. |
1239 | */ |
1240 | |
1241 | +#include <linux/backing-dev.h> |
1242 | #include <linux/moduleparam.h> |
1243 | #include <trace/events/block.h> |
1244 | #include "nvme.h" |
1245 | @@ -416,11 +417,11 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) |
1246 | if (!head->disk) |
1247 | return; |
1248 | |
1249 | - mutex_lock(&head->lock); |
1250 | - if (!(head->disk->flags & GENHD_FL_UP)) |
1251 | + if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) |
1252 | device_add_disk(&head->subsys->dev, head->disk, |
1253 | nvme_ns_id_attr_groups); |
1254 | |
1255 | + mutex_lock(&head->lock); |
1256 | if (nvme_path_is_optimized(ns)) { |
1257 | int node, srcu_idx; |
1258 | |
1259 | @@ -638,30 +639,46 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr, |
1260 | } |
1261 | DEVICE_ATTR_RO(ana_state); |
1262 | |
1263 | -static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl, |
1264 | +static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl, |
1265 | struct nvme_ana_group_desc *desc, void *data) |
1266 | { |
1267 | - struct nvme_ns *ns = data; |
1268 | + struct nvme_ana_group_desc *dst = data; |
1269 | |
1270 | - if (ns->ana_grpid == le32_to_cpu(desc->grpid)) { |
1271 | - nvme_update_ns_ana_state(desc, ns); |
1272 | - return -ENXIO; /* just break out of the loop */ |
1273 | - } |
1274 | + if (desc->grpid != dst->grpid) |
1275 | + return 0; |
1276 | |
1277 | - return 0; |
1278 | + *dst = *desc; |
1279 | + return -ENXIO; /* just break out of the loop */ |
1280 | } |
1281 | |
1282 | void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id) |
1283 | { |
1284 | if (nvme_ctrl_use_ana(ns->ctrl)) { |
1285 | + struct nvme_ana_group_desc desc = { |
1286 | + .grpid = id->anagrpid, |
1287 | + .state = 0, |
1288 | + }; |
1289 | + |
1290 | mutex_lock(&ns->ctrl->ana_lock); |
1291 | ns->ana_grpid = le32_to_cpu(id->anagrpid); |
1292 | - nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state); |
1293 | + nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc); |
1294 | mutex_unlock(&ns->ctrl->ana_lock); |
1295 | + if (desc.state) { |
1296 | + /* found the group desc: update */ |
1297 | + nvme_update_ns_ana_state(&desc, ns); |
1298 | + } |
1299 | } else { |
1300 | ns->ana_state = NVME_ANA_OPTIMIZED; |
1301 | nvme_mpath_set_live(ns); |
1302 | } |
1303 | + |
1304 | + if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) { |
1305 | + struct gendisk *disk = ns->head->disk; |
1306 | + |
1307 | + if (disk) |
1308 | + disk->queue->backing_dev_info->capabilities |= |
1309 | + BDI_CAP_STABLE_WRITES; |
1310 | + } |
1311 | } |
1312 | |
1313 | void nvme_mpath_remove_disk(struct nvme_ns_head *head) |
1314 | @@ -675,6 +692,14 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) |
1315 | kblockd_schedule_work(&head->requeue_work); |
1316 | flush_work(&head->requeue_work); |
1317 | blk_cleanup_queue(head->disk->queue); |
1318 | + if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { |
1319 | + /* |
1320 | + * if device_add_disk wasn't called, prevent |
1321 | + * disk release to put a bogus reference on the |
1322 | + * request queue |
1323 | + */ |
1324 | + head->disk->queue = NULL; |
1325 | + } |
1326 | put_disk(head->disk); |
1327 | } |
1328 | |
1329 | diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h |
1330 | index 22e8401352c2..ed02260862cb 100644 |
1331 | --- a/drivers/nvme/host/nvme.h |
1332 | +++ b/drivers/nvme/host/nvme.h |
1333 | @@ -345,6 +345,8 @@ struct nvme_ns_head { |
1334 | spinlock_t requeue_lock; |
1335 | struct work_struct requeue_work; |
1336 | struct mutex lock; |
1337 | + unsigned long flags; |
1338 | +#define NVME_NSHEAD_DISK_LIVE 0 |
1339 | struct nvme_ns __rcu *current_path[]; |
1340 | #endif |
1341 | }; |
1342 | diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c |
1343 | index 68e33457c814..9a06818d2816 100644 |
1344 | --- a/drivers/spi/spi-fsl-dspi.c |
1345 | +++ b/drivers/spi/spi-fsl-dspi.c |
1346 | @@ -901,6 +901,8 @@ static int dspi_suspend(struct device *dev) |
1347 | struct spi_controller *ctlr = dev_get_drvdata(dev); |
1348 | struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); |
1349 | |
1350 | + if (dspi->irq) |
1351 | + disable_irq(dspi->irq); |
1352 | spi_controller_suspend(ctlr); |
1353 | clk_disable_unprepare(dspi->clk); |
1354 | |
1355 | @@ -921,6 +923,8 @@ static int dspi_resume(struct device *dev) |
1356 | if (ret) |
1357 | return ret; |
1358 | spi_controller_resume(ctlr); |
1359 | + if (dspi->irq) |
1360 | + enable_irq(dspi->irq); |
1361 | |
1362 | return 0; |
1363 | } |
1364 | @@ -1108,8 +1112,8 @@ static int dspi_probe(struct platform_device *pdev) |
1365 | goto poll_mode; |
1366 | } |
1367 | |
1368 | - ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, |
1369 | - IRQF_SHARED, pdev->name, dspi); |
1370 | + ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL, |
1371 | + IRQF_SHARED, pdev->name, dspi); |
1372 | if (ret < 0) { |
1373 | dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n"); |
1374 | goto out_clk_put; |
1375 | @@ -1122,7 +1126,7 @@ poll_mode: |
1376 | ret = dspi_request_dma(dspi, res->start); |
1377 | if (ret < 0) { |
1378 | dev_err(&pdev->dev, "can't get dma channels\n"); |
1379 | - goto out_clk_put; |
1380 | + goto out_free_irq; |
1381 | } |
1382 | } |
1383 | |
1384 | @@ -1134,11 +1138,14 @@ poll_mode: |
1385 | ret = spi_register_controller(ctlr); |
1386 | if (ret != 0) { |
1387 | dev_err(&pdev->dev, "Problem registering DSPI ctlr\n"); |
1388 | - goto out_clk_put; |
1389 | + goto out_free_irq; |
1390 | } |
1391 | |
1392 | return ret; |
1393 | |
1394 | +out_free_irq: |
1395 | + if (dspi->irq) |
1396 | + free_irq(dspi->irq, dspi); |
1397 | out_clk_put: |
1398 | clk_disable_unprepare(dspi->clk); |
1399 | out_ctlr_put: |
1400 | @@ -1154,6 +1161,8 @@ static int dspi_remove(struct platform_device *pdev) |
1401 | |
1402 | /* Disconnect from the SPI framework */ |
1403 | dspi_release_dma(dspi); |
1404 | + if (dspi->irq) |
1405 | + free_irq(dspi->irq, dspi); |
1406 | clk_disable_unprepare(dspi->clk); |
1407 | spi_unregister_controller(dspi->ctlr); |
1408 | |
1409 | diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c |
1410 | index acf4854cbb8b..d6fabd0a7da6 100644 |
1411 | --- a/drivers/thermal/mtk_thermal.c |
1412 | +++ b/drivers/thermal/mtk_thermal.c |
1413 | @@ -211,6 +211,9 @@ enum { |
1414 | /* The total number of temperature sensors in the MT8183 */ |
1415 | #define MT8183_NUM_SENSORS 6 |
1416 | |
1417 | +/* The number of banks in the MT8183 */ |
1418 | +#define MT8183_NUM_ZONES 1 |
1419 | + |
1420 | /* The number of sensing points per bank */ |
1421 | #define MT8183_NUM_SENSORS_PER_ZONE 6 |
1422 | |
1423 | @@ -498,7 +501,7 @@ static const struct mtk_thermal_data mt7622_thermal_data = { |
1424 | |
1425 | static const struct mtk_thermal_data mt8183_thermal_data = { |
1426 | .auxadc_channel = MT8183_TEMP_AUXADC_CHANNEL, |
1427 | - .num_banks = MT8183_NUM_SENSORS_PER_ZONE, |
1428 | + .num_banks = MT8183_NUM_ZONES, |
1429 | .num_sensors = MT8183_NUM_SENSORS, |
1430 | .vts_index = mt8183_vts_index, |
1431 | .cali_val = MT8183_CALIBRATION, |
1432 | diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c |
1433 | index 755d2b5bd2c2..1ab2ffff4e7c 100644 |
1434 | --- a/drivers/thermal/rcar_gen3_thermal.c |
1435 | +++ b/drivers/thermal/rcar_gen3_thermal.c |
1436 | @@ -169,7 +169,7 @@ static int rcar_gen3_thermal_get_temp(void *devdata, int *temp) |
1437 | { |
1438 | struct rcar_gen3_thermal_tsc *tsc = devdata; |
1439 | int mcelsius, val; |
1440 | - u32 reg; |
1441 | + int reg; |
1442 | |
1443 | /* Read register and convert to mili Celsius */ |
1444 | reg = rcar_gen3_thermal_read(tsc, REG_GEN3_TEMP) & CTEMP_MASK; |
1445 | diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c |
1446 | index 98ada1a3425c..bae88893ee8e 100644 |
1447 | --- a/drivers/usb/misc/usbtest.c |
1448 | +++ b/drivers/usb/misc/usbtest.c |
1449 | @@ -2873,6 +2873,7 @@ static void usbtest_disconnect(struct usb_interface *intf) |
1450 | |
1451 | usb_set_intfdata(intf, NULL); |
1452 | dev_dbg(&intf->dev, "disconnect\n"); |
1453 | + kfree(dev->buf); |
1454 | kfree(dev); |
1455 | } |
1456 | |
1457 | diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c |
1458 | index 947c4aad5d6a..f5df2a4195c2 100644 |
1459 | --- a/fs/cifs/connect.c |
1460 | +++ b/fs/cifs/connect.c |
1461 | @@ -5281,9 +5281,15 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) |
1462 | vol_info->nocase = master_tcon->nocase; |
1463 | vol_info->nohandlecache = master_tcon->nohandlecache; |
1464 | vol_info->local_lease = master_tcon->local_lease; |
1465 | + vol_info->no_lease = master_tcon->no_lease; |
1466 | + vol_info->resilient = master_tcon->use_resilient; |
1467 | + vol_info->persistent = master_tcon->use_persistent; |
1468 | + vol_info->handle_timeout = master_tcon->handle_timeout; |
1469 | vol_info->no_linux_ext = !master_tcon->unix_ext; |
1470 | + vol_info->linux_ext = master_tcon->posix_extensions; |
1471 | vol_info->sectype = master_tcon->ses->sectype; |
1472 | vol_info->sign = master_tcon->ses->sign; |
1473 | + vol_info->seal = master_tcon->seal; |
1474 | |
1475 | rc = cifs_set_vol_auth(vol_info, master_tcon->ses); |
1476 | if (rc) { |
1477 | @@ -5309,10 +5315,6 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) |
1478 | goto out; |
1479 | } |
1480 | |
1481 | - /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */ |
1482 | - if (tcon->posix_extensions) |
1483 | - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; |
1484 | - |
1485 | if (cap_unix(ses)) |
1486 | reset_cifs_unix_caps(0, tcon, NULL, vol_info); |
1487 | |
1488 | diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c |
1489 | index 5e6bc8fa4e46..6045b4868275 100644 |
1490 | --- a/fs/cifs/inode.c |
1491 | +++ b/fs/cifs/inode.c |
1492 | @@ -1791,6 +1791,7 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry, |
1493 | FILE_UNIX_BASIC_INFO *info_buf_target; |
1494 | unsigned int xid; |
1495 | int rc, tmprc; |
1496 | + bool new_target = d_really_is_negative(target_dentry); |
1497 | |
1498 | if (flags & ~RENAME_NOREPLACE) |
1499 | return -EINVAL; |
1500 | @@ -1867,8 +1868,13 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry, |
1501 | */ |
1502 | |
1503 | unlink_target: |
1504 | - /* Try unlinking the target dentry if it's not negative */ |
1505 | - if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) { |
1506 | + /* |
1507 | + * If the target dentry was created during the rename, try |
1508 | + * unlinking it if it's not negative |
1509 | + */ |
1510 | + if (new_target && |
1511 | + d_really_is_positive(target_dentry) && |
1512 | + (rc == -EACCES || rc == -EEXIST)) { |
1513 | if (d_is_dir(target_dentry)) |
1514 | tmprc = cifs_rmdir(target_dir, target_dentry); |
1515 | else |
1516 | diff --git a/fs/io_uring.c b/fs/io_uring.c |
1517 | index 7fa3cd3fff4d..e0200406765c 100644 |
1518 | --- a/fs/io_uring.c |
1519 | +++ b/fs/io_uring.c |
1520 | @@ -267,6 +267,9 @@ struct io_ring_ctx { |
1521 | #if defined(CONFIG_UNIX) |
1522 | struct socket *ring_sock; |
1523 | #endif |
1524 | + |
1525 | + struct list_head task_list; |
1526 | + spinlock_t task_lock; |
1527 | }; |
1528 | |
1529 | struct sqe_submit { |
1530 | @@ -331,14 +334,18 @@ struct io_kiocb { |
1531 | #define REQ_F_ISREG 2048 /* regular file */ |
1532 | #define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */ |
1533 | #define REQ_F_TIMEOUT_NOSEQ 8192 /* no timeout sequence */ |
1534 | +#define REQ_F_CANCEL 16384 /* cancel request */ |
1535 | unsigned long fsize; |
1536 | u64 user_data; |
1537 | u32 result; |
1538 | u32 sequence; |
1539 | + struct task_struct *task; |
1540 | |
1541 | struct fs_struct *fs; |
1542 | |
1543 | struct work_struct work; |
1544 | + struct task_struct *work_task; |
1545 | + struct list_head task_list; |
1546 | }; |
1547 | |
1548 | #define IO_PLUG_THRESHOLD 2 |
1549 | @@ -425,6 +432,8 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) |
1550 | INIT_LIST_HEAD(&ctx->cancel_list); |
1551 | INIT_LIST_HEAD(&ctx->defer_list); |
1552 | INIT_LIST_HEAD(&ctx->timeout_list); |
1553 | + INIT_LIST_HEAD(&ctx->task_list); |
1554 | + spin_lock_init(&ctx->task_lock); |
1555 | return ctx; |
1556 | } |
1557 | |
1558 | @@ -492,6 +501,7 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx) |
1559 | static inline void io_queue_async_work(struct io_ring_ctx *ctx, |
1560 | struct io_kiocb *req) |
1561 | { |
1562 | + unsigned long flags; |
1563 | int rw = 0; |
1564 | |
1565 | if (req->submit.sqe) { |
1566 | @@ -503,6 +513,13 @@ static inline void io_queue_async_work(struct io_ring_ctx *ctx, |
1567 | } |
1568 | } |
1569 | |
1570 | + req->task = current; |
1571 | + |
1572 | + spin_lock_irqsave(&ctx->task_lock, flags); |
1573 | + list_add(&req->task_list, &ctx->task_list); |
1574 | + req->work_task = NULL; |
1575 | + spin_unlock_irqrestore(&ctx->task_lock, flags); |
1576 | + |
1577 | queue_work(ctx->sqo_wq[rw], &req->work); |
1578 | } |
1579 | |
1580 | @@ -2201,6 +2218,8 @@ static void io_sq_wq_submit_work(struct work_struct *work) |
1581 | |
1582 | old_cred = override_creds(ctx->creds); |
1583 | async_list = io_async_list_from_sqe(ctx, req->submit.sqe); |
1584 | + |
1585 | + allow_kernel_signal(SIGINT); |
1586 | restart: |
1587 | do { |
1588 | struct sqe_submit *s = &req->submit; |
1589 | @@ -2232,6 +2251,12 @@ restart: |
1590 | } |
1591 | |
1592 | if (!ret) { |
1593 | + req->work_task = current; |
1594 | + if (req->flags & REQ_F_CANCEL) { |
1595 | + ret = -ECANCELED; |
1596 | + goto end_req; |
1597 | + } |
1598 | + |
1599 | s->has_user = cur_mm != NULL; |
1600 | s->needs_lock = true; |
1601 | do { |
1602 | @@ -2246,6 +2271,12 @@ restart: |
1603 | break; |
1604 | cond_resched(); |
1605 | } while (1); |
1606 | +end_req: |
1607 | + if (!list_empty(&req->task_list)) { |
1608 | + spin_lock_irq(&ctx->task_lock); |
1609 | + list_del_init(&req->task_list); |
1610 | + spin_unlock_irq(&ctx->task_lock); |
1611 | + } |
1612 | } |
1613 | |
1614 | /* drop submission reference */ |
1615 | @@ -2311,6 +2342,7 @@ restart: |
1616 | } |
1617 | |
1618 | out: |
1619 | + disallow_signal(SIGINT); |
1620 | if (cur_mm) { |
1621 | set_fs(old_fs); |
1622 | unuse_mm(cur_mm); |
1623 | @@ -3675,12 +3707,32 @@ static int io_uring_fasync(int fd, struct file *file, int on) |
1624 | return fasync_helper(fd, file, on, &ctx->cq_fasync); |
1625 | } |
1626 | |
1627 | +static void io_cancel_async_work(struct io_ring_ctx *ctx, |
1628 | + struct task_struct *task) |
1629 | +{ |
1630 | + if (list_empty(&ctx->task_list)) |
1631 | + return; |
1632 | + |
1633 | + spin_lock_irq(&ctx->task_lock); |
1634 | + while (!list_empty(&ctx->task_list)) { |
1635 | + struct io_kiocb *req; |
1636 | + |
1637 | + req = list_first_entry(&ctx->task_list, struct io_kiocb, task_list); |
1638 | + list_del_init(&req->task_list); |
1639 | + req->flags |= REQ_F_CANCEL; |
1640 | + if (req->work_task && (!task || req->task == task)) |
1641 | + send_sig(SIGINT, req->work_task, 1); |
1642 | + } |
1643 | + spin_unlock_irq(&ctx->task_lock); |
1644 | +} |
1645 | + |
1646 | static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) |
1647 | { |
1648 | mutex_lock(&ctx->uring_lock); |
1649 | percpu_ref_kill(&ctx->refs); |
1650 | mutex_unlock(&ctx->uring_lock); |
1651 | |
1652 | + io_cancel_async_work(ctx, NULL); |
1653 | io_kill_timeouts(ctx); |
1654 | io_poll_remove_all(ctx); |
1655 | io_iopoll_reap_events(ctx); |
1656 | @@ -3688,6 +3740,16 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) |
1657 | io_ring_ctx_free(ctx); |
1658 | } |
1659 | |
1660 | +static int io_uring_flush(struct file *file, void *data) |
1661 | +{ |
1662 | + struct io_ring_ctx *ctx = file->private_data; |
1663 | + |
1664 | + if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) |
1665 | + io_cancel_async_work(ctx, current); |
1666 | + |
1667 | + return 0; |
1668 | +} |
1669 | + |
1670 | static int io_uring_release(struct inode *inode, struct file *file) |
1671 | { |
1672 | struct io_ring_ctx *ctx = file->private_data; |
1673 | @@ -3792,6 +3854,7 @@ out_fput: |
1674 | |
1675 | static const struct file_operations io_uring_fops = { |
1676 | .release = io_uring_release, |
1677 | + .flush = io_uring_flush, |
1678 | .mmap = io_uring_mmap, |
1679 | .poll = io_uring_poll, |
1680 | .fasync = io_uring_fasync, |
1681 | diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c |
1682 | index 8650a97e2ba9..9af9b673f292 100644 |
1683 | --- a/fs/nfsd/nfs4state.c |
1684 | +++ b/fs/nfsd/nfs4state.c |
1685 | @@ -7705,9 +7705,14 @@ nfs4_state_start_net(struct net *net) |
1686 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); |
1687 | int ret; |
1688 | |
1689 | - ret = nfs4_state_create_net(net); |
1690 | + ret = get_nfsdfs(net); |
1691 | if (ret) |
1692 | return ret; |
1693 | + ret = nfs4_state_create_net(net); |
1694 | + if (ret) { |
1695 | + mntput(nn->nfsd_mnt); |
1696 | + return ret; |
1697 | + } |
1698 | locks_start_grace(net, &nn->nfsd4_manager); |
1699 | nfsd4_client_tracking_init(net); |
1700 | if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0) |
1701 | @@ -7776,6 +7781,7 @@ nfs4_state_shutdown_net(struct net *net) |
1702 | |
1703 | nfsd4_client_tracking_exit(net); |
1704 | nfs4_state_destroy_net(net); |
1705 | + mntput(nn->nfsd_mnt); |
1706 | } |
1707 | |
1708 | void |
1709 | diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c |
1710 | index 159feae6af8b..be418fccc9d8 100644 |
1711 | --- a/fs/nfsd/nfsctl.c |
1712 | +++ b/fs/nfsd/nfsctl.c |
1713 | @@ -1335,6 +1335,7 @@ void nfsd_client_rmdir(struct dentry *dentry) |
1714 | WARN_ON_ONCE(ret); |
1715 | fsnotify_rmdir(dir, dentry); |
1716 | d_delete(dentry); |
1717 | + dput(dentry); |
1718 | inode_unlock(dir); |
1719 | } |
1720 | |
1721 | @@ -1424,6 +1425,18 @@ static struct file_system_type nfsd_fs_type = { |
1722 | }; |
1723 | MODULE_ALIAS_FS("nfsd"); |
1724 | |
1725 | +int get_nfsdfs(struct net *net) |
1726 | +{ |
1727 | + struct nfsd_net *nn = net_generic(net, nfsd_net_id); |
1728 | + struct vfsmount *mnt; |
1729 | + |
1730 | + mnt = vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL); |
1731 | + if (IS_ERR(mnt)) |
1732 | + return PTR_ERR(mnt); |
1733 | + nn->nfsd_mnt = mnt; |
1734 | + return 0; |
1735 | +} |
1736 | + |
1737 | #ifdef CONFIG_PROC_FS |
1738 | static int create_proc_exports_entry(void) |
1739 | { |
1740 | @@ -1452,7 +1465,6 @@ unsigned int nfsd_net_id; |
1741 | static __net_init int nfsd_init_net(struct net *net) |
1742 | { |
1743 | int retval; |
1744 | - struct vfsmount *mnt; |
1745 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); |
1746 | |
1747 | retval = nfsd_export_init(net); |
1748 | @@ -1479,16 +1491,8 @@ static __net_init int nfsd_init_net(struct net *net) |
1749 | init_waitqueue_head(&nn->ntf_wq); |
1750 | seqlock_init(&nn->boot_lock); |
1751 | |
1752 | - mnt = vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL); |
1753 | - if (IS_ERR(mnt)) { |
1754 | - retval = PTR_ERR(mnt); |
1755 | - goto out_mount_err; |
1756 | - } |
1757 | - nn->nfsd_mnt = mnt; |
1758 | return 0; |
1759 | |
1760 | -out_mount_err: |
1761 | - nfsd_reply_cache_shutdown(nn); |
1762 | out_drc_error: |
1763 | nfsd_idmap_shutdown(net); |
1764 | out_idmap_error: |
1765 | @@ -1501,7 +1505,6 @@ static __net_exit void nfsd_exit_net(struct net *net) |
1766 | { |
1767 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); |
1768 | |
1769 | - mntput(nn->nfsd_mnt); |
1770 | nfsd_reply_cache_shutdown(nn); |
1771 | nfsd_idmap_shutdown(net); |
1772 | nfsd_export_shutdown(net); |
1773 | diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h |
1774 | index af2947551e9c..4ff0c5318a02 100644 |
1775 | --- a/fs/nfsd/nfsd.h |
1776 | +++ b/fs/nfsd/nfsd.h |
1777 | @@ -87,6 +87,8 @@ int nfsd_pool_stats_release(struct inode *, struct file *); |
1778 | |
1779 | void nfsd_destroy(struct net *net); |
1780 | |
1781 | +int get_nfsdfs(struct net *); |
1782 | + |
1783 | struct nfsdfs_client { |
1784 | struct kref cl_ref; |
1785 | void (*cl_release)(struct kref *kref); |
1786 | @@ -97,6 +99,7 @@ struct dentry *nfsd_client_mkdir(struct nfsd_net *nn, |
1787 | struct nfsdfs_client *ncl, u32 id, const struct tree_descr *); |
1788 | void nfsd_client_rmdir(struct dentry *dentry); |
1789 | |
1790 | + |
1791 | #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) |
1792 | #ifdef CONFIG_NFSD_V2_ACL |
1793 | extern const struct svc_version nfsd_acl_version2; |
1794 | diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c |
1795 | index 005d1802ab40..b6f4b552c9af 100644 |
1796 | --- a/fs/nfsd/vfs.c |
1797 | +++ b/fs/nfsd/vfs.c |
1798 | @@ -1184,6 +1184,9 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp, |
1799 | iap->ia_mode = 0; |
1800 | iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type; |
1801 | |
1802 | + if (!IS_POSIXACL(dirp)) |
1803 | + iap->ia_mode &= ~current_umask(); |
1804 | + |
1805 | err = 0; |
1806 | host_err = 0; |
1807 | switch (type) { |
1808 | @@ -1416,6 +1419,9 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, |
1809 | goto out; |
1810 | } |
1811 | |
1812 | + if (!IS_POSIXACL(dirp)) |
1813 | + iap->ia_mode &= ~current_umask(); |
1814 | + |
1815 | host_err = vfs_create(dirp, dchild, iap->ia_mode, true); |
1816 | if (host_err < 0) { |
1817 | fh_drop_write(fhp); |
1818 | diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h |
1819 | index 24cfa96f98ea..864849e942c4 100644 |
1820 | --- a/include/crypto/if_alg.h |
1821 | +++ b/include/crypto/if_alg.h |
1822 | @@ -29,8 +29,8 @@ struct alg_sock { |
1823 | |
1824 | struct sock *parent; |
1825 | |
1826 | - unsigned int refcnt; |
1827 | - unsigned int nokey_refcnt; |
1828 | + atomic_t refcnt; |
1829 | + atomic_t nokey_refcnt; |
1830 | |
1831 | const struct af_alg_type *type; |
1832 | void *private; |
1833 | diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c |
1834 | index 7d54c7c28054..2222f3225e53 100644 |
1835 | --- a/kernel/debug/debug_core.c |
1836 | +++ b/kernel/debug/debug_core.c |
1837 | @@ -546,6 +546,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, |
1838 | arch_kgdb_ops.disable_hw_break(regs); |
1839 | |
1840 | acquirelock: |
1841 | + rcu_read_lock(); |
1842 | /* |
1843 | * Interrupts will be restored by the 'trap return' code, except when |
1844 | * single stepping. |
1845 | @@ -602,6 +603,7 @@ return_normal: |
1846 | atomic_dec(&slaves_in_kgdb); |
1847 | dbg_touch_watchdogs(); |
1848 | local_irq_restore(flags); |
1849 | + rcu_read_unlock(); |
1850 | return 0; |
1851 | } |
1852 | cpu_relax(); |
1853 | @@ -620,6 +622,7 @@ return_normal: |
1854 | raw_spin_unlock(&dbg_master_lock); |
1855 | dbg_touch_watchdogs(); |
1856 | local_irq_restore(flags); |
1857 | + rcu_read_unlock(); |
1858 | |
1859 | goto acquirelock; |
1860 | } |
1861 | @@ -743,6 +746,7 @@ kgdb_restore: |
1862 | raw_spin_unlock(&dbg_master_lock); |
1863 | dbg_touch_watchdogs(); |
1864 | local_irq_restore(flags); |
1865 | + rcu_read_unlock(); |
1866 | |
1867 | return kgdb_info[cpu].ret_state; |
1868 | } |
1869 | diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c |
1870 | index f7e4579e746c..c4b702fe1d73 100644 |
1871 | --- a/kernel/sched/debug.c |
1872 | +++ b/kernel/sched/debug.c |
1873 | @@ -258,7 +258,7 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd) |
1874 | set_table_entry(&table[2], "busy_factor", &sd->busy_factor, sizeof(int), 0644, proc_dointvec_minmax); |
1875 | set_table_entry(&table[3], "imbalance_pct", &sd->imbalance_pct, sizeof(int), 0644, proc_dointvec_minmax); |
1876 | set_table_entry(&table[4], "cache_nice_tries", &sd->cache_nice_tries, sizeof(int), 0644, proc_dointvec_minmax); |
1877 | - set_table_entry(&table[5], "flags", &sd->flags, sizeof(int), 0644, proc_dointvec_minmax); |
1878 | + set_table_entry(&table[5], "flags", &sd->flags, sizeof(int), 0444, proc_dointvec_minmax); |
1879 | set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax); |
1880 | set_table_entry(&table[7], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring); |
1881 | /* &table[8] is terminator */ |
1882 | diff --git a/mm/compaction.c b/mm/compaction.c |
1883 | index 672d3c78c6ab..92470625f0b1 100644 |
1884 | --- a/mm/compaction.c |
1885 | +++ b/mm/compaction.c |
1886 | @@ -2310,16 +2310,26 @@ static enum compact_result compact_zone_order(struct zone *zone, int order, |
1887 | .page = NULL, |
1888 | }; |
1889 | |
1890 | - if (capture) |
1891 | - current->capture_control = &capc; |
1892 | + /* |
1893 | + * Make sure the structs are really initialized before we expose the |
1894 | + * capture control, in case we are interrupted and the interrupt handler |
1895 | + * frees a page. |
1896 | + */ |
1897 | + barrier(); |
1898 | + WRITE_ONCE(current->capture_control, &capc); |
1899 | |
1900 | ret = compact_zone(&cc, &capc); |
1901 | |
1902 | VM_BUG_ON(!list_empty(&cc.freepages)); |
1903 | VM_BUG_ON(!list_empty(&cc.migratepages)); |
1904 | |
1905 | - *capture = capc.page; |
1906 | - current->capture_control = NULL; |
1907 | + /* |
1908 | + * Make sure we hide capture control first before we read the captured |
1909 | + * page pointer, otherwise an interrupt could free and capture a page |
1910 | + * and we would leak it. |
1911 | + */ |
1912 | + WRITE_ONCE(current->capture_control, NULL); |
1913 | + *capture = READ_ONCE(capc.page); |
1914 | |
1915 | return ret; |
1916 | } |
1917 | @@ -2333,6 +2343,7 @@ int sysctl_extfrag_threshold = 500; |
1918 | * @alloc_flags: The allocation flags of the current allocation |
1919 | * @ac: The context of current allocation |
1920 | * @prio: Determines how hard direct compaction should try to succeed |
1921 | + * @capture: Pointer to free page created by compaction will be stored here |
1922 | * |
1923 | * This is the main entry point for direct page compaction. |
1924 | */ |
1925 | diff --git a/mm/slub.c b/mm/slub.c |
1926 | index fca33abd6c42..709e31002504 100644 |
1927 | --- a/mm/slub.c |
1928 | +++ b/mm/slub.c |
1929 | @@ -644,6 +644,20 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...) |
1930 | va_end(args); |
1931 | } |
1932 | |
1933 | +static bool freelist_corrupted(struct kmem_cache *s, struct page *page, |
1934 | + void *freelist, void *nextfree) |
1935 | +{ |
1936 | + if ((s->flags & SLAB_CONSISTENCY_CHECKS) && |
1937 | + !check_valid_pointer(s, page, nextfree)) { |
1938 | + object_err(s, page, freelist, "Freechain corrupt"); |
1939 | + freelist = NULL; |
1940 | + slab_fix(s, "Isolate corrupted freechain"); |
1941 | + return true; |
1942 | + } |
1943 | + |
1944 | + return false; |
1945 | +} |
1946 | + |
1947 | static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) |
1948 | { |
1949 | unsigned int off; /* Offset of last byte */ |
1950 | @@ -1379,6 +1393,11 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, |
1951 | static inline void dec_slabs_node(struct kmem_cache *s, int node, |
1952 | int objects) {} |
1953 | |
1954 | +static bool freelist_corrupted(struct kmem_cache *s, struct page *page, |
1955 | + void *freelist, void *nextfree) |
1956 | +{ |
1957 | + return false; |
1958 | +} |
1959 | #endif /* CONFIG_SLUB_DEBUG */ |
1960 | |
1961 | /* |
1962 | @@ -2062,6 +2081,14 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, |
1963 | void *prior; |
1964 | unsigned long counters; |
1965 | |
1966 | + /* |
1967 | + * If 'nextfree' is invalid, it is possible that the object at |
1968 | + * 'freelist' is already corrupted. So isolate all objects |
1969 | + * starting at 'freelist'. |
1970 | + */ |
1971 | + if (freelist_corrupted(s, page, freelist, nextfree)) |
1972 | + break; |
1973 | + |
1974 | do { |
1975 | prior = page->freelist; |
1976 | counters = page->counters; |
1977 | @@ -5621,7 +5648,8 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s) |
1978 | */ |
1979 | if (buffer) |
1980 | buf = buffer; |
1981 | - else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf)) |
1982 | + else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf) && |
1983 | + !IS_ENABLED(CONFIG_SLUB_STATS)) |
1984 | buf = mbuf; |
1985 | else { |
1986 | buffer = (char *) get_zeroed_page(GFP_KERNEL); |
1987 | diff --git a/mm/swap_state.c b/mm/swap_state.c |
1988 | index 8e7ce9a9bc5e..4ce014dc4571 100644 |
1989 | --- a/mm/swap_state.c |
1990 | +++ b/mm/swap_state.c |
1991 | @@ -23,6 +23,7 @@ |
1992 | #include <linux/huge_mm.h> |
1993 | |
1994 | #include <asm/pgtable.h> |
1995 | +#include "internal.h" |
1996 | |
1997 | /* |
1998 | * swapper_space is a fiction, retained to simplify the path through |
1999 | @@ -418,7 +419,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, |
2000 | /* May fail (-ENOMEM) if XArray node allocation failed. */ |
2001 | __SetPageLocked(new_page); |
2002 | __SetPageSwapBacked(new_page); |
2003 | - err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL); |
2004 | + err = add_to_swap_cache(new_page, entry, |
2005 | + gfp_mask & GFP_RECLAIM_MASK); |
2006 | if (likely(!err)) { |
2007 | /* Initiate read into locked page */ |
2008 | SetPageWorkingset(new_page); |
2009 | diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c |
2010 | index 2a65ac41055f..9ff85ee8337c 100644 |
2011 | --- a/net/rxrpc/call_event.c |
2012 | +++ b/net/rxrpc/call_event.c |
2013 | @@ -248,7 +248,18 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) |
2014 | if (anno_type != RXRPC_TX_ANNO_RETRANS) |
2015 | continue; |
2016 | |
2017 | + /* We need to reset the retransmission state, but we need to do |
2018 | + * so before we drop the lock as a new ACK/NAK may come in and |
2019 | + * confuse things |
2020 | + */ |
2021 | + annotation &= ~RXRPC_TX_ANNO_MASK; |
2022 | + annotation |= RXRPC_TX_ANNO_UNACK | RXRPC_TX_ANNO_RESENT; |
2023 | + call->rxtx_annotations[ix] = annotation; |
2024 | + |
2025 | skb = call->rxtx_buffer[ix]; |
2026 | + if (!skb) |
2027 | + continue; |
2028 | + |
2029 | rxrpc_get_skb(skb, rxrpc_skb_got); |
2030 | spin_unlock_bh(&call->lock); |
2031 | |
2032 | @@ -262,24 +273,6 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) |
2033 | |
2034 | rxrpc_free_skb(skb, rxrpc_skb_freed); |
2035 | spin_lock_bh(&call->lock); |
2036 | - |
2037 | - /* We need to clear the retransmit state, but there are two |
2038 | - * things we need to be aware of: A new ACK/NAK might have been |
2039 | - * received and the packet might have been hard-ACK'd (in which |
2040 | - * case it will no longer be in the buffer). |
2041 | - */ |
2042 | - if (after(seq, call->tx_hard_ack)) { |
2043 | - annotation = call->rxtx_annotations[ix]; |
2044 | - anno_type = annotation & RXRPC_TX_ANNO_MASK; |
2045 | - if (anno_type == RXRPC_TX_ANNO_RETRANS || |
2046 | - anno_type == RXRPC_TX_ANNO_NAK) { |
2047 | - annotation &= ~RXRPC_TX_ANNO_MASK; |
2048 | - annotation |= RXRPC_TX_ANNO_UNACK; |
2049 | - } |
2050 | - annotation |= RXRPC_TX_ANNO_RESENT; |
2051 | - call->rxtx_annotations[ix] = annotation; |
2052 | - } |
2053 | - |
2054 | if (after(call->tx_hard_ack, seq)) |
2055 | seq = call->tx_hard_ack; |
2056 | } |
2057 | diff --git a/samples/vfs/test-statx.c b/samples/vfs/test-statx.c |
2058 | index a3d68159fb51..507f09c38b49 100644 |
2059 | --- a/samples/vfs/test-statx.c |
2060 | +++ b/samples/vfs/test-statx.c |
2061 | @@ -23,6 +23,8 @@ |
2062 | #include <linux/fcntl.h> |
2063 | #define statx foo |
2064 | #define statx_timestamp foo_timestamp |
2065 | +struct statx; |
2066 | +struct statx_timestamp; |
2067 | #include <sys/stat.h> |
2068 | #undef statx |
2069 | #undef statx_timestamp |
2070 | diff --git a/sound/usb/card.h b/sound/usb/card.h |
2071 | index d6219fba9699..f39f23e3525d 100644 |
2072 | --- a/sound/usb/card.h |
2073 | +++ b/sound/usb/card.h |
2074 | @@ -84,10 +84,6 @@ struct snd_usb_endpoint { |
2075 | dma_addr_t sync_dma; /* DMA address of syncbuf */ |
2076 | |
2077 | unsigned int pipe; /* the data i/o pipe */ |
2078 | - unsigned int framesize[2]; /* small/large frame sizes in samples */ |
2079 | - unsigned int sample_rem; /* remainder from division fs/fps */ |
2080 | - unsigned int sample_accum; /* sample accumulator */ |
2081 | - unsigned int fps; /* frames per second */ |
2082 | unsigned int freqn; /* nominal sampling rate in fs/fps in Q16.16 format */ |
2083 | unsigned int freqm; /* momentary sampling rate in fs/fps in Q16.16 format */ |
2084 | int freqshift; /* how much to shift the feedback value to get Q16.16 */ |
2085 | diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c |
2086 | index 9bea7d3f99f8..87cc249a31b9 100644 |
2087 | --- a/sound/usb/endpoint.c |
2088 | +++ b/sound/usb/endpoint.c |
2089 | @@ -124,12 +124,12 @@ int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep) |
2090 | |
2091 | /* |
2092 | * For streaming based on information derived from sync endpoints, |
2093 | - * prepare_outbound_urb_sizes() will call slave_next_packet_size() to |
2094 | + * prepare_outbound_urb_sizes() will call next_packet_size() to |
2095 | * determine the number of samples to be sent in the next packet. |
2096 | * |
2097 | - * For implicit feedback, slave_next_packet_size() is unused. |
2098 | + * For implicit feedback, next_packet_size() is unused. |
2099 | */ |
2100 | -int snd_usb_endpoint_slave_next_packet_size(struct snd_usb_endpoint *ep) |
2101 | +int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep) |
2102 | { |
2103 | unsigned long flags; |
2104 | int ret; |
2105 | @@ -146,29 +146,6 @@ int snd_usb_endpoint_slave_next_packet_size(struct snd_usb_endpoint *ep) |
2106 | return ret; |
2107 | } |
2108 | |
2109 | -/* |
2110 | - * For adaptive and synchronous endpoints, prepare_outbound_urb_sizes() |
2111 | - * will call next_packet_size() to determine the number of samples to be |
2112 | - * sent in the next packet. |
2113 | - */ |
2114 | -int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep) |
2115 | -{ |
2116 | - int ret; |
2117 | - |
2118 | - if (ep->fill_max) |
2119 | - return ep->maxframesize; |
2120 | - |
2121 | - ep->sample_accum += ep->sample_rem; |
2122 | - if (ep->sample_accum >= ep->fps) { |
2123 | - ep->sample_accum -= ep->fps; |
2124 | - ret = ep->framesize[1]; |
2125 | - } else { |
2126 | - ret = ep->framesize[0]; |
2127 | - } |
2128 | - |
2129 | - return ret; |
2130 | -} |
2131 | - |
2132 | static void retire_outbound_urb(struct snd_usb_endpoint *ep, |
2133 | struct snd_urb_ctx *urb_ctx) |
2134 | { |
2135 | @@ -213,8 +190,6 @@ static void prepare_silent_urb(struct snd_usb_endpoint *ep, |
2136 | |
2137 | if (ctx->packet_size[i]) |
2138 | counts = ctx->packet_size[i]; |
2139 | - else if (ep->sync_master) |
2140 | - counts = snd_usb_endpoint_slave_next_packet_size(ep); |
2141 | else |
2142 | counts = snd_usb_endpoint_next_packet_size(ep); |
2143 | |
2144 | @@ -1086,17 +1061,10 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, |
2145 | ep->maxpacksize = fmt->maxpacksize; |
2146 | ep->fill_max = !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX); |
2147 | |
2148 | - if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL) { |
2149 | + if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL) |
2150 | ep->freqn = get_usb_full_speed_rate(rate); |
2151 | - ep->fps = 1000; |
2152 | - } else { |
2153 | + else |
2154 | ep->freqn = get_usb_high_speed_rate(rate); |
2155 | - ep->fps = 8000; |
2156 | - } |
2157 | - |
2158 | - ep->sample_rem = rate % ep->fps; |
2159 | - ep->framesize[0] = rate / ep->fps; |
2160 | - ep->framesize[1] = (rate + (ep->fps - 1)) / ep->fps; |
2161 | |
2162 | /* calculate the frequency in 16.16 format */ |
2163 | ep->freqm = ep->freqn; |
2164 | @@ -1155,7 +1123,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep) |
2165 | ep->active_mask = 0; |
2166 | ep->unlink_mask = 0; |
2167 | ep->phase = 0; |
2168 | - ep->sample_accum = 0; |
2169 | |
2170 | snd_usb_endpoint_start_quirk(ep); |
2171 | |
2172 | diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h |
2173 | index d23fa0a8c11b..63a39d4fa8d8 100644 |
2174 | --- a/sound/usb/endpoint.h |
2175 | +++ b/sound/usb/endpoint.h |
2176 | @@ -28,7 +28,6 @@ void snd_usb_endpoint_release(struct snd_usb_endpoint *ep); |
2177 | void snd_usb_endpoint_free(struct snd_usb_endpoint *ep); |
2178 | |
2179 | int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep); |
2180 | -int snd_usb_endpoint_slave_next_packet_size(struct snd_usb_endpoint *ep); |
2181 | int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep); |
2182 | |
2183 | void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep, |
2184 | diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c |
2185 | index b971d9aaa64a..426c55b45e79 100644 |
2186 | --- a/sound/usb/pcm.c |
2187 | +++ b/sound/usb/pcm.c |
2188 | @@ -1581,8 +1581,6 @@ static void prepare_playback_urb(struct snd_usb_substream *subs, |
2189 | for (i = 0; i < ctx->packets; i++) { |
2190 | if (ctx->packet_size[i]) |
2191 | counts = ctx->packet_size[i]; |
2192 | - else if (ep->sync_master) |
2193 | - counts = snd_usb_endpoint_slave_next_packet_size(ep); |
2194 | else |
2195 | counts = snd_usb_endpoint_next_packet_size(ep); |
2196 | |
2197 | diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c |
2198 | index d948475585ce..798284f511f1 100644 |
2199 | --- a/tools/lib/traceevent/event-parse.c |
2200 | +++ b/tools/lib/traceevent/event-parse.c |
2201 | @@ -1425,13 +1425,28 @@ static unsigned int type_size(const char *name) |
2202 | return 0; |
2203 | } |
2204 | |
2205 | +static int append(char **buf, const char *delim, const char *str) |
2206 | +{ |
2207 | + char *new_buf; |
2208 | + |
2209 | + new_buf = realloc(*buf, strlen(*buf) + strlen(delim) + strlen(str) + 1); |
2210 | + if (!new_buf) |
2211 | + return -1; |
2212 | + strcat(new_buf, delim); |
2213 | + strcat(new_buf, str); |
2214 | + *buf = new_buf; |
2215 | + return 0; |
2216 | +} |
2217 | + |
2218 | static int event_read_fields(struct tep_event *event, struct tep_format_field **fields) |
2219 | { |
2220 | struct tep_format_field *field = NULL; |
2221 | enum tep_event_type type; |
2222 | char *token; |
2223 | char *last_token; |
2224 | + char *delim = " "; |
2225 | int count = 0; |
2226 | + int ret; |
2227 | |
2228 | do { |
2229 | unsigned int size_dynamic = 0; |
2230 | @@ -1490,24 +1505,51 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** |
2231 | field->flags |= TEP_FIELD_IS_POINTER; |
2232 | |
2233 | if (field->type) { |
2234 | - char *new_type; |
2235 | - new_type = realloc(field->type, |
2236 | - strlen(field->type) + |
2237 | - strlen(last_token) + 2); |
2238 | - if (!new_type) { |
2239 | - free(last_token); |
2240 | - goto fail; |
2241 | - } |
2242 | - field->type = new_type; |
2243 | - strcat(field->type, " "); |
2244 | - strcat(field->type, last_token); |
2245 | + ret = append(&field->type, delim, last_token); |
2246 | free(last_token); |
2247 | + if (ret < 0) |
2248 | + goto fail; |
2249 | } else |
2250 | field->type = last_token; |
2251 | last_token = token; |
2252 | + delim = " "; |
2253 | continue; |
2254 | } |
2255 | |
2256 | + /* Handle __attribute__((user)) */ |
2257 | + if ((type == TEP_EVENT_DELIM) && |
2258 | + strcmp("__attribute__", last_token) == 0 && |
2259 | + token[0] == '(') { |
2260 | + int depth = 1; |
2261 | + int ret; |
2262 | + |
2263 | + ret = append(&field->type, " ", last_token); |
2264 | + ret |= append(&field->type, "", "("); |
2265 | + if (ret < 0) |
2266 | + goto fail; |
2267 | + |
2268 | + delim = " "; |
2269 | + while ((type = read_token(&token)) != TEP_EVENT_NONE) { |
2270 | + if (type == TEP_EVENT_DELIM) { |
2271 | + if (token[0] == '(') |
2272 | + depth++; |
2273 | + else if (token[0] == ')') |
2274 | + depth--; |
2275 | + if (!depth) |
2276 | + break; |
2277 | + ret = append(&field->type, "", token); |
2278 | + delim = ""; |
2279 | + } else { |
2280 | + ret = append(&field->type, delim, token); |
2281 | + delim = " "; |
2282 | + } |
2283 | + if (ret < 0) |
2284 | + goto fail; |
2285 | + free(last_token); |
2286 | + last_token = token; |
2287 | + } |
2288 | + continue; |
2289 | + } |
2290 | break; |
2291 | } |
2292 | |
2293 | @@ -1523,8 +1565,6 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** |
2294 | if (strcmp(token, "[") == 0) { |
2295 | enum tep_event_type last_type = type; |
2296 | char *brackets = token; |
2297 | - char *new_brackets; |
2298 | - int len; |
2299 | |
2300 | field->flags |= TEP_FIELD_IS_ARRAY; |
2301 | |
2302 | @@ -1536,29 +1576,27 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** |
2303 | field->arraylen = 0; |
2304 | |
2305 | while (strcmp(token, "]") != 0) { |
2306 | + const char *delim; |
2307 | + |
2308 | if (last_type == TEP_EVENT_ITEM && |
2309 | type == TEP_EVENT_ITEM) |
2310 | - len = 2; |
2311 | + delim = " "; |
2312 | else |
2313 | - len = 1; |
2314 | + delim = ""; |
2315 | + |
2316 | last_type = type; |
2317 | |
2318 | - new_brackets = realloc(brackets, |
2319 | - strlen(brackets) + |
2320 | - strlen(token) + len); |
2321 | - if (!new_brackets) { |
2322 | + ret = append(&brackets, delim, token); |
2323 | + if (ret < 0) { |
2324 | free(brackets); |
2325 | goto fail; |
2326 | } |
2327 | - brackets = new_brackets; |
2328 | - if (len == 2) |
2329 | - strcat(brackets, " "); |
2330 | - strcat(brackets, token); |
2331 | /* We only care about the last token */ |
2332 | field->arraylen = strtoul(token, NULL, 0); |
2333 | free_token(token); |
2334 | type = read_token(&token); |
2335 | if (type == TEP_EVENT_NONE) { |
2336 | + free(brackets); |
2337 | do_warning_event(event, "failed to find token"); |
2338 | goto fail; |
2339 | } |
2340 | @@ -1566,13 +1604,11 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** |
2341 | |
2342 | free_token(token); |
2343 | |
2344 | - new_brackets = realloc(brackets, strlen(brackets) + 2); |
2345 | - if (!new_brackets) { |
2346 | + ret = append(&brackets, "", "]"); |
2347 | + if (ret < 0) { |
2348 | free(brackets); |
2349 | goto fail; |
2350 | } |
2351 | - brackets = new_brackets; |
2352 | - strcat(brackets, "]"); |
2353 | |
2354 | /* add brackets to type */ |
2355 | |
2356 | @@ -1582,34 +1618,23 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** |
2357 | * the format: type [] item; |
2358 | */ |
2359 | if (type == TEP_EVENT_ITEM) { |
2360 | - char *new_type; |
2361 | - new_type = realloc(field->type, |
2362 | - strlen(field->type) + |
2363 | - strlen(field->name) + |
2364 | - strlen(brackets) + 2); |
2365 | - if (!new_type) { |
2366 | + ret = append(&field->type, " ", field->name); |
2367 | + if (ret < 0) { |
2368 | free(brackets); |
2369 | goto fail; |
2370 | } |
2371 | - field->type = new_type; |
2372 | - strcat(field->type, " "); |
2373 | - strcat(field->type, field->name); |
2374 | + ret = append(&field->type, "", brackets); |
2375 | + |
2376 | size_dynamic = type_size(field->name); |
2377 | free_token(field->name); |
2378 | - strcat(field->type, brackets); |
2379 | field->name = field->alias = token; |
2380 | type = read_token(&token); |
2381 | } else { |
2382 | - char *new_type; |
2383 | - new_type = realloc(field->type, |
2384 | - strlen(field->type) + |
2385 | - strlen(brackets) + 1); |
2386 | - if (!new_type) { |
2387 | + ret = append(&field->type, "", brackets); |
2388 | + if (ret < 0) { |
2389 | free(brackets); |
2390 | goto fail; |
2391 | } |
2392 | - field->type = new_type; |
2393 | - strcat(field->type, brackets); |
2394 | } |
2395 | free(brackets); |
2396 | } |
2397 | @@ -2046,19 +2071,16 @@ process_op(struct tep_event *event, struct tep_print_arg *arg, char **tok) |
2398 | /* could just be a type pointer */ |
2399 | if ((strcmp(arg->op.op, "*") == 0) && |
2400 | type == TEP_EVENT_DELIM && (strcmp(token, ")") == 0)) { |
2401 | - char *new_atom; |
2402 | + int ret; |
2403 | |
2404 | if (left->type != TEP_PRINT_ATOM) { |
2405 | do_warning_event(event, "bad pointer type"); |
2406 | goto out_free; |
2407 | } |
2408 | - new_atom = realloc(left->atom.atom, |
2409 | - strlen(left->atom.atom) + 3); |
2410 | - if (!new_atom) |
2411 | + ret = append(&left->atom.atom, " ", "*"); |
2412 | + if (ret < 0) |
2413 | goto out_warn_free; |
2414 | |
2415 | - left->atom.atom = new_atom; |
2416 | - strcat(left->atom.atom, " *"); |
2417 | free(arg->op.op); |
2418 | *arg = *left; |
2419 | free(left); |
2420 | @@ -3151,18 +3173,15 @@ process_arg_token(struct tep_event *event, struct tep_print_arg *arg, |
2421 | } |
2422 | /* atoms can be more than one token long */ |
2423 | while (type == TEP_EVENT_ITEM) { |
2424 | - char *new_atom; |
2425 | - new_atom = realloc(atom, |
2426 | - strlen(atom) + strlen(token) + 2); |
2427 | - if (!new_atom) { |
2428 | + int ret; |
2429 | + |
2430 | + ret = append(&atom, " ", token); |
2431 | + if (ret < 0) { |
2432 | free(atom); |
2433 | *tok = NULL; |
2434 | free_token(token); |
2435 | return TEP_EVENT_ERROR; |
2436 | } |
2437 | - atom = new_atom; |
2438 | - strcat(atom, " "); |
2439 | - strcat(atom, token); |
2440 | free_token(token); |
2441 | type = read_token_item(&token); |
2442 | } |
2443 | diff --git a/tools/testing/selftests/tpm2/test_smoke.sh b/tools/testing/selftests/tpm2/test_smoke.sh |
2444 | index 80521d46220c..31fb8265f643 100755 |
2445 | --- a/tools/testing/selftests/tpm2/test_smoke.sh |
2446 | +++ b/tools/testing/selftests/tpm2/test_smoke.sh |
2447 | @@ -1,4 +1,4 @@ |
2448 | -#!/bin/bash |
2449 | +#!/bin/sh |
2450 | # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) |
2451 | |
2452 | python -m unittest -v tpm2_tests.SmokeTest |
2453 | diff --git a/tools/testing/selftests/tpm2/test_space.sh b/tools/testing/selftests/tpm2/test_space.sh |
2454 | index a6f5e346635e..3ded3011b642 100755 |
2455 | --- a/tools/testing/selftests/tpm2/test_space.sh |
2456 | +++ b/tools/testing/selftests/tpm2/test_space.sh |
2457 | @@ -1,4 +1,4 @@ |
2458 | -#!/bin/bash |
2459 | +#!/bin/sh |
2460 | # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) |
2461 | |
2462 | python -m unittest -v tpm2_tests.SpaceTest |