Contents of /trunk/kernel-alx/patches-4.4/0156-4.4.57-all-fixes.patch
Parent Directory | Revision Log
Revision 2892 -
(show annotations)
(download)
Mon Mar 27 13:49:28 2017 UTC (7 years, 6 months ago) by niro
File size: 37648 byte(s)
linux-4.4.57
1 | diff --git a/Makefile b/Makefile |
2 | index cf9303a5d621..841675e63a38 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 4 |
8 | -SUBLEVEL = 56 |
9 | +SUBLEVEL = 57 |
10 | EXTRAVERSION = |
11 | NAME = Blurry Fish Butt |
12 | |
13 | diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S |
14 | index 861e72109df2..f080abfc2f83 100644 |
15 | --- a/arch/powerpc/boot/zImage.lds.S |
16 | +++ b/arch/powerpc/boot/zImage.lds.S |
17 | @@ -68,6 +68,7 @@ SECTIONS |
18 | } |
19 | |
20 | #ifdef CONFIG_PPC64_BOOT_WRAPPER |
21 | + . = ALIGN(256); |
22 | .got : |
23 | { |
24 | __toc_start = .; |
25 | diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c |
26 | index 5cc2e7af3a7b..b379146de55b 100644 |
27 | --- a/arch/powerpc/kvm/emulate.c |
28 | +++ b/arch/powerpc/kvm/emulate.c |
29 | @@ -302,7 +302,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) |
30 | advance = 0; |
31 | printk(KERN_ERR "Couldn't emulate instruction 0x%08x " |
32 | "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); |
33 | - kvmppc_core_queue_program(vcpu, 0); |
34 | } |
35 | } |
36 | |
37 | diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c |
38 | index 3a40f718baef..4004e03267cd 100644 |
39 | --- a/arch/s390/pci/pci_dma.c |
40 | +++ b/arch/s390/pci/pci_dma.c |
41 | @@ -455,7 +455,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev) |
42 | zdev->dma_table = dma_alloc_cpu_table(); |
43 | if (!zdev->dma_table) { |
44 | rc = -ENOMEM; |
45 | - goto out_clean; |
46 | + goto out; |
47 | } |
48 | |
49 | /* |
50 | @@ -475,18 +475,22 @@ int zpci_dma_init_device(struct zpci_dev *zdev) |
51 | zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8); |
52 | if (!zdev->iommu_bitmap) { |
53 | rc = -ENOMEM; |
54 | - goto out_reg; |
55 | + goto free_dma_table; |
56 | } |
57 | |
58 | rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, |
59 | (u64) zdev->dma_table); |
60 | if (rc) |
61 | - goto out_reg; |
62 | - return 0; |
63 | + goto free_bitmap; |
64 | |
65 | -out_reg: |
66 | + return 0; |
67 | +free_bitmap: |
68 | + vfree(zdev->iommu_bitmap); |
69 | + zdev->iommu_bitmap = NULL; |
70 | +free_dma_table: |
71 | dma_free_cpu_table(zdev->dma_table); |
72 | -out_clean: |
73 | + zdev->dma_table = NULL; |
74 | +out: |
75 | return rc; |
76 | } |
77 | |
78 | diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c |
79 | index 440df0c7a2ee..a69321a77783 100644 |
80 | --- a/arch/x86/crypto/ghash-clmulni-intel_glue.c |
81 | +++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c |
82 | @@ -219,6 +219,29 @@ static int ghash_async_final(struct ahash_request *req) |
83 | } |
84 | } |
85 | |
86 | +static int ghash_async_import(struct ahash_request *req, const void *in) |
87 | +{ |
88 | + struct ahash_request *cryptd_req = ahash_request_ctx(req); |
89 | + struct shash_desc *desc = cryptd_shash_desc(cryptd_req); |
90 | + struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); |
91 | + |
92 | + ghash_async_init(req); |
93 | + memcpy(dctx, in, sizeof(*dctx)); |
94 | + return 0; |
95 | + |
96 | +} |
97 | + |
98 | +static int ghash_async_export(struct ahash_request *req, void *out) |
99 | +{ |
100 | + struct ahash_request *cryptd_req = ahash_request_ctx(req); |
101 | + struct shash_desc *desc = cryptd_shash_desc(cryptd_req); |
102 | + struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); |
103 | + |
104 | + memcpy(out, dctx, sizeof(*dctx)); |
105 | + return 0; |
106 | + |
107 | +} |
108 | + |
109 | static int ghash_async_digest(struct ahash_request *req) |
110 | { |
111 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
112 | @@ -288,8 +311,11 @@ static struct ahash_alg ghash_async_alg = { |
113 | .final = ghash_async_final, |
114 | .setkey = ghash_async_setkey, |
115 | .digest = ghash_async_digest, |
116 | + .export = ghash_async_export, |
117 | + .import = ghash_async_import, |
118 | .halg = { |
119 | .digestsize = GHASH_DIGEST_SIZE, |
120 | + .statesize = sizeof(struct ghash_desc_ctx), |
121 | .base = { |
122 | .cra_name = "ghash", |
123 | .cra_driver_name = "ghash-clmulni", |
124 | diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c |
125 | index 9e2ba5c6e1dd..f42e78de1e10 100644 |
126 | --- a/arch/x86/xen/spinlock.c |
127 | +++ b/arch/x86/xen/spinlock.c |
128 | @@ -27,6 +27,12 @@ static bool xen_pvspin = true; |
129 | |
130 | static void xen_qlock_kick(int cpu) |
131 | { |
132 | + int irq = per_cpu(lock_kicker_irq, cpu); |
133 | + |
134 | + /* Don't kick if the target's kicker interrupt is not initialized. */ |
135 | + if (irq == -1) |
136 | + return; |
137 | + |
138 | xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); |
139 | } |
140 | |
141 | diff --git a/crypto/cryptd.c b/crypto/cryptd.c |
142 | index e7aa904cb20b..26a504db3f53 100644 |
143 | --- a/crypto/cryptd.c |
144 | +++ b/crypto/cryptd.c |
145 | @@ -642,6 +642,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, |
146 | inst->alg.halg.base.cra_flags = type; |
147 | |
148 | inst->alg.halg.digestsize = salg->digestsize; |
149 | + inst->alg.halg.statesize = salg->statesize; |
150 | inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); |
151 | |
152 | inst->alg.halg.base.cra_init = cryptd_hash_init_tfm; |
153 | diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c |
154 | index a0ceb41d5ccc..b4f3930266b1 100644 |
155 | --- a/crypto/mcryptd.c |
156 | +++ b/crypto/mcryptd.c |
157 | @@ -531,6 +531,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, |
158 | inst->alg.halg.base.cra_flags = type; |
159 | |
160 | inst->alg.halg.digestsize = salg->digestsize; |
161 | + inst->alg.halg.statesize = salg->statesize; |
162 | inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx); |
163 | |
164 | inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm; |
165 | diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c |
166 | index 5fdac394207a..549cdbed7b0e 100644 |
167 | --- a/drivers/acpi/acpi_video.c |
168 | +++ b/drivers/acpi/acpi_video.c |
169 | @@ -1211,6 +1211,9 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video) |
170 | union acpi_object *dod = NULL; |
171 | union acpi_object *obj; |
172 | |
173 | + if (!video->cap._DOD) |
174 | + return AE_NOT_EXIST; |
175 | + |
176 | status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer); |
177 | if (!ACPI_SUCCESS(status)) { |
178 | ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD")); |
179 | diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c |
180 | index 65f7eecc45b0..f10a107614b4 100644 |
181 | --- a/drivers/char/tpm/tpm_tis.c |
182 | +++ b/drivers/char/tpm/tpm_tis.c |
183 | @@ -401,7 +401,7 @@ static void disable_interrupts(struct tpm_chip *chip) |
184 | iowrite32(intmask, |
185 | chip->vendor.iobase + |
186 | TPM_INT_ENABLE(chip->vendor.locality)); |
187 | - free_irq(chip->vendor.irq, chip); |
188 | + devm_free_irq(chip->pdev, chip->vendor.irq, chip); |
189 | chip->vendor.irq = 0; |
190 | } |
191 | |
192 | diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c |
193 | index 8412ce5f93a7..86fa9fdc8323 100644 |
194 | --- a/drivers/cpufreq/cpufreq.c |
195 | +++ b/drivers/cpufreq/cpufreq.c |
196 | @@ -626,9 +626,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, |
197 | char *buf) |
198 | { |
199 | unsigned int cur_freq = __cpufreq_get(policy); |
200 | - if (!cur_freq) |
201 | - return sprintf(buf, "<unknown>"); |
202 | - return sprintf(buf, "%u\n", cur_freq); |
203 | + |
204 | + if (cur_freq) |
205 | + return sprintf(buf, "%u\n", cur_freq); |
206 | + |
207 | + return sprintf(buf, "<unknown>\n"); |
208 | } |
209 | |
210 | /** |
211 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c |
212 | index 7c42ff670080..a0924330d125 100644 |
213 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c |
214 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c |
215 | @@ -25,6 +25,7 @@ |
216 | * Alex Deucher |
217 | * Jerome Glisse |
218 | */ |
219 | +#include <linux/irq.h> |
220 | #include <drm/drmP.h> |
221 | #include <drm/drm_crtc_helper.h> |
222 | #include <drm/amdgpu_drm.h> |
223 | diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c |
224 | index 57c191798699..ddbf7e7e0d98 100644 |
225 | --- a/drivers/hv/hv.c |
226 | +++ b/drivers/hv/hv.c |
227 | @@ -274,7 +274,7 @@ cleanup: |
228 | * |
229 | * This routine is called normally during driver unloading or exiting. |
230 | */ |
231 | -void hv_cleanup(void) |
232 | +void hv_cleanup(bool crash) |
233 | { |
234 | union hv_x64_msr_hypercall_contents hypercall_msr; |
235 | |
236 | @@ -284,7 +284,8 @@ void hv_cleanup(void) |
237 | if (hv_context.hypercall_page) { |
238 | hypercall_msr.as_uint64 = 0; |
239 | wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); |
240 | - vfree(hv_context.hypercall_page); |
241 | + if (!crash) |
242 | + vfree(hv_context.hypercall_page); |
243 | hv_context.hypercall_page = NULL; |
244 | } |
245 | |
246 | @@ -304,7 +305,8 @@ void hv_cleanup(void) |
247 | |
248 | hypercall_msr.as_uint64 = 0; |
249 | wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64); |
250 | - vfree(hv_context.tsc_page); |
251 | + if (!crash) |
252 | + vfree(hv_context.tsc_page); |
253 | hv_context.tsc_page = NULL; |
254 | } |
255 | #endif |
256 | diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c |
257 | index b853b4b083bd..43af91362be5 100644 |
258 | --- a/drivers/hv/hv_balloon.c |
259 | +++ b/drivers/hv/hv_balloon.c |
260 | @@ -714,7 +714,7 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt) |
261 | * If the pfn range we are dealing with is not in the current |
262 | * "hot add block", move on. |
263 | */ |
264 | - if ((start_pfn >= has->end_pfn)) |
265 | + if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn) |
266 | continue; |
267 | /* |
268 | * If the current hot add-request extends beyond |
269 | @@ -768,7 +768,7 @@ static unsigned long handle_pg_range(unsigned long pg_start, |
270 | * If the pfn range we are dealing with is not in the current |
271 | * "hot add block", move on. |
272 | */ |
273 | - if ((start_pfn >= has->end_pfn)) |
274 | + if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn) |
275 | continue; |
276 | |
277 | old_covered_state = has->covered_end_pfn; |
278 | diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h |
279 | index 12156db2e88e..75e383e6d03d 100644 |
280 | --- a/drivers/hv/hyperv_vmbus.h |
281 | +++ b/drivers/hv/hyperv_vmbus.h |
282 | @@ -581,7 +581,7 @@ struct hv_ring_buffer_debug_info { |
283 | |
284 | extern int hv_init(void); |
285 | |
286 | -extern void hv_cleanup(void); |
287 | +extern void hv_cleanup(bool crash); |
288 | |
289 | extern int hv_post_message(union hv_connection_id connection_id, |
290 | enum hv_message_type message_type, |
291 | diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c |
292 | index 509ed9731630..802dcb409030 100644 |
293 | --- a/drivers/hv/vmbus_drv.c |
294 | +++ b/drivers/hv/vmbus_drv.c |
295 | @@ -889,7 +889,7 @@ err_alloc: |
296 | bus_unregister(&hv_bus); |
297 | |
298 | err_cleanup: |
299 | - hv_cleanup(); |
300 | + hv_cleanup(false); |
301 | |
302 | return ret; |
303 | } |
304 | @@ -1254,7 +1254,7 @@ static void hv_kexec_handler(void) |
305 | vmbus_initiate_unload(); |
306 | for_each_online_cpu(cpu) |
307 | smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1); |
308 | - hv_cleanup(); |
309 | + hv_cleanup(false); |
310 | }; |
311 | |
312 | static void hv_crash_handler(struct pt_regs *regs) |
313 | @@ -1266,7 +1266,7 @@ static void hv_crash_handler(struct pt_regs *regs) |
314 | * for kdump. |
315 | */ |
316 | hv_synic_cleanup(NULL); |
317 | - hv_cleanup(); |
318 | + hv_cleanup(true); |
319 | }; |
320 | |
321 | static int __init hv_acpi_init(void) |
322 | @@ -1330,7 +1330,7 @@ static void __exit vmbus_exit(void) |
323 | &hyperv_panic_block); |
324 | } |
325 | bus_unregister(&hv_bus); |
326 | - hv_cleanup(); |
327 | + hv_cleanup(false); |
328 | for_each_online_cpu(cpu) { |
329 | tasklet_kill(hv_context.event_dpc[cpu]); |
330 | smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1); |
331 | diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c |
332 | index aecec6d32463..7f1c625b08ec 100644 |
333 | --- a/drivers/isdn/gigaset/bas-gigaset.c |
334 | +++ b/drivers/isdn/gigaset/bas-gigaset.c |
335 | @@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface, |
336 | return -ENODEV; |
337 | } |
338 | |
339 | + if (hostif->desc.bNumEndpoints < 1) |
340 | + return -ENODEV; |
341 | + |
342 | dev_info(&udev->dev, |
343 | "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n", |
344 | __func__, le16_to_cpu(udev->descriptor.idVendor), |
345 | diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c |
346 | index ebb0dd612ebd..122af340a531 100644 |
347 | --- a/drivers/md/raid10.c |
348 | +++ b/drivers/md/raid10.c |
349 | @@ -1477,7 +1477,25 @@ static void make_request(struct mddev *mddev, struct bio *bio) |
350 | split = bio; |
351 | } |
352 | |
353 | + /* |
354 | + * If a bio is splitted, the first part of bio will pass |
355 | + * barrier but the bio is queued in current->bio_list (see |
356 | + * generic_make_request). If there is a raise_barrier() called |
357 | + * here, the second part of bio can't pass barrier. But since |
358 | + * the first part bio isn't dispatched to underlaying disks |
359 | + * yet, the barrier is never released, hence raise_barrier will |
360 | + * alays wait. We have a deadlock. |
361 | + * Note, this only happens in read path. For write path, the |
362 | + * first part of bio is dispatched in a schedule() call |
363 | + * (because of blk plug) or offloaded to raid10d. |
364 | + * Quitting from the function immediately can change the bio |
365 | + * order queued in bio_list and avoid the deadlock. |
366 | + */ |
367 | __make_request(mddev, split); |
368 | + if (split != bio && bio_data_dir(bio) == READ) { |
369 | + generic_make_request(bio); |
370 | + break; |
371 | + } |
372 | } while (split != bio); |
373 | |
374 | /* In case raid10d snuck in to freeze_array */ |
375 | diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c |
376 | index e8a09ff9e724..c8a7802d2953 100644 |
377 | --- a/drivers/net/hyperv/netvsc_drv.c |
378 | +++ b/drivers/net/hyperv/netvsc_drv.c |
379 | @@ -197,65 +197,6 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size, |
380 | return ppi; |
381 | } |
382 | |
383 | -union sub_key { |
384 | - u64 k; |
385 | - struct { |
386 | - u8 pad[3]; |
387 | - u8 kb; |
388 | - u32 ka; |
389 | - }; |
390 | -}; |
391 | - |
392 | -/* Toeplitz hash function |
393 | - * data: network byte order |
394 | - * return: host byte order |
395 | - */ |
396 | -static u32 comp_hash(u8 *key, int klen, void *data, int dlen) |
397 | -{ |
398 | - union sub_key subk; |
399 | - int k_next = 4; |
400 | - u8 dt; |
401 | - int i, j; |
402 | - u32 ret = 0; |
403 | - |
404 | - subk.k = 0; |
405 | - subk.ka = ntohl(*(u32 *)key); |
406 | - |
407 | - for (i = 0; i < dlen; i++) { |
408 | - subk.kb = key[k_next]; |
409 | - k_next = (k_next + 1) % klen; |
410 | - dt = ((u8 *)data)[i]; |
411 | - for (j = 0; j < 8; j++) { |
412 | - if (dt & 0x80) |
413 | - ret ^= subk.ka; |
414 | - dt <<= 1; |
415 | - subk.k <<= 1; |
416 | - } |
417 | - } |
418 | - |
419 | - return ret; |
420 | -} |
421 | - |
422 | -static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb) |
423 | -{ |
424 | - struct flow_keys flow; |
425 | - int data_len; |
426 | - |
427 | - if (!skb_flow_dissect_flow_keys(skb, &flow, 0) || |
428 | - !(flow.basic.n_proto == htons(ETH_P_IP) || |
429 | - flow.basic.n_proto == htons(ETH_P_IPV6))) |
430 | - return false; |
431 | - |
432 | - if (flow.basic.ip_proto == IPPROTO_TCP) |
433 | - data_len = 12; |
434 | - else |
435 | - data_len = 8; |
436 | - |
437 | - *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len); |
438 | - |
439 | - return true; |
440 | -} |
441 | - |
442 | static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, |
443 | void *accel_priv, select_queue_fallback_t fallback) |
444 | { |
445 | @@ -268,11 +209,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, |
446 | if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1) |
447 | return 0; |
448 | |
449 | - if (netvsc_set_hash(&hash, skb)) { |
450 | - q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] % |
451 | - ndev->real_num_tx_queues; |
452 | - skb_set_hash(skb, hash, PKT_HASH_TYPE_L3); |
453 | - } |
454 | + hash = skb_get_hash(skb); |
455 | + q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] % |
456 | + ndev->real_num_tx_queues; |
457 | |
458 | return q_idx; |
459 | } |
460 | diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c |
461 | index a009ae34c5ef..930f0f25c1ce 100644 |
462 | --- a/drivers/pinctrl/intel/pinctrl-cherryview.c |
463 | +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c |
464 | @@ -1466,12 +1466,11 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) |
465 | offset += range->npins; |
466 | } |
467 | |
468 | - /* Mask and clear all interrupts */ |
469 | - chv_writel(0, pctrl->regs + CHV_INTMASK); |
470 | + /* Clear all interrupts */ |
471 | chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); |
472 | |
473 | ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0, |
474 | - handle_simple_irq, IRQ_TYPE_NONE); |
475 | + handle_bad_irq, IRQ_TYPE_NONE); |
476 | if (ret) { |
477 | dev_err(pctrl->dev, "failed to add IRQ chip\n"); |
478 | goto fail; |
479 | diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h |
480 | index 5ada9268a450..a8ac4c0a1493 100644 |
481 | --- a/drivers/scsi/cxlflash/common.h |
482 | +++ b/drivers/scsi/cxlflash/common.h |
483 | @@ -34,7 +34,6 @@ extern const struct file_operations cxlflash_cxl_fops; |
484 | sectors |
485 | */ |
486 | |
487 | -#define NUM_RRQ_ENTRY 16 /* for master issued cmds */ |
488 | #define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry)) |
489 | |
490 | /* AFU command retry limit */ |
491 | @@ -48,9 +47,12 @@ extern const struct file_operations cxlflash_cxl_fops; |
492 | index derivation |
493 | */ |
494 | |
495 | -#define CXLFLASH_MAX_CMDS 16 |
496 | +#define CXLFLASH_MAX_CMDS 256 |
497 | #define CXLFLASH_MAX_CMDS_PER_LUN CXLFLASH_MAX_CMDS |
498 | |
499 | +/* RRQ for master issued cmds */ |
500 | +#define NUM_RRQ_ENTRY CXLFLASH_MAX_CMDS |
501 | + |
502 | |
503 | static inline void check_sizes(void) |
504 | { |
505 | @@ -149,7 +151,7 @@ struct afu_cmd { |
506 | struct afu { |
507 | /* Stuff requiring alignment go first. */ |
508 | |
509 | - u64 rrq_entry[NUM_RRQ_ENTRY]; /* 128B RRQ */ |
510 | + u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */ |
511 | /* |
512 | * Command & data for AFU commands. |
513 | */ |
514 | diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c |
515 | index c86847c68448..2882bcac918a 100644 |
516 | --- a/drivers/scsi/cxlflash/main.c |
517 | +++ b/drivers/scsi/cxlflash/main.c |
518 | @@ -2305,7 +2305,7 @@ static struct scsi_host_template driver_template = { |
519 | .eh_device_reset_handler = cxlflash_eh_device_reset_handler, |
520 | .eh_host_reset_handler = cxlflash_eh_host_reset_handler, |
521 | .change_queue_depth = cxlflash_change_queue_depth, |
522 | - .cmd_per_lun = 16, |
523 | + .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN, |
524 | .can_queue = CXLFLASH_MAX_CMDS, |
525 | .this_id = -1, |
526 | .sg_tablesize = SG_NONE, /* No scatter gather support */ |
527 | diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c |
528 | index 6bffd91b973a..c1ccf1ee99ea 100644 |
529 | --- a/drivers/scsi/libiscsi.c |
530 | +++ b/drivers/scsi/libiscsi.c |
531 | @@ -560,8 +560,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state) |
532 | WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); |
533 | task->state = state; |
534 | |
535 | - if (!list_empty(&task->running)) |
536 | + spin_lock_bh(&conn->taskqueuelock); |
537 | + if (!list_empty(&task->running)) { |
538 | + pr_debug_once("%s while task on list", __func__); |
539 | list_del_init(&task->running); |
540 | + } |
541 | + spin_unlock_bh(&conn->taskqueuelock); |
542 | |
543 | if (conn->task == task) |
544 | conn->task = NULL; |
545 | @@ -783,7 +787,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, |
546 | if (session->tt->xmit_task(task)) |
547 | goto free_task; |
548 | } else { |
549 | + spin_lock_bh(&conn->taskqueuelock); |
550 | list_add_tail(&task->running, &conn->mgmtqueue); |
551 | + spin_unlock_bh(&conn->taskqueuelock); |
552 | iscsi_conn_queue_work(conn); |
553 | } |
554 | |
555 | @@ -1474,8 +1480,10 @@ void iscsi_requeue_task(struct iscsi_task *task) |
556 | * this may be on the requeue list already if the xmit_task callout |
557 | * is handling the r2ts while we are adding new ones |
558 | */ |
559 | + spin_lock_bh(&conn->taskqueuelock); |
560 | if (list_empty(&task->running)) |
561 | list_add_tail(&task->running, &conn->requeue); |
562 | + spin_unlock_bh(&conn->taskqueuelock); |
563 | iscsi_conn_queue_work(conn); |
564 | } |
565 | EXPORT_SYMBOL_GPL(iscsi_requeue_task); |
566 | @@ -1512,22 +1520,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) |
567 | * only have one nop-out as a ping from us and targets should not |
568 | * overflow us with nop-ins |
569 | */ |
570 | + spin_lock_bh(&conn->taskqueuelock); |
571 | check_mgmt: |
572 | while (!list_empty(&conn->mgmtqueue)) { |
573 | conn->task = list_entry(conn->mgmtqueue.next, |
574 | struct iscsi_task, running); |
575 | list_del_init(&conn->task->running); |
576 | + spin_unlock_bh(&conn->taskqueuelock); |
577 | if (iscsi_prep_mgmt_task(conn, conn->task)) { |
578 | /* regular RX path uses back_lock */ |
579 | spin_lock_bh(&conn->session->back_lock); |
580 | __iscsi_put_task(conn->task); |
581 | spin_unlock_bh(&conn->session->back_lock); |
582 | conn->task = NULL; |
583 | + spin_lock_bh(&conn->taskqueuelock); |
584 | continue; |
585 | } |
586 | rc = iscsi_xmit_task(conn); |
587 | if (rc) |
588 | goto done; |
589 | + spin_lock_bh(&conn->taskqueuelock); |
590 | } |
591 | |
592 | /* process pending command queue */ |
593 | @@ -1535,19 +1547,24 @@ check_mgmt: |
594 | conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task, |
595 | running); |
596 | list_del_init(&conn->task->running); |
597 | + spin_unlock_bh(&conn->taskqueuelock); |
598 | if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { |
599 | fail_scsi_task(conn->task, DID_IMM_RETRY); |
600 | + spin_lock_bh(&conn->taskqueuelock); |
601 | continue; |
602 | } |
603 | rc = iscsi_prep_scsi_cmd_pdu(conn->task); |
604 | if (rc) { |
605 | if (rc == -ENOMEM || rc == -EACCES) { |
606 | + spin_lock_bh(&conn->taskqueuelock); |
607 | list_add_tail(&conn->task->running, |
608 | &conn->cmdqueue); |
609 | conn->task = NULL; |
610 | + spin_unlock_bh(&conn->taskqueuelock); |
611 | goto done; |
612 | } else |
613 | fail_scsi_task(conn->task, DID_ABORT); |
614 | + spin_lock_bh(&conn->taskqueuelock); |
615 | continue; |
616 | } |
617 | rc = iscsi_xmit_task(conn); |
618 | @@ -1558,6 +1575,7 @@ check_mgmt: |
619 | * we need to check the mgmt queue for nops that need to |
620 | * be sent to aviod starvation |
621 | */ |
622 | + spin_lock_bh(&conn->taskqueuelock); |
623 | if (!list_empty(&conn->mgmtqueue)) |
624 | goto check_mgmt; |
625 | } |
626 | @@ -1577,12 +1595,15 @@ check_mgmt: |
627 | conn->task = task; |
628 | list_del_init(&conn->task->running); |
629 | conn->task->state = ISCSI_TASK_RUNNING; |
630 | + spin_unlock_bh(&conn->taskqueuelock); |
631 | rc = iscsi_xmit_task(conn); |
632 | if (rc) |
633 | goto done; |
634 | + spin_lock_bh(&conn->taskqueuelock); |
635 | if (!list_empty(&conn->mgmtqueue)) |
636 | goto check_mgmt; |
637 | } |
638 | + spin_unlock_bh(&conn->taskqueuelock); |
639 | spin_unlock_bh(&conn->session->frwd_lock); |
640 | return -ENODATA; |
641 | |
642 | @@ -1738,7 +1759,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) |
643 | goto prepd_reject; |
644 | } |
645 | } else { |
646 | + spin_lock_bh(&conn->taskqueuelock); |
647 | list_add_tail(&task->running, &conn->cmdqueue); |
648 | + spin_unlock_bh(&conn->taskqueuelock); |
649 | iscsi_conn_queue_work(conn); |
650 | } |
651 | |
652 | @@ -2900,6 +2923,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, |
653 | INIT_LIST_HEAD(&conn->mgmtqueue); |
654 | INIT_LIST_HEAD(&conn->cmdqueue); |
655 | INIT_LIST_HEAD(&conn->requeue); |
656 | + spin_lock_init(&conn->taskqueuelock); |
657 | INIT_WORK(&conn->xmitwork, iscsi_xmitworker); |
658 | |
659 | /* allocate login_task used for the login/text sequences */ |
660 | diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c |
661 | index c14ab6c3ae40..60c21093f865 100644 |
662 | --- a/drivers/scsi/lpfc/lpfc_init.c |
663 | +++ b/drivers/scsi/lpfc/lpfc_init.c |
664 | @@ -11387,6 +11387,7 @@ static struct pci_driver lpfc_driver = { |
665 | .id_table = lpfc_id_table, |
666 | .probe = lpfc_pci_probe_one, |
667 | .remove = lpfc_pci_remove_one, |
668 | + .shutdown = lpfc_pci_remove_one, |
669 | .suspend = lpfc_pci_suspend_one, |
670 | .resume = lpfc_pci_resume_one, |
671 | .err_handler = &lpfc_err_handler, |
672 | diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c |
673 | index de18790eb21c..d72a4058fd08 100644 |
674 | --- a/drivers/target/target_core_pscsi.c |
675 | +++ b/drivers/target/target_core_pscsi.c |
676 | @@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, |
677 | |
678 | buf = kzalloc(12, GFP_KERNEL); |
679 | if (!buf) |
680 | - return; |
681 | + goto out_free; |
682 | |
683 | memset(cdb, 0, MAX_COMMAND_SIZE); |
684 | cdb[0] = MODE_SENSE; |
685 | @@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, |
686 | * If MODE_SENSE still returns zero, set the default value to 1024. |
687 | */ |
688 | sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); |
689 | +out_free: |
690 | if (!sdev->sector_size) |
691 | sdev->sector_size = 1024; |
692 | -out_free: |
693 | + |
694 | kfree(buf); |
695 | } |
696 | |
697 | @@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, |
698 | sd->lun, sd->queue_depth); |
699 | } |
700 | |
701 | - dev->dev_attrib.hw_block_size = sd->sector_size; |
702 | + dev->dev_attrib.hw_block_size = |
703 | + min_not_zero((int)sd->sector_size, 512); |
704 | dev->dev_attrib.hw_max_sectors = |
705 | - min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); |
706 | + min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q)); |
707 | dev->dev_attrib.hw_queue_depth = sd->queue_depth; |
708 | |
709 | /* |
710 | @@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, |
711 | /* |
712 | * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. |
713 | */ |
714 | - if (sd->type == TYPE_TAPE) |
715 | + if (sd->type == TYPE_TAPE) { |
716 | pscsi_tape_read_blocksize(dev, sd); |
717 | + dev->dev_attrib.hw_block_size = sd->sector_size; |
718 | + } |
719 | return 0; |
720 | } |
721 | |
722 | @@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) |
723 | /* |
724 | * Called with struct Scsi_Host->host_lock called. |
725 | */ |
726 | -static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) |
727 | +static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd) |
728 | __releases(sh->host_lock) |
729 | { |
730 | struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; |
731 | @@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) |
732 | return 0; |
733 | } |
734 | |
735 | -/* |
736 | - * Called with struct Scsi_Host->host_lock called. |
737 | - */ |
738 | -static int pscsi_create_type_other(struct se_device *dev, |
739 | - struct scsi_device *sd) |
740 | - __releases(sh->host_lock) |
741 | -{ |
742 | - struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; |
743 | - struct Scsi_Host *sh = sd->host; |
744 | - int ret; |
745 | - |
746 | - spin_unlock_irq(sh->host_lock); |
747 | - ret = pscsi_add_device_to_list(dev, sd); |
748 | - if (ret) |
749 | - return ret; |
750 | - |
751 | - pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n", |
752 | - phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, |
753 | - sd->channel, sd->id, sd->lun); |
754 | - return 0; |
755 | -} |
756 | - |
757 | static int pscsi_configure_device(struct se_device *dev) |
758 | { |
759 | struct se_hba *hba = dev->se_hba; |
760 | @@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev) |
761 | case TYPE_DISK: |
762 | ret = pscsi_create_type_disk(dev, sd); |
763 | break; |
764 | - case TYPE_ROM: |
765 | - ret = pscsi_create_type_rom(dev, sd); |
766 | - break; |
767 | default: |
768 | - ret = pscsi_create_type_other(dev, sd); |
769 | + ret = pscsi_create_type_nondisk(dev, sd); |
770 | break; |
771 | } |
772 | |
773 | @@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev) |
774 | else if (pdv->pdv_lld_host) |
775 | scsi_host_put(pdv->pdv_lld_host); |
776 | |
777 | - if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) |
778 | - scsi_device_put(sd); |
779 | + scsi_device_put(sd); |
780 | |
781 | pdv->pdv_sd = NULL; |
782 | } |
783 | @@ -1088,7 +1066,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev) |
784 | if (pdv->pdv_bd && pdv->pdv_bd->bd_part) |
785 | return pdv->pdv_bd->bd_part->nr_sects; |
786 | |
787 | - dump_stack(); |
788 | return 0; |
789 | } |
790 | |
791 | diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c |
792 | index 2e27b1034ede..90c5dffc9fa4 100644 |
793 | --- a/drivers/target/target_core_sbc.c |
794 | +++ b/drivers/target/target_core_sbc.c |
795 | @@ -1096,9 +1096,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) |
796 | return ret; |
797 | break; |
798 | case VERIFY: |
799 | + case VERIFY_16: |
800 | size = 0; |
801 | - sectors = transport_get_sectors_10(cdb); |
802 | - cmd->t_task_lba = transport_lba_32(cdb); |
803 | + if (cdb[0] == VERIFY) { |
804 | + sectors = transport_get_sectors_10(cdb); |
805 | + cmd->t_task_lba = transport_lba_32(cdb); |
806 | + } else { |
807 | + sectors = transport_get_sectors_16(cdb); |
808 | + cmd->t_task_lba = transport_lba_64(cdb); |
809 | + } |
810 | cmd->execute_cmd = sbc_emulate_noop; |
811 | goto check_lba; |
812 | case REZERO_UNIT: |
813 | diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c |
814 | index f44ce09367bc..5724d7c41e29 100644 |
815 | --- a/drivers/usb/core/hcd.c |
816 | +++ b/drivers/usb/core/hcd.c |
817 | @@ -966,7 +966,7 @@ static void usb_bus_init (struct usb_bus *bus) |
818 | bus->bandwidth_allocated = 0; |
819 | bus->bandwidth_int_reqs = 0; |
820 | bus->bandwidth_isoc_reqs = 0; |
821 | - mutex_init(&bus->usb_address0_mutex); |
822 | + mutex_init(&bus->devnum_next_mutex); |
823 | |
824 | INIT_LIST_HEAD (&bus->bus_list); |
825 | } |
826 | @@ -2497,6 +2497,14 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver, |
827 | return NULL; |
828 | } |
829 | if (primary_hcd == NULL) { |
830 | + hcd->address0_mutex = kmalloc(sizeof(*hcd->address0_mutex), |
831 | + GFP_KERNEL); |
832 | + if (!hcd->address0_mutex) { |
833 | + kfree(hcd); |
834 | + dev_dbg(dev, "hcd address0 mutex alloc failed\n"); |
835 | + return NULL; |
836 | + } |
837 | + mutex_init(hcd->address0_mutex); |
838 | hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex), |
839 | GFP_KERNEL); |
840 | if (!hcd->bandwidth_mutex) { |
841 | @@ -2508,6 +2516,7 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver, |
842 | dev_set_drvdata(dev, hcd); |
843 | } else { |
844 | mutex_lock(&usb_port_peer_mutex); |
845 | + hcd->address0_mutex = primary_hcd->address0_mutex; |
846 | hcd->bandwidth_mutex = primary_hcd->bandwidth_mutex; |
847 | hcd->primary_hcd = primary_hcd; |
848 | primary_hcd->primary_hcd = primary_hcd; |
849 | @@ -2564,24 +2573,23 @@ EXPORT_SYMBOL_GPL(usb_create_hcd); |
850 | * Don't deallocate the bandwidth_mutex until the last shared usb_hcd is |
851 | * deallocated. |
852 | * |
853 | - * Make sure to only deallocate the bandwidth_mutex when the primary HCD is |
854 | - * freed. When hcd_release() is called for either hcd in a peer set |
855 | - * invalidate the peer's ->shared_hcd and ->primary_hcd pointers to |
856 | - * block new peering attempts |
857 | + * Make sure to deallocate the bandwidth_mutex only when the last HCD is |
858 | + * freed. When hcd_release() is called for either hcd in a peer set, |
859 | + * invalidate the peer's ->shared_hcd and ->primary_hcd pointers. |
860 | */ |
861 | static void hcd_release(struct kref *kref) |
862 | { |
863 | struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref); |
864 | |
865 | mutex_lock(&usb_port_peer_mutex); |
866 | - if (usb_hcd_is_primary_hcd(hcd)) |
867 | - kfree(hcd->bandwidth_mutex); |
868 | if (hcd->shared_hcd) { |
869 | struct usb_hcd *peer = hcd->shared_hcd; |
870 | |
871 | peer->shared_hcd = NULL; |
872 | - if (peer->primary_hcd == hcd) |
873 | - peer->primary_hcd = NULL; |
874 | + peer->primary_hcd = NULL; |
875 | + } else { |
876 | + kfree(hcd->address0_mutex); |
877 | + kfree(hcd->bandwidth_mutex); |
878 | } |
879 | mutex_unlock(&usb_port_peer_mutex); |
880 | kfree(hcd); |
881 | diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c |
882 | index 780db8bb2262..f52d8abf6979 100644 |
883 | --- a/drivers/usb/core/hub.c |
884 | +++ b/drivers/usb/core/hub.c |
885 | @@ -1980,7 +1980,7 @@ static void choose_devnum(struct usb_device *udev) |
886 | struct usb_bus *bus = udev->bus; |
887 | |
888 | /* be safe when more hub events are proceed in parallel */ |
889 | - mutex_lock(&bus->usb_address0_mutex); |
890 | + mutex_lock(&bus->devnum_next_mutex); |
891 | if (udev->wusb) { |
892 | devnum = udev->portnum + 1; |
893 | BUG_ON(test_bit(devnum, bus->devmap.devicemap)); |
894 | @@ -1998,7 +1998,7 @@ static void choose_devnum(struct usb_device *udev) |
895 | set_bit(devnum, bus->devmap.devicemap); |
896 | udev->devnum = devnum; |
897 | } |
898 | - mutex_unlock(&bus->usb_address0_mutex); |
899 | + mutex_unlock(&bus->devnum_next_mutex); |
900 | } |
901 | |
902 | static void release_devnum(struct usb_device *udev) |
903 | @@ -4262,7 +4262,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, |
904 | if (oldspeed == USB_SPEED_LOW) |
905 | delay = HUB_LONG_RESET_TIME; |
906 | |
907 | - mutex_lock(&hdev->bus->usb_address0_mutex); |
908 | + mutex_lock(hcd->address0_mutex); |
909 | |
910 | /* Reset the device; full speed may morph to high speed */ |
911 | /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ |
912 | @@ -4548,7 +4548,7 @@ fail: |
913 | hub_port_disable(hub, port1, 0); |
914 | update_devnum(udev, devnum); /* for disconnect processing */ |
915 | } |
916 | - mutex_unlock(&hdev->bus->usb_address0_mutex); |
917 | + mutex_unlock(hcd->address0_mutex); |
918 | return retval; |
919 | } |
920 | |
921 | diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
922 | index 6fe8e30eeb99..68345a9e59b8 100644 |
923 | --- a/fs/ext4/super.c |
924 | +++ b/fs/ext4/super.c |
925 | @@ -3666,7 +3666,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
926 | db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / |
927 | EXT4_DESC_PER_BLOCK(sb); |
928 | if (ext4_has_feature_meta_bg(sb)) { |
929 | - if (le32_to_cpu(es->s_first_meta_bg) >= db_count) { |
930 | + if (le32_to_cpu(es->s_first_meta_bg) > db_count) { |
931 | ext4_msg(sb, KERN_WARNING, |
932 | "first meta block group too large: %u " |
933 | "(group descriptor block count %u)", |
934 | diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h |
935 | index de7b4f97ac75..be519416c112 100644 |
936 | --- a/fs/gfs2/incore.h |
937 | +++ b/fs/gfs2/incore.h |
938 | @@ -207,7 +207,7 @@ struct lm_lockname { |
939 | struct gfs2_sbd *ln_sbd; |
940 | u64 ln_number; |
941 | unsigned int ln_type; |
942 | -}; |
943 | +} __packed __aligned(sizeof(int)); |
944 | |
945 | #define lm_name_equal(name1, name2) \ |
946 | (((name1)->ln_number == (name2)->ln_number) && \ |
947 | diff --git a/include/linux/log2.h b/include/linux/log2.h |
948 | index fd7ff3d91e6a..f38fae23bdac 100644 |
949 | --- a/include/linux/log2.h |
950 | +++ b/include/linux/log2.h |
951 | @@ -16,12 +16,6 @@ |
952 | #include <linux/bitops.h> |
953 | |
954 | /* |
955 | - * deal with unrepresentable constant logarithms |
956 | - */ |
957 | -extern __attribute__((const, noreturn)) |
958 | -int ____ilog2_NaN(void); |
959 | - |
960 | -/* |
961 | * non-constant log of base 2 calculators |
962 | * - the arch may override these in asm/bitops.h if they can be implemented |
963 | * more efficiently than using fls() and fls64() |
964 | @@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) |
965 | #define ilog2(n) \ |
966 | ( \ |
967 | __builtin_constant_p(n) ? ( \ |
968 | - (n) < 1 ? ____ilog2_NaN() : \ |
969 | + (n) < 2 ? 0 : \ |
970 | (n) & (1ULL << 63) ? 63 : \ |
971 | (n) & (1ULL << 62) ? 62 : \ |
972 | (n) & (1ULL << 61) ? 61 : \ |
973 | @@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) |
974 | (n) & (1ULL << 4) ? 4 : \ |
975 | (n) & (1ULL << 3) ? 3 : \ |
976 | (n) & (1ULL << 2) ? 2 : \ |
977 | - (n) & (1ULL << 1) ? 1 : \ |
978 | - (n) & (1ULL << 0) ? 0 : \ |
979 | - ____ilog2_NaN() \ |
980 | - ) : \ |
981 | + 1 ) : \ |
982 | (sizeof(n) <= 4) ? \ |
983 | __ilog2_u32(n) : \ |
984 | __ilog2_u64(n) \ |
985 | diff --git a/include/linux/usb.h b/include/linux/usb.h |
986 | index 12891ffd4bf0..8c75af6b7d5b 100644 |
987 | --- a/include/linux/usb.h |
988 | +++ b/include/linux/usb.h |
989 | @@ -371,14 +371,13 @@ struct usb_bus { |
990 | |
991 | int devnum_next; /* Next open device number in |
992 | * round-robin allocation */ |
993 | + struct mutex devnum_next_mutex; /* devnum_next mutex */ |
994 | |
995 | struct usb_devmap devmap; /* device address allocation map */ |
996 | struct usb_device *root_hub; /* Root hub */ |
997 | struct usb_bus *hs_companion; /* Companion EHCI bus, if any */ |
998 | struct list_head bus_list; /* list of busses */ |
999 | |
1000 | - struct mutex usb_address0_mutex; /* unaddressed device mutex */ |
1001 | - |
1002 | int bandwidth_allocated; /* on this bus: how much of the time |
1003 | * reserved for periodic (intr/iso) |
1004 | * requests is used, on average? |
1005 | diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h |
1006 | index f89c24bd53a4..3993b21f3d11 100644 |
1007 | --- a/include/linux/usb/hcd.h |
1008 | +++ b/include/linux/usb/hcd.h |
1009 | @@ -180,6 +180,7 @@ struct usb_hcd { |
1010 | * bandwidth_mutex should be dropped after a successful control message |
1011 | * to the device, or resetting the bandwidth after a failed attempt. |
1012 | */ |
1013 | + struct mutex *address0_mutex; |
1014 | struct mutex *bandwidth_mutex; |
1015 | struct usb_hcd *shared_hcd; |
1016 | struct usb_hcd *primary_hcd; |
1017 | diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h |
1018 | index 4d1c46aac331..c7b1dc713cdd 100644 |
1019 | --- a/include/scsi/libiscsi.h |
1020 | +++ b/include/scsi/libiscsi.h |
1021 | @@ -196,6 +196,7 @@ struct iscsi_conn { |
1022 | struct iscsi_task *task; /* xmit task in progress */ |
1023 | |
1024 | /* xmit */ |
1025 | + spinlock_t taskqueuelock; /* protects the next three lists */ |
1026 | struct list_head mgmtqueue; /* mgmt (control) xmit queue */ |
1027 | struct list_head cmdqueue; /* data-path cmd queue */ |
1028 | struct list_head requeue; /* tasks needing another run */ |
1029 | diff --git a/kernel/events/core.c b/kernel/events/core.c |
1030 | index 9bbe9ac23cf2..e4b5494f05f8 100644 |
1031 | --- a/kernel/events/core.c |
1032 | +++ b/kernel/events/core.c |
1033 | @@ -9230,7 +9230,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) |
1034 | ret = inherit_task_group(event, parent, parent_ctx, |
1035 | child, ctxn, &inherited_all); |
1036 | if (ret) |
1037 | - break; |
1038 | + goto out_unlock; |
1039 | } |
1040 | |
1041 | /* |
1042 | @@ -9246,7 +9246,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) |
1043 | ret = inherit_task_group(event, parent, parent_ctx, |
1044 | child, ctxn, &inherited_all); |
1045 | if (ret) |
1046 | - break; |
1047 | + goto out_unlock; |
1048 | } |
1049 | |
1050 | raw_spin_lock_irqsave(&parent_ctx->lock, flags); |
1051 | @@ -9274,6 +9274,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) |
1052 | } |
1053 | |
1054 | raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); |
1055 | +out_unlock: |
1056 | mutex_unlock(&parent_ctx->mutex); |
1057 | |
1058 | perf_unpin_context(parent_ctx); |
1059 | diff --git a/kernel/fork.c b/kernel/fork.c |
1060 | index 2e55b53399de..278a2ddad351 100644 |
1061 | --- a/kernel/fork.c |
1062 | +++ b/kernel/fork.c |
1063 | @@ -331,13 +331,14 @@ void set_task_stack_end_magic(struct task_struct *tsk) |
1064 | *stackend = STACK_END_MAGIC; /* for overflow detection */ |
1065 | } |
1066 | |
1067 | -static struct task_struct *dup_task_struct(struct task_struct *orig) |
1068 | +static struct task_struct *dup_task_struct(struct task_struct *orig, int node) |
1069 | { |
1070 | struct task_struct *tsk; |
1071 | struct thread_info *ti; |
1072 | - int node = tsk_fork_get_node(orig); |
1073 | int err; |
1074 | |
1075 | + if (node == NUMA_NO_NODE) |
1076 | + node = tsk_fork_get_node(orig); |
1077 | tsk = alloc_task_struct_node(node); |
1078 | if (!tsk) |
1079 | return NULL; |
1080 | @@ -1270,7 +1271,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, |
1081 | int __user *child_tidptr, |
1082 | struct pid *pid, |
1083 | int trace, |
1084 | - unsigned long tls) |
1085 | + unsigned long tls, |
1086 | + int node) |
1087 | { |
1088 | int retval; |
1089 | struct task_struct *p; |
1090 | @@ -1323,7 +1325,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, |
1091 | goto fork_out; |
1092 | |
1093 | retval = -ENOMEM; |
1094 | - p = dup_task_struct(current); |
1095 | + p = dup_task_struct(current, node); |
1096 | if (!p) |
1097 | goto fork_out; |
1098 | |
1099 | @@ -1699,7 +1701,8 @@ static inline void init_idle_pids(struct pid_link *links) |
1100 | struct task_struct *fork_idle(int cpu) |
1101 | { |
1102 | struct task_struct *task; |
1103 | - task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0); |
1104 | + task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0, |
1105 | + cpu_to_node(cpu)); |
1106 | if (!IS_ERR(task)) { |
1107 | init_idle_pids(task->pids); |
1108 | init_idle(task, cpu); |
1109 | @@ -1744,7 +1747,7 @@ long _do_fork(unsigned long clone_flags, |
1110 | } |
1111 | |
1112 | p = copy_process(clone_flags, stack_start, stack_size, |
1113 | - child_tidptr, NULL, trace, tls); |
1114 | + child_tidptr, NULL, trace, tls, NUMA_NO_NODE); |
1115 | /* |
1116 | * Do this prior waking up the new thread - the thread pointer |
1117 | * might get invalid after that point, if the thread exits quickly. |
1118 | diff --git a/mm/percpu.c b/mm/percpu.c |
1119 | index 1f376bce413c..ef6353f0adbd 100644 |
1120 | --- a/mm/percpu.c |
1121 | +++ b/mm/percpu.c |
1122 | @@ -1012,8 +1012,11 @@ area_found: |
1123 | mutex_unlock(&pcpu_alloc_mutex); |
1124 | } |
1125 | |
1126 | - if (chunk != pcpu_reserved_chunk) |
1127 | + if (chunk != pcpu_reserved_chunk) { |
1128 | + spin_lock_irqsave(&pcpu_lock, flags); |
1129 | pcpu_nr_empty_pop_pages -= occ_pages; |
1130 | + spin_unlock_irqrestore(&pcpu_lock, flags); |
1131 | + } |
1132 | |
1133 | if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) |
1134 | pcpu_schedule_balance_work(); |
1135 | diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h |
1136 | index 41446668ccce..d5677d39c1e4 100644 |
1137 | --- a/tools/include/linux/log2.h |
1138 | +++ b/tools/include/linux/log2.h |
1139 | @@ -13,12 +13,6 @@ |
1140 | #define _TOOLS_LINUX_LOG2_H |
1141 | |
1142 | /* |
1143 | - * deal with unrepresentable constant logarithms |
1144 | - */ |
1145 | -extern __attribute__((const, noreturn)) |
1146 | -int ____ilog2_NaN(void); |
1147 | - |
1148 | -/* |
1149 | * non-constant log of base 2 calculators |
1150 | * - the arch may override these in asm/bitops.h if they can be implemented |
1151 | * more efficiently than using fls() and fls64() |
1152 | @@ -78,7 +72,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) |
1153 | #define ilog2(n) \ |
1154 | ( \ |
1155 | __builtin_constant_p(n) ? ( \ |
1156 | - (n) < 1 ? ____ilog2_NaN() : \ |
1157 | + (n) < 2 ? 0 : \ |
1158 | (n) & (1ULL << 63) ? 63 : \ |
1159 | (n) & (1ULL << 62) ? 62 : \ |
1160 | (n) & (1ULL << 61) ? 61 : \ |
1161 | @@ -141,10 +135,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) |
1162 | (n) & (1ULL << 4) ? 4 : \ |
1163 | (n) & (1ULL << 3) ? 3 : \ |
1164 | (n) & (1ULL << 2) ? 2 : \ |
1165 | - (n) & (1ULL << 1) ? 1 : \ |
1166 | - (n) & (1ULL << 0) ? 0 : \ |
1167 | - ____ilog2_NaN() \ |
1168 | - ) : \ |
1169 | + 1 ) : \ |
1170 | (sizeof(n) <= 4) ? \ |
1171 | __ilog2_u32(n) : \ |
1172 | __ilog2_u64(n) \ |