Magellan Linux

Contents of /trunk/kernel-alx-legacy/patches-4.9/0117-4.9.18-all-fixes.patch



Revision 3608
Fri Aug 14 07:34:29 2020 UTC by niro
File size: 29223 bytes
-added kernel-alx-legacy pkg
diff --git a/Makefile b/Makefile
index 004f90a4e613..c10d0e634e68 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 17
+SUBLEVEL = 18
 EXTRAVERSION =
 NAME = Roaring Lionus

diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 7bd69bd43a01..1d8c24dc04d4 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -45,28 +45,9 @@ static inline void flush_kernel_dcache_page(struct page *page)

 #define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size));
-/* vmap range flushes and invalidates. Architecturally, we don't need
- * the invalidate, because the CPU should refuse to speculate once an
- * area has been flushed, so invalidate is left empty */
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-
-	flush_kernel_dcache_range_asm(start, start + size);
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-	void *cursor = vaddr;

-	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
-		struct page *page = vmalloc_to_page(cursor);
-
-		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-			flush_kernel_dcache_page(page);
-	}
-	flush_kernel_dcache_range_asm(start, start + size);
-}
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);

 #define flush_cache_vmap(start, end) flush_cache_all()
 #define flush_cache_vunmap(start, end) flush_cache_all()
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 977f0a4f5ecf..53ec75f8e237 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -633,3 +633,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
	}
 }
+
+void flush_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	if ((unsigned long)size > parisc_cache_flush_threshold)
+		flush_data_cache();
+	else
+		flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	if ((unsigned long)size > parisc_cache_flush_threshold)
+		flush_data_cache();
+	else
+		flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(invalidate_kernel_vmap_range);
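The two helpers above encode a common cache-maintenance heuristic: past a tunable size threshold it is cheaper to flush the entire data cache than to walk the range line by line. A minimal standalone sketch of that pattern; the names below are hypothetical stand-ins for parisc_cache_flush_threshold and the parisc flush primitives, not the real implementations:

#include <stddef.h>

/* Hypothetical stand-ins for the architecture's flush primitives. */
static unsigned long cache_flush_threshold = 2UL * 1024 * 1024;

static void flush_whole_data_cache(void) { /* arch-specific */ }
static void flush_data_cache_range(unsigned long s, unsigned long e) { (void)s; (void)e; }

static void flush_vmap_range(void *vaddr, size_t size)
{
	unsigned long start = (unsigned long)vaddr;

	if (size > cache_flush_threshold)
		flush_whole_data_cache();	/* one big flush beats many small ones */
	else
		flush_data_cache_range(start, start + size);
}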
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 40639439d8b3..e81afc378850 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -139,6 +139,8 @@ void machine_power_off(void)

	printk(KERN_EMERG "System shut down completed.\n"
	       "Please power this system off now.");
+
+	for (;;);
 }

 void (*pm_power_off)(void) = machine_power_off;
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
index 861e72109df2..f080abfc2f83 100644
--- a/arch/powerpc/boot/zImage.lds.S
+++ b/arch/powerpc/boot/zImage.lds.S
@@ -68,6 +68,7 @@ SECTIONS
   }

 #ifdef CONFIG_PPC64_BOOT_WRAPPER
+  . = ALIGN(256);
   .got :
   {
     __toc_start = .;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6e6c1fb60fbc..272608f102fb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
 {
	unsigned int cur_freq = __cpufreq_get(policy);
-	if (!cur_freq)
-		return sprintf(buf, "<unknown>");
-	return sprintf(buf, "%u\n", cur_freq);
+
+	if (cur_freq)
+		return sprintf(buf, "%u\n", cur_freq);
+
+	return sprintf(buf, "<unknown>\n");
 }

 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index b447a01ab21a..09e6a7320bb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3506,6 +3506,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
			max_sclk = 75000;
			max_mclk = 80000;
		}
+	} else if (adev->asic_type == CHIP_OLAND) {
+		if ((adev->pdev->device == 0x6604) &&
+		    (adev->pdev->subsystem_vendor == 0x1028) &&
+		    (adev->pdev->subsystem_device == 0x066F)) {
+			max_sclk = 75000;
+		}
	}
	/* Apply dpm quirks */
	while (p && p->chip_device != 0) {
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 8703f56b7947..246d1aea87bc 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -61,21 +61,24 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
		if (ret < 0)
			return ret;
		args->value = V3D_READ(V3D_IDENT0);
-		pm_runtime_put(&vc4->v3d->pdev->dev);
+		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
		break;
	case DRM_VC4_PARAM_V3D_IDENT1:
		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
		if (ret < 0)
			return ret;
		args->value = V3D_READ(V3D_IDENT1);
-		pm_runtime_put(&vc4->v3d->pdev->dev);
+		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
		break;
	case DRM_VC4_PARAM_V3D_IDENT2:
		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
		if (ret < 0)
			return ret;
		args->value = V3D_READ(V3D_IDENT2);
-		pm_runtime_put(&vc4->v3d->pdev->dev);
+		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
		break;
	case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
		args->value = true;
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 18e37171e9c8..ab3016982466 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -711,8 +711,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
	}

	mutex_lock(&vc4->power_lock);
-	if (--vc4->power_refcount == 0)
-		pm_runtime_put(&vc4->v3d->pdev->dev);
+	if (--vc4->power_refcount == 0) {
+		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+	}
	mutex_unlock(&vc4->power_lock);

	kfree(exec);
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index e6d3c6028341..7cc346ad9b0b 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -222,6 +222,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
		return ret;
	}

+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */
	pm_runtime_enable(dev);

	return 0;
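Taken together, the three vc4 hunks above are one change: instead of powering the V3D block down the instant the last reference is dropped, the driver arms runtime-PM autosuspend at bind time and marks the device busy on every put, so it stays powered across bursts of ioctls. A condensed sketch of the idiom in a hypothetical driver; the pm_runtime_* calls are the real runtime-PM API:

#include <linux/pm_runtime.h>

static int mydrv_bind(struct device *dev)
{
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 40);	/* ms of idle before suspend */
	pm_runtime_enable(dev);
	return 0;
}

static int mydrv_touch_hw(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);	/* resume, or bump the usage count */

	if (ret < 0)
		return ret;
	/* ... access registers ... */
	pm_runtime_mark_last_busy(dev);		/* restart the idle timer */
	pm_runtime_put_autosuspend(dev);	/* drop ref; suspend after the delay */
	return 0;
}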
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 2543cf5b8b51..917321ce832f 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -608,9 +608,7 @@ static bool
 vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
 {
	uint32_t max_branch_target = 0;
-	bool found_shader_end = false;
	int ip;
-	int shader_end_ip = 0;
	int last_branch = -2;

	for (ip = 0; ip < validation_state->max_ip; ip++) {
@@ -621,8 +619,13 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
		uint32_t branch_target_ip;

		if (sig == QPU_SIG_PROG_END) {
-			shader_end_ip = ip;
-			found_shader_end = true;
+			/* There are two delay slots after program end is
+			 * signaled that are still executed, then we're
+			 * finished. validation_state->max_ip is the
+			 * instruction after the last valid instruction in the
+			 * program.
+			 */
+			validation_state->max_ip = ip + 3;
			continue;
		}

@@ -676,15 +679,9 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
		}
		set_bit(after_delay_ip, validation_state->branch_targets);
		max_branch_target = max(max_branch_target, after_delay_ip);
-
-		/* There are two delay slots after program end is signaled
-		 * that are still executed, then we're finished.
-		 */
-		if (found_shader_end && ip == shader_end_ip + 2)
-			break;
	}

-	if (max_branch_target > shader_end_ip) {
+	if (max_branch_target > validation_state->max_ip - 3) {
		DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
		return false;
	}
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index aecec6d32463..7f1c625b08ec 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface,
		return -ENODEV;
	}

+	if (hostif->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
	dev_info(&udev->dev,
		 "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
		 __func__, le16_to_cpu(udev->descriptor.idVendor),
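The one-line gigaset check is an instance of a recurring USB hardening pattern: descriptors are device-controlled input, so an interface's endpoint array must never be indexed before bNumEndpoints is validated. A hedged sketch of the same check in a hypothetical probe routine:

#include <linux/usb.h>

static int mydrv_probe(struct usb_interface *intf,
		       const struct usb_device_id *id)
{
	struct usb_host_interface *alt = intf->cur_altsetting;

	/* The device supplies these descriptors; check before dereferencing. */
	if (alt->desc.bNumEndpoints < 1)
		return -ENODEV;

	/* alt->endpoint[0] is now known to exist. */
	return 0;
}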
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 39fddda2fef2..55b5e0e77b17 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1470,7 +1470,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
			split = bio;
		}

+		/*
+		 * If a bio is split, the first part of the bio will pass the
+		 * barrier but the bio is queued in current->bio_list (see
+		 * generic_make_request). If there is a raise_barrier() called
+		 * here, the second part of the bio can't pass the barrier. But
+		 * since the first part bio isn't dispatched to the underlying
+		 * disks yet, the barrier is never released, hence raise_barrier
+		 * will always wait. We have a deadlock.
+		 * Note, this only happens in the read path. For the write path,
+		 * the first part of the bio is dispatched in a schedule() call
+		 * (because of blk plug) or offloaded to raid10d.
+		 * Quitting from the function immediately can change the bio
+		 * order queued in bio_list and avoid the deadlock.
+		 */
		__make_request(mddev, split);
+		if (split != bio && bio_data_dir(bio) == READ) {
+			generic_make_request(bio);
+			break;
+		}
	} while (split != bio);

	/* In case raid10d snuck in to freeze_array */
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index f9b6fba689ff..a530f08592cd 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -560,8 +560,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
	WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
	task->state = state;

-	if (!list_empty(&task->running))
+	spin_lock_bh(&conn->taskqueuelock);
+	if (!list_empty(&task->running)) {
+		pr_debug_once("%s while task on list", __func__);
		list_del_init(&task->running);
+	}
+	spin_unlock_bh(&conn->taskqueuelock);

	if (conn->task == task)
		conn->task = NULL;
@@ -783,7 +787,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
		if (session->tt->xmit_task(task))
			goto free_task;
	} else {
+		spin_lock_bh(&conn->taskqueuelock);
		list_add_tail(&task->running, &conn->mgmtqueue);
+		spin_unlock_bh(&conn->taskqueuelock);
		iscsi_conn_queue_work(conn);
	}

@@ -1474,8 +1480,10 @@ void iscsi_requeue_task(struct iscsi_task *task)
	 * this may be on the requeue list already if the xmit_task callout
	 * is handling the r2ts while we are adding new ones
	 */
+	spin_lock_bh(&conn->taskqueuelock);
	if (list_empty(&task->running))
		list_add_tail(&task->running, &conn->requeue);
+	spin_unlock_bh(&conn->taskqueuelock);
	iscsi_conn_queue_work(conn);
 }
 EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1512,22 +1520,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
	 * only have one nop-out as a ping from us and targets should not
	 * overflow us with nop-ins
	 */
+	spin_lock_bh(&conn->taskqueuelock);
 check_mgmt:
	while (!list_empty(&conn->mgmtqueue)) {
		conn->task = list_entry(conn->mgmtqueue.next,
					struct iscsi_task, running);
		list_del_init(&conn->task->running);
+		spin_unlock_bh(&conn->taskqueuelock);
		if (iscsi_prep_mgmt_task(conn, conn->task)) {
			/* regular RX path uses back_lock */
			spin_lock_bh(&conn->session->back_lock);
			__iscsi_put_task(conn->task);
			spin_unlock_bh(&conn->session->back_lock);
			conn->task = NULL;
+			spin_lock_bh(&conn->taskqueuelock);
			continue;
		}
		rc = iscsi_xmit_task(conn);
		if (rc)
			goto done;
+		spin_lock_bh(&conn->taskqueuelock);
	}

	/* process pending command queue */
@@ -1535,19 +1547,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
		conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
					running);
		list_del_init(&conn->task->running);
+		spin_unlock_bh(&conn->taskqueuelock);
		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
			fail_scsi_task(conn->task, DID_IMM_RETRY);
+			spin_lock_bh(&conn->taskqueuelock);
			continue;
		}
		rc = iscsi_prep_scsi_cmd_pdu(conn->task);
		if (rc) {
			if (rc == -ENOMEM || rc == -EACCES) {
+				spin_lock_bh(&conn->taskqueuelock);
				list_add_tail(&conn->task->running,
					      &conn->cmdqueue);
				conn->task = NULL;
+				spin_unlock_bh(&conn->taskqueuelock);
				goto done;
			} else
				fail_scsi_task(conn->task, DID_ABORT);
+			spin_lock_bh(&conn->taskqueuelock);
			continue;
		}
		rc = iscsi_xmit_task(conn);
@@ -1558,6 +1575,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
		 * we need to check the mgmt queue for nops that need to
		 * be sent to avoid starvation
		 */
+		spin_lock_bh(&conn->taskqueuelock);
		if (!list_empty(&conn->mgmtqueue))
			goto check_mgmt;
	}
@@ -1577,12 +1595,15 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
		conn->task = task;
		list_del_init(&conn->task->running);
		conn->task->state = ISCSI_TASK_RUNNING;
+		spin_unlock_bh(&conn->taskqueuelock);
		rc = iscsi_xmit_task(conn);
		if (rc)
			goto done;
+		spin_lock_bh(&conn->taskqueuelock);
		if (!list_empty(&conn->mgmtqueue))
			goto check_mgmt;
	}
+	spin_unlock_bh(&conn->taskqueuelock);
	spin_unlock_bh(&conn->session->frwd_lock);
	return -ENODATA;

@@ -1738,7 +1759,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
			goto prepd_reject;
		}
	} else {
+		spin_lock_bh(&conn->taskqueuelock);
		list_add_tail(&task->running, &conn->cmdqueue);
+		spin_unlock_bh(&conn->taskqueuelock);
		iscsi_conn_queue_work(conn);
	}

@@ -2897,6 +2920,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
	INIT_LIST_HEAD(&conn->mgmtqueue);
	INIT_LIST_HEAD(&conn->cmdqueue);
	INIT_LIST_HEAD(&conn->requeue);
+	spin_lock_init(&conn->taskqueuelock);
	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);

	/* allocate login_task used for the login/text sequences */
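The shape repeated through the libiscsi hunks above is worth isolating: every add or delete on the three task lists is now wrapped in the new taskqueuelock, and the lock is deliberately dropped around iscsi_xmit_task() because the transmit path can block and can itself requeue tasks. A distilled sketch of that unlock-around-the-work loop, with placeholder names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct work_item {
	struct list_head running;
};

static DEFINE_SPINLOCK(queue_lock);
static LIST_HEAD(queue);

static void xmit(struct work_item *w);	/* may sleep, may requeue w */

static void drain_queue(void)
{
	spin_lock_bh(&queue_lock);
	while (!list_empty(&queue)) {
		struct work_item *w =
			list_first_entry(&queue, struct work_item, running);

		list_del_init(&w->running);
		spin_unlock_bh(&queue_lock);	/* never held across xmit() */
		xmit(w);
		spin_lock_bh(&queue_lock);	/* re-take before rescanning */
	}
	spin_unlock_bh(&queue_lock);
}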
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 734a0428ef0e..f7e3f27bb5c5 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -11393,6 +11393,7 @@ static struct pci_driver lpfc_driver = {
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
+	.shutdown	= lpfc_pci_remove_one,
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler    = &lpfc_err_handler,
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index bff9689f5ca9..feab7ea8e823 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -5375,16 +5375,22 @@ qlt_send_busy(struct scsi_qla_host *vha,

 static int
 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
-	struct atio_from_isp *atio)
+	struct atio_from_isp *atio, bool ha_locked)
 {
	struct qla_hw_data *ha = vha->hw;
	uint16_t status;
+	unsigned long flags;

	if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
		return 0;

+	if (!ha_locked)
+		spin_lock_irqsave(&ha->hardware_lock, flags);
	status = temp_sam_status;
	qlt_send_busy(vha, atio, status);
+	if (!ha_locked)
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
	return 1;
 }

@@ -5429,7 +5435,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,


	if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
-		rc = qlt_chk_qfull_thresh_hold(vha, atio);
+		rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
		if (rc != 0) {
			tgt->atio_irq_cmd_count--;
			return;
@@ -5552,7 +5558,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
		break;
	}

-	rc = qlt_chk_qfull_thresh_hold(vha, atio);
+	rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
	if (rc != 0) {
		tgt->irq_cmd_count--;
		return;
@@ -6794,6 +6800,8 @@ qlt_handle_abts_recv_work(struct work_struct *work)
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	kfree(op);
 }

 void
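qlt_chk_qfull_thresh_hold() now takes an explicit ha_locked flag because it is reachable both from a caller that already holds hardware_lock and from one that does not, and qlt_send_busy() must run under that lock. A minimal sketch of the conditional-locking pattern, with hypothetical names; splitting the helper into locked/unlocked variants is often the cleaner alternative when the call sites allow it:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(hw_lock);

static void send_busy_locked(void);	/* requires hw_lock */

static void maybe_send_busy(bool caller_holds_lock)
{
	unsigned long flags = 0;

	if (!caller_holds_lock)
		spin_lock_irqsave(&hw_lock, flags);
	send_busy_locked();
	if (!caller_holds_lock)
		spin_unlock_irqrestore(&hw_lock, flags);
}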
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 9125d9358dea..ef1c8c158f66 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,

	buf = kzalloc(12, GFP_KERNEL);
	if (!buf)
-		return;
+		goto out_free;

	memset(cdb, 0, MAX_COMMAND_SIZE);
	cdb[0] = MODE_SENSE;
@@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
	 * If MODE_SENSE still returns zero, set the default value to 1024.
	 */
	sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+out_free:
	if (!sdev->sector_size)
		sdev->sector_size = 1024;
-out_free:
+
	kfree(buf);
 }

@@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
				sd->lun, sd->queue_depth);
	}

-	dev->dev_attrib.hw_block_size = sd->sector_size;
+	dev->dev_attrib.hw_block_size =
+		min_not_zero((int)sd->sector_size, 512);
	dev->dev_attrib.hw_max_sectors =
-		min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+		min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
	dev->dev_attrib.hw_queue_depth = sd->queue_depth;

	/*
@@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
	/*
	 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
	 */
-	if (sd->type == TYPE_TAPE)
+	if (sd->type == TYPE_TAPE) {
		pscsi_tape_read_blocksize(dev, sd);
+		dev->dev_attrib.hw_block_size = sd->sector_size;
+	}
	return 0;
 }

@@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
 /*
  * Called with struct Scsi_Host->host_lock called.
  */
-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
	__releases(sh->host_lock)
 {
	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
@@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
	return 0;
 }

-/*
- * Called with struct Scsi_Host->host_lock called.
- */
-static int pscsi_create_type_other(struct se_device *dev,
-				   struct scsi_device *sd)
-	__releases(sh->host_lock)
-{
-	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
-	struct Scsi_Host *sh = sd->host;
-	int ret;
-
-	spin_unlock_irq(sh->host_lock);
-	ret = pscsi_add_device_to_list(dev, sd);
-	if (ret)
-		return ret;
-
-	pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
-		 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
-		 sd->channel, sd->id, sd->lun);
-	return 0;
-}
-
 static int pscsi_configure_device(struct se_device *dev)
 {
	struct se_hba *hba = dev->se_hba;
@@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev)
	case TYPE_DISK:
		ret = pscsi_create_type_disk(dev, sd);
		break;
-	case TYPE_ROM:
-		ret = pscsi_create_type_rom(dev, sd);
-		break;
	default:
-		ret = pscsi_create_type_other(dev, sd);
+		ret = pscsi_create_type_nondisk(dev, sd);
		break;
	}

@@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev)
	else if (pdv->pdv_lld_host)
		scsi_host_put(pdv->pdv_lld_host);

-	if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
-		scsi_device_put(sd);
+	scsi_device_put(sd);

	pdv->pdv_sd = NULL;
 }
@@ -1069,7 +1047,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
	if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
		return pdv->pdv_bd->bd_part->nr_sects;

-	dump_stack();
	return 0;
 }

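Both attribute assignments above switch to min_not_zero(), whose point is that a zero operand means "not reported" rather than "minimum". Roughly (the real macro in <linux/kernel.h> uses temporaries to avoid double evaluation):

#define min_not_zero(x, y)  ((x) == 0 ? (y) : ((y) == 0 ? (x) : min(x, y)))

/* For the hunk above:
 *   sd->sector_size == 0   ->  hw_block_size = 512  (fallback, not 0)
 *   sd->sector_size == 512 ->  hw_block_size = 512
 * and a zero queue_max_hw_sectors() can no longer zero hw_max_sectors.
 */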
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index aabd6602da6c..a53fb23a0411 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1104,9 +1104,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
			return ret;
		break;
	case VERIFY:
+	case VERIFY_16:
		size = 0;
-		sectors = transport_get_sectors_10(cdb);
-		cmd->t_task_lba = transport_lba_32(cdb);
+		if (cdb[0] == VERIFY) {
+			sectors = transport_get_sectors_10(cdb);
+			cmd->t_task_lba = transport_lba_32(cdb);
+		} else {
+			sectors = transport_get_sectors_16(cdb);
+			cmd->t_task_lba = transport_lba_64(cdb);
+		}
		cmd->execute_cmd = sbc_emulate_noop;
		goto check_lba;
	case REZERO_UNIT:
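The split is needed because VERIFY (opcode 0x2f) is a 10-byte CDB with a 32-bit LBA and a 16-bit length, while VERIFY_16 (0x8f) carries a 64-bit LBA and a 32-bit length, so one decode cannot serve both. A sketch of the two layouts per SBC, using the kernel's unaligned big-endian accessors; the helper names here are illustrative, not the target code's:

#include <asm/unaligned.h>

/* VERIFY(10): LBA in CDB bytes 2-5, transfer length in bytes 7-8. */
static inline u64 verify10_lba(const u8 *cdb)
{
	return get_unaligned_be32(&cdb[2]);
}

/* VERIFY(16): LBA in CDB bytes 2-9, transfer length in bytes 10-13. */
static inline u64 verify16_lba(const u8 *cdb)
{
	return get_unaligned_be64(&cdb[2]);
}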
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index afe29ba42a4e..5fa9ba1de429 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3830,7 +3830,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
		   EXT4_DESC_PER_BLOCK(sb);
	if (ext4_has_feature_meta_bg(sb)) {
-		if (le32_to_cpu(es->s_first_meta_bg) >= db_count) {
+		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
			ext4_msg(sb, KERN_WARNING,
				 "first meta block group too large: %u "
				 "(group descriptor block count %u)",
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index a6a3389a07fc..51519c2836b5 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -207,7 +207,7 @@ struct lm_lockname {
	struct gfs2_sbd *ln_sbd;
	u64 ln_number;
	unsigned int ln_type;
-};
+} __packed __aligned(sizeof(int));

 #define lm_name_equal(name1, name2) \
	(((name1)->ln_number == (name2)->ln_number) && \
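The gfs2 change matters because lm_lockname is hashed as raw bytes, and on 64-bit builds the compiler would otherwise leave four indeterminate padding bytes after ln_type, making byte-wise hashes of equal names disagree. __packed removes the padding and __aligned(sizeof(int)) keeps the struct sanely aligned. A userspace illustration of the size difference (field kinds mirror lm_lockname; this is not the gfs2 code):

#include <stdio.h>

struct key_padded {
	void *sbd;
	unsigned long long number;
	unsigned int type;
};				/* 24 bytes on LP64: 4 bytes of tail padding */

struct key_packed {
	void *sbd;
	unsigned long long number;
	unsigned int type;
} __attribute__((__packed__, __aligned__(sizeof(int))));
				/* 20 bytes: nothing indeterminate left to hash */

int main(void)
{
	printf("padded: %zu  packed: %zu\n",
	       sizeof(struct key_padded), sizeof(struct key_packed));
	return 0;
}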
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 609840de31d3..1536aeb0abab 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -7426,11 +7426,11 @@ static void nfs4_exchange_id_release(void *data)
	struct nfs41_exchange_id_data *cdata =
					(struct nfs41_exchange_id_data *)data;

-	nfs_put_client(cdata->args.client);
	if (cdata->xprt) {
		xprt_put(cdata->xprt);
		rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient);
	}
+	nfs_put_client(cdata->args.client);
	kfree(cdata->res.impl_id);
	kfree(cdata->res.server_scope);
	kfree(cdata->res.server_owner);
@@ -7537,10 +7537,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
	task_setup_data.callback_data = calldata;

	task = rpc_run_task(&task_setup_data);
-	if (IS_ERR(task)) {
-		status = PTR_ERR(task);
-		goto out_impl_id;
-	}
+	if (IS_ERR(task))
+		return PTR_ERR(task);

	if (!xprt) {
		status = rpc_wait_for_completion_task(task);
@@ -7568,6 +7566,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
	kfree(calldata->res.server_owner);
 out_calldata:
	kfree(calldata);
+	nfs_put_client(clp);
	goto out;
 }

diff --git a/include/linux/log2.h b/include/linux/log2.h
index fd7ff3d91e6a..f38fae23bdac 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -16,12 +16,6 @@
 #include <linux/bitops.h>

 /*
- * deal with unrepresentable constant logarithms
- */
-extern __attribute__((const, noreturn))
-int ____ilog2_NaN(void);
-
-/*
  * non-constant log of base 2 calculators
  * - the arch may override these in asm/bitops.h if they can be implemented
  *   more efficiently than using fls() and fls64()
@@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
 #define ilog2(n)				\
 (						\
	__builtin_constant_p(n) ? (		\
-		(n) < 1 ? ____ilog2_NaN() :	\
+		(n) < 2 ? 0 :			\
		(n) & (1ULL << 63) ? 63 :	\
		(n) & (1ULL << 62) ? 62 :	\
		(n) & (1ULL << 61) ? 61 :	\
@@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
		(n) & (1ULL << 4) ? 4 :		\
		(n) & (1ULL << 3) ? 3 :		\
		(n) & (1ULL << 2) ? 2 :		\
-		(n) & (1ULL << 1) ? 1 :		\
-		(n) & (1ULL << 0) ? 0 :		\
-		____ilog2_NaN()			\
-	) :					\
+		1 ) :				\
	(sizeof(n) <= 4) ?			\
	__ilog2_u32(n) :			\
	__ilog2_u64(n)				\
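Previously a constant ilog2(0) compiled down to a call to the deliberately undefined extern ____ilog2_NaN(), i.e. a link-time error; the replacement folds the n < 2 cases to 0 at compile time instead (the out-of-line paths were always undefined for 0). A quick userspace check of what the macro computes, using the same fls-style fallback the kernel uses for non-constant arguments:

#include <stdio.h>

static int ilog2_u64(unsigned long long n)	/* floor(log2(n)); n > 0 */
{
	return 63 - __builtin_clzll(n);
}

int main(void)
{
	/* The constant arm now yields 0 for n < 2; otherwise floor(log2): */
	printf("%d %d %d %d\n", 0 /* ilog2(1) */, ilog2_u64(2),
	       ilog2_u64(3), ilog2_u64(64));	/* prints: 0 1 1 6 */
	return 0;
}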
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 4d1c46aac331..c7b1dc713cdd 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -196,6 +196,7 @@ struct iscsi_conn {
	struct iscsi_task *task;	/* xmit task in progress */

	/* xmit */
+	spinlock_t		taskqueuelock;	/* protects the next three lists */
	struct list_head	mgmtqueue;	/* mgmt (control) xmit queue */
	struct list_head	cmdqueue;	/* data-path cmd queue */
	struct list_head	requeue;	/* tasks needing another run */
diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c
index 2bd673783f1a..a57242e0d5a6 100644
--- a/kernel/cgroup_pids.c
+++ b/kernel/cgroup_pids.c
@@ -229,7 +229,7 @@ static int pids_can_fork(struct task_struct *task)
	/* Only log the first time events_limit is incremented. */
	if (atomic64_inc_return(&pids->events_limit) == 1) {
		pr_info("cgroup: fork rejected by pids controller in ");
-		pr_cont_cgroup_path(task_cgroup(current, pids_cgrp_id));
+		pr_cont_cgroup_path(css->cgroup);
		pr_cont("\n");
	}
	cgroup_file_notify(&pids->events_file);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4b3323151a2f..07c0dc806dfc 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -10333,6 +10333,17 @@ void perf_event_free_task(struct task_struct *task)
			continue;

		mutex_lock(&ctx->mutex);
+		raw_spin_lock_irq(&ctx->lock);
+		/*
+		 * Destroy the task <-> ctx relation and mark the context dead.
+		 *
+		 * This is important because even though the task hasn't been
+		 * exposed yet the context has been (through child_list).
+		 */
+		RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
+		WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
+		put_task_struct(task); /* cannot be last */
+		raw_spin_unlock_irq(&ctx->lock);
 again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
					 group_entry)
@@ -10586,7 +10597,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
-			break;
+			goto out_unlock;
	}

	/*
@@ -10602,7 +10613,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
-			break;
+			goto out_unlock;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
@@ -10630,6 +10641,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+out_unlock:
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
diff --git a/mm/percpu.c b/mm/percpu.c
index 255714302394..f014cebbf405 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1010,8 +1010,11 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
		mutex_unlock(&pcpu_alloc_mutex);
	}

-	if (chunk != pcpu_reserved_chunk)
+	if (chunk != pcpu_reserved_chunk) {
+		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_nr_empty_pop_pages -= occ_pages;
+		spin_unlock_irqrestore(&pcpu_lock, flags);
+	}

	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
		pcpu_schedule_balance_work();
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index e2c37061edbe..69502fa68a3c 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -486,7 +486,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
	struct ib_cq *sendcq, *recvcq;
	int rc;

-	max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES);
+	max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
+			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
		return -ENOMEM;
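The verbs.c change swaps min() for min_t(unsigned int, ...) because attrs.max_sge is a signed int while RPCRDMA_MAX_SEND_SGES is an enumerator, and the kernel's min() rejects operands of distinct types at compile time; min_t casts both sides to one explicit type first. Roughly (the real macros add temporaries and type checks):

/* The kernel's min() fails to build on mixed types:
 *	int a = attrs_max_sge;			// signed
 *	min(a, RPCRDMA_MAX_SEND_SGES);		// warning: comparing distinct types
 * min_t pins the comparison to one explicit type:
 */
#define min_t(type, a, b)  ((type)(a) < (type)(b) ? (type)(a) : (type)(b))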
diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h
index 41446668ccce..d5677d39c1e4 100644
--- a/tools/include/linux/log2.h
+++ b/tools/include/linux/log2.h
@@ -13,12 +13,6 @@
 #define _TOOLS_LINUX_LOG2_H

 /*
- * deal with unrepresentable constant logarithms
- */
-extern __attribute__((const, noreturn))
-int ____ilog2_NaN(void);
-
-/*
  * non-constant log of base 2 calculators
  * - the arch may override these in asm/bitops.h if they can be implemented
  *   more efficiently than using fls() and fls64()
@@ -78,7 +72,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
 #define ilog2(n)				\
 (						\
	__builtin_constant_p(n) ? (		\
-		(n) < 1 ? ____ilog2_NaN() :	\
+		(n) < 2 ? 0 :			\
		(n) & (1ULL << 63) ? 63 :	\
		(n) & (1ULL << 62) ? 62 :	\
		(n) & (1ULL << 61) ? 61 :	\
@@ -141,10 +135,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
		(n) & (1ULL << 4) ? 4 :		\
		(n) & (1ULL << 3) ? 3 :		\
		(n) & (1ULL << 2) ? 2 :		\
-		(n) & (1ULL << 1) ? 1 :		\
-		(n) & (1ULL << 0) ? 0 :		\
-		____ilog2_NaN()			\
-	) :					\
+		1 ) :				\
	(sizeof(n) <= 4) ?			\
	__ilog2_u32(n) :			\
	__ilog2_u64(n)				\