Annotation of /trunk/kernel-alx/patches-4.14/0161-4.14.62-all-fixes.patch
Revision 3238
Fri Nov 9 12:14:58 2018 UTC (5 years, 10 months ago) by niro
File size: 26877 byte(s)
-added up to patches-4.14.79

diff --git a/Makefile b/Makefile
index 4bd65eabd298..d407ecfdee0b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 61
+SUBLEVEL = 62
 EXTRAVERSION =
 NAME = Petit Gorille
 
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index f96830ffd9f1..75c6b98585ba 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -376,6 +376,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
 		goto err_desc;
 	}
 
+	reinit_completion(&dma->cmd_complete);
 	txdesc->callback = i2c_imx_dma_callback;
 	txdesc->callback_param = i2c_imx;
 	if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -619,7 +620,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
 	 * The first byte must be transmitted by the CPU.
 	 */
 	imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
-	reinit_completion(&i2c_imx->dma->cmd_complete);
 	time_left = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));
@@ -678,7 +678,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
 	if (result)
 		return result;
 
-	reinit_completion(&i2c_imx->dma->cmd_complete);
 	time_left = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));
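
The i2c-imx hunks above fix an ordering race: once dmaengine_submit() has
queued the descriptor, the DMA callback may invoke complete() at any moment,
and a reinit_completion() that runs after that point erases the wakeup, so the
subsequent wait times out. A minimal sketch of the safe pattern, assuming the
usual completion/dmaengine APIs (my_dma_callback, ctx, chan and DMA_TIMEOUT_MS
are illustrative names, not from this driver):

	reinit_completion(&dma->cmd_complete);	/* arm the wait first */
	txdesc->callback = my_dma_callback;	/* callback calls complete() */
	txdesc->callback_param = ctx;
	cookie = dmaengine_submit(txdesc);	/* callback can fire from here on */
	if (dma_submit_error(cookie))
		goto err;
	dma_async_issue_pending(chan);
	if (!wait_for_completion_timeout(&dma->cmd_complete,
					 msecs_to_jiffies(DMA_TIMEOUT_MS)))
		goto err;			/* real timeout, not a lost wakeup */
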
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index f0b06b14e782..16249b0953ff 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1061,7 +1061,7 @@ static const struct idle_cpu idle_cpu_dnv = {
 };
 
 #define ICPU(model, cpu) \
-	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
+	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&cpu }
 
 static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	ICPU(INTEL_FAM6_NEHALEM_EP,		idle_cpu_nehalem),
@@ -1125,6 +1125,11 @@ static int __init intel_idle_probe(void)
 		return -ENODEV;
 	}
 
+	if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+		pr_debug("Please enable MWAIT in BIOS SETUP\n");
+		return -ENODEV;
+	}
+
 	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
 		return -ENODEV;
 
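
With the ICPU() table keyed on X86_FEATURE_ANY instead of X86_FEATURE_MWAIT,
x86_match_cpu() now matches on CPU model alone, so the probe can tell the user
why it bails out when MWAIT was disabled in firmware rather than failing
silently. Condensed sketch of the resulting check order (error paths trimmed;
the names used appear in the patch or in mainline headers):

	static int __init intel_idle_probe(void)
	{
		const struct x86_cpu_id *id = x86_match_cpu(intel_idle_ids);

		if (!id)				/* unknown model */
			return -ENODEV;
		if (!boot_cpu_has(X86_FEATURE_MWAIT)) {	/* model known, MWAIT off */
			pr_debug("Please enable MWAIT in BIOS SETUP\n");
			return -ENODEV;
		}
		/* ... CPUID_MWAIT_LEAF checks as in the patch ... */
		return 0;
	}
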
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f5643d107cc6..a67d03716510 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -77,7 +77,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
  * Represents an NVM Express device. Each nvme_dev is a PCI function.
  */
 struct nvme_dev {
-	struct nvme_queue **queues;
+	struct nvme_queue *queues;
 	struct blk_mq_tag_set tagset;
 	struct blk_mq_tag_set admin_tagset;
 	u32 __iomem *dbs;
@@ -348,7 +348,7 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 				unsigned int hctx_idx)
 {
 	struct nvme_dev *dev = data;
-	struct nvme_queue *nvmeq = dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[0];
 
 	WARN_ON(hctx_idx != 0);
 	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
@@ -370,7 +370,7 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 			  unsigned int hctx_idx)
 {
 	struct nvme_dev *dev = data;
-	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
+	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
 
 	if (!nvmeq->tags)
 		nvmeq->tags = &dev->tagset.tags[hctx_idx];
@@ -386,7 +386,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
 	struct nvme_dev *dev = set->driver_data;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
-	struct nvme_queue *nvmeq = dev->queues[queue_idx];
+	struct nvme_queue *nvmeq = &dev->queues[queue_idx];
 
 	BUG_ON(!nvmeq);
 	iod->nvmeq = nvmeq;
@@ -900,7 +900,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
 {
 	struct nvme_dev *dev = to_nvme_dev(ctrl);
-	struct nvme_queue *nvmeq = dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[0];
 	struct nvme_command c;
 
 	memset(&c, 0, sizeof(c));
@@ -1146,7 +1146,6 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
 	if (nvmeq->sq_cmds)
 		dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
 					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
-	kfree(nvmeq);
 }
 
 static void nvme_free_queues(struct nvme_dev *dev, int lowest)
@@ -1154,10 +1153,8 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
 	int i;
 
 	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
-		struct nvme_queue *nvmeq = dev->queues[i];
 		dev->ctrl.queue_count--;
-		dev->queues[i] = NULL;
-		nvme_free_queue(nvmeq);
+		nvme_free_queue(&dev->queues[i]);
 	}
 }
 
@@ -1189,10 +1186,8 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 
 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
-	struct nvme_queue *nvmeq = dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[0];
 
-	if (!nvmeq)
-		return;
 	if (nvme_suspend_queue(nvmeq))
 		return;
 
@@ -1246,13 +1241,13 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 	return 0;
 }
 
-static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
-							int depth, int node)
+static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
+		int depth, int node)
 {
-	struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL,
-							node);
-	if (!nvmeq)
-		return NULL;
+	struct nvme_queue *nvmeq = &dev->queues[qid];
+
+	if (dev->ctrl.queue_count > qid)
+		return 0;
 
 	nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
 					  &nvmeq->cq_dma_addr, GFP_KERNEL);
@@ -1271,17 +1266,15 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 	nvmeq->q_depth = depth;
 	nvmeq->qid = qid;
 	nvmeq->cq_vector = -1;
-	dev->queues[qid] = nvmeq;
 	dev->ctrl.queue_count++;
 
-	return nvmeq;
+	return 0;
 
 free_cqdma:
 	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
 							nvmeq->cq_dma_addr);
 free_nvmeq:
-	kfree(nvmeq);
-	return NULL;
+	return -ENOMEM;
 }
 
 static int queue_request_irq(struct nvme_queue *nvmeq)
@@ -1468,14 +1461,12 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
 	if (result < 0)
 		return result;
 
-	nvmeq = dev->queues[0];
-	if (!nvmeq) {
-		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
-					dev_to_node(dev->dev));
-		if (!nvmeq)
-			return -ENOMEM;
-	}
+	result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
+			dev_to_node(dev->dev));
+	if (result)
+		return result;
 
+	nvmeq = &dev->queues[0];
 	aqa = nvmeq->q_depth - 1;
 	aqa |= aqa << 16;
 
@@ -1505,7 +1496,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 
 	for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
 		/* vector == qid - 1, match nvme_create_queue */
-		if (!nvme_alloc_queue(dev, i, dev->q_depth,
+		if (nvme_alloc_queue(dev, i, dev->q_depth,
 		     pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
 			ret = -ENOMEM;
 			break;
@@ -1514,7 +1505,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 
 	max = min(dev->max_qid, dev->ctrl.queue_count - 1);
 	for (i = dev->online_queues; i <= max; i++) {
-		ret = nvme_create_queue(dev->queues[i], i);
+		ret = nvme_create_queue(&dev->queues[i], i);
 		if (ret)
 			break;
 	}
@@ -1770,7 +1761,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
 
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
-	struct nvme_queue *adminq = dev->queues[0];
+	struct nvme_queue *adminq = &dev->queues[0];
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int result, nr_io_queues;
 	unsigned long size;
@@ -1896,7 +1887,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
 retry:
 	timeout = ADMIN_TIMEOUT;
 	for (; i > 0; i--, sent++)
-		if (nvme_delete_queue(dev->queues[i], opcode))
+		if (nvme_delete_queue(&dev->queues[i], opcode))
 			break;
 
 	while (sent--) {
@@ -2081,7 +2072,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 
 	queues = dev->online_queues - 1;
 	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
-		nvme_suspend_queue(dev->queues[i]);
+		nvme_suspend_queue(&dev->queues[i]);
 
 	if (dead) {
 		/* A device might become IO incapable very soon during
@@ -2089,7 +2080,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 		 * queue_count can be 0 here.
 		 */
 		if (dev->ctrl.queue_count)
-			nvme_suspend_queue(dev->queues[0]);
+			nvme_suspend_queue(&dev->queues[0]);
 	} else {
 		nvme_disable_io_queues(dev, queues);
 		nvme_disable_admin_queue(dev, shutdown);
@@ -2345,7 +2336,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
 	if (!dev)
 		return -ENOMEM;
-	dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
+
+	dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(struct nvme_queue),
 					GFP_KERNEL, node);
 	if (!dev->queues)
 		goto free;
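
Taken together, the pci.c hunks above convert dev->queues from an array of
pointers (each nvme_queue kzalloc'd on its own) into one flat array of
struct nvme_queue, addressed as &dev->queues[i] and released with a single
kfree. A reduced sketch of the allocation change, surrounding code elided:

	/* before: N+1 pointer slots, queues then allocated one by one */
	dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
				   GFP_KERNEL, node);

	/* after: one contiguous block; &dev->queues[qid] is always valid
	 * storage, which lets nvme_alloc_queue() simply return 0 when the
	 * queue was already set up (dev->ctrl.queue_count > qid) */
	dev->queues = kzalloc_node((num_possible_cpus() + 1) *
				   sizeof(struct nvme_queue), GFP_KERNEL, node);
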
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 8e21211b904b..b7a5d1065378 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
 	struct work_struct		work;
 } __aligned(sizeof(unsigned long long));
 
+/* desired maximum for a single sequence - if sg list allows it */
 #define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
-#define NVMET_FC_MAX_XFR_SGENTS		(NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
 
 enum nvmet_fcp_datadir {
 	NVMET_FCP_NODATA,
@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
 	struct nvme_fc_cmd_iu		cmdiubuf;
 	struct nvme_fc_ersp_iu		rspiubuf;
 	dma_addr_t			rspdma;
+	struct scatterlist		*next_sg;
 	struct scatterlist		*data_sg;
 	int				data_sg_cnt;
 	u32				total_length;
@@ -1000,8 +1001,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
 	INIT_LIST_HEAD(&newrec->assoc_list);
 	kref_init(&newrec->ref);
 	ida_init(&newrec->assoc_cnt);
-	newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
-					template->max_sgl_segments);
+	newrec->max_sg_cnt = template->max_sgl_segments;
 
 	ret = nvmet_fc_alloc_ls_iodlist(newrec);
 	if (ret) {
@@ -1717,6 +1717,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 				((fod->io_dir == NVMET_FCP_WRITE) ?
 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
 				/* note: write from initiator perspective */
+	fod->next_sg = fod->data_sg;
 
 	return 0;
 
@@ -1874,24 +1875,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
 				struct nvmet_fc_fcp_iod *fod, u8 op)
 {
 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+	struct scatterlist *sg = fod->next_sg;
 	unsigned long flags;
-	u32 tlen;
+	u32 remaininglen = fod->total_length - fod->offset;
+	u32 tlen = 0;
 	int ret;
 
 	fcpreq->op = op;
 	fcpreq->offset = fod->offset;
 	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
 
-	tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
-			(fod->total_length - fod->offset));
+	/*
+	 * for next sequence:
+	 *   break at a sg element boundary
+	 *   attempt to keep sequence length capped at
+	 *     NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
+	 *     be longer if a single sg element is larger
+	 *     than that amount. This is done to avoid creating
+	 *     a new sg list to use for the tgtport api.
+	 */
+	fcpreq->sg = sg;
+	fcpreq->sg_cnt = 0;
+	while (tlen < remaininglen &&
+	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
+	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
+		fcpreq->sg_cnt++;
+		tlen += sg_dma_len(sg);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
+		fcpreq->sg_cnt++;
+		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen)
+		fod->next_sg = sg;
+	else
+		fod->next_sg = NULL;
+
 	fcpreq->transfer_length = tlen;
 	fcpreq->transferred_length = 0;
 	fcpreq->fcp_error = 0;
 	fcpreq->rsplen = 0;
 
-	fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
-	fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
-
 	/*
 	 * If the last READDATA request: check if LLDD supports
 	 * combined xfr with response.
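
The rewritten loop in nvmet_fc_transfer_fcp_data() walks the scatterlist
element by element instead of assuming PAGE_SIZE entries: it breaks each
sequence at an element boundary once either the element-count cap or the byte
cap would be exceeded, and takes a single oversized element when it must so
progress is guaranteed. A standalone sketch of that chunking rule over a plain
array (illustrative types; the kernel code uses sg_next()/sg_dma_len() and
also caps at the bytes remaining in the transfer):

	/* Return the index just past this chunk; *bytes gets its length. */
	static size_t next_chunk(const size_t *len, size_t nelem, size_t start,
				 size_t max_elems, size_t max_bytes,
				 size_t *bytes)
	{
		size_t i = start, total = 0;

		while (i < nelem && i - start < max_elems &&
		       total + len[i] < max_bytes) {
			total += len[i];
			i++;
		}
		if (i == start && i < nelem) {	/* one oversized element */
			total += len[i];
			i++;
		}
		*bytes = total;
		return i;
	}
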
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index a8da543b3814..4708eb9df71b 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -624,7 +624,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
 	union acpi_object *obj;
 	struct pci_host_bridge *bridge;
 
-	if (acpi_pci_disabled || !bus->bridge)
+	if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
 		return;
 
 	acpi_pci_slot_enumerate(bus);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 9ce28c4f9812..b09d29931393 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2142,6 +2142,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 		msleep(1000);
 
 	qla24xx_disable_vp(vha);
+	qla2x00_wait_for_sess_deletion(vha);
 
 	vha->flags.delete_progress = 1;
 
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index f852ca60c49f..89706341514e 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -200,6 +200,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
 	uint16_t *);
 int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
 int qla24xx_async_abort_cmd(srb_t *);
+void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
 
 /*
  * Global Functions in qla_mid.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 59ecc4eda6cd..2a19ec0660cb 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3368,6 +3368,10 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
 	return rval;
 
 done_free_sp:
+	spin_lock_irqsave(&vha->hw->vport_slock, flags);
+	list_del(&sp->elem);
+	spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
+
 	if (sp->u.iocb_cmd.u.ctarg.req) {
 		dma_free_coherent(&vha->hw->pdev->dev,
 			sizeof(struct ct_sns_pkt),
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index bcde6130f121..1d42d38f5a45 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1326,11 +1326,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 
 	wait_for_completion(&tm_iocb->u.tmf.comp);
 
-	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
-	    QLA_SUCCESS : QLA_FUNCTION_FAILED;
+	rval = tm_iocb->u.tmf.data;
 
-	if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
-		ql_dbg(ql_dbg_taskm, vha, 0x8030,
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x8030,
 		    "TM IOCB failed (%x).\n", rval);
 	}
 
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 9a2c86eacf44..3f5a0f0f8b62 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -221,6 +221,8 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
 	sp->fcport = fcport;
 	sp->iocbs = 1;
 	sp->vha = qpair->vha;
+	INIT_LIST_HEAD(&sp->elem);
+
 done:
 	if (!sp)
 		QLA_QPAIR_MARK_NOT_BUSY(qpair);
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index d77dde89118e..375a88e18afe 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -152,10 +152,15 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
 {
 	unsigned long flags;
 	int ret;
+	fc_port_t *fcport;
 
 	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
 	atomic_set(&vha->loop_state, LOOP_DOWN);
 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+	list_for_each_entry(fcport, &vha->vp_fcports, list)
+		fcport->logout_on_delete = 0;
+
+	qla2x00_mark_all_devices_lost(vha, 0);
 
 	/* Remove port id from vp target map */
 	spin_lock_irqsave(&vha->hw->vport_slock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1be76695e692..7d7fb5bbb600 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1136,7 +1136,7 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
  * qla2x00_wait_for_sess_deletion can only be called from remove_one.
  * it has dependency on UNLOADING flag to stop device discovery
  */
-static void
+void
 qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
 {
 	qla2x00_mark_all_devices_lost(vha, 0);
@@ -5794,8 +5794,9 @@ qla2x00_do_dpc(void *data)
 			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
 		}
 
-		if (test_and_clear_bit(ISP_ABORT_NEEDED,
-						&base_vha->dpc_flags)) {
+		if (test_and_clear_bit
+		    (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
+		    !test_bit(UNLOADING, &base_vha->dpc_flags)) {
 
 			ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
 			    "ISP abort scheduled.\n");
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7fa50e12f18e..5b62e06567a3 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4280,6 +4280,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 	struct extent_map *em;
 	u64 start = page_offset(page);
 	u64 end = start + PAGE_SIZE - 1;
+	struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
 
 	if (gfpflags_allow_blocking(mask) &&
 	    page->mapping->host->i_size > SZ_16M) {
@@ -4302,6 +4303,8 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 					    extent_map_end(em) - 1,
 					    EXTENT_LOCKED | EXTENT_WRITEBACK,
 					    0, NULL)) {
+				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+					&btrfs_inode->runtime_flags);
 				remove_extent_mapping(map, em);
 				/* once for the rb tree */
 				free_extent_map(em);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 6b0c1ea95196..f30d2bf40471 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2301,7 +2301,7 @@ static int ext4_check_descriptors(struct super_block *sb,
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
 	ext4_fsblk_t last_block;
-	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1;
+	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
 	ext4_fsblk_t block_bitmap;
 	ext4_fsblk_t inode_bitmap;
 	ext4_fsblk_t inode_table;
@@ -4038,13 +4038,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			goto failed_mount2;
 		}
 	}
+	sbi->s_gdb_count = db_count;
 	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
 		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
 		ret = -EFSCORRUPTED;
 		goto failed_mount2;
 	}
 
-	sbi->s_gdb_count = db_count;
 	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
 	spin_lock_init(&sbi->s_next_gen_lock);
 
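
Two corrections in super.c: group zero's metadata ends at
sb_block + ext4_bg_num_gdb(sb, 0) — the superblock itself followed by the
descriptor blocks — so the old "+ 1" made ext4_check_descriptors() judge block
placement one block off; and s_gdb_count is now initialized before that check
runs so the descriptor code it calls sees the final value. A worked example
with assumed numbers (sb_block == 0, three descriptor blocks):

	/* block 0      superblock
	 * blocks 1..3  group descriptor blocks
	 * => last_bg_block == 0 + 3 == 3; the old "+ 1" also claimed block 4 */
	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
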
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index c60f3d32ee91..a6797986b625 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -491,15 +491,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
 	if (size > PSIZE) {
 		/*
 		 * To keep the rest of the code simple.  Allocate a
-		 * contiguous buffer to work with
+		 * contiguous buffer to work with. Make the buffer large
+		 * enough to make use of the whole extent.
 		 */
-		ea_buf->xattr = kmalloc(size, GFP_KERNEL);
+		ea_buf->max_size = (size + sb->s_blocksize - 1) &
+		    ~(sb->s_blocksize - 1);
+
+		ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
 		if (ea_buf->xattr == NULL)
 			return -ENOMEM;
 
 		ea_buf->flag = EA_MALLOC;
-		ea_buf->max_size = (size + sb->s_blocksize - 1) &
-		    ~(sb->s_blocksize - 1);
 
 		if (ea_size == 0)
 			return 0;
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 5c16db86b38f..40e53a4fc0a6 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -785,9 +785,8 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
 	ASSERT(blkno == 0);
 	error = xfs_attr3_leaf_create(args, blkno, &bp);
 	if (error) {
-		error = xfs_da_shrink_inode(args, 0, bp);
-		bp = NULL;
-		if (error)
+		/* xfs_attr3_leaf_create may not have instantiated a block */
+		if (bp && (xfs_da_shrink_inode(args, 0, bp) != 0))
 			goto out;
 		xfs_idata_realloc(dp, size, XFS_ATTR_FORK);	/* try to put */
 		memcpy(ifp->if_u1.if_data, tmpbuffer, size);	/* it back */
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 43005fbe8b1e..544b5211221c 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -305,6 +305,46 @@ xfs_reinit_inode(
 	return error;
 }
 
+/*
+ * If we are allocating a new inode, then check what was returned is
+ * actually a free, empty inode. If we are not allocating an inode,
+ * then check we didn't find a free inode.
+ *
+ * Returns:
+ *	0		if the inode free state matches the lookup context
+ *	-ENOENT		if the inode is free and we are not allocating
+ *	-EFSCORRUPTED	if there is any state mismatch at all
+ */
+static int
+xfs_iget_check_free_state(
+	struct xfs_inode	*ip,
+	int			flags)
+{
+	if (flags & XFS_IGET_CREATE) {
+		/* should be a free inode */
+		if (VFS_I(ip)->i_mode != 0) {
+			xfs_warn(ip->i_mount,
+"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
+				ip->i_ino, VFS_I(ip)->i_mode);
+			return -EFSCORRUPTED;
+		}
+
+		if (ip->i_d.di_nblocks != 0) {
+			xfs_warn(ip->i_mount,
+"Corruption detected! Free inode 0x%llx has blocks allocated!",
+				ip->i_ino);
+			return -EFSCORRUPTED;
+		}
+		return 0;
+	}
+
+	/* should be an allocated inode */
+	if (VFS_I(ip)->i_mode == 0)
+		return -ENOENT;
+
+	return 0;
+}
+
 /*
  * Check the validity of the inode we just found it the cache
  */
@@ -354,12 +394,12 @@ xfs_iget_cache_hit(
 	}
 
 	/*
-	 * If lookup is racing with unlink return an error immediately.
+	 * Check the inode free state is valid. This also detects lookup
+	 * racing with unlinks.
 	 */
-	if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) {
-		error = -ENOENT;
+	error = xfs_iget_check_free_state(ip, flags);
+	if (error)
 		goto out_error;
-	}
 
 	/*
 	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
@@ -475,10 +515,14 @@ xfs_iget_cache_miss(
 
 	trace_xfs_iget_miss(ip);
 
-	if ((VFS_I(ip)->i_mode == 0) && !(flags & XFS_IGET_CREATE)) {
-		error = -ENOENT;
+
+	/*
+	 * Check the inode free state is valid. This also detects lookup
+	 * racing with unlinks.
+	 */
+	error = xfs_iget_check_free_state(ip, flags);
+	if (error)
 		goto out_destroy;
-	}
 
 	/*
 	 * Preload the radix tree so we can insert safely under the
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 289e4d54e3e0..5caa062a02b2 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -160,6 +160,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
 void ring_buffer_record_off(struct ring_buffer *buffer);
 void ring_buffer_record_on(struct ring_buffer *buffer);
 int ring_buffer_record_is_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index b02caa442776..069311541577 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1030,6 +1030,13 @@ static int irq_setup_forced_threading(struct irqaction *new)
 	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
 		return 0;
 
+	/*
+	 * No further action required for interrupts which are requested as
+	 * threaded interrupts already
+	 */
+	if (new->handler == irq_default_primary_handler)
+		return 0;
+
 	new->flags |= IRQF_ONESHOT;
 
 	/*
@@ -1037,7 +1044,7 @@ static int irq_setup_forced_threading(struct irqaction *new)
 	 * thread handler. We force thread them as well by creating a
 	 * secondary action.
 	 */
-	if (new->handler != irq_default_primary_handler && new->thread_fn) {
+	if (new->handler && new->thread_fn) {
 		/* Allocate the secondary action */
 		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
 		if (!new->secondary)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index e89c3b0cff6d..f40ac7191257 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -382,7 +382,7 @@ static inline void tick_irq_exit(void)
 
 	/* Make sure that timer wheel updates are propagated */
 	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
-		if (!in_interrupt())
+		if (!in_irq())
 			tick_nohz_irq_exit();
 	}
 #endif
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index bb2af74e6b62..ea3c062e7e1c 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -676,7 +676,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 
 static inline bool local_timer_softirq_pending(void)
 {
-	return local_softirq_pending() & TIMER_SOFTIRQ;
+	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}
 
 static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
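
local_softirq_pending() returns a bitmask indexed by the softirq numbers,
while TIMER_SOFTIRQ is the index itself (1), so the old expression tested
bit 0 — HI_SOFTIRQ — instead of the timer bit. A minimal demonstration (enum
order as in mainline interrupt.h):

	enum { HI_SOFTIRQ = 0, TIMER_SOFTIRQ, NET_TX_SOFTIRQ /* ... */ };
	#define BIT(n) (1UL << (n))

	unsigned long pending = BIT(TIMER_SOFTIRQ);	/* only timer raised: 0x2 */

	int wrong = pending & TIMER_SOFTIRQ;		/* 0x2 & 0x1 == 0: missed */
	int right = pending & BIT(TIMER_SOFTIRQ);	/* 0x2 & 0x2 != 0: seen   */
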
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 36f018b15392..fd7809004297 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3109,6 +3109,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
 	return !atomic_read(&buffer->record_disabled);
 }
 
+/**
+ * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
+ * @buffer: The ring buffer to see if write is set enabled
+ *
+ * Returns true if the ring buffer is set writable by ring_buffer_record_on().
+ * Note that this does NOT mean it is in a writable state.
+ *
+ * It may return true when the ring buffer has been disabled by
+ * ring_buffer_record_disable(), as that is a temporary disabling of
+ * the ring buffer.
+ */
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
+{
+	return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
+}
+
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  * @buffer: The ring buffer to stop writes to.
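
record_disabled serves as both a nesting counter and a flag word:
ring_buffer_record_disable() bumps the counter for a temporary stop, while
ring_buffer_record_off() sets the sticky RB_BUFFER_OFF bit. Hence
ring_buffer_record_is_on() treats any nonzero value as "off", and the new
ring_buffer_record_is_set_on() masks only the sticky bit. In sketch form,
assuming RB_BUFFER_OFF as defined in ring_buffer.c (the real record_off uses
a cmpxchg loop rather than atomic_or):

	atomic_inc(&buffer->record_disabled);		    /* nested, temporary */
	/* is_on() -> false, is_set_on() -> true: still "set writable"          */

	atomic_or(RB_BUFFER_OFF, &buffer->record_disabled); /* persistent off   */
	/* is_on() -> false, is_set_on() -> false                                */
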
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e268750bd4ad..20919489883f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1366,6 +1366,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	arch_spin_lock(&tr->max_lock);
 
+	/* Inherit the recordable setting from trace_buffer */
+	if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
+		ring_buffer_record_on(tr->max_buffer.buffer);
+	else
+		ring_buffer_record_off(tr->max_buffer.buffer);
+
 	buf = tr->trace_buffer.buffer;
 	tr->trace_buffer.buffer = tr->max_buffer.buffer;
 	tr->max_buffer.buffer = buf;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 68c9d1833b95..c67abda5d639 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -981,8 +981,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 
 	if (nlk->ngroups == 0)
 		groups = 0;
-	else
-		groups &= (1ULL << nlk->ngroups) - 1;
+	else if (nlk->ngroups < 8*sizeof(groups))
+		groups &= (1UL << nlk->ngroups) - 1;
 
 	bound = nlk->bound;
 	if (bound) {
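
The netlink_bind() fix guards against a shift as wide as the shifted type:
C leaves such shifts undefined, and the old 1ULL form still hit that once
nlk->ngroups reached 64, while the new 1UL form would hit it at 32 on 32-bit
targets without the added bound. When ngroups does fill the word, the mask
would be all-ones anyway, so it can simply be skipped. In miniature:

	unsigned long groups = ~0UL;
	unsigned int ngroups = 8 * sizeof(groups);	/* e.g. 32 */

	/* undefined: shift count == width of the shifted type
	 * groups &= (1UL << ngroups) - 1; */

	if (ngroups < 8 * sizeof(groups))		/* defined for all inputs */
		groups &= (1UL << ngroups) - 1;
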