Contents of /trunk/kernel-alx-legacy/patches-4.9/0351-4.9.252-all-fixes.patch
Parent Directory | Revision Log
Revision 3653 -
(show annotations)
(download)
Mon Oct 24 14:07:32 2022 UTC (23 months ago) by niro
File size: 26605 byte(s)
-linux-4.9.252
1 | diff --git a/Makefile b/Makefile |
2 | index 8ebbb60f2078a..2213fe336705f 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 9 |
8 | -SUBLEVEL = 251 |
9 | +SUBLEVEL = 252 |
10 | EXTRAVERSION = |
11 | NAME = Roaring Lionus |
12 | |
13 | diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c |
14 | index f989145480c8f..bf236b7af8c1a 100644 |
15 | --- a/arch/arm/mach-omap2/omap_device.c |
16 | +++ b/arch/arm/mach-omap2/omap_device.c |
17 | @@ -224,10 +224,12 @@ static int _omap_device_notifier_call(struct notifier_block *nb, |
18 | break; |
19 | case BUS_NOTIFY_BIND_DRIVER: |
20 | od = to_omap_device(pdev); |
21 | - if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) && |
22 | - pm_runtime_status_suspended(dev)) { |
23 | + if (od) { |
24 | od->_driver_status = BUS_NOTIFY_BIND_DRIVER; |
25 | - pm_runtime_set_active(dev); |
26 | + if (od->_state == OMAP_DEVICE_STATE_ENABLED && |
27 | + pm_runtime_status_suspended(dev)) { |
28 | + pm_runtime_set_active(dev); |
29 | + } |
30 | } |
31 | break; |
32 | case BUS_NOTIFY_ADD_DEVICE: |
33 | diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c |
34 | index 0ad81fa13688f..10d80456f38f1 100644 |
35 | --- a/arch/arm64/kvm/sys_regs.c |
36 | +++ b/arch/arm64/kvm/sys_regs.c |
37 | @@ -450,6 +450,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) |
38 | { |
39 | u64 pmcr, val; |
40 | |
41 | + /* No PMU available, PMCR_EL0 may UNDEF... */ |
42 | + if (!kvm_arm_support_pmu_v3()) |
43 | + return; |
44 | + |
45 | pmcr = read_sysreg(pmcr_el0); |
46 | /* |
47 | * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN |
48 | diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h |
49 | index 6b8b2d57fdc8c..e588028922a83 100644 |
50 | --- a/arch/powerpc/include/asm/book3s/32/pgtable.h |
51 | +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h |
52 | @@ -411,9 +411,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, |
53 | if (pte_val(*ptep) & _PAGE_HASHPTE) |
54 | flush_hash_entry(mm, ptep, addr); |
55 | __asm__ __volatile__("\ |
56 | - stw%U0%X0 %2,%0\n\ |
57 | + stw%X0 %2,%0\n\ |
58 | eieio\n\ |
59 | - stw%U0%X0 %L2,%1" |
60 | + stw%X1 %L2,%1" |
61 | : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) |
62 | : "r" (pte) : "memory"); |
63 | |
64 | diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h |
65 | index 1263c22d60d85..330fe178c0c5e 100644 |
66 | --- a/arch/powerpc/include/asm/nohash/pgtable.h |
67 | +++ b/arch/powerpc/include/asm/nohash/pgtable.h |
68 | @@ -155,9 +155,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, |
69 | flush_hash_entry(mm, ptep, addr); |
70 | #endif |
71 | __asm__ __volatile__("\ |
72 | - stw%U0%X0 %2,%0\n\ |
73 | + stw%X0 %2,%0\n\ |
74 | eieio\n\ |
75 | - stw%U0%X0 %L2,%1" |
76 | + stw%X1 %L2,%1" |
77 | : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) |
78 | : "r" (pte) : "memory"); |
79 | |
80 | diff --git a/block/genhd.c b/block/genhd.c |
81 | index fcd6d4fae657c..9c1adfd768d2c 100644 |
82 | --- a/block/genhd.c |
83 | +++ b/block/genhd.c |
84 | @@ -159,14 +159,17 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter) |
85 | part = rcu_dereference(ptbl->part[piter->idx]); |
86 | if (!part) |
87 | continue; |
88 | + get_device(part_to_dev(part)); |
89 | + piter->part = part; |
90 | if (!part_nr_sects_read(part) && |
91 | !(piter->flags & DISK_PITER_INCL_EMPTY) && |
92 | !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 && |
93 | - piter->idx == 0)) |
94 | + piter->idx == 0)) { |
95 | + put_device(part_to_dev(part)); |
96 | + piter->part = NULL; |
97 | continue; |
98 | + } |
99 | |
100 | - get_device(part_to_dev(part)); |
101 | - piter->part = part; |
102 | piter->idx += inc; |
103 | break; |
104 | } |
105 | diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig |
106 | index 39dd30b6ef86e..894102fd5a069 100644 |
107 | --- a/drivers/block/Kconfig |
108 | +++ b/drivers/block/Kconfig |
109 | @@ -530,6 +530,7 @@ config BLK_DEV_RBD |
110 | config BLK_DEV_RSXX |
111 | tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver" |
112 | depends on PCI |
113 | + select CRC32 |
114 | help |
115 | Device driver for IBM's high speed PCIe SSD |
116 | storage device: Flash Adapter 900GB Full Height. |
117 | diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c |
118 | index 0b5bf135b0907..59f16807921ad 100644 |
119 | --- a/drivers/cpufreq/powernow-k8.c |
120 | +++ b/drivers/cpufreq/powernow-k8.c |
121 | @@ -887,9 +887,9 @@ static int get_transition_latency(struct powernow_k8_data *data) |
122 | |
123 | /* Take a frequency, and issue the fid/vid transition command */ |
124 | static int transition_frequency_fidvid(struct powernow_k8_data *data, |
125 | - unsigned int index) |
126 | + unsigned int index, |
127 | + struct cpufreq_policy *policy) |
128 | { |
129 | - struct cpufreq_policy *policy; |
130 | u32 fid = 0; |
131 | u32 vid = 0; |
132 | int res; |
133 | @@ -921,9 +921,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, |
134 | freqs.old = find_khz_freq_from_fid(data->currfid); |
135 | freqs.new = find_khz_freq_from_fid(fid); |
136 | |
137 | - policy = cpufreq_cpu_get(smp_processor_id()); |
138 | - cpufreq_cpu_put(policy); |
139 | - |
140 | cpufreq_freq_transition_begin(policy, &freqs); |
141 | res = transition_fid_vid(data, fid, vid); |
142 | cpufreq_freq_transition_end(policy, &freqs, res); |
143 | @@ -978,7 +975,7 @@ static long powernowk8_target_fn(void *arg) |
144 | |
145 | powernow_k8_acpi_pst_values(data, newstate); |
146 | |
147 | - ret = transition_frequency_fidvid(data, newstate); |
148 | + ret = transition_frequency_fidvid(data, newstate, pol); |
149 | |
150 | if (ret) { |
151 | pr_err("transition frequency failed\n"); |
152 | diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c |
153 | index ef99ef0bb1ca2..f00652585ee31 100644 |
154 | --- a/drivers/dma/xilinx/xilinx_dma.c |
155 | +++ b/drivers/dma/xilinx/xilinx_dma.c |
156 | @@ -2357,7 +2357,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, |
157 | has_dre = false; |
158 | |
159 | if (!has_dre) |
160 | - xdev->common.copy_align = fls(width - 1); |
161 | + xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1); |
162 | |
163 | if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || |
164 | of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || |
165 | @@ -2630,7 +2630,11 @@ static int xilinx_dma_probe(struct platform_device *pdev) |
166 | } |
167 | |
168 | /* Register the DMA engine with the core */ |
169 | - dma_async_device_register(&xdev->common); |
170 | + err = dma_async_device_register(&xdev->common); |
171 | + if (err) { |
172 | + dev_err(xdev->dev, "failed to register the dma device\n"); |
173 | + goto error; |
174 | + } |
175 | |
176 | err = of_dma_controller_register(node, of_dma_xilinx_xlate, |
177 | xdev); |
178 | diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c |
179 | index 4548d89abcdc3..ff8168c60b35a 100644 |
180 | --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c |
181 | +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c |
182 | @@ -882,7 +882,7 @@ eb_vma_misplaced(struct i915_vma *vma) |
183 | return !only_mappable_for_reloc(entry->flags); |
184 | |
185 | if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 && |
186 | - (vma->node.start + vma->node.size - 1) >> 32) |
187 | + (vma->node.start + vma->node.size + 4095) >> 32) |
188 | return true; |
189 | |
190 | return false; |
191 | diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c |
192 | index ce125ec23d2a5..88ba1a65c2830 100644 |
193 | --- a/drivers/iommu/intel_irq_remapping.c |
194 | +++ b/drivers/iommu/intel_irq_remapping.c |
195 | @@ -1350,6 +1350,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain, |
196 | irq_data = irq_domain_get_irq_data(domain, virq + i); |
197 | irq_cfg = irqd_cfg(irq_data); |
198 | if (!irq_data || !irq_cfg) { |
199 | + if (!i) |
200 | + kfree(data); |
201 | ret = -EINVAL; |
202 | goto out_free_data; |
203 | } |
204 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c |
205 | index 8cd7227fbdfce..3dd0bc8804c1a 100644 |
206 | --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c |
207 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c |
208 | @@ -930,6 +930,7 @@ err_destroy_groups: |
209 | ft->g[ft->num_groups] = NULL; |
210 | mlx5e_destroy_groups(ft); |
211 | kvfree(in); |
212 | + kfree(ft->g); |
213 | |
214 | return err; |
215 | } |
216 | diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig |
217 | index 4e9fe75d70675..069f933b0add2 100644 |
218 | --- a/drivers/net/wan/Kconfig |
219 | +++ b/drivers/net/wan/Kconfig |
220 | @@ -295,6 +295,7 @@ config SLIC_DS26522 |
221 | tristate "Slic Maxim ds26522 card support" |
222 | depends on SPI |
223 | depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST |
224 | + select BITREVERSE |
225 | help |
226 | This module initializes and configures the slic maxim card |
227 | in T1 or E1 mode. |
228 | diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig |
229 | index 6dfedc8bd6a3d..7df13a684d2df 100644 |
230 | --- a/drivers/net/wireless/ath/wil6210/Kconfig |
231 | +++ b/drivers/net/wireless/ath/wil6210/Kconfig |
232 | @@ -1,6 +1,7 @@ |
233 | config WIL6210 |
234 | tristate "Wilocity 60g WiFi card wil6210 support" |
235 | select WANT_DEV_COREDUMP |
236 | + select CRC32 |
237 | depends on CFG80211 |
238 | depends on PCI |
239 | default n |
240 | diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c |
241 | index da3834fe5e570..9bb6a574ab2fe 100644 |
242 | --- a/drivers/spi/spi-pxa2xx.c |
243 | +++ b/drivers/spi/spi-pxa2xx.c |
244 | @@ -1606,7 +1606,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) |
245 | return -ENODEV; |
246 | } |
247 | |
248 | - master = spi_alloc_master(dev, sizeof(struct driver_data)); |
249 | + master = devm_spi_alloc_master(dev, sizeof(*drv_data)); |
250 | if (!master) { |
251 | dev_err(&pdev->dev, "cannot alloc spi_master\n"); |
252 | pxa_ssp_free(ssp); |
253 | @@ -1788,7 +1788,6 @@ out_error_clock_enabled: |
254 | free_irq(ssp->irq, drv_data); |
255 | |
256 | out_error_master_alloc: |
257 | - spi_master_put(master); |
258 | pxa_ssp_free(ssp); |
259 | return status; |
260 | } |
261 | diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c |
262 | index e738b4621cbba..ecd707f74ddcb 100644 |
263 | --- a/drivers/target/target_core_transport.c |
264 | +++ b/drivers/target/target_core_transport.c |
265 | @@ -1736,6 +1736,10 @@ void transport_generic_request_failure(struct se_cmd *cmd, |
266 | case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: |
267 | case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: |
268 | case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: |
269 | + case TCM_TOO_MANY_TARGET_DESCS: |
270 | + case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE: |
271 | + case TCM_TOO_MANY_SEGMENT_DESCS: |
272 | + case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE: |
273 | break; |
274 | case TCM_OUT_OF_RESOURCES: |
275 | sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
276 | @@ -2886,6 +2890,26 @@ static const struct sense_info sense_info_table[] = { |
277 | .key = ILLEGAL_REQUEST, |
278 | .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ |
279 | }, |
280 | + [TCM_TOO_MANY_TARGET_DESCS] = { |
281 | + .key = ILLEGAL_REQUEST, |
282 | + .asc = 0x26, |
283 | + .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */ |
284 | + }, |
285 | + [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = { |
286 | + .key = ILLEGAL_REQUEST, |
287 | + .asc = 0x26, |
288 | + .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */ |
289 | + }, |
290 | + [TCM_TOO_MANY_SEGMENT_DESCS] = { |
291 | + .key = ILLEGAL_REQUEST, |
292 | + .asc = 0x26, |
293 | + .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */ |
294 | + }, |
295 | + [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = { |
296 | + .key = ILLEGAL_REQUEST, |
297 | + .asc = 0x26, |
298 | + .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */ |
299 | + }, |
300 | [TCM_PARAMETER_LIST_LENGTH_ERROR] = { |
301 | .key = ILLEGAL_REQUEST, |
302 | .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ |
303 | diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c |
304 | index 18848ba8d2ba0..84e3bf1132fd5 100644 |
305 | --- a/drivers/target/target_core_xcopy.c |
306 | +++ b/drivers/target/target_core_xcopy.c |
307 | @@ -52,64 +52,87 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) |
308 | return 0; |
309 | } |
310 | |
311 | -static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, |
312 | - bool src) |
313 | +/** |
314 | + * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers |
315 | + * |
316 | + * @se_dev: device being considered for match |
317 | + * @dev_wwn: XCOPY requested NAA dev_wwn |
318 | + * @return: 1 on match, 0 on no-match |
319 | + */ |
320 | +static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev, |
321 | + const unsigned char *dev_wwn) |
322 | { |
323 | - struct se_device *se_dev; |
324 | - unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn; |
325 | + unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; |
326 | int rc; |
327 | |
328 | - if (src) |
329 | - dev_wwn = &xop->dst_tid_wwn[0]; |
330 | - else |
331 | - dev_wwn = &xop->src_tid_wwn[0]; |
332 | - |
333 | - mutex_lock(&g_device_mutex); |
334 | - list_for_each_entry(se_dev, &g_device_list, g_dev_node) { |
335 | - |
336 | - if (!se_dev->dev_attrib.emulate_3pc) |
337 | - continue; |
338 | + if (!se_dev->dev_attrib.emulate_3pc) { |
339 | + pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev); |
340 | + return 0; |
341 | + } |
342 | |
343 | - memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); |
344 | - target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); |
345 | + memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); |
346 | + target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); |
347 | |
348 | - rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); |
349 | - if (rc != 0) |
350 | - continue; |
351 | + rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); |
352 | + if (rc != 0) { |
353 | + pr_debug("XCOPY: skip non-matching: %*ph\n", |
354 | + XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn); |
355 | + return 0; |
356 | + } |
357 | + pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev); |
358 | |
359 | - if (src) { |
360 | - xop->dst_dev = se_dev; |
361 | - pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located" |
362 | - " se_dev\n", xop->dst_dev); |
363 | - } else { |
364 | - xop->src_dev = se_dev; |
365 | - pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located" |
366 | - " se_dev\n", xop->src_dev); |
367 | - } |
368 | + return 1; |
369 | +} |
370 | |
371 | - rc = target_depend_item(&se_dev->dev_group.cg_item); |
372 | - if (rc != 0) { |
373 | - pr_err("configfs_depend_item attempt failed:" |
374 | - " %d for se_dev: %p\n", rc, se_dev); |
375 | - mutex_unlock(&g_device_mutex); |
376 | - return rc; |
377 | +static int target_xcopy_locate_se_dev_e4(struct se_session *sess, |
378 | + const unsigned char *dev_wwn, |
379 | + struct se_device **_found_dev, |
380 | + struct percpu_ref **_found_lun_ref) |
381 | +{ |
382 | + struct se_dev_entry *deve; |
383 | + struct se_node_acl *nacl; |
384 | + struct se_lun *this_lun = NULL; |
385 | + struct se_device *found_dev = NULL; |
386 | + |
387 | + /* cmd with NULL sess indicates no associated $FABRIC_MOD */ |
388 | + if (!sess) |
389 | + goto err_out; |
390 | + |
391 | + pr_debug("XCOPY 0xe4: searching for: %*ph\n", |
392 | + XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn); |
393 | + |
394 | + nacl = sess->se_node_acl; |
395 | + rcu_read_lock(); |
396 | + hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) { |
397 | + struct se_device *this_dev; |
398 | + int rc; |
399 | + |
400 | + this_lun = rcu_dereference(deve->se_lun); |
401 | + this_dev = rcu_dereference_raw(this_lun->lun_se_dev); |
402 | + |
403 | + rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn); |
404 | + if (rc) { |
405 | + if (percpu_ref_tryget_live(&this_lun->lun_ref)) |
406 | + found_dev = this_dev; |
407 | + break; |
408 | } |
409 | - |
410 | - pr_debug("Called configfs_depend_item for se_dev: %p" |
411 | - " se_dev->se_dev_group: %p\n", se_dev, |
412 | - &se_dev->dev_group); |
413 | - |
414 | - mutex_unlock(&g_device_mutex); |
415 | - return 0; |
416 | } |
417 | - mutex_unlock(&g_device_mutex); |
418 | - |
419 | + rcu_read_unlock(); |
420 | + if (found_dev == NULL) |
421 | + goto err_out; |
422 | + |
423 | + pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n", |
424 | + found_dev, &found_dev->dev_group); |
425 | + *_found_dev = found_dev; |
426 | + *_found_lun_ref = &this_lun->lun_ref; |
427 | + return 0; |
428 | +err_out: |
429 | pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); |
430 | return -EINVAL; |
431 | } |
432 | |
433 | static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, |
434 | - unsigned char *p, bool src) |
435 | + unsigned char *p, unsigned short cscd_index) |
436 | { |
437 | unsigned char *desc = p; |
438 | unsigned short ript; |
439 | @@ -154,7 +177,13 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op |
440 | return -EINVAL; |
441 | } |
442 | |
443 | - if (src) { |
444 | + if (cscd_index != xop->stdi && cscd_index != xop->dtdi) { |
445 | + pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor " |
446 | + "dest\n", cscd_index); |
447 | + return 0; |
448 | + } |
449 | + |
450 | + if (cscd_index == xop->stdi) { |
451 | memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); |
452 | /* |
453 | * Determine if the source designator matches the local device |
454 | @@ -166,10 +195,15 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op |
455 | pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source" |
456 | " received xop\n", xop->src_dev); |
457 | } |
458 | - } else { |
459 | + } |
460 | + |
461 | + if (cscd_index == xop->dtdi) { |
462 | memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); |
463 | /* |
464 | - * Determine if the destination designator matches the local device |
465 | + * Determine if the destination designator matches the local |
466 | + * device. If @cscd_index corresponds to both source (stdi) and |
467 | + * destination (dtdi), or dtdi comes after stdi, then |
468 | + * XCOL_DEST_RECV_OP wins. |
469 | */ |
470 | if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0], |
471 | XCOPY_NAA_IEEE_REGEX_LEN)) { |
472 | @@ -189,9 +223,9 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, |
473 | { |
474 | struct se_device *local_dev = se_cmd->se_dev; |
475 | unsigned char *desc = p; |
476 | - int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0; |
477 | + int offset = tdll % XCOPY_TARGET_DESC_LEN, rc; |
478 | + unsigned short cscd_index = 0; |
479 | unsigned short start = 0; |
480 | - bool src = true; |
481 | |
482 | *sense_ret = TCM_INVALID_PARAMETER_LIST; |
483 | |
484 | @@ -214,25 +248,19 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, |
485 | |
486 | while (start < tdll) { |
487 | /* |
488 | - * Check target descriptor identification with 0xE4 type with |
489 | - * use VPD 0x83 WWPN matching .. |
490 | + * Check target descriptor identification with 0xE4 type, and |
491 | + * compare the current index with the CSCD descriptor IDs in |
492 | + * the segment descriptor. Use VPD 0x83 WWPN matching .. |
493 | */ |
494 | switch (desc[0]) { |
495 | case 0xe4: |
496 | rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop, |
497 | - &desc[0], src); |
498 | + &desc[0], cscd_index); |
499 | if (rc != 0) |
500 | goto out; |
501 | - /* |
502 | - * Assume target descriptors are in source -> destination order.. |
503 | - */ |
504 | - if (src) |
505 | - src = false; |
506 | - else |
507 | - src = true; |
508 | start += XCOPY_TARGET_DESC_LEN; |
509 | desc += XCOPY_TARGET_DESC_LEN; |
510 | - ret++; |
511 | + cscd_index++; |
512 | break; |
513 | default: |
514 | pr_err("XCOPY unsupported descriptor type code:" |
515 | @@ -241,10 +269,25 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, |
516 | } |
517 | } |
518 | |
519 | - if (xop->op_origin == XCOL_SOURCE_RECV_OP) |
520 | - rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true); |
521 | - else |
522 | - rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false); |
523 | + switch (xop->op_origin) { |
524 | + case XCOL_SOURCE_RECV_OP: |
525 | + rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess, |
526 | + xop->dst_tid_wwn, |
527 | + &xop->dst_dev, |
528 | + &xop->remote_lun_ref); |
529 | + break; |
530 | + case XCOL_DEST_RECV_OP: |
531 | + rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess, |
532 | + xop->src_tid_wwn, |
533 | + &xop->src_dev, |
534 | + &xop->remote_lun_ref); |
535 | + break; |
536 | + default: |
537 | + pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - " |
538 | + "stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi); |
539 | + rc = -EINVAL; |
540 | + break; |
541 | + } |
542 | /* |
543 | * If a matching IEEE NAA 0x83 descriptor for the requested device |
544 | * is not located on this node, return COPY_ABORTED with ASQ/ASQC |
545 | @@ -261,7 +304,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, |
546 | pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n", |
547 | xop->dst_dev, &xop->dst_tid_wwn[0]); |
548 | |
549 | - return ret; |
550 | + return cscd_index; |
551 | |
552 | out: |
553 | return -EINVAL; |
554 | @@ -305,17 +348,26 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op |
555 | |
556 | static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd, |
557 | struct xcopy_op *xop, unsigned char *p, |
558 | - unsigned int sdll) |
559 | + unsigned int sdll, sense_reason_t *sense_ret) |
560 | { |
561 | unsigned char *desc = p; |
562 | unsigned int start = 0; |
563 | int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0; |
564 | |
565 | + *sense_ret = TCM_INVALID_PARAMETER_LIST; |
566 | + |
567 | if (offset != 0) { |
568 | pr_err("XCOPY segment descriptor list length is not" |
569 | " multiple of %d\n", XCOPY_SEGMENT_DESC_LEN); |
570 | return -EINVAL; |
571 | } |
572 | + if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) { |
573 | + pr_err("XCOPY supports %u segment descriptor(s), sdll: %u too" |
574 | + " large..\n", RCR_OP_MAX_SG_DESC_COUNT, sdll); |
575 | + /* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */ |
576 | + *sense_ret = TCM_TOO_MANY_SEGMENT_DESCS; |
577 | + return -EINVAL; |
578 | + } |
579 | |
580 | while (start < sdll) { |
581 | /* |
582 | @@ -372,18 +424,12 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd) |
583 | |
584 | static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) |
585 | { |
586 | - struct se_device *remote_dev; |
587 | - |
588 | if (xop->op_origin == XCOL_SOURCE_RECV_OP) |
589 | - remote_dev = xop->dst_dev; |
590 | + pr_debug("putting dst lun_ref for %p\n", xop->dst_dev); |
591 | else |
592 | - remote_dev = xop->src_dev; |
593 | + pr_debug("putting src lun_ref for %p\n", xop->src_dev); |
594 | |
595 | - pr_debug("Calling configfs_undepend_item for" |
596 | - " remote_dev: %p remote_dev->dev_group: %p\n", |
597 | - remote_dev, &remote_dev->dev_group.cg_item); |
598 | - |
599 | - target_undepend_item(&remote_dev->dev_group.cg_item); |
600 | + percpu_ref_put(xop->remote_lun_ref); |
601 | } |
602 | |
603 | static void xcopy_pt_release_cmd(struct se_cmd *se_cmd) |
604 | @@ -893,6 +939,20 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) |
605 | " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, |
606 | tdll, sdll, inline_dl); |
607 | |
608 | + /* |
609 | + * skip over the target descriptors until segment descriptors |
610 | + * have been passed - CSCD ids are needed to determine src and dest. |
611 | + */ |
612 | + seg_desc = &p[16] + tdll; |
613 | + |
614 | + rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, |
615 | + sdll, &ret); |
616 | + if (rc <= 0) |
617 | + goto out; |
618 | + |
619 | + pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc, |
620 | + rc * XCOPY_SEGMENT_DESC_LEN); |
621 | + |
622 | rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret); |
623 | if (rc <= 0) |
624 | goto out; |
625 | @@ -910,18 +970,8 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) |
626 | |
627 | pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, |
628 | rc * XCOPY_TARGET_DESC_LEN); |
629 | - seg_desc = &p[16]; |
630 | - seg_desc += (rc * XCOPY_TARGET_DESC_LEN); |
631 | - |
632 | - rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll); |
633 | - if (rc <= 0) { |
634 | - xcopy_pt_undepend_remotedev(xop); |
635 | - goto out; |
636 | - } |
637 | transport_kunmap_data_sg(se_cmd); |
638 | |
639 | - pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc, |
640 | - rc * XCOPY_SEGMENT_DESC_LEN); |
641 | INIT_WORK(&xop->xop_work, target_xcopy_do_work); |
642 | queue_work(xcopy_wq, &xop->xop_work); |
643 | return TCM_NO_SENSE; |
644 | diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h |
645 | index 700a981c7b415..7db8d0c9223f8 100644 |
646 | --- a/drivers/target/target_core_xcopy.h |
647 | +++ b/drivers/target/target_core_xcopy.h |
648 | @@ -19,6 +19,7 @@ struct xcopy_op { |
649 | struct se_device *dst_dev; |
650 | unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; |
651 | unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; |
652 | + struct percpu_ref *remote_lun_ref; |
653 | |
654 | sector_t src_lba; |
655 | sector_t dst_lba; |
656 | diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c |
657 | index 9213a9e046ae0..99caaae01caba 100644 |
658 | --- a/fs/ubifs/io.c |
659 | +++ b/fs/ubifs/io.c |
660 | @@ -331,7 +331,7 @@ void ubifs_pad(const struct ubifs_info *c, void *buf, int pad) |
661 | { |
662 | uint32_t crc; |
663 | |
664 | - ubifs_assert(pad >= 0 && !(pad & 7)); |
665 | + ubifs_assert(pad >= 0); |
666 | |
667 | if (pad >= UBIFS_PAD_NODE_SZ) { |
668 | struct ubifs_ch *ch = buf; |
669 | @@ -721,6 +721,10 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) |
670 | * write-buffer. |
671 | */ |
672 | memcpy(wbuf->buf + wbuf->used, buf, len); |
673 | + if (aligned_len > len) { |
674 | + ubifs_assert(aligned_len - len < 8); |
675 | + ubifs_pad(c, wbuf->buf + wbuf->used + len, aligned_len - len); |
676 | + } |
677 | |
678 | if (aligned_len == wbuf->avail) { |
679 | dbg_io("flush jhead %s wbuf to LEB %d:%d", |
680 | @@ -813,13 +817,18 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) |
681 | } |
682 | |
683 | spin_lock(&wbuf->lock); |
684 | - if (aligned_len) |
685 | + if (aligned_len) { |
686 | /* |
687 | * And now we have what's left and what does not take whole |
688 | * max. write unit, so write it to the write-buffer and we are |
689 | * done. |
690 | */ |
691 | memcpy(wbuf->buf, buf + written, len); |
692 | + if (aligned_len > len) { |
693 | + ubifs_assert(aligned_len - len < 8); |
694 | + ubifs_pad(c, wbuf->buf + len, aligned_len - len); |
695 | + } |
696 | + } |
697 | |
698 | if (c->leb_size - wbuf->offs >= c->max_write_size) |
699 | wbuf->size = c->max_write_size; |
700 | diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h |
701 | index 4fdb1d9848444..36198563fb8bc 100644 |
702 | --- a/include/asm-generic/vmlinux.lds.h |
703 | +++ b/include/asm-generic/vmlinux.lds.h |
704 | @@ -460,7 +460,10 @@ |
705 | */ |
706 | #define TEXT_TEXT \ |
707 | ALIGN_FUNCTION(); \ |
708 | - *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ |
709 | + *(.text.hot .text.hot.*) \ |
710 | + *(TEXT_MAIN .text.fixup) \ |
711 | + *(.text.unlikely .text.unlikely.*) \ |
712 | + *(.text.unknown .text.unknown.*) \ |
713 | *(.ref.text) \ |
714 | MEM_KEEP(init.text) \ |
715 | MEM_KEEP(exit.text) \ |
716 | diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h |
717 | index 30f99ce4c6cea..8a70d38f13329 100644 |
718 | --- a/include/target/target_core_base.h |
719 | +++ b/include/target/target_core_base.h |
720 | @@ -178,6 +178,10 @@ enum tcm_sense_reason_table { |
721 | TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16), |
722 | TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17), |
723 | TCM_COPY_TARGET_DEVICE_NOT_REACHABLE = R(0x18), |
724 | + TCM_TOO_MANY_TARGET_DESCS = R(0x19), |
725 | + TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE = R(0x1a), |
726 | + TCM_TOO_MANY_SEGMENT_DESCS = R(0x1b), |
727 | + TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE = R(0x1c), |
728 | #undef R |
729 | }; |
730 | |
731 | diff --git a/net/core/skbuff.c b/net/core/skbuff.c |
732 | index a4c4234976862..026f4525063c1 100644 |
733 | --- a/net/core/skbuff.c |
734 | +++ b/net/core/skbuff.c |
735 | @@ -1592,6 +1592,12 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) |
736 | skb->csum = csum_block_sub(skb->csum, |
737 | skb_checksum(skb, len, delta, 0), |
738 | len); |
739 | + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { |
740 | + int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; |
741 | + int offset = skb_checksum_start_offset(skb) + skb->csum_offset; |
742 | + |
743 | + if (offset + sizeof(__sum16) > hdlen) |
744 | + return -EINVAL; |
745 | } |
746 | return __pskb_trim(skb, len); |
747 | } |
748 | diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c |
749 | index c37e9598262e5..3164bae4024a4 100644 |
750 | --- a/net/ipv4/ip_output.c |
751 | +++ b/net/ipv4/ip_output.c |
752 | @@ -300,7 +300,7 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk |
753 | if (skb_is_gso(skb)) |
754 | return ip_finish_output_gso(net, sk, skb, mtu); |
755 | |
756 | - if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU)) |
757 | + if (skb->len > mtu || IPCB(skb)->frag_max_size) |
758 | return ip_fragment(net, sk, skb, mtu, ip_finish_output2); |
759 | |
760 | return ip_finish_output2(net, sk, skb); |
761 | diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c |
762 | index 5f2e3334cccec..26e1dbc958189 100644 |
763 | --- a/net/ipv4/ip_tunnel.c |
764 | +++ b/net/ipv4/ip_tunnel.c |
765 | @@ -743,7 +743,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, |
766 | goto tx_error; |
767 | } |
768 | |
769 | - if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) { |
770 | + df = tnl_params->frag_off; |
771 | + if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df) |
772 | + df |= (inner_iph->frag_off & htons(IP_DF)); |
773 | + |
774 | + if (tnl_update_pmtu(dev, skb, rt, df, inner_iph)) { |
775 | ip_rt_put(rt); |
776 | goto tx_error; |
777 | } |
778 | @@ -771,10 +775,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, |
779 | ttl = ip4_dst_hoplimit(&rt->dst); |
780 | } |
781 | |
782 | - df = tnl_params->frag_off; |
783 | - if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df) |
784 | - df |= (inner_iph->frag_off&htons(IP_DF)); |
785 | - |
786 | max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) |
787 | + rt->dst.header_len + ip_encap_hlen(&tunnel->encap); |
788 | if (max_headroom > dev->needed_headroom) |