Contents of /trunk/kernel-alx/patches-4.14/0151-4.14.52-all-fixes.patch
Parent Directory | Revision Log
Revision 3238 -
(show annotations)
(download)
Fri Nov 9 12:14:58 2018 UTC (5 years, 10 months ago) by niro
File size: 64344 byte(s)
-added up to patches-4.14.79
1 | diff --git a/Makefile b/Makefile |
2 | index a33376204c17..e2e4009bbfed 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 4 |
8 | PATCHLEVEL = 14 |
9 | -SUBLEVEL = 51 |
10 | +SUBLEVEL = 52 |
11 | EXTRAVERSION = |
12 | NAME = Petit Gorille |
13 | |
14 | diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c |
15 | index 18dd8f22e353..665d0f6cd62f 100644 |
16 | --- a/arch/x86/kernel/cpu/intel_rdt.c |
17 | +++ b/arch/x86/kernel/cpu/intel_rdt.c |
18 | @@ -773,6 +773,8 @@ static __init void rdt_quirks(void) |
19 | case INTEL_FAM6_SKYLAKE_X: |
20 | if (boot_cpu_data.x86_stepping <= 4) |
21 | set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat"); |
22 | + else |
23 | + set_rdt_options("!l3cat"); |
24 | } |
25 | } |
26 | |
27 | diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c |
28 | index 231ad23b24a9..8fec687b3e44 100644 |
29 | --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c |
30 | +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c |
31 | @@ -48,7 +48,7 @@ static struct dentry *dfs_inj; |
32 | |
33 | static u8 n_banks; |
34 | |
35 | -#define MAX_FLAG_OPT_SIZE 3 |
36 | +#define MAX_FLAG_OPT_SIZE 4 |
37 | #define NBCFG 0x44 |
38 | |
39 | enum injection_type { |
40 | diff --git a/block/blk-mq.c b/block/blk-mq.c |
41 | index 74c35513ada5..49979c095f31 100644 |
42 | --- a/block/blk-mq.c |
43 | +++ b/block/blk-mq.c |
44 | @@ -2252,7 +2252,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q) |
45 | |
46 | mutex_lock(&set->tag_list_lock); |
47 | list_del_rcu(&q->tag_set_list); |
48 | - INIT_LIST_HEAD(&q->tag_set_list); |
49 | if (list_is_singular(&set->tag_list)) { |
50 | /* just transitioned to unshared */ |
51 | set->flags &= ~BLK_MQ_F_TAG_SHARED; |
52 | @@ -2260,8 +2259,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q) |
53 | blk_mq_update_tag_set_depth(set, false); |
54 | } |
55 | mutex_unlock(&set->tag_list_lock); |
56 | - |
57 | synchronize_rcu(); |
58 | + INIT_LIST_HEAD(&q->tag_set_list); |
59 | } |
60 | |
61 | static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, |
62 | diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c |
63 | index 71008dbabe98..cad2530a5b52 100644 |
64 | --- a/drivers/ata/libata-core.c |
65 | +++ b/drivers/ata/libata-core.c |
66 | @@ -4543,9 +4543,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { |
67 | ATA_HORKAGE_ZERO_AFTER_TRIM | |
68 | ATA_HORKAGE_NOLPM, }, |
69 | |
70 | - /* Sandisk devices which are known to not handle LPM well */ |
71 | - { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, }, |
72 | - |
73 | /* devices that don't properly handle queued TRIM commands */ |
74 | { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | |
75 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
76 | diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c |
77 | index de4ddd0e8550..b3ed8f9953a8 100644 |
78 | --- a/drivers/ata/libata-zpodd.c |
79 | +++ b/drivers/ata/libata-zpodd.c |
80 | @@ -35,7 +35,7 @@ struct zpodd { |
81 | static int eject_tray(struct ata_device *dev) |
82 | { |
83 | struct ata_taskfile tf; |
84 | - static const char cdb[] = { GPCMD_START_STOP_UNIT, |
85 | + static const char cdb[ATAPI_CDB_LEN] = { GPCMD_START_STOP_UNIT, |
86 | 0, 0, 0, |
87 | 0x02, /* LoEj */ |
88 | 0, 0, 0, 0, 0, 0, 0, |
89 | diff --git a/drivers/base/core.c b/drivers/base/core.c |
90 | index c8501cdb95f4..a359934ffd85 100644 |
91 | --- a/drivers/base/core.c |
92 | +++ b/drivers/base/core.c |
93 | @@ -1461,7 +1461,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) |
94 | |
95 | dir = kzalloc(sizeof(*dir), GFP_KERNEL); |
96 | if (!dir) |
97 | - return NULL; |
98 | + return ERR_PTR(-ENOMEM); |
99 | |
100 | dir->class = class; |
101 | kobject_init(&dir->kobj, &class_dir_ktype); |
102 | @@ -1471,7 +1471,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) |
103 | retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name); |
104 | if (retval < 0) { |
105 | kobject_put(&dir->kobj); |
106 | - return NULL; |
107 | + return ERR_PTR(retval); |
108 | } |
109 | return &dir->kobj; |
110 | } |
111 | @@ -1778,6 +1778,10 @@ int device_add(struct device *dev) |
112 | |
113 | parent = get_device(dev->parent); |
114 | kobj = get_device_parent(dev, parent); |
115 | + if (IS_ERR(kobj)) { |
116 | + error = PTR_ERR(kobj); |
117 | + goto parent_error; |
118 | + } |
119 | if (kobj) |
120 | dev->kobj.parent = kobj; |
121 | |
122 | @@ -1876,6 +1880,7 @@ int device_add(struct device *dev) |
123 | kobject_del(&dev->kobj); |
124 | Error: |
125 | cleanup_glue_dir(dev, glue_dir); |
126 | +parent_error: |
127 | put_device(parent); |
128 | name_error: |
129 | kfree(dev->p); |
130 | @@ -2695,6 +2700,11 @@ int device_move(struct device *dev, struct device *new_parent, |
131 | device_pm_lock(); |
132 | new_parent = get_device(new_parent); |
133 | new_parent_kobj = get_device_parent(dev, new_parent); |
134 | + if (IS_ERR(new_parent_kobj)) { |
135 | + error = PTR_ERR(new_parent_kobj); |
136 | + put_device(new_parent); |
137 | + goto out; |
138 | + } |
139 | |
140 | pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev), |
141 | __func__, new_parent ? dev_name(new_parent) : "<NULL>"); |
142 | diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c |
143 | index 86258b00a1d4..6fb64e73bc96 100644 |
144 | --- a/drivers/block/nbd.c |
145 | +++ b/drivers/block/nbd.c |
146 | @@ -173,9 +173,12 @@ static const struct device_attribute pid_attr = { |
147 | static void nbd_dev_remove(struct nbd_device *nbd) |
148 | { |
149 | struct gendisk *disk = nbd->disk; |
150 | + struct request_queue *q; |
151 | + |
152 | if (disk) { |
153 | + q = disk->queue; |
154 | del_gendisk(disk); |
155 | - blk_cleanup_queue(disk->queue); |
156 | + blk_cleanup_queue(q); |
157 | blk_mq_free_tag_set(&nbd->tag_set); |
158 | disk->private_data = NULL; |
159 | put_disk(disk); |
160 | @@ -231,9 +234,18 @@ static void nbd_size_clear(struct nbd_device *nbd) |
161 | static void nbd_size_update(struct nbd_device *nbd) |
162 | { |
163 | struct nbd_config *config = nbd->config; |
164 | + struct block_device *bdev = bdget_disk(nbd->disk, 0); |
165 | + |
166 | blk_queue_logical_block_size(nbd->disk->queue, config->blksize); |
167 | blk_queue_physical_block_size(nbd->disk->queue, config->blksize); |
168 | set_capacity(nbd->disk, config->bytesize >> 9); |
169 | + if (bdev) { |
170 | + if (bdev->bd_disk) |
171 | + bd_set_size(bdev, config->bytesize); |
172 | + else |
173 | + bdev->bd_invalidated = 1; |
174 | + bdput(bdev); |
175 | + } |
176 | kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); |
177 | } |
178 | |
179 | @@ -243,6 +255,8 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize, |
180 | struct nbd_config *config = nbd->config; |
181 | config->blksize = blocksize; |
182 | config->bytesize = blocksize * nr_blocks; |
183 | + if (nbd->task_recv != NULL) |
184 | + nbd_size_update(nbd); |
185 | } |
186 | |
187 | static void nbd_complete_rq(struct request *req) |
188 | @@ -1109,7 +1123,6 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b |
189 | if (ret) |
190 | return ret; |
191 | |
192 | - bd_set_size(bdev, config->bytesize); |
193 | if (max_part) |
194 | bdev->bd_invalidated = 1; |
195 | mutex_unlock(&nbd->config_lock); |
196 | diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c |
197 | index 789fc3a8289f..93754300cb57 100644 |
198 | --- a/drivers/cpufreq/cpufreq.c |
199 | +++ b/drivers/cpufreq/cpufreq.c |
200 | @@ -693,6 +693,8 @@ static ssize_t store_##file_name \ |
201 | struct cpufreq_policy new_policy; \ |
202 | \ |
203 | memcpy(&new_policy, policy, sizeof(*policy)); \ |
204 | + new_policy.min = policy->user_policy.min; \ |
205 | + new_policy.max = policy->user_policy.max; \ |
206 | \ |
207 | ret = sscanf(buf, "%u", &new_policy.object); \ |
208 | if (ret != 1) \ |
209 | diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c |
210 | index ca38229b045a..43e14bb512c8 100644 |
211 | --- a/drivers/cpufreq/cpufreq_governor.c |
212 | +++ b/drivers/cpufreq/cpufreq_governor.c |
213 | @@ -165,7 +165,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy) |
214 | * calls, so the previous load value can be used then. |
215 | */ |
216 | load = j_cdbs->prev_load; |
217 | - } else if (unlikely(time_elapsed > 2 * sampling_rate && |
218 | + } else if (unlikely((int)idle_time > 2 * sampling_rate && |
219 | j_cdbs->prev_load)) { |
220 | /* |
221 | * If the CPU had gone completely idle and a task has |
222 | @@ -185,10 +185,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy) |
223 | * clear prev_load to guarantee that the load will be |
224 | * computed again next time. |
225 | * |
226 | - * Detecting this situation is easy: the governor's |
227 | - * utilization update handler would not have run during |
228 | - * CPU-idle periods. Hence, an unusually large |
229 | - * 'time_elapsed' (as compared to the sampling rate) |
230 | + * Detecting this situation is easy: an unusually large |
231 | + * 'idle_time' (as compared to the sampling rate) |
232 | * indicates this scenario. |
233 | */ |
234 | load = j_cdbs->prev_load; |
235 | @@ -217,8 +215,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy) |
236 | j_cdbs->prev_load = load; |
237 | } |
238 | |
239 | - if (time_elapsed > 2 * sampling_rate) { |
240 | - unsigned int periods = time_elapsed / sampling_rate; |
241 | + if (unlikely((int)idle_time > 2 * sampling_rate)) { |
242 | + unsigned int periods = idle_time / sampling_rate; |
243 | |
244 | if (periods < idle_periods) |
245 | idle_periods = periods; |
246 | diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c |
247 | index 20d824f74f99..90d7be08fea0 100644 |
248 | --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c |
249 | +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c |
250 | @@ -204,8 +204,7 @@ static void ish_remove(struct pci_dev *pdev) |
251 | kfree(ishtp_dev); |
252 | } |
253 | |
254 | -#ifdef CONFIG_PM |
255 | -static struct device *ish_resume_device; |
256 | +static struct device __maybe_unused *ish_resume_device; |
257 | |
258 | /* 50ms to get resume response */ |
259 | #define WAIT_FOR_RESUME_ACK_MS 50 |
260 | @@ -219,7 +218,7 @@ static struct device *ish_resume_device; |
261 | * in that case a simple resume message is enough, others we need |
262 | * a reset sequence. |
263 | */ |
264 | -static void ish_resume_handler(struct work_struct *work) |
265 | +static void __maybe_unused ish_resume_handler(struct work_struct *work) |
266 | { |
267 | struct pci_dev *pdev = to_pci_dev(ish_resume_device); |
268 | struct ishtp_device *dev = pci_get_drvdata(pdev); |
269 | @@ -261,7 +260,7 @@ static void ish_resume_handler(struct work_struct *work) |
270 | * |
271 | * Return: 0 to the pm core |
272 | */ |
273 | -static int ish_suspend(struct device *device) |
274 | +static int __maybe_unused ish_suspend(struct device *device) |
275 | { |
276 | struct pci_dev *pdev = to_pci_dev(device); |
277 | struct ishtp_device *dev = pci_get_drvdata(pdev); |
278 | @@ -287,7 +286,7 @@ static int ish_suspend(struct device *device) |
279 | return 0; |
280 | } |
281 | |
282 | -static DECLARE_WORK(resume_work, ish_resume_handler); |
283 | +static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler); |
284 | /** |
285 | * ish_resume() - ISH resume callback |
286 | * @device: device pointer |
287 | @@ -296,7 +295,7 @@ static DECLARE_WORK(resume_work, ish_resume_handler); |
288 | * |
289 | * Return: 0 to the pm core |
290 | */ |
291 | -static int ish_resume(struct device *device) |
292 | +static int __maybe_unused ish_resume(struct device *device) |
293 | { |
294 | struct pci_dev *pdev = to_pci_dev(device); |
295 | struct ishtp_device *dev = pci_get_drvdata(pdev); |
296 | @@ -310,21 +309,14 @@ static int ish_resume(struct device *device) |
297 | return 0; |
298 | } |
299 | |
300 | -static const struct dev_pm_ops ish_pm_ops = { |
301 | - .suspend = ish_suspend, |
302 | - .resume = ish_resume, |
303 | -}; |
304 | -#define ISHTP_ISH_PM_OPS (&ish_pm_ops) |
305 | -#else |
306 | -#define ISHTP_ISH_PM_OPS NULL |
307 | -#endif /* CONFIG_PM */ |
308 | +static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume); |
309 | |
310 | static struct pci_driver ish_driver = { |
311 | .name = KBUILD_MODNAME, |
312 | .id_table = ish_pci_tbl, |
313 | .probe = ish_probe, |
314 | .remove = ish_remove, |
315 | - .driver.pm = ISHTP_ISH_PM_OPS, |
316 | + .driver.pm = &ish_pm_ops, |
317 | }; |
318 | |
319 | module_pci_driver(ish_driver); |
320 | diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c |
321 | index 69afd7968d9c..18d5b99d13f1 100644 |
322 | --- a/drivers/hid/wacom_sys.c |
323 | +++ b/drivers/hid/wacom_sys.c |
324 | @@ -284,6 +284,14 @@ static void wacom_usage_mapping(struct hid_device *hdev, |
325 | } |
326 | } |
327 | |
328 | + /* 2nd-generation Intuos Pro Large has incorrect Y maximum */ |
329 | + if (hdev->vendor == USB_VENDOR_ID_WACOM && |
330 | + hdev->product == 0x0358 && |
331 | + WACOM_PEN_FIELD(field) && |
332 | + wacom_equivalent_usage(usage->hid) == HID_GD_Y) { |
333 | + field->logical_maximum = 43200; |
334 | + } |
335 | + |
336 | switch (usage->hid) { |
337 | case HID_GD_X: |
338 | features->x_max = field->logical_maximum; |
339 | diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c |
340 | index 5931aa2fe997..61084ba69a99 100644 |
341 | --- a/drivers/net/bonding/bond_options.c |
342 | +++ b/drivers/net/bonding/bond_options.c |
343 | @@ -1142,6 +1142,7 @@ static int bond_option_primary_set(struct bonding *bond, |
344 | slave->dev->name); |
345 | rcu_assign_pointer(bond->primary_slave, slave); |
346 | strcpy(bond->params.primary, slave->dev->name); |
347 | + bond->force_primary = true; |
348 | bond_select_active_slave(bond); |
349 | goto out; |
350 | } |
351 | diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c |
352 | index 3a7241c8713c..6890478a0851 100644 |
353 | --- a/drivers/net/hyperv/netvsc_drv.c |
354 | +++ b/drivers/net/hyperv/netvsc_drv.c |
355 | @@ -123,8 +123,10 @@ static int netvsc_open(struct net_device *net) |
356 | } |
357 | |
358 | rdev = nvdev->extension; |
359 | - if (!rdev->link_state) |
360 | + if (!rdev->link_state) { |
361 | netif_carrier_on(net); |
362 | + netif_tx_wake_all_queues(net); |
363 | + } |
364 | |
365 | if (vf_netdev) { |
366 | /* Setting synthetic device up transparently sets |
367 | diff --git a/drivers/net/tap.c b/drivers/net/tap.c |
368 | index bfd4ded0a53f..773a3fea8f0e 100644 |
369 | --- a/drivers/net/tap.c |
370 | +++ b/drivers/net/tap.c |
371 | @@ -777,13 +777,16 @@ static ssize_t tap_put_user(struct tap_queue *q, |
372 | int total; |
373 | |
374 | if (q->flags & IFF_VNET_HDR) { |
375 | + int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0; |
376 | struct virtio_net_hdr vnet_hdr; |
377 | + |
378 | vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); |
379 | if (iov_iter_count(iter) < vnet_hdr_len) |
380 | return -EINVAL; |
381 | |
382 | if (virtio_net_hdr_from_skb(skb, &vnet_hdr, |
383 | - tap_is_little_endian(q), true)) |
384 | + tap_is_little_endian(q), true, |
385 | + vlan_hlen)) |
386 | BUG(); |
387 | |
388 | if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) != |
389 | diff --git a/drivers/net/tun.c b/drivers/net/tun.c |
390 | index 3d9ad11e4f28..cb17ffadfc30 100644 |
391 | --- a/drivers/net/tun.c |
392 | +++ b/drivers/net/tun.c |
393 | @@ -1648,7 +1648,8 @@ static ssize_t tun_put_user(struct tun_struct *tun, |
394 | return -EINVAL; |
395 | |
396 | if (virtio_net_hdr_from_skb(skb, &gso, |
397 | - tun_is_little_endian(tun), true)) { |
398 | + tun_is_little_endian(tun), true, |
399 | + vlan_hlen)) { |
400 | struct skb_shared_info *sinfo = skb_shinfo(skb); |
401 | pr_err("unexpected GSO type: " |
402 | "0x%x, gso_size %d, hdr_len %d\n", |
403 | diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c |
404 | index 9e1b74590682..f5316ab68a0a 100644 |
405 | --- a/drivers/net/usb/cdc_ncm.c |
406 | +++ b/drivers/net/usb/cdc_ncm.c |
407 | @@ -1124,7 +1124,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) |
408 | * accordingly. Otherwise, we should check here. |
409 | */ |
410 | if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) |
411 | - delayed_ndp_size = ctx->max_ndp_size; |
412 | + delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus); |
413 | else |
414 | delayed_ndp_size = 0; |
415 | |
416 | @@ -1285,7 +1285,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) |
417 | /* If requested, put NDP at end of frame. */ |
418 | if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) { |
419 | nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data; |
420 | - cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size); |
421 | + cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size); |
422 | nth16->wNdpIndex = cpu_to_le16(skb_out->len); |
423 | skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size); |
424 | |
425 | diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c |
426 | index 9e93e7a5df7e..910c46b47769 100644 |
427 | --- a/drivers/net/virtio_net.c |
428 | +++ b/drivers/net/virtio_net.c |
429 | @@ -1237,7 +1237,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) |
430 | hdr = skb_vnet_hdr(skb); |
431 | |
432 | if (virtio_net_hdr_from_skb(skb, &hdr->hdr, |
433 | - virtio_is_little_endian(vi->vdev), false)) |
434 | + virtio_is_little_endian(vi->vdev), false, |
435 | + 0)) |
436 | BUG(); |
437 | |
438 | if (vi->mergeable_rx_bufs) |
439 | diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c |
440 | index 1610722b8099..747eef82cefd 100644 |
441 | --- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c |
442 | +++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c |
443 | @@ -8,6 +8,7 @@ |
444 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
445 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
446 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
447 | + * Copyright(c) 2018 Intel Corporation |
448 | * |
449 | * This program is free software; you can redistribute it and/or modify |
450 | * it under the terms of version 2 of the GNU General Public License as |
451 | @@ -30,6 +31,7 @@ |
452 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
453 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
454 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
455 | + * Copyright(c) 2018 Intel Corporation |
456 | * All rights reserved. |
457 | * |
458 | * Redistribution and use in source and binary forms, with or without |
459 | @@ -174,7 +176,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt, |
460 | static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, |
461 | const struct fw_img *image) |
462 | { |
463 | - int sec_idx, idx; |
464 | + int sec_idx, idx, ret; |
465 | u32 offset = 0; |
466 | |
467 | /* |
468 | @@ -201,17 +203,23 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, |
469 | */ |
470 | if (sec_idx >= image->num_sec - 1) { |
471 | IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n"); |
472 | - iwl_free_fw_paging(fwrt); |
473 | - return -EINVAL; |
474 | + ret = -EINVAL; |
475 | + goto err; |
476 | } |
477 | |
478 | /* copy the CSS block to the dram */ |
479 | IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n", |
480 | sec_idx); |
481 | |
482 | + if (image->sec[sec_idx].len > fwrt->fw_paging_db[0].fw_paging_size) { |
483 | + IWL_ERR(fwrt, "CSS block is larger than paging size\n"); |
484 | + ret = -EINVAL; |
485 | + goto err; |
486 | + } |
487 | + |
488 | memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block), |
489 | image->sec[sec_idx].data, |
490 | - fwrt->fw_paging_db[0].fw_paging_size); |
491 | + image->sec[sec_idx].len); |
492 | dma_sync_single_for_device(fwrt->trans->dev, |
493 | fwrt->fw_paging_db[0].fw_paging_phys, |
494 | fwrt->fw_paging_db[0].fw_paging_size, |
495 | @@ -232,6 +240,14 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, |
496 | for (idx = 1; idx < fwrt->num_of_paging_blk; idx++) { |
497 | struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; |
498 | |
499 | + if (block->fw_paging_size > image->sec[sec_idx].len - offset) { |
500 | + IWL_ERR(fwrt, |
501 | + "Paging: paging size is larger than remaining data in block %d\n", |
502 | + idx); |
503 | + ret = -EINVAL; |
504 | + goto err; |
505 | + } |
506 | + |
507 | memcpy(page_address(block->fw_paging_block), |
508 | image->sec[sec_idx].data + offset, |
509 | block->fw_paging_size); |
510 | @@ -242,19 +258,32 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, |
511 | |
512 | IWL_DEBUG_FW(fwrt, |
513 | "Paging: copied %d paging bytes to block %d\n", |
514 | - fwrt->fw_paging_db[idx].fw_paging_size, |
515 | - idx); |
516 | + block->fw_paging_size, idx); |
517 | + |
518 | + offset += block->fw_paging_size; |
519 | |
520 | - offset += fwrt->fw_paging_db[idx].fw_paging_size; |
521 | + if (offset > image->sec[sec_idx].len) { |
522 | + IWL_ERR(fwrt, |
523 | + "Paging: offset goes over section size\n"); |
524 | + ret = -EINVAL; |
525 | + goto err; |
526 | + } |
527 | } |
528 | |
529 | /* copy the last paging block */ |
530 | if (fwrt->num_of_pages_in_last_blk > 0) { |
531 | struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; |
532 | |
533 | + if (image->sec[sec_idx].len - offset > block->fw_paging_size) { |
534 | + IWL_ERR(fwrt, |
535 | + "Paging: last block is larger than paging size\n"); |
536 | + ret = -EINVAL; |
537 | + goto err; |
538 | + } |
539 | + |
540 | memcpy(page_address(block->fw_paging_block), |
541 | image->sec[sec_idx].data + offset, |
542 | - FW_PAGING_SIZE * fwrt->num_of_pages_in_last_blk); |
543 | + image->sec[sec_idx].len - offset); |
544 | dma_sync_single_for_device(fwrt->trans->dev, |
545 | block->fw_paging_phys, |
546 | block->fw_paging_size, |
547 | @@ -266,6 +295,10 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, |
548 | } |
549 | |
550 | return 0; |
551 | + |
552 | +err: |
553 | + iwl_free_fw_paging(fwrt); |
554 | + return ret; |
555 | } |
556 | |
557 | static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt, |
558 | diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c |
559 | index 50e48afd88ff..244e5256c526 100644 |
560 | --- a/drivers/vhost/vhost.c |
561 | +++ b/drivers/vhost/vhost.c |
562 | @@ -2382,6 +2382,9 @@ struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type) |
563 | struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL); |
564 | if (!node) |
565 | return NULL; |
566 | + |
567 | + /* Make sure all padding within the structure is initialized. */ |
568 | + memset(&node->msg, 0, sizeof node->msg); |
569 | node->vq = vq; |
570 | node->msg.type = type; |
571 | return node; |
572 | diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c |
573 | index 74f2e6e6202a..8851d441e5fd 100644 |
574 | --- a/drivers/w1/masters/mxc_w1.c |
575 | +++ b/drivers/w1/masters/mxc_w1.c |
576 | @@ -112,6 +112,10 @@ static int mxc_w1_probe(struct platform_device *pdev) |
577 | if (IS_ERR(mdev->clk)) |
578 | return PTR_ERR(mdev->clk); |
579 | |
580 | + err = clk_prepare_enable(mdev->clk); |
581 | + if (err) |
582 | + return err; |
583 | + |
584 | clkrate = clk_get_rate(mdev->clk); |
585 | if (clkrate < 10000000) |
586 | dev_warn(&pdev->dev, |
587 | @@ -125,12 +129,10 @@ static int mxc_w1_probe(struct platform_device *pdev) |
588 | |
589 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
590 | mdev->regs = devm_ioremap_resource(&pdev->dev, res); |
591 | - if (IS_ERR(mdev->regs)) |
592 | - return PTR_ERR(mdev->regs); |
593 | - |
594 | - err = clk_prepare_enable(mdev->clk); |
595 | - if (err) |
596 | - return err; |
597 | + if (IS_ERR(mdev->regs)) { |
598 | + err = PTR_ERR(mdev->regs); |
599 | + goto out_disable_clk; |
600 | + } |
601 | |
602 | /* Software reset 1-Wire module */ |
603 | writeb(MXC_W1_RESET_RST, mdev->regs + MXC_W1_RESET); |
604 | @@ -146,8 +148,12 @@ static int mxc_w1_probe(struct platform_device *pdev) |
605 | |
606 | err = w1_add_master_device(&mdev->bus_master); |
607 | if (err) |
608 | - clk_disable_unprepare(mdev->clk); |
609 | + goto out_disable_clk; |
610 | |
611 | + return 0; |
612 | + |
613 | +out_disable_clk: |
614 | + clk_disable_unprepare(mdev->clk); |
615 | return err; |
616 | } |
617 | |
618 | diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c |
619 | index a7c5a9861bef..8311e8ed76de 100644 |
620 | --- a/fs/binfmt_misc.c |
621 | +++ b/fs/binfmt_misc.c |
622 | @@ -387,8 +387,13 @@ static Node *create_entry(const char __user *buffer, size_t count) |
623 | s = strchr(p, del); |
624 | if (!s) |
625 | goto einval; |
626 | - *s++ = '\0'; |
627 | - e->offset = simple_strtoul(p, &p, 10); |
628 | + *s = '\0'; |
629 | + if (p != s) { |
630 | + int r = kstrtoint(p, 10, &e->offset); |
631 | + if (r != 0 || e->offset < 0) |
632 | + goto einval; |
633 | + } |
634 | + p = s; |
635 | if (*p++) |
636 | goto einval; |
637 | pr_debug("register: offset: %#x\n", e->offset); |
638 | @@ -428,7 +433,8 @@ static Node *create_entry(const char __user *buffer, size_t count) |
639 | if (e->mask && |
640 | string_unescape_inplace(e->mask, UNESCAPE_HEX) != e->size) |
641 | goto einval; |
642 | - if (e->size + e->offset > BINPRM_BUF_SIZE) |
643 | + if (e->size > BINPRM_BUF_SIZE || |
644 | + BINPRM_BUF_SIZE - e->size < e->offset) |
645 | goto einval; |
646 | pr_debug("register: magic/mask length: %i\n", e->size); |
647 | if (USE_DEBUG) { |
648 | diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c |
649 | index 8ecbac3b862e..3a07900971c3 100644 |
650 | --- a/fs/btrfs/inode.c |
651 | +++ b/fs/btrfs/inode.c |
652 | @@ -1027,8 +1027,10 @@ static noinline int cow_file_range(struct inode *inode, |
653 | ram_size, /* ram_bytes */ |
654 | BTRFS_COMPRESS_NONE, /* compress_type */ |
655 | BTRFS_ORDERED_REGULAR /* type */); |
656 | - if (IS_ERR(em)) |
657 | + if (IS_ERR(em)) { |
658 | + ret = PTR_ERR(em); |
659 | goto out_reserve; |
660 | + } |
661 | free_extent_map(em); |
662 | |
663 | ret = btrfs_add_ordered_extent(inode, start, ins.objectid, |
664 | diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c |
665 | index 2763f3184ac5..7303ba108112 100644 |
666 | --- a/fs/btrfs/ioctl.c |
667 | +++ b/fs/btrfs/ioctl.c |
668 | @@ -2682,8 +2682,10 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg) |
669 | } |
670 | |
671 | /* Check for compatibility reject unknown flags */ |
672 | - if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) |
673 | - return -EOPNOTSUPP; |
674 | + if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) { |
675 | + ret = -EOPNOTSUPP; |
676 | + goto out; |
677 | + } |
678 | |
679 | if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) { |
680 | ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; |
681 | @@ -3861,11 +3863,6 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src, |
682 | src->i_sb != inode->i_sb) |
683 | return -EXDEV; |
684 | |
685 | - /* don't make the dst file partly checksummed */ |
686 | - if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) != |
687 | - (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) |
688 | - return -EINVAL; |
689 | - |
690 | if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode)) |
691 | return -EISDIR; |
692 | |
693 | @@ -3875,6 +3872,13 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src, |
694 | inode_lock(src); |
695 | } |
696 | |
697 | + /* don't make the dst file partly checksummed */ |
698 | + if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) != |
699 | + (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { |
700 | + ret = -EINVAL; |
701 | + goto out_unlock; |
702 | + } |
703 | + |
704 | /* determine range to clone */ |
705 | ret = -EINVAL; |
706 | if (off + len > src->i_size || off + len < off) |
707 | diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c |
708 | index 24613b4e224c..936d58ca2b49 100644 |
709 | --- a/fs/btrfs/scrub.c |
710 | +++ b/fs/btrfs/scrub.c |
711 | @@ -2775,7 +2775,7 @@ static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len, |
712 | have_csum = scrub_find_csum(sctx, logical, csum); |
713 | if (have_csum == 0) |
714 | ++sctx->stat.no_csum; |
715 | - if (sctx->is_dev_replace && !have_csum) { |
716 | + if (0 && sctx->is_dev_replace && !have_csum) { |
717 | ret = copy_nocow_pages(sctx, logical, l, |
718 | mirror_num, |
719 | physical_for_dev_replace); |
720 | diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h |
721 | index 4f3884835267..dd95a6fa24bf 100644 |
722 | --- a/fs/cifs/cifsacl.h |
723 | +++ b/fs/cifs/cifsacl.h |
724 | @@ -98,4 +98,18 @@ struct cifs_ace { |
725 | struct cifs_sid sid; /* ie UUID of user or group who gets these perms */ |
726 | } __attribute__((packed)); |
727 | |
728 | +/* |
729 | + * Minimum security identifier can be one for system defined Users |
730 | + * and Groups such as NULL SID and World or Built-in accounts such |
731 | + * as Administrator and Guest and consists of |
732 | + * Revision + Num (Sub)Auths + Authority + Domain (one Subauthority) |
733 | + */ |
734 | +#define MIN_SID_LEN (1 + 1 + 6 + 4) /* in bytes */ |
735 | + |
736 | +/* |
737 | + * Minimum security descriptor can be one without any SACL and DACL and can |
738 | + * consist of revision, type, and two sids of minimum size for owner and group |
739 | + */ |
740 | +#define MIN_SEC_DESC_LEN (sizeof(struct cifs_ntsd) + (2 * MIN_SID_LEN)) |
741 | + |
742 | #endif /* _CIFSACL_H */ |
743 | diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c |
744 | index 839327f75e3d..36bc9a7eb8ea 100644 |
745 | --- a/fs/cifs/smb2ops.c |
746 | +++ b/fs/cifs/smb2ops.c |
747 | @@ -1256,10 +1256,11 @@ smb2_is_session_expired(char *buf) |
748 | { |
749 | struct smb2_sync_hdr *shdr = get_sync_hdr(buf); |
750 | |
751 | - if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED) |
752 | + if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED && |
753 | + shdr->Status != STATUS_USER_SESSION_DELETED) |
754 | return false; |
755 | |
756 | - cifs_dbg(FYI, "Session expired\n"); |
757 | + cifs_dbg(FYI, "Session expired or deleted\n"); |
758 | return true; |
759 | } |
760 | |
761 | @@ -1571,8 +1572,11 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb, |
762 | oparms.create_options = 0; |
763 | |
764 | utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); |
765 | - if (!utf16_path) |
766 | - return ERR_PTR(-ENOMEM); |
767 | + if (!utf16_path) { |
768 | + rc = -ENOMEM; |
769 | + free_xid(xid); |
770 | + return ERR_PTR(rc); |
771 | + } |
772 | |
773 | oparms.tcon = tcon; |
774 | oparms.desired_access = READ_CONTROL; |
775 | @@ -1630,8 +1634,11 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen, |
776 | access_flags = WRITE_DAC; |
777 | |
778 | utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); |
779 | - if (!utf16_path) |
780 | - return -ENOMEM; |
781 | + if (!utf16_path) { |
782 | + rc = -ENOMEM; |
783 | + free_xid(xid); |
784 | + return rc; |
785 | + } |
786 | |
787 | oparms.tcon = tcon; |
788 | oparms.desired_access = access_flags; |
789 | @@ -1691,15 +1698,21 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, |
790 | |
791 | /* if file not oplocked can't be sure whether asking to extend size */ |
792 | if (!CIFS_CACHE_READ(cifsi)) |
793 | - if (keep_size == false) |
794 | - return -EOPNOTSUPP; |
795 | + if (keep_size == false) { |
796 | + rc = -EOPNOTSUPP; |
797 | + free_xid(xid); |
798 | + return rc; |
799 | + } |
800 | |
801 | /* |
802 | * Must check if file sparse since fallocate -z (zero range) assumes |
803 | * non-sparse allocation |
804 | */ |
805 | - if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) |
806 | - return -EOPNOTSUPP; |
807 | + if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) { |
808 | + rc = -EOPNOTSUPP; |
809 | + free_xid(xid); |
810 | + return rc; |
811 | + } |
812 | |
813 | /* |
814 | * need to make sure we are not asked to extend the file since the SMB3 |
815 | @@ -1708,8 +1721,11 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, |
816 | * which for a non sparse file would zero the newly extended range |
817 | */ |
818 | if (keep_size == false) |
819 | - if (i_size_read(inode) < offset + len) |
820 | - return -EOPNOTSUPP; |
821 | + if (i_size_read(inode) < offset + len) { |
822 | + rc = -EOPNOTSUPP; |
823 | + free_xid(xid); |
824 | + return rc; |
825 | + } |
826 | |
827 | cifs_dbg(FYI, "offset %lld len %lld", offset, len); |
828 | |
829 | @@ -1743,8 +1759,11 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, |
830 | |
831 | /* Need to make file sparse, if not already, before freeing range. */ |
832 | /* Consider adding equivalent for compressed since it could also work */ |
833 | - if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) |
834 | - return -EOPNOTSUPP; |
835 | + if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) { |
836 | + rc = -EOPNOTSUPP; |
837 | + free_xid(xid); |
838 | + return rc; |
839 | + } |
840 | |
841 | cifs_dbg(FYI, "offset %lld len %lld", offset, len); |
842 | |
843 | @@ -1776,8 +1795,10 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon, |
844 | |
845 | /* if file not oplocked can't be sure whether asking to extend size */ |
846 | if (!CIFS_CACHE_READ(cifsi)) |
847 | - if (keep_size == false) |
848 | - return -EOPNOTSUPP; |
849 | + if (keep_size == false) { |
850 | + free_xid(xid); |
851 | + return rc; |
852 | + } |
853 | |
854 | /* |
855 | * Files are non-sparse by default so falloc may be a no-op |
856 | @@ -1786,14 +1807,16 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon, |
857 | */ |
858 | if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) { |
859 | if (keep_size == true) |
860 | - return 0; |
861 | + rc = 0; |
862 | /* check if extending file */ |
863 | else if (i_size_read(inode) >= off + len) |
864 | /* not extending file and already not sparse */ |
865 | - return 0; |
866 | + rc = 0; |
867 | /* BB: in future add else clause to extend file */ |
868 | else |
869 | - return -EOPNOTSUPP; |
870 | + rc = -EOPNOTSUPP; |
871 | + free_xid(xid); |
872 | + return rc; |
873 | } |
874 | |
875 | if ((keep_size == true) || (i_size_read(inode) >= off + len)) { |
876 | @@ -1805,8 +1828,11 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon, |
877 | * ie potentially making a few extra pages at the beginning |
878 | * or end of the file non-sparse via set_sparse is harmless. |
879 | */ |
880 | - if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) |
881 | - return -EOPNOTSUPP; |
882 | + if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) { |
883 | + rc = -EOPNOTSUPP; |
884 | + free_xid(xid); |
885 | + return rc; |
886 | + } |
887 | |
888 | rc = smb2_set_sparse(xid, tcon, cfile, inode, false); |
889 | } |
890 | diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c |
891 | index 49779d952cd5..5247b40e57f6 100644 |
892 | --- a/fs/cifs/smb2pdu.c |
893 | +++ b/fs/cifs/smb2pdu.c |
894 | @@ -1182,6 +1182,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, |
895 | sess_data->ses = ses; |
896 | sess_data->buf0_type = CIFS_NO_BUFFER; |
897 | sess_data->nls_cp = (struct nls_table *) nls_cp; |
898 | + sess_data->previous_session = ses->Suid; |
899 | |
900 | while (sess_data->func) |
901 | sess_data->func(sess_data); |
902 | @@ -2278,8 +2279,7 @@ SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon, |
903 | |
904 | return query_info(xid, tcon, persistent_fid, volatile_fid, |
905 | 0, SMB2_O_INFO_SECURITY, additional_info, |
906 | - SMB2_MAX_BUFFER_SIZE, |
907 | - sizeof(struct smb2_file_all_info), data, plen); |
908 | + SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen); |
909 | } |
910 | |
911 | int |
912 | diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c |
913 | index c32802c956d5..bf7fa1507e81 100644 |
914 | --- a/fs/ext4/indirect.c |
915 | +++ b/fs/ext4/indirect.c |
916 | @@ -561,10 +561,16 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, |
917 | unsigned epb = inode->i_sb->s_blocksize / sizeof(u32); |
918 | int i; |
919 | |
920 | - /* Count number blocks in a subtree under 'partial' */ |
921 | - count = 1; |
922 | - for (i = 0; partial + i != chain + depth - 1; i++) |
923 | - count *= epb; |
924 | + /* |
925 | + * Count number blocks in a subtree under 'partial'. At each |
926 | + * level we count number of complete empty subtrees beyond |
927 | + * current offset and then descend into the subtree only |
928 | + * partially beyond current offset. |
929 | + */ |
930 | + count = 0; |
931 | + for (i = partial - chain + 1; i < depth; i++) |
932 | + count = count * epb + (epb - offsets[i] - 1); |
933 | + count++; |
934 | /* Fill in size of a hole we found */ |
935 | map->m_pblk = 0; |
936 | map->m_len = min_t(unsigned int, map->m_len, count); |
937 | diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c |
938 | index fd9501977f1c..8f5dc243effd 100644 |
939 | --- a/fs/ext4/inline.c |
940 | +++ b/fs/ext4/inline.c |
941 | @@ -150,6 +150,12 @@ int ext4_find_inline_data_nolock(struct inode *inode) |
942 | goto out; |
943 | |
944 | if (!is.s.not_found) { |
945 | + if (is.s.here->e_value_inum) { |
946 | + EXT4_ERROR_INODE(inode, "inline data xattr refers " |
947 | + "to an external xattr inode"); |
948 | + error = -EFSCORRUPTED; |
949 | + goto out; |
950 | + } |
951 | EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here - |
952 | (void *)ext4_raw_inode(&is.iloc)); |
953 | EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE + |
954 | diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c |
955 | index 09014c3c4207..bd6453e78992 100644 |
956 | --- a/fs/ext4/inode.c |
957 | +++ b/fs/ext4/inode.c |
958 | @@ -4246,28 +4246,28 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) |
959 | EXT4_BLOCK_SIZE_BITS(sb); |
960 | stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); |
961 | |
962 | - /* If there are no blocks to remove, return now */ |
963 | - if (first_block >= stop_block) |
964 | - goto out_stop; |
965 | + /* If there are blocks to remove, do it */ |
966 | + if (stop_block > first_block) { |
967 | |
968 | - down_write(&EXT4_I(inode)->i_data_sem); |
969 | - ext4_discard_preallocations(inode); |
970 | + down_write(&EXT4_I(inode)->i_data_sem); |
971 | + ext4_discard_preallocations(inode); |
972 | |
973 | - ret = ext4_es_remove_extent(inode, first_block, |
974 | - stop_block - first_block); |
975 | - if (ret) { |
976 | - up_write(&EXT4_I(inode)->i_data_sem); |
977 | - goto out_stop; |
978 | - } |
979 | + ret = ext4_es_remove_extent(inode, first_block, |
980 | + stop_block - first_block); |
981 | + if (ret) { |
982 | + up_write(&EXT4_I(inode)->i_data_sem); |
983 | + goto out_stop; |
984 | + } |
985 | |
986 | - if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) |
987 | - ret = ext4_ext_remove_space(inode, first_block, |
988 | - stop_block - 1); |
989 | - else |
990 | - ret = ext4_ind_remove_space(handle, inode, first_block, |
991 | - stop_block); |
992 | + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) |
993 | + ret = ext4_ext_remove_space(inode, first_block, |
994 | + stop_block - 1); |
995 | + else |
996 | + ret = ext4_ind_remove_space(handle, inode, first_block, |
997 | + stop_block); |
998 | |
999 | - up_write(&EXT4_I(inode)->i_data_sem); |
1000 | + up_write(&EXT4_I(inode)->i_data_sem); |
1001 | + } |
1002 | if (IS_SYNC(inode)) |
1003 | ext4_handle_sync(handle); |
1004 | |
1005 | @@ -4634,19 +4634,21 @@ static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, |
1006 | } |
1007 | } |
1008 | |
1009 | -static inline void ext4_iget_extra_inode(struct inode *inode, |
1010 | +static inline int ext4_iget_extra_inode(struct inode *inode, |
1011 | struct ext4_inode *raw_inode, |
1012 | struct ext4_inode_info *ei) |
1013 | { |
1014 | __le32 *magic = (void *)raw_inode + |
1015 | EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; |
1016 | + |
1017 | if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <= |
1018 | EXT4_INODE_SIZE(inode->i_sb) && |
1019 | *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { |
1020 | ext4_set_inode_state(inode, EXT4_STATE_XATTR); |
1021 | - ext4_find_inline_data_nolock(inode); |
1022 | + return ext4_find_inline_data_nolock(inode); |
1023 | } else |
1024 | EXT4_I(inode)->i_inline_off = 0; |
1025 | + return 0; |
1026 | } |
1027 | |
1028 | int ext4_get_projid(struct inode *inode, kprojid_t *projid) |
1029 | @@ -4826,7 +4828,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) |
1030 | ei->i_extra_isize = sizeof(struct ext4_inode) - |
1031 | EXT4_GOOD_OLD_INODE_SIZE; |
1032 | } else { |
1033 | - ext4_iget_extra_inode(inode, raw_inode, ei); |
1034 | + ret = ext4_iget_extra_inode(inode, raw_inode, ei); |
1035 | + if (ret) |
1036 | + goto bad_inode; |
1037 | } |
1038 | } |
1039 | |
1040 | diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c |
1041 | index 1dac59c24792..823c0b82dfeb 100644 |
1042 | --- a/fs/ext4/resize.c |
1043 | +++ b/fs/ext4/resize.c |
1044 | @@ -1905,7 +1905,7 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count) |
1045 | return 0; |
1046 | |
1047 | n_group = ext4_get_group_number(sb, n_blocks_count - 1); |
1048 | - if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) { |
1049 | + if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) { |
1050 | ext4_warning(sb, "resize would cause inodes_count overflow"); |
1051 | return -EINVAL; |
1052 | } |
1053 | diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c |
1054 | index 1718354e6322..ed1cf24a7831 100644 |
1055 | --- a/fs/ext4/xattr.c |
1056 | +++ b/fs/ext4/xattr.c |
1057 | @@ -1687,7 +1687,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, |
1058 | |
1059 | /* No failures allowed past this point. */ |
1060 | |
1061 | - if (!s->not_found && here->e_value_offs) { |
1062 | + if (!s->not_found && here->e_value_size && here->e_value_offs) { |
1063 | /* Remove the old value. */ |
1064 | void *first_val = s->base + min_offs; |
1065 | size_t offs = le16_to_cpu(here->e_value_offs); |
1066 | diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h |
1067 | index dcfcf7fd7438..a73144b3cb8c 100644 |
1068 | --- a/fs/nfs/nfs4_fs.h |
1069 | +++ b/fs/nfs/nfs4_fs.h |
1070 | @@ -465,7 +465,7 @@ extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid); |
1071 | extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid); |
1072 | extern void nfs_release_seqid(struct nfs_seqid *seqid); |
1073 | extern void nfs_free_seqid(struct nfs_seqid *seqid); |
1074 | -extern int nfs4_setup_sequence(const struct nfs_client *client, |
1075 | +extern int nfs4_setup_sequence(struct nfs_client *client, |
1076 | struct nfs4_sequence_args *args, |
1077 | struct nfs4_sequence_res *res, |
1078 | struct rpc_task *task); |
1079 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
1080 | index ae8f43d270d6..8ff98bbe479b 100644 |
1081 | --- a/fs/nfs/nfs4proc.c |
1082 | +++ b/fs/nfs/nfs4proc.c |
1083 | @@ -96,6 +96,10 @@ static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, |
1084 | struct nfs_open_context *ctx, struct nfs4_label *ilabel, |
1085 | struct nfs4_label *olabel); |
1086 | #ifdef CONFIG_NFS_V4_1 |
1087 | +static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, |
1088 | + struct rpc_cred *cred, |
1089 | + struct nfs4_slot *slot, |
1090 | + bool is_privileged); |
1091 | static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *, |
1092 | struct rpc_cred *); |
1093 | static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *, |
1094 | @@ -641,13 +645,14 @@ static int nfs40_sequence_done(struct rpc_task *task, |
1095 | |
1096 | #if defined(CONFIG_NFS_V4_1) |
1097 | |
1098 | -static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) |
1099 | +static void nfs41_release_slot(struct nfs4_slot *slot) |
1100 | { |
1101 | struct nfs4_session *session; |
1102 | struct nfs4_slot_table *tbl; |
1103 | - struct nfs4_slot *slot = res->sr_slot; |
1104 | bool send_new_highest_used_slotid = false; |
1105 | |
1106 | + if (!slot) |
1107 | + return; |
1108 | tbl = slot->table; |
1109 | session = tbl->session; |
1110 | |
1111 | @@ -673,13 +678,18 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) |
1112 | send_new_highest_used_slotid = false; |
1113 | out_unlock: |
1114 | spin_unlock(&tbl->slot_tbl_lock); |
1115 | - res->sr_slot = NULL; |
1116 | if (send_new_highest_used_slotid) |
1117 | nfs41_notify_server(session->clp); |
1118 | if (waitqueue_active(&tbl->slot_waitq)) |
1119 | wake_up_all(&tbl->slot_waitq); |
1120 | } |
1121 | |
1122 | +static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) |
1123 | +{ |
1124 | + nfs41_release_slot(res->sr_slot); |
1125 | + res->sr_slot = NULL; |
1126 | +} |
1127 | + |
1128 | static int nfs41_sequence_process(struct rpc_task *task, |
1129 | struct nfs4_sequence_res *res) |
1130 | { |
1131 | @@ -707,13 +717,6 @@ static int nfs41_sequence_process(struct rpc_task *task, |
1132 | /* Check the SEQUENCE operation status */ |
1133 | switch (res->sr_status) { |
1134 | case 0: |
1135 | - /* If previous op on slot was interrupted and we reused |
1136 | - * the seq# and got a reply from the cache, then retry |
1137 | - */ |
1138 | - if (task->tk_status == -EREMOTEIO && interrupted) { |
1139 | - ++slot->seq_nr; |
1140 | - goto retry_nowait; |
1141 | - } |
1142 | /* Update the slot's sequence and clientid lease timer */ |
1143 | slot->seq_done = 1; |
1144 | clp = session->clp; |
1145 | @@ -747,16 +750,16 @@ static int nfs41_sequence_process(struct rpc_task *task, |
1146 | * The slot id we used was probably retired. Try again |
1147 | * using a different slot id. |
1148 | */ |
1149 | + if (slot->seq_nr < slot->table->target_highest_slotid) |
1150 | + goto session_recover; |
1151 | goto retry_nowait; |
1152 | case -NFS4ERR_SEQ_MISORDERED: |
1153 | /* |
1154 | * Was the last operation on this sequence interrupted? |
1155 | * If so, retry after bumping the sequence number. |
1156 | */ |
1157 | - if (interrupted) { |
1158 | - ++slot->seq_nr; |
1159 | - goto retry_nowait; |
1160 | - } |
1161 | + if (interrupted) |
1162 | + goto retry_new_seq; |
1163 | /* |
1164 | * Could this slot have been previously retired? |
1165 | * If so, then the server may be expecting seq_nr = 1! |
1166 | @@ -765,10 +768,11 @@ static int nfs41_sequence_process(struct rpc_task *task, |
1167 | slot->seq_nr = 1; |
1168 | goto retry_nowait; |
1169 | } |
1170 | - break; |
1171 | + goto session_recover; |
1172 | case -NFS4ERR_SEQ_FALSE_RETRY: |
1173 | - ++slot->seq_nr; |
1174 | - goto retry_nowait; |
1175 | + if (interrupted) |
1176 | + goto retry_new_seq; |
1177 | + goto session_recover; |
1178 | default: |
1179 | /* Just update the slot sequence no. */ |
1180 | slot->seq_done = 1; |
1181 | @@ -778,6 +782,11 @@ static int nfs41_sequence_process(struct rpc_task *task, |
1182 | dprintk("%s: Error %d free the slot \n", __func__, res->sr_status); |
1183 | out_noaction: |
1184 | return ret; |
1185 | +session_recover: |
1186 | + nfs4_schedule_session_recovery(session, res->sr_status); |
1187 | + goto retry_nowait; |
1188 | +retry_new_seq: |
1189 | + ++slot->seq_nr; |
1190 | retry_nowait: |
1191 | if (rpc_restart_call_prepare(task)) { |
1192 | nfs41_sequence_free_slot(res); |
1193 | @@ -854,6 +863,17 @@ static const struct rpc_call_ops nfs41_call_sync_ops = { |
1194 | .rpc_call_done = nfs41_call_sync_done, |
1195 | }; |
1196 | |
1197 | +static void |
1198 | +nfs4_sequence_process_interrupted(struct nfs_client *client, |
1199 | + struct nfs4_slot *slot, struct rpc_cred *cred) |
1200 | +{ |
1201 | + struct rpc_task *task; |
1202 | + |
1203 | + task = _nfs41_proc_sequence(client, cred, slot, true); |
1204 | + if (!IS_ERR(task)) |
1205 | + rpc_put_task_async(task); |
1206 | +} |
1207 | + |
1208 | #else /* !CONFIG_NFS_V4_1 */ |
1209 | |
1210 | static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res) |
1211 | @@ -874,9 +894,34 @@ int nfs4_sequence_done(struct rpc_task *task, |
1212 | } |
1213 | EXPORT_SYMBOL_GPL(nfs4_sequence_done); |
1214 | |
1215 | +static void |
1216 | +nfs4_sequence_process_interrupted(struct nfs_client *client, |
1217 | + struct nfs4_slot *slot, struct rpc_cred *cred) |
1218 | +{ |
1219 | + WARN_ON_ONCE(1); |
1220 | + slot->interrupted = 0; |
1221 | +} |
1222 | + |
1223 | #endif /* !CONFIG_NFS_V4_1 */ |
1224 | |
1225 | -int nfs4_setup_sequence(const struct nfs_client *client, |
1226 | +static |
1227 | +void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args, |
1228 | + struct nfs4_sequence_res *res, |
1229 | + struct nfs4_slot *slot) |
1230 | +{ |
1231 | + if (!slot) |
1232 | + return; |
1233 | + slot->privileged = args->sa_privileged ? 1 : 0; |
1234 | + args->sa_slot = slot; |
1235 | + |
1236 | + res->sr_slot = slot; |
1237 | + res->sr_timestamp = jiffies; |
1238 | + res->sr_status_flags = 0; |
1239 | + res->sr_status = 1; |
1240 | + |
1241 | +} |
1242 | + |
1243 | +int nfs4_setup_sequence(struct nfs_client *client, |
1244 | struct nfs4_sequence_args *args, |
1245 | struct nfs4_sequence_res *res, |
1246 | struct rpc_task *task) |
1247 | @@ -894,29 +939,28 @@ int nfs4_setup_sequence(const struct nfs_client *client, |
1248 | task->tk_timeout = 0; |
1249 | } |
1250 | |
1251 | - spin_lock(&tbl->slot_tbl_lock); |
1252 | - /* The state manager will wait until the slot table is empty */ |
1253 | - if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) |
1254 | - goto out_sleep; |
1255 | + for (;;) { |
1256 | + spin_lock(&tbl->slot_tbl_lock); |
1257 | + /* The state manager will wait until the slot table is empty */ |
1258 | + if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) |
1259 | + goto out_sleep; |
1260 | + |
1261 | + slot = nfs4_alloc_slot(tbl); |
1262 | + if (IS_ERR(slot)) { |
1263 | + /* Try again in 1/4 second */ |
1264 | + if (slot == ERR_PTR(-ENOMEM)) |
1265 | + task->tk_timeout = HZ >> 2; |
1266 | + goto out_sleep; |
1267 | + } |
1268 | + spin_unlock(&tbl->slot_tbl_lock); |
1269 | |
1270 | - slot = nfs4_alloc_slot(tbl); |
1271 | - if (IS_ERR(slot)) { |
1272 | - /* Try again in 1/4 second */ |
1273 | - if (slot == ERR_PTR(-ENOMEM)) |
1274 | - task->tk_timeout = HZ >> 2; |
1275 | - goto out_sleep; |
1276 | + if (likely(!slot->interrupted)) |
1277 | + break; |
1278 | + nfs4_sequence_process_interrupted(client, |
1279 | + slot, task->tk_msg.rpc_cred); |
1280 | } |
1281 | - spin_unlock(&tbl->slot_tbl_lock); |
1282 | - |
1283 | - slot->privileged = args->sa_privileged ? 1 : 0; |
1284 | - args->sa_slot = slot; |
1285 | |
1286 | - res->sr_slot = slot; |
1287 | - if (session) { |
1288 | - res->sr_timestamp = jiffies; |
1289 | - res->sr_status_flags = 0; |
1290 | - res->sr_status = 1; |
1291 | - } |
1292 | + nfs4_sequence_attach_slot(args, res, slot); |
1293 | |
1294 | trace_nfs4_setup_sequence(session, args); |
1295 | out_start: |
1296 | @@ -8151,6 +8195,7 @@ static const struct rpc_call_ops nfs41_sequence_ops = { |
1297 | |
1298 | static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, |
1299 | struct rpc_cred *cred, |
1300 | + struct nfs4_slot *slot, |
1301 | bool is_privileged) |
1302 | { |
1303 | struct nfs4_sequence_data *calldata; |
1304 | @@ -8164,15 +8209,18 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, |
1305 | .callback_ops = &nfs41_sequence_ops, |
1306 | .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, |
1307 | }; |
1308 | + struct rpc_task *ret; |
1309 | |
1310 | + ret = ERR_PTR(-EIO); |
1311 | if (!atomic_inc_not_zero(&clp->cl_count)) |
1312 | - return ERR_PTR(-EIO); |
1313 | + goto out_err; |
1314 | + |
1315 | + ret = ERR_PTR(-ENOMEM); |
1316 | calldata = kzalloc(sizeof(*calldata), GFP_NOFS); |
1317 | - if (calldata == NULL) { |
1318 | - nfs_put_client(clp); |
1319 | - return ERR_PTR(-ENOMEM); |
1320 | - } |
1321 | + if (calldata == NULL) |
1322 | + goto out_put_clp; |
1323 | nfs4_init_sequence(&calldata->args, &calldata->res, 0); |
1324 | + nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot); |
1325 | if (is_privileged) |
1326 | nfs4_set_sequence_privileged(&calldata->args); |
1327 | msg.rpc_argp = &calldata->args; |
1328 | @@ -8180,7 +8228,15 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, |
1329 | calldata->clp = clp; |
1330 | task_setup_data.callback_data = calldata; |
1331 | |
1332 | - return rpc_run_task(&task_setup_data); |
1333 | + ret = rpc_run_task(&task_setup_data); |
1334 | + if (IS_ERR(ret)) |
1335 | + goto out_err; |
1336 | + return ret; |
1337 | +out_put_clp: |
1338 | + nfs_put_client(clp); |
1339 | +out_err: |
1340 | + nfs41_release_slot(slot); |
1341 | + return ret; |
1342 | } |
1343 | |
1344 | static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) |
1345 | @@ -8190,7 +8246,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr |
1346 | |
1347 | if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) |
1348 | return -EAGAIN; |
1349 | - task = _nfs41_proc_sequence(clp, cred, false); |
1350 | + task = _nfs41_proc_sequence(clp, cred, NULL, false); |
1351 | if (IS_ERR(task)) |
1352 | ret = PTR_ERR(task); |
1353 | else |
1354 | @@ -8204,7 +8260,7 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) |
1355 | struct rpc_task *task; |
1356 | int ret; |
1357 | |
1358 | - task = _nfs41_proc_sequence(clp, cred, true); |
1359 | + task = _nfs41_proc_sequence(clp, cred, NULL, true); |
1360 | if (IS_ERR(task)) { |
1361 | ret = PTR_ERR(task); |
1362 | goto out; |
1363 | diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c |
1364 | index 28825a5b6d09..902b72dac41a 100644 |
1365 | --- a/fs/orangefs/inode.c |
1366 | +++ b/fs/orangefs/inode.c |
1367 | @@ -269,6 +269,13 @@ int orangefs_getattr(const struct path *path, struct kstat *stat, |
1368 | else |
1369 | stat->result_mask = STATX_BASIC_STATS & |
1370 | ~STATX_SIZE; |
1371 | + |
1372 | + stat->attributes_mask = STATX_ATTR_IMMUTABLE | |
1373 | + STATX_ATTR_APPEND; |
1374 | + if (inode->i_flags & S_IMMUTABLE) |
1375 | + stat->attributes |= STATX_ATTR_IMMUTABLE; |
1376 | + if (inode->i_flags & S_APPEND) |
1377 | + stat->attributes |= STATX_ATTR_APPEND; |
1378 | } |
1379 | return ret; |
1380 | } |
1381 | diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c |
1382 | index f8f3c73d2664..05b3abbdbc4b 100644 |
1383 | --- a/fs/orangefs/namei.c |
1384 | +++ b/fs/orangefs/namei.c |
1385 | @@ -314,6 +314,13 @@ static int orangefs_symlink(struct inode *dir, |
1386 | ret = PTR_ERR(inode); |
1387 | goto out; |
1388 | } |
1389 | + /* |
1390 | + * This is necessary because orangefs_inode_getattr will not |
1391 | + * re-read symlink size as it is impossible for it to change. |
1392 | + * Invalidating the cache does not help. orangefs_new_inode |
1393 | + * does not set the correct size (it does not know symname). |
1394 | + */ |
1395 | + inode->i_size = strlen(symname); |
1396 | |
1397 | gossip_debug(GOSSIP_NAME_DEBUG, |
1398 | "Assigned symlink inode new number of %pU\n", |
1399 | diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h |
1400 | index f144216febc6..9397628a1967 100644 |
1401 | --- a/include/linux/virtio_net.h |
1402 | +++ b/include/linux/virtio_net.h |
1403 | @@ -58,7 +58,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, |
1404 | static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, |
1405 | struct virtio_net_hdr *hdr, |
1406 | bool little_endian, |
1407 | - bool has_data_valid) |
1408 | + bool has_data_valid, |
1409 | + int vlan_hlen) |
1410 | { |
1411 | memset(hdr, 0, sizeof(*hdr)); /* no info leak */ |
1412 | |
1413 | @@ -83,12 +84,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, |
1414 | |
1415 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1416 | hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; |
1417 | - if (skb_vlan_tag_present(skb)) |
1418 | - hdr->csum_start = __cpu_to_virtio16(little_endian, |
1419 | - skb_checksum_start_offset(skb) + VLAN_HLEN); |
1420 | - else |
1421 | - hdr->csum_start = __cpu_to_virtio16(little_endian, |
1422 | - skb_checksum_start_offset(skb)); |
1423 | + hdr->csum_start = __cpu_to_virtio16(little_endian, |
1424 | + skb_checksum_start_offset(skb) + vlan_hlen); |
1425 | hdr->csum_offset = __cpu_to_virtio16(little_endian, |
1426 | skb->csum_offset); |
1427 | } else if (has_data_valid && |
1428 | diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h |
1429 | index c4f5caaf3778..f6a3543e5247 100644 |
1430 | --- a/include/net/transp_v6.h |
1431 | +++ b/include/net/transp_v6.h |
1432 | @@ -45,8 +45,15 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, |
1433 | struct flowi6 *fl6, struct ipcm6_cookie *ipc6, |
1434 | struct sockcm_cookie *sockc); |
1435 | |
1436 | -void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, |
1437 | - __u16 srcp, __u16 destp, int bucket); |
1438 | +void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, |
1439 | + __u16 srcp, __u16 destp, int rqueue, int bucket); |
1440 | +static inline void |
1441 | +ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp, |
1442 | + __u16 destp, int bucket) |
1443 | +{ |
1444 | + __ip6_dgram_sock_seq_show(seq, sp, srcp, destp, sk_rmem_alloc_get(sp), |
1445 | + bucket); |
1446 | +} |
1447 | |
1448 | #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) |
1449 | |
1450 | diff --git a/include/net/udp.h b/include/net/udp.h |
1451 | index 6c759c8594e2..18391015233e 100644 |
1452 | --- a/include/net/udp.h |
1453 | +++ b/include/net/udp.h |
1454 | @@ -244,6 +244,11 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb, |
1455 | return htons((((u64) hash * (max - min)) >> 32) + min); |
1456 | } |
1457 | |
1458 | +static inline int udp_rqueue_get(struct sock *sk) |
1459 | +{ |
1460 | + return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit); |
1461 | +} |
1462 | + |
1463 | /* net/ipv4/udp.c */ |
1464 | void udp_destruct_sock(struct sock *sk); |
1465 | void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len); |
1466 | diff --git a/mm/backing-dev.c b/mm/backing-dev.c |
1467 | index dee049a0ec5b..6774e0369ebe 100644 |
1468 | --- a/mm/backing-dev.c |
1469 | +++ b/mm/backing-dev.c |
1470 | @@ -409,6 +409,7 @@ static void wb_exit(struct bdi_writeback *wb) |
1471 | * protected. |
1472 | */ |
1473 | static DEFINE_SPINLOCK(cgwb_lock); |
1474 | +static struct workqueue_struct *cgwb_release_wq; |
1475 | |
1476 | /** |
1477 | * wb_congested_get_create - get or create a wb_congested |
1478 | @@ -519,7 +520,7 @@ static void cgwb_release(struct percpu_ref *refcnt) |
1479 | { |
1480 | struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback, |
1481 | refcnt); |
1482 | - schedule_work(&wb->release_work); |
1483 | + queue_work(cgwb_release_wq, &wb->release_work); |
1484 | } |
1485 | |
1486 | static void cgwb_kill(struct bdi_writeback *wb) |
1487 | @@ -783,6 +784,21 @@ static void cgwb_bdi_register(struct backing_dev_info *bdi) |
1488 | spin_unlock_irq(&cgwb_lock); |
1489 | } |
1490 | |
1491 | +static int __init cgwb_init(void) |
1492 | +{ |
1493 | + /* |
1494 | + * There can be many concurrent release work items overwhelming |
1495 | + * system_wq. Put them in a separate wq and limit concurrency. |
1496 | + * There's no point in executing many of these in parallel. |
1497 | + */ |
1498 | + cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1); |
1499 | + if (!cgwb_release_wq) |
1500 | + return -ENOMEM; |
1501 | + |
1502 | + return 0; |
1503 | +} |
1504 | +subsys_initcall(cgwb_init); |
1505 | + |
1506 | #else /* CONFIG_CGROUP_WRITEBACK */ |
1507 | |
1508 | static int cgwb_bdi_init(struct backing_dev_info *bdi) |
1509 | diff --git a/mm/page_alloc.c b/mm/page_alloc.c |
1510 | index 1d7693c35424..59ccf455fcbd 100644 |
1511 | --- a/mm/page_alloc.c |
1512 | +++ b/mm/page_alloc.c |
1513 | @@ -3981,7 +3981,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, |
1514 | * orientated. |
1515 | */ |
1516 | if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { |
1517 | - ac->zonelist = node_zonelist(numa_node_id(), gfp_mask); |
1518 | ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, |
1519 | ac->high_zoneidx, ac->nodemask); |
1520 | } |
1521 | diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c |
1522 | index fcc9aa72877d..374d586b4a2c 100644 |
1523 | --- a/net/dsa/tag_trailer.c |
1524 | +++ b/net/dsa/tag_trailer.c |
1525 | @@ -79,7 +79,8 @@ static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev, |
1526 | if (unlikely(ds->cpu_port_mask & BIT(source_port))) |
1527 | return NULL; |
1528 | |
1529 | - pskb_trim_rcsum(skb, skb->len - 4); |
1530 | + if (pskb_trim_rcsum(skb, skb->len - 4)) |
1531 | + return NULL; |
1532 | |
1533 | skb->dev = ds->ports[source_port].netdev; |
1534 | |
1535 | diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c |
1536 | index cab4b935e474..a95ccdceb797 100644 |
1537 | --- a/net/ipv4/tcp_ipv4.c |
1538 | +++ b/net/ipv4/tcp_ipv4.c |
1539 | @@ -1675,6 +1675,10 @@ int tcp_v4_rcv(struct sk_buff *skb) |
1540 | reqsk_put(req); |
1541 | goto discard_it; |
1542 | } |
1543 | + if (tcp_checksum_complete(skb)) { |
1544 | + reqsk_put(req); |
1545 | + goto csum_error; |
1546 | + } |
1547 | if (unlikely(sk->sk_state != TCP_LISTEN)) { |
1548 | inet_csk_reqsk_queue_drop_and_put(sk, req); |
1549 | goto lookup; |
1550 | diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c |
1551 | index b0ad62bd38f7..5752bf7593dc 100644 |
1552 | --- a/net/ipv4/udp.c |
1553 | +++ b/net/ipv4/udp.c |
1554 | @@ -2720,7 +2720,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, |
1555 | " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d", |
1556 | bucket, src, srcp, dest, destp, sp->sk_state, |
1557 | sk_wmem_alloc_get(sp), |
1558 | - sk_rmem_alloc_get(sp), |
1559 | + udp_rqueue_get(sp), |
1560 | 0, 0L, 0, |
1561 | from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), |
1562 | 0, sock_i_ino(sp), |
1563 | diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c |
1564 | index d0390d844ac8..d9ad986c7b2c 100644 |
1565 | --- a/net/ipv4/udp_diag.c |
1566 | +++ b/net/ipv4/udp_diag.c |
1567 | @@ -163,7 +163,7 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, |
1568 | static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, |
1569 | void *info) |
1570 | { |
1571 | - r->idiag_rqueue = sk_rmem_alloc_get(sk); |
1572 | + r->idiag_rqueue = udp_rqueue_get(sk); |
1573 | r->idiag_wqueue = sk_wmem_alloc_get(sk); |
1574 | } |
1575 | |
1576 | diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c |
1577 | index 287112da3c06..453dc3726199 100644 |
1578 | --- a/net/ipv6/datagram.c |
1579 | +++ b/net/ipv6/datagram.c |
1580 | @@ -1026,8 +1026,8 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, |
1581 | } |
1582 | EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl); |
1583 | |
1584 | -void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, |
1585 | - __u16 srcp, __u16 destp, int bucket) |
1586 | +void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, |
1587 | + __u16 srcp, __u16 destp, int rqueue, int bucket) |
1588 | { |
1589 | const struct in6_addr *dest, *src; |
1590 | |
1591 | @@ -1043,7 +1043,7 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, |
1592 | dest->s6_addr32[2], dest->s6_addr32[3], destp, |
1593 | sp->sk_state, |
1594 | sk_wmem_alloc_get(sp), |
1595 | - sk_rmem_alloc_get(sp), |
1596 | + rqueue, |
1597 | 0, 0L, 0, |
1598 | from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), |
1599 | 0, |
1600 | diff --git a/net/ipv6/route.c b/net/ipv6/route.c |
1601 | index 375b20d5bbd7..60efd326014b 100644 |
1602 | --- a/net/ipv6/route.c |
1603 | +++ b/net/ipv6/route.c |
1604 | @@ -1476,9 +1476,6 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, |
1605 | const struct in6_addr *daddr, *saddr; |
1606 | struct rt6_info *rt6 = (struct rt6_info *)dst; |
1607 | |
1608 | - if (rt6->rt6i_flags & RTF_LOCAL) |
1609 | - return; |
1610 | - |
1611 | if (dst_metric_locked(dst, RTAX_MTU)) |
1612 | return; |
1613 | |
1614 | diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c |
1615 | index 237cc6187c5a..35e8aef9ceed 100644 |
1616 | --- a/net/ipv6/tcp_ipv6.c |
1617 | +++ b/net/ipv6/tcp_ipv6.c |
1618 | @@ -1453,6 +1453,10 @@ static int tcp_v6_rcv(struct sk_buff *skb) |
1619 | reqsk_put(req); |
1620 | goto discard_it; |
1621 | } |
1622 | + if (tcp_checksum_complete(skb)) { |
1623 | + reqsk_put(req); |
1624 | + goto csum_error; |
1625 | + } |
1626 | if (unlikely(sk->sk_state != TCP_LISTEN)) { |
1627 | inet_csk_reqsk_queue_drop_and_put(sk, req); |
1628 | goto lookup; |
1629 | diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c |
1630 | index 0146dcdc5c40..330d5ea8451b 100644 |
1631 | --- a/net/ipv6/udp.c |
1632 | +++ b/net/ipv6/udp.c |
1633 | @@ -1503,7 +1503,8 @@ int udp6_seq_show(struct seq_file *seq, void *v) |
1634 | struct inet_sock *inet = inet_sk(v); |
1635 | __u16 srcp = ntohs(inet->inet_sport); |
1636 | __u16 destp = ntohs(inet->inet_dport); |
1637 | - ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket); |
1638 | + __ip6_dgram_sock_seq_show(seq, v, srcp, destp, |
1639 | + udp_rqueue_get(v), bucket); |
1640 | } |
1641 | return 0; |
1642 | } |
1643 | diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c |
1644 | index 7806e166669a..4fe2e34522d6 100644 |
1645 | --- a/net/packet/af_packet.c |
1646 | +++ b/net/packet/af_packet.c |
1647 | @@ -2046,7 +2046,7 @@ static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb, |
1648 | return -EINVAL; |
1649 | *len -= sizeof(vnet_hdr); |
1650 | |
1651 | - if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true)) |
1652 | + if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0)) |
1653 | return -EINVAL; |
1654 | |
1655 | return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr)); |
1656 | @@ -2313,7 +2313,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, |
1657 | if (do_vnet) { |
1658 | if (virtio_net_hdr_from_skb(skb, h.raw + macoff - |
1659 | sizeof(struct virtio_net_hdr), |
1660 | - vio_le(), true)) { |
1661 | + vio_le(), true, 0)) { |
1662 | spin_lock(&sk->sk_receive_queue.lock); |
1663 | goto drop_n_account; |
1664 | } |
1665 | diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c |
1666 | index b5f80e675783..f3ed63aa4111 100644 |
1667 | --- a/net/sched/act_simple.c |
1668 | +++ b/net/sched/act_simple.c |
1669 | @@ -53,22 +53,22 @@ static void tcf_simp_release(struct tc_action *a, int bind) |
1670 | kfree(d->tcfd_defdata); |
1671 | } |
1672 | |
1673 | -static int alloc_defdata(struct tcf_defact *d, char *defdata) |
1674 | +static int alloc_defdata(struct tcf_defact *d, const struct nlattr *defdata) |
1675 | { |
1676 | d->tcfd_defdata = kzalloc(SIMP_MAX_DATA, GFP_KERNEL); |
1677 | if (unlikely(!d->tcfd_defdata)) |
1678 | return -ENOMEM; |
1679 | - strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA); |
1680 | + nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA); |
1681 | return 0; |
1682 | } |
1683 | |
1684 | -static void reset_policy(struct tcf_defact *d, char *defdata, |
1685 | +static void reset_policy(struct tcf_defact *d, const struct nlattr *defdata, |
1686 | struct tc_defact *p) |
1687 | { |
1688 | spin_lock_bh(&d->tcf_lock); |
1689 | d->tcf_action = p->action; |
1690 | memset(d->tcfd_defdata, 0, SIMP_MAX_DATA); |
1691 | - strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA); |
1692 | + nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA); |
1693 | spin_unlock_bh(&d->tcf_lock); |
1694 | } |
1695 | |
1696 | @@ -87,7 +87,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, |
1697 | struct tcf_defact *d; |
1698 | bool exists = false; |
1699 | int ret = 0, err; |
1700 | - char *defdata; |
1701 | |
1702 | if (nla == NULL) |
1703 | return -EINVAL; |
1704 | @@ -110,8 +109,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, |
1705 | return -EINVAL; |
1706 | } |
1707 | |
1708 | - defdata = nla_data(tb[TCA_DEF_DATA]); |
1709 | - |
1710 | if (!exists) { |
1711 | ret = tcf_idr_create(tn, parm->index, est, a, |
1712 | &act_simp_ops, bind, false); |
1713 | @@ -119,7 +116,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, |
1714 | return ret; |
1715 | |
1716 | d = to_defact(*a); |
1717 | - ret = alloc_defdata(d, defdata); |
1718 | + ret = alloc_defdata(d, tb[TCA_DEF_DATA]); |
1719 | if (ret < 0) { |
1720 | tcf_idr_release(*a, bind); |
1721 | return ret; |
1722 | @@ -133,7 +130,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, |
1723 | if (!ovr) |
1724 | return -EEXIST; |
1725 | |
1726 | - reset_policy(d, defdata, parm); |
1727 | + reset_policy(d, tb[TCA_DEF_DATA], parm); |
1728 | } |
1729 | |
1730 | if (ret == ACT_P_CREATED) |
1731 | diff --git a/net/socket.c b/net/socket.c |
1732 | index 43d2f17f5eea..8b2bef6cfe42 100644 |
1733 | --- a/net/socket.c |
1734 | +++ b/net/socket.c |
1735 | @@ -538,7 +538,10 @@ static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr) |
1736 | if (!err && (iattr->ia_valid & ATTR_UID)) { |
1737 | struct socket *sock = SOCKET_I(d_inode(dentry)); |
1738 | |
1739 | - sock->sk->sk_uid = iattr->ia_uid; |
1740 | + if (sock->sk) |
1741 | + sock->sk->sk_uid = iattr->ia_uid; |
1742 | + else |
1743 | + err = -ENOENT; |
1744 | } |
1745 | |
1746 | return err; |
1747 | @@ -588,12 +591,16 @@ EXPORT_SYMBOL(sock_alloc); |
1748 | * an inode not a file. |
1749 | */ |
1750 | |
1751 | -void sock_release(struct socket *sock) |
1752 | +static void __sock_release(struct socket *sock, struct inode *inode) |
1753 | { |
1754 | if (sock->ops) { |
1755 | struct module *owner = sock->ops->owner; |
1756 | |
1757 | + if (inode) |
1758 | + inode_lock(inode); |
1759 | sock->ops->release(sock); |
1760 | + if (inode) |
1761 | + inode_unlock(inode); |
1762 | sock->ops = NULL; |
1763 | module_put(owner); |
1764 | } |
1765 | @@ -608,6 +615,11 @@ void sock_release(struct socket *sock) |
1766 | } |
1767 | sock->file = NULL; |
1768 | } |
1769 | + |
1770 | +void sock_release(struct socket *sock) |
1771 | +{ |
1772 | + __sock_release(sock, NULL); |
1773 | +} |
1774 | EXPORT_SYMBOL(sock_release); |
1775 | |
1776 | void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags) |
1777 | @@ -1122,7 +1134,7 @@ static int sock_mmap(struct file *file, struct vm_area_struct *vma) |
1778 | |
1779 | static int sock_close(struct inode *inode, struct file *filp) |
1780 | { |
1781 | - sock_release(SOCKET_I(inode)); |
1782 | + __sock_release(SOCKET_I(inode), inode); |
1783 | return 0; |
1784 | } |
1785 | |
1786 | diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c |
1787 | index 83f886d7c1f8..3c86614462f6 100644 |
1788 | --- a/net/tls/tls_sw.c |
1789 | +++ b/net/tls/tls_sw.c |
1790 | @@ -211,18 +211,12 @@ static void tls_free_both_sg(struct sock *sk) |
1791 | } |
1792 | |
1793 | static int tls_do_encryption(struct tls_context *tls_ctx, |
1794 | - struct tls_sw_context *ctx, size_t data_len, |
1795 | - gfp_t flags) |
1796 | + struct tls_sw_context *ctx, |
1797 | + struct aead_request *aead_req, |
1798 | + size_t data_len) |
1799 | { |
1800 | - unsigned int req_size = sizeof(struct aead_request) + |
1801 | - crypto_aead_reqsize(ctx->aead_send); |
1802 | - struct aead_request *aead_req; |
1803 | int rc; |
1804 | |
1805 | - aead_req = kzalloc(req_size, flags); |
1806 | - if (!aead_req) |
1807 | - return -ENOMEM; |
1808 | - |
1809 | ctx->sg_encrypted_data[0].offset += tls_ctx->prepend_size; |
1810 | ctx->sg_encrypted_data[0].length -= tls_ctx->prepend_size; |
1811 | |
1812 | @@ -235,7 +229,6 @@ static int tls_do_encryption(struct tls_context *tls_ctx, |
1813 | ctx->sg_encrypted_data[0].offset -= tls_ctx->prepend_size; |
1814 | ctx->sg_encrypted_data[0].length += tls_ctx->prepend_size; |
1815 | |
1816 | - kfree(aead_req); |
1817 | return rc; |
1818 | } |
1819 | |
1820 | @@ -244,8 +237,14 @@ static int tls_push_record(struct sock *sk, int flags, |
1821 | { |
1822 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
1823 | struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); |
1824 | + struct aead_request *req; |
1825 | int rc; |
1826 | |
1827 | + req = kzalloc(sizeof(struct aead_request) + |
1828 | + crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation); |
1829 | + if (!req) |
1830 | + return -ENOMEM; |
1831 | + |
1832 | sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1); |
1833 | sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1); |
1834 | |
1835 | @@ -261,15 +260,14 @@ static int tls_push_record(struct sock *sk, int flags, |
1836 | tls_ctx->pending_open_record_frags = 0; |
1837 | set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags); |
1838 | |
1839 | - rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size, |
1840 | - sk->sk_allocation); |
1841 | + rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size); |
1842 | if (rc < 0) { |
1843 | /* If we are called from write_space and |
1844 | * we fail, we need to set this SOCK_NOSPACE |
1845 | * to trigger another write_space in the future. |
1846 | */ |
1847 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); |
1848 | - return rc; |
1849 | + goto out_req; |
1850 | } |
1851 | |
1852 | free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem, |
1853 | @@ -284,6 +282,8 @@ static int tls_push_record(struct sock *sk, int flags, |
1854 | tls_err_abort(sk); |
1855 | |
1856 | tls_advance_record_sn(sk, tls_ctx); |
1857 | +out_req: |
1858 | + kfree(req); |
1859 | return rc; |
1860 | } |
1861 | |
1862 | diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c |
1863 | index d1eb14842340..a12e594d4e3b 100644 |
1864 | --- a/sound/pci/hda/hda_controller.c |
1865 | +++ b/sound/pci/hda/hda_controller.c |
1866 | @@ -748,8 +748,10 @@ int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec, |
1867 | return err; |
1868 | strlcpy(pcm->name, cpcm->name, sizeof(pcm->name)); |
1869 | apcm = kzalloc(sizeof(*apcm), GFP_KERNEL); |
1870 | - if (apcm == NULL) |
1871 | + if (apcm == NULL) { |
1872 | + snd_device_free(chip->card, pcm); |
1873 | return -ENOMEM; |
1874 | + } |
1875 | apcm->chip = chip; |
1876 | apcm->pcm = pcm; |
1877 | apcm->codec = codec; |
1878 | diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c |
1879 | index 5b4dbcec6de8..ba9a7e552183 100644 |
1880 | --- a/sound/pci/hda/patch_conexant.c |
1881 | +++ b/sound/pci/hda/patch_conexant.c |
1882 | @@ -959,12 +959,15 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { |
1883 | SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK), |
1884 | SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK), |
1885 | SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK), |
1886 | + SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK), |
1887 | + SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK), |
1888 | SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), |
1889 | SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), |
1890 | SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), |
1891 | SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), |
1892 | SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), |
1893 | SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), |
1894 | + SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE), |
1895 | SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), |
1896 | SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO), |
1897 | SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), |
1898 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
1899 | index 6ae061183eff..2a8aa2bc5c30 100644 |
1900 | --- a/sound/pci/hda/patch_realtek.c |
1901 | +++ b/sound/pci/hda/patch_realtek.c |
1902 | @@ -6439,7 +6439,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
1903 | SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), |
1904 | SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), |
1905 | SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), |
1906 | - SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), |
1907 | SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), |
1908 | SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), |
1909 | SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), |
1910 | @@ -6610,6 +6609,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { |
1911 | {0x12, 0x90a60140}, |
1912 | {0x14, 0x90170110}, |
1913 | {0x21, 0x02211020}), |
1914 | + SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, |
1915 | + {0x12, 0x90a60140}, |
1916 | + {0x14, 0x90170110}, |
1917 | + {0x19, 0x02a11030}, |
1918 | + {0x21, 0x02211020}), |
1919 | SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, |
1920 | {0x12, 0x90a60140}, |
1921 | {0x14, 0x90170150}, |