Contents of /trunk/kernel-alx/patches-5.4/0296-5.4.197-all-fixes.patch
Parent Directory | Revision Log
Revision 3635 -
(show annotations)
(download)
Mon Oct 24 12:34:12 2022 UTC (20 months ago) by niro
File size: 39865 byte(s)
-sync kernel patches
1 | diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst |
2 | index fb56297f70dc8..857be0d44e809 100644 |
3 | --- a/Documentation/process/submitting-patches.rst |
4 | +++ b/Documentation/process/submitting-patches.rst |
5 | @@ -133,7 +133,7 @@ as you intend it to. |
6 | |
7 | The maintainer will thank you if you write your patch description in a |
8 | form which can be easily pulled into Linux's source code management |
9 | -system, ``git``, as a "commit log". See :ref:`explicit_in_reply_to`. |
10 | +system, ``git``, as a "commit log". See :ref:`the_canonical_patch_format`. |
11 | |
12 | Solve only one problem per patch. If your description starts to get |
13 | long, that's a sign that you probably need to split up your patch. |
14 | diff --git a/Makefile b/Makefile |
15 | index c064ed925552d..57e27af9fc0c0 100644 |
16 | --- a/Makefile |
17 | +++ b/Makefile |
18 | @@ -1,7 +1,7 @@ |
19 | # SPDX-License-Identifier: GPL-2.0 |
20 | VERSION = 5 |
21 | PATCHLEVEL = 4 |
22 | -SUBLEVEL = 196 |
23 | +SUBLEVEL = 197 |
24 | EXTRAVERSION = |
25 | NAME = Kleptomaniac Octopus |
26 | |
27 | diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c |
28 | index 5c11ae66b5d8e..9cf8f5417e7f4 100644 |
29 | --- a/arch/x86/pci/xen.c |
30 | +++ b/arch/x86/pci/xen.c |
31 | @@ -442,6 +442,11 @@ void __init xen_msi_init(void) |
32 | |
33 | x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs; |
34 | x86_msi.teardown_msi_irq = xen_teardown_msi_irq; |
35 | + /* |
36 | + * With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely |
37 | + * controlled by the hypervisor. |
38 | + */ |
39 | + pci_msi_ignore_mask = 1; |
40 | } |
41 | #endif |
42 | |
43 | diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c |
44 | index 887ec21aee494..1ffcea7b03558 100644 |
45 | --- a/crypto/ecrdsa.c |
46 | +++ b/crypto/ecrdsa.c |
47 | @@ -112,15 +112,15 @@ static int ecrdsa_verify(struct akcipher_request *req) |
48 | |
49 | /* Step 1: verify that 0 < r < q, 0 < s < q */ |
50 | if (vli_is_zero(r, ndigits) || |
51 | - vli_cmp(r, ctx->curve->n, ndigits) == 1 || |
52 | + vli_cmp(r, ctx->curve->n, ndigits) >= 0 || |
53 | vli_is_zero(s, ndigits) || |
54 | - vli_cmp(s, ctx->curve->n, ndigits) == 1) |
55 | + vli_cmp(s, ctx->curve->n, ndigits) >= 0) |
56 | return -EKEYREJECTED; |
57 | |
58 | /* Step 2: calculate hash (h) of the message (passed as input) */ |
59 | /* Step 3: calculate e = h \mod q */ |
60 | vli_from_le64(e, digest, ndigits); |
61 | - if (vli_cmp(e, ctx->curve->n, ndigits) == 1) |
62 | + if (vli_cmp(e, ctx->curve->n, ndigits) >= 0) |
63 | vli_sub(e, e, ctx->curve->n, ndigits); |
64 | if (vli_is_zero(e, ndigits)) |
65 | e[0] = 1; |
66 | @@ -136,7 +136,7 @@ static int ecrdsa_verify(struct akcipher_request *req) |
67 | /* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */ |
68 | ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key, |
69 | ctx->curve); |
70 | - if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1) |
71 | + if (vli_cmp(cc.x, ctx->curve->n, ndigits) >= 0) |
72 | vli_sub(cc.x, cc.x, ctx->curve->n, ndigits); |
73 | |
74 | /* Step 7: if R == r signature is valid */ |
75 | diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c |
76 | index 76c668c05fa03..cc0b98affd64d 100644 |
77 | --- a/drivers/acpi/sysfs.c |
78 | +++ b/drivers/acpi/sysfs.c |
79 | @@ -439,18 +439,29 @@ static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj, |
80 | { |
81 | struct acpi_data_attr *data_attr; |
82 | void __iomem *base; |
83 | - ssize_t rc; |
84 | + ssize_t size; |
85 | |
86 | data_attr = container_of(bin_attr, struct acpi_data_attr, attr); |
87 | + size = data_attr->attr.size; |
88 | + |
89 | + if (offset < 0) |
90 | + return -EINVAL; |
91 | + |
92 | + if (offset >= size) |
93 | + return 0; |
94 | |
95 | - base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size); |
96 | + if (count > size - offset) |
97 | + count = size - offset; |
98 | + |
99 | + base = acpi_os_map_iomem(data_attr->addr, size); |
100 | if (!base) |
101 | return -ENOMEM; |
102 | - rc = memory_read_from_buffer(buf, count, &offset, base, |
103 | - data_attr->attr.size); |
104 | - acpi_os_unmap_memory(base, data_attr->attr.size); |
105 | |
106 | - return rc; |
107 | + memcpy_fromio(buf, base + offset, count); |
108 | + |
109 | + acpi_os_unmap_iomem(base, size); |
110 | + |
111 | + return count; |
112 | } |
113 | |
114 | static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr) |
115 | diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c |
116 | index 1385c2c0acbe1..89635bb117d28 100644 |
117 | --- a/drivers/char/tpm/tpm2-cmd.c |
118 | +++ b/drivers/char/tpm/tpm2-cmd.c |
119 | @@ -706,7 +706,16 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value, |
120 | if (!rc) { |
121 | out = (struct tpm2_get_cap_out *) |
122 | &buf.data[TPM_HEADER_SIZE]; |
123 | - *value = be32_to_cpu(out->value); |
124 | + /* |
125 | + * To prevent failing boot up of some systems, Infineon TPM2.0 |
126 | + * returns SUCCESS on TPM2_Startup in field upgrade mode. Also |
127 | + * the TPM2_Getcapability command returns a zero length list |
128 | + * in field upgrade mode. |
129 | + */ |
130 | + if (be32_to_cpu(out->property_cnt) > 0) |
131 | + *value = be32_to_cpu(out->value); |
132 | + else |
133 | + rc = -ENODATA; |
134 | } |
135 | tpm_buf_destroy(&buf); |
136 | return rc; |
137 | diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c |
138 | index 64428dbed9928..4236607f69e43 100644 |
139 | --- a/drivers/char/tpm/tpm_ibmvtpm.c |
140 | +++ b/drivers/char/tpm/tpm_ibmvtpm.c |
141 | @@ -685,6 +685,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, |
142 | if (!wait_event_timeout(ibmvtpm->crq_queue.wq, |
143 | ibmvtpm->rtce_buf != NULL, |
144 | HZ)) { |
145 | + rc = -ENODEV; |
146 | dev_err(dev, "CRQ response timed out\n"); |
147 | goto init_irq_cleanup; |
148 | } |
149 | diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c |
150 | index e43a76b027a2a..d59455b2d401f 100644 |
151 | --- a/drivers/gpu/drm/i915/intel_pm.c |
152 | +++ b/drivers/gpu/drm/i915/intel_pm.c |
153 | @@ -2822,7 +2822,7 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state) |
154 | } |
155 | |
156 | static void intel_read_wm_latency(struct drm_i915_private *dev_priv, |
157 | - u16 wm[8]) |
158 | + u16 wm[]) |
159 | { |
160 | struct intel_uncore *uncore = &dev_priv->uncore; |
161 | |
162 | diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c |
163 | index 37270b8f4e58b..653f436aa4593 100644 |
164 | --- a/drivers/hid/hid-multitouch.c |
165 | +++ b/drivers/hid/hid-multitouch.c |
166 | @@ -2158,6 +2158,9 @@ static const struct hid_device_id mt_devices[] = { |
167 | { .driver_data = MT_CLS_GOOGLE, |
168 | HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE, |
169 | USB_DEVICE_ID_GOOGLE_TOUCH_ROSE) }, |
170 | + { .driver_data = MT_CLS_GOOGLE, |
171 | + HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_GOOGLE, |
172 | + USB_DEVICE_ID_GOOGLE_WHISKERS) }, |
173 | |
174 | /* Generic MT device */ |
175 | { HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) }, |
176 | diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c |
177 | index 2f95e25a10f7c..53325419ec13d 100644 |
178 | --- a/drivers/i2c/busses/i2c-ismt.c |
179 | +++ b/drivers/i2c/busses/i2c-ismt.c |
180 | @@ -81,6 +81,7 @@ |
181 | |
182 | #define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */ |
183 | #define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */ |
184 | +#define ISMT_LOG_ENTRIES 3 /* number of interrupt cause log entries */ |
185 | |
186 | /* Hardware Descriptor Constants - Control Field */ |
187 | #define ISMT_DESC_CWRL 0x01 /* Command/Write Length */ |
188 | @@ -174,6 +175,8 @@ struct ismt_priv { |
189 | u8 head; /* ring buffer head pointer */ |
190 | struct completion cmp; /* interrupt completion */ |
191 | u8 buffer[I2C_SMBUS_BLOCK_MAX + 16]; /* temp R/W data buffer */ |
192 | + dma_addr_t log_dma; |
193 | + u32 *log; |
194 | }; |
195 | |
196 | /** |
197 | @@ -408,6 +411,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, |
198 | memset(desc, 0, sizeof(struct ismt_desc)); |
199 | desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write); |
200 | |
201 | + /* Always clear the log entries */ |
202 | + memset(priv->log, 0, ISMT_LOG_ENTRIES * sizeof(u32)); |
203 | + |
204 | /* Initialize common control bits */ |
205 | if (likely(pci_dev_msi_enabled(priv->pci_dev))) |
206 | desc->control = ISMT_DESC_INT | ISMT_DESC_FAIR; |
207 | @@ -697,6 +703,8 @@ static void ismt_hw_init(struct ismt_priv *priv) |
208 | /* initialize the Master Descriptor Base Address (MDBA) */ |
209 | writeq(priv->io_rng_dma, priv->smba + ISMT_MSTR_MDBA); |
210 | |
211 | + writeq(priv->log_dma, priv->smba + ISMT_GR_SMTICL); |
212 | + |
213 | /* initialize the Master Control Register (MCTRL) */ |
214 | writel(ISMT_MCTRL_MEIE, priv->smba + ISMT_MSTR_MCTRL); |
215 | |
216 | @@ -784,6 +792,12 @@ static int ismt_dev_init(struct ismt_priv *priv) |
217 | priv->head = 0; |
218 | init_completion(&priv->cmp); |
219 | |
220 | + priv->log = dmam_alloc_coherent(&priv->pci_dev->dev, |
221 | + ISMT_LOG_ENTRIES * sizeof(u32), |
222 | + &priv->log_dma, GFP_KERNEL); |
223 | + if (!priv->log) |
224 | + return -ENOMEM; |
225 | + |
226 | return 0; |
227 | } |
228 | |
229 | diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c |
230 | index 19f8eec387172..107aeb8b54da4 100644 |
231 | --- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c |
232 | +++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c |
233 | @@ -208,6 +208,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev, |
234 | i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info; |
235 | i2c->adap.dev.parent = dev; |
236 | i2c->adap.dev.of_node = pdev->dev.of_node; |
237 | + i2c->adap.dev.fwnode = dev->fwnode; |
238 | snprintf(i2c->adap.name, sizeof(i2c->adap.name), |
239 | "Cavium ThunderX i2c adapter at %s", dev_name(dev)); |
240 | i2c_set_adapdata(&i2c->adap, i2c); |
241 | diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c |
242 | index bfb945fc33a17..3c9cdb87770f2 100644 |
243 | --- a/drivers/input/touchscreen/goodix.c |
244 | +++ b/drivers/input/touchscreen/goodix.c |
245 | @@ -335,7 +335,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) |
246 | * The Goodix panel will send spurious interrupts after a |
247 | * 'finger up' event, which will always cause a timeout. |
248 | */ |
249 | - return 0; |
250 | + return -ENOMSG; |
251 | } |
252 | |
253 | static void goodix_ts_report_touch_8b(struct goodix_ts_data *ts, u8 *coor_data) |
254 | diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c |
255 | index 3ed8ca47bc6e6..fa674e9b6f23d 100644 |
256 | --- a/drivers/md/dm-crypt.c |
257 | +++ b/drivers/md/dm-crypt.c |
258 | @@ -2817,6 +2817,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) |
259 | return DM_MAPIO_SUBMITTED; |
260 | } |
261 | |
262 | +static char hex2asc(unsigned char c) |
263 | +{ |
264 | + return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27); |
265 | +} |
266 | + |
267 | static void crypt_status(struct dm_target *ti, status_type_t type, |
268 | unsigned status_flags, char *result, unsigned maxlen) |
269 | { |
270 | @@ -2835,9 +2840,12 @@ static void crypt_status(struct dm_target *ti, status_type_t type, |
271 | if (cc->key_size > 0) { |
272 | if (cc->key_string) |
273 | DMEMIT(":%u:%s", cc->key_size, cc->key_string); |
274 | - else |
275 | - for (i = 0; i < cc->key_size; i++) |
276 | - DMEMIT("%02x", cc->key[i]); |
277 | + else { |
278 | + for (i = 0; i < cc->key_size; i++) { |
279 | + DMEMIT("%c%c", hex2asc(cc->key[i] >> 4), |
280 | + hex2asc(cc->key[i] & 0xf)); |
281 | + } |
282 | + } |
283 | } else |
284 | DMEMIT("-"); |
285 | |
286 | diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c |
287 | index 28a9eeae83b66..acbda91e7643c 100644 |
288 | --- a/drivers/md/dm-integrity.c |
289 | +++ b/drivers/md/dm-integrity.c |
290 | @@ -4149,8 +4149,6 @@ try_smaller_buffer: |
291 | } |
292 | |
293 | if (should_write_sb) { |
294 | - int r; |
295 | - |
296 | init_journal(ic, 0, ic->journal_sections, 0); |
297 | r = dm_integrity_failed(ic); |
298 | if (unlikely(r)) { |
299 | diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c |
300 | index 71417048256af..ce6d3bce1b7b0 100644 |
301 | --- a/drivers/md/dm-stats.c |
302 | +++ b/drivers/md/dm-stats.c |
303 | @@ -224,6 +224,7 @@ void dm_stats_cleanup(struct dm_stats *stats) |
304 | atomic_read(&shared->in_flight[READ]), |
305 | atomic_read(&shared->in_flight[WRITE])); |
306 | } |
307 | + cond_resched(); |
308 | } |
309 | dm_stat_free(&s->rcu_head); |
310 | } |
311 | @@ -313,6 +314,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, |
312 | for (ni = 0; ni < n_entries; ni++) { |
313 | atomic_set(&s->stat_shared[ni].in_flight[READ], 0); |
314 | atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0); |
315 | + cond_resched(); |
316 | } |
317 | |
318 | if (s->n_histogram_entries) { |
319 | @@ -325,6 +327,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, |
320 | for (ni = 0; ni < n_entries; ni++) { |
321 | s->stat_shared[ni].tmp.histogram = hi; |
322 | hi += s->n_histogram_entries + 1; |
323 | + cond_resched(); |
324 | } |
325 | } |
326 | |
327 | @@ -345,6 +348,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, |
328 | for (ni = 0; ni < n_entries; ni++) { |
329 | p[ni].histogram = hi; |
330 | hi += s->n_histogram_entries + 1; |
331 | + cond_resched(); |
332 | } |
333 | } |
334 | } |
335 | @@ -474,6 +478,7 @@ static int dm_stats_list(struct dm_stats *stats, const char *program, |
336 | } |
337 | DMEMIT("\n"); |
338 | } |
339 | + cond_resched(); |
340 | } |
341 | mutex_unlock(&stats->mutex); |
342 | |
343 | @@ -750,6 +755,7 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end, |
344 | local_irq_enable(); |
345 | } |
346 | } |
347 | + cond_resched(); |
348 | } |
349 | } |
350 | |
351 | @@ -865,6 +871,8 @@ static int dm_stats_print(struct dm_stats *stats, int id, |
352 | |
353 | if (unlikely(sz + 1 >= maxlen)) |
354 | goto buffer_overflow; |
355 | + |
356 | + cond_resched(); |
357 | } |
358 | |
359 | if (clear) |
360 | diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c |
361 | index 711f101447e3e..9dcdf34b7e32d 100644 |
362 | --- a/drivers/md/dm-verity-target.c |
363 | +++ b/drivers/md/dm-verity-target.c |
364 | @@ -1217,6 +1217,7 @@ bad: |
365 | |
366 | static struct target_type verity_target = { |
367 | .name = "verity", |
368 | + .features = DM_TARGET_IMMUTABLE, |
369 | .version = {1, 5, 0}, |
370 | .module = THIS_MODULE, |
371 | .ctr = verity_ctr, |
372 | diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c |
373 | index 08a7f97750f7a..c40327ad6ad53 100644 |
374 | --- a/drivers/md/raid5.c |
375 | +++ b/drivers/md/raid5.c |
376 | @@ -609,17 +609,17 @@ int raid5_calc_degraded(struct r5conf *conf) |
377 | return degraded; |
378 | } |
379 | |
380 | -static int has_failed(struct r5conf *conf) |
381 | +static bool has_failed(struct r5conf *conf) |
382 | { |
383 | - int degraded; |
384 | + int degraded = conf->mddev->degraded; |
385 | |
386 | - if (conf->mddev->reshape_position == MaxSector) |
387 | - return conf->mddev->degraded > conf->max_degraded; |
388 | + if (test_bit(MD_BROKEN, &conf->mddev->flags)) |
389 | + return true; |
390 | |
391 | - degraded = raid5_calc_degraded(conf); |
392 | - if (degraded > conf->max_degraded) |
393 | - return 1; |
394 | - return 0; |
395 | + if (conf->mddev->reshape_position != MaxSector) |
396 | + degraded = raid5_calc_degraded(conf); |
397 | + |
398 | + return degraded > conf->max_degraded; |
399 | } |
400 | |
401 | struct stripe_head * |
402 | @@ -2679,34 +2679,31 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) |
403 | unsigned long flags; |
404 | pr_debug("raid456: error called\n"); |
405 | |
406 | + pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n", |
407 | + mdname(mddev), bdevname(rdev->bdev, b)); |
408 | + |
409 | spin_lock_irqsave(&conf->device_lock, flags); |
410 | + set_bit(Faulty, &rdev->flags); |
411 | + clear_bit(In_sync, &rdev->flags); |
412 | + mddev->degraded = raid5_calc_degraded(conf); |
413 | |
414 | - if (test_bit(In_sync, &rdev->flags) && |
415 | - mddev->degraded == conf->max_degraded) { |
416 | - /* |
417 | - * Don't allow to achieve failed state |
418 | - * Don't try to recover this device |
419 | - */ |
420 | + if (has_failed(conf)) { |
421 | + set_bit(MD_BROKEN, &conf->mddev->flags); |
422 | conf->recovery_disabled = mddev->recovery_disabled; |
423 | - spin_unlock_irqrestore(&conf->device_lock, flags); |
424 | - return; |
425 | + |
426 | + pr_crit("md/raid:%s: Cannot continue operation (%d/%d failed).\n", |
427 | + mdname(mddev), mddev->degraded, conf->raid_disks); |
428 | + } else { |
429 | + pr_crit("md/raid:%s: Operation continuing on %d devices.\n", |
430 | + mdname(mddev), conf->raid_disks - mddev->degraded); |
431 | } |
432 | |
433 | - set_bit(Faulty, &rdev->flags); |
434 | - clear_bit(In_sync, &rdev->flags); |
435 | - mddev->degraded = raid5_calc_degraded(conf); |
436 | spin_unlock_irqrestore(&conf->device_lock, flags); |
437 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
438 | |
439 | set_bit(Blocked, &rdev->flags); |
440 | set_mask_bits(&mddev->sb_flags, 0, |
441 | BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); |
442 | - pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n" |
443 | - "md/raid:%s: Operation continuing on %d devices.\n", |
444 | - mdname(mddev), |
445 | - bdevname(rdev->bdev, b), |
446 | - mdname(mddev), |
447 | - conf->raid_disks - mddev->degraded); |
448 | r5c_update_on_rdev_error(mddev, rdev); |
449 | } |
450 | |
451 | diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c |
452 | index 8d6b09623d884..6fba00e03c67b 100644 |
453 | --- a/drivers/media/platform/vim2m.c |
454 | +++ b/drivers/media/platform/vim2m.c |
455 | @@ -1333,12 +1333,6 @@ static int vim2m_probe(struct platform_device *pdev) |
456 | vfd->lock = &dev->dev_mutex; |
457 | vfd->v4l2_dev = &dev->v4l2_dev; |
458 | |
459 | - ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); |
460 | - if (ret) { |
461 | - v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); |
462 | - goto error_v4l2; |
463 | - } |
464 | - |
465 | video_set_drvdata(vfd, dev); |
466 | v4l2_info(&dev->v4l2_dev, |
467 | "Device registered as /dev/video%d\n", vfd->num); |
468 | @@ -1361,12 +1355,20 @@ static int vim2m_probe(struct platform_device *pdev) |
469 | media_device_init(&dev->mdev); |
470 | dev->mdev.ops = &m2m_media_ops; |
471 | dev->v4l2_dev.mdev = &dev->mdev; |
472 | +#endif |
473 | + |
474 | + ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); |
475 | + if (ret) { |
476 | + v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); |
477 | + goto error_m2m; |
478 | + } |
479 | |
480 | +#ifdef CONFIG_MEDIA_CONTROLLER |
481 | ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd, |
482 | MEDIA_ENT_F_PROC_VIDEO_SCALER); |
483 | if (ret) { |
484 | v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n"); |
485 | - goto error_dev; |
486 | + goto error_v4l2; |
487 | } |
488 | |
489 | ret = media_device_register(&dev->mdev); |
490 | @@ -1381,11 +1383,13 @@ static int vim2m_probe(struct platform_device *pdev) |
491 | error_m2m_mc: |
492 | v4l2_m2m_unregister_media_controller(dev->m2m_dev); |
493 | #endif |
494 | -error_dev: |
495 | +error_v4l2: |
496 | video_unregister_device(&dev->vfd); |
497 | /* vim2m_device_release called by video_unregister_device to release various objects */ |
498 | return ret; |
499 | -error_v4l2: |
500 | +error_m2m: |
501 | + v4l2_m2m_release(dev->m2m_dev); |
502 | +error_dev: |
503 | v4l2_device_unregister(&dev->v4l2_dev); |
504 | error_free: |
505 | kfree(dev); |
506 | diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c |
507 | index 2c06cdcd3e751..d7478d332820e 100644 |
508 | --- a/drivers/net/ethernet/faraday/ftgmac100.c |
509 | +++ b/drivers/net/ethernet/faraday/ftgmac100.c |
510 | @@ -1880,6 +1880,11 @@ static int ftgmac100_probe(struct platform_device *pdev) |
511 | /* AST2400 doesn't have working HW checksum generation */ |
512 | if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac"))) |
513 | netdev->hw_features &= ~NETIF_F_HW_CSUM; |
514 | + |
515 | + /* AST2600 tx checksum with NCSI is broken */ |
516 | + if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac")) |
517 | + netdev->hw_features &= ~NETIF_F_HW_CSUM; |
518 | + |
519 | if (np && of_get_property(np, "no-hw-checksum", NULL)) |
520 | netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM); |
521 | netdev->features |= netdev->hw_features; |
522 | diff --git a/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c b/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c |
523 | index 2801ca7062732..68a5b627fb9b2 100644 |
524 | --- a/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c |
525 | +++ b/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c |
526 | @@ -204,7 +204,7 @@ static const struct sunxi_desc_pin suniv_f1c100s_pins[] = { |
527 | SUNXI_FUNCTION(0x0, "gpio_in"), |
528 | SUNXI_FUNCTION(0x1, "gpio_out"), |
529 | SUNXI_FUNCTION(0x2, "lcd"), /* D20 */ |
530 | - SUNXI_FUNCTION(0x3, "lvds1"), /* RX */ |
531 | + SUNXI_FUNCTION(0x3, "uart2"), /* RX */ |
532 | SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), |
533 | SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15), |
534 | SUNXI_FUNCTION(0x0, "gpio_in"), |
535 | diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c |
536 | index d8d44fd9a92f4..ea2fd3a73c3a8 100644 |
537 | --- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c |
538 | +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c |
539 | @@ -1351,9 +1351,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a, |
540 | |
541 | sec_len = *(pos++); len-= 1; |
542 | |
543 | - if (sec_len>0 && sec_len<=len) { |
544 | + if (sec_len > 0 && |
545 | + sec_len <= len && |
546 | + sec_len <= 32) { |
547 | ssid[ssid_index].SsidLength = sec_len; |
548 | - memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength); |
549 | + memcpy(ssid[ssid_index].Ssid, pos, sec_len); |
550 | /* DBG_871X("%s COMBO_SCAN with specific ssid:%s, %d\n", __func__ */ |
551 | /* , ssid[ssid_index].Ssid, ssid[ssid_index].SsidLength); */ |
552 | ssid_index++; |
553 | diff --git a/fs/exec.c b/fs/exec.c |
554 | index 098de820abcc9..a7d78241082a2 100644 |
555 | --- a/fs/exec.c |
556 | +++ b/fs/exec.c |
557 | @@ -454,6 +454,9 @@ static int prepare_arg_pages(struct linux_binprm *bprm, |
558 | unsigned long limit, ptr_size; |
559 | |
560 | bprm->argc = count(argv, MAX_ARG_STRINGS); |
561 | + if (bprm->argc == 0) |
562 | + pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n", |
563 | + current->comm, bprm->filename); |
564 | if (bprm->argc < 0) |
565 | return bprm->argc; |
566 | |
567 | @@ -482,8 +485,14 @@ static int prepare_arg_pages(struct linux_binprm *bprm, |
568 | * the stack. They aren't stored until much later when we can't |
569 | * signal to the parent that the child has run out of stack space. |
570 | * Instead, calculate it here so it's possible to fail gracefully. |
571 | + * |
572 | + * In the case of argc = 0, make sure there is space for adding a |
573 | + * empty string (which will bump argc to 1), to ensure confused |
574 | + * userspace programs don't start processing from argv[1], thinking |
575 | + * argc can never be 0, to keep them from walking envp by accident. |
576 | + * See do_execveat_common(). |
577 | */ |
578 | - ptr_size = (bprm->argc + bprm->envc) * sizeof(void *); |
579 | + ptr_size = (max(bprm->argc, 1) + bprm->envc) * sizeof(void *); |
580 | if (limit <= ptr_size) |
581 | return -E2BIG; |
582 | limit -= ptr_size; |
583 | @@ -1848,6 +1857,20 @@ static int __do_execve_file(int fd, struct filename *filename, |
584 | if (retval < 0) |
585 | goto out; |
586 | |
587 | + /* |
588 | + * When argv is empty, add an empty string ("") as argv[0] to |
589 | + * ensure confused userspace programs that start processing |
590 | + * from argv[1] won't end up walking envp. See also |
591 | + * bprm_stack_limits(). |
592 | + */ |
593 | + if (bprm->argc == 0) { |
594 | + const char *argv[] = { "", NULL }; |
595 | + retval = copy_strings_kernel(1, argv, bprm); |
596 | + if (retval < 0) |
597 | + goto out; |
598 | + bprm->argc = 1; |
599 | + } |
600 | + |
601 | retval = exec_binprm(bprm); |
602 | if (retval < 0) |
603 | goto out; |
604 | diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h |
605 | index 9e717796e57b7..a4dc182e8989b 100644 |
606 | --- a/fs/nfs/internal.h |
607 | +++ b/fs/nfs/internal.h |
608 | @@ -775,6 +775,7 @@ static inline bool nfs_error_is_fatal_on_server(int err) |
609 | case 0: |
610 | case -ERESTARTSYS: |
611 | case -EINTR: |
612 | + case -ENOMEM: |
613 | return false; |
614 | } |
615 | return nfs_error_is_fatal(err); |
616 | diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c |
617 | index 62eb78ac7437f..228c2b0753dcf 100644 |
618 | --- a/fs/nfsd/nfs4state.c |
619 | +++ b/fs/nfsd/nfs4state.c |
620 | @@ -6894,16 +6894,12 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp, |
621 | if (sop->so_is_open_owner || !same_owner_str(sop, owner)) |
622 | continue; |
623 | |
624 | - /* see if there are still any locks associated with it */ |
625 | - lo = lockowner(sop); |
626 | - list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) { |
627 | - if (check_for_locks(stp->st_stid.sc_file, lo)) { |
628 | - status = nfserr_locks_held; |
629 | - spin_unlock(&clp->cl_lock); |
630 | - return status; |
631 | - } |
632 | + if (atomic_read(&sop->so_count) != 1) { |
633 | + spin_unlock(&clp->cl_lock); |
634 | + return nfserr_locks_held; |
635 | } |
636 | |
637 | + lo = lockowner(sop); |
638 | nfs4_get_stateowner(sop); |
639 | break; |
640 | } |
641 | diff --git a/include/linux/security.h b/include/linux/security.h |
642 | index 3f6b8195ae9eb..aa5c7141c8d17 100644 |
643 | --- a/include/linux/security.h |
644 | +++ b/include/linux/security.h |
645 | @@ -118,10 +118,12 @@ enum lockdown_reason { |
646 | LOCKDOWN_MMIOTRACE, |
647 | LOCKDOWN_DEBUGFS, |
648 | LOCKDOWN_XMON_WR, |
649 | + LOCKDOWN_DBG_WRITE_KERNEL, |
650 | LOCKDOWN_INTEGRITY_MAX, |
651 | LOCKDOWN_KCORE, |
652 | LOCKDOWN_KPROBES, |
653 | LOCKDOWN_BPF_READ, |
654 | + LOCKDOWN_DBG_READ_KERNEL, |
655 | LOCKDOWN_PERF, |
656 | LOCKDOWN_TRACEFS, |
657 | LOCKDOWN_XMON_RW, |
658 | diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h |
659 | index a1869a6789448..a186c245a6f41 100644 |
660 | --- a/include/net/inet_hashtables.h |
661 | +++ b/include/net/inet_hashtables.h |
662 | @@ -420,7 +420,7 @@ static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr) |
663 | } |
664 | |
665 | int __inet_hash_connect(struct inet_timewait_death_row *death_row, |
666 | - struct sock *sk, u32 port_offset, |
667 | + struct sock *sk, u64 port_offset, |
668 | int (*check_established)(struct inet_timewait_death_row *, |
669 | struct sock *, __u16, |
670 | struct inet_timewait_sock **)); |
671 | diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h |
672 | index 09f2efea0b970..5805fe4947f3c 100644 |
673 | --- a/include/net/netfilter/nf_conntrack_core.h |
674 | +++ b/include/net/netfilter/nf_conntrack_core.h |
675 | @@ -59,8 +59,13 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb) |
676 | int ret = NF_ACCEPT; |
677 | |
678 | if (ct) { |
679 | - if (!nf_ct_is_confirmed(ct)) |
680 | + if (!nf_ct_is_confirmed(ct)) { |
681 | ret = __nf_conntrack_confirm(skb); |
682 | + |
683 | + if (ret == NF_ACCEPT) |
684 | + ct = (struct nf_conn *)skb_nfct(skb); |
685 | + } |
686 | + |
687 | if (likely(ret == NF_ACCEPT)) |
688 | nf_ct_deliver_cached_events(ct); |
689 | } |
690 | diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h |
691 | index d7d2495f83c27..dac91aa38c5af 100644 |
692 | --- a/include/net/secure_seq.h |
693 | +++ b/include/net/secure_seq.h |
694 | @@ -4,8 +4,8 @@ |
695 | |
696 | #include <linux/types.h> |
697 | |
698 | -u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); |
699 | -u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, |
700 | +u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); |
701 | +u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, |
702 | __be16 dport); |
703 | u32 secure_tcp_seq(__be32 saddr, __be32 daddr, |
704 | __be16 sport, __be16 dport); |
705 | diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c |
706 | index 097ab02989f92..565987557ad89 100644 |
707 | --- a/kernel/debug/debug_core.c |
708 | +++ b/kernel/debug/debug_core.c |
709 | @@ -56,6 +56,7 @@ |
710 | #include <linux/vmacache.h> |
711 | #include <linux/rcupdate.h> |
712 | #include <linux/irq.h> |
713 | +#include <linux/security.h> |
714 | |
715 | #include <asm/cacheflush.h> |
716 | #include <asm/byteorder.h> |
717 | @@ -685,6 +686,29 @@ cpu_master_loop: |
718 | continue; |
719 | kgdb_connected = 0; |
720 | } else { |
721 | + /* |
722 | + * This is a brutal way to interfere with the debugger |
723 | + * and prevent gdb being used to poke at kernel memory. |
724 | + * This could cause trouble if lockdown is applied when |
725 | + * there is already an active gdb session. For now the |
726 | + * answer is simply "don't do that". Typically lockdown |
727 | + * *will* be applied before the debug core gets started |
728 | + * so only developers using kgdb for fairly advanced |
729 | + * early kernel debug can be biten by this. Hopefully |
730 | + * they are sophisticated enough to take care of |
731 | + * themselves, especially with help from the lockdown |
732 | + * message printed on the console! |
733 | + */ |
734 | + if (security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL)) { |
735 | + if (IS_ENABLED(CONFIG_KGDB_KDB)) { |
736 | + /* Switch back to kdb if possible... */ |
737 | + dbg_kdb_mode = 1; |
738 | + continue; |
739 | + } else { |
740 | + /* ... otherwise just bail */ |
741 | + break; |
742 | + } |
743 | + } |
744 | error = gdb_serial_stub(ks); |
745 | } |
746 | |
747 | diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c |
748 | index 4567fe998c306..7c96bf9a6c2c2 100644 |
749 | --- a/kernel/debug/kdb/kdb_main.c |
750 | +++ b/kernel/debug/kdb/kdb_main.c |
751 | @@ -45,6 +45,7 @@ |
752 | #include <linux/proc_fs.h> |
753 | #include <linux/uaccess.h> |
754 | #include <linux/slab.h> |
755 | +#include <linux/security.h> |
756 | #include "kdb_private.h" |
757 | |
758 | #undef MODULE_PARAM_PREFIX |
759 | @@ -198,10 +199,62 @@ struct task_struct *kdb_curr_task(int cpu) |
760 | } |
761 | |
762 | /* |
763 | - * Check whether the flags of the current command and the permissions |
764 | - * of the kdb console has allow a command to be run. |
765 | + * Update the permissions flags (kdb_cmd_enabled) to match the |
766 | + * current lockdown state. |
767 | + * |
768 | + * Within this function the calls to security_locked_down() are "lazy". We |
769 | + * avoid calling them if the current value of kdb_cmd_enabled already excludes |
770 | + * flags that might be subject to lockdown. Additionally we deliberately check |
771 | + * the lockdown flags independently (even though read lockdown implies write |
772 | + * lockdown) since that results in both simpler code and clearer messages to |
773 | + * the user on first-time debugger entry. |
774 | + * |
775 | + * The permission masks during a read+write lockdown permits the following |
776 | + * flags: INSPECT, SIGNAL, REBOOT (and ALWAYS_SAFE). |
777 | + * |
778 | + * The INSPECT commands are not blocked during lockdown because they are |
779 | + * not arbitrary memory reads. INSPECT covers the backtrace family (sometimes |
780 | + * forcing them to have no arguments) and lsmod. These commands do expose |
781 | + * some kernel state but do not allow the developer seated at the console to |
782 | + * choose what state is reported. SIGNAL and REBOOT should not be controversial, |
783 | + * given these are allowed for root during lockdown already. |
784 | + */ |
785 | +static void kdb_check_for_lockdown(void) |
786 | +{ |
787 | + const int write_flags = KDB_ENABLE_MEM_WRITE | |
788 | + KDB_ENABLE_REG_WRITE | |
789 | + KDB_ENABLE_FLOW_CTRL; |
790 | + const int read_flags = KDB_ENABLE_MEM_READ | |
791 | + KDB_ENABLE_REG_READ; |
792 | + |
793 | + bool need_to_lockdown_write = false; |
794 | + bool need_to_lockdown_read = false; |
795 | + |
796 | + if (kdb_cmd_enabled & (KDB_ENABLE_ALL | write_flags)) |
797 | + need_to_lockdown_write = |
798 | + security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL); |
799 | + |
800 | + if (kdb_cmd_enabled & (KDB_ENABLE_ALL | read_flags)) |
801 | + need_to_lockdown_read = |
802 | + security_locked_down(LOCKDOWN_DBG_READ_KERNEL); |
803 | + |
804 | + /* De-compose KDB_ENABLE_ALL if required */ |
805 | + if (need_to_lockdown_write || need_to_lockdown_read) |
806 | + if (kdb_cmd_enabled & KDB_ENABLE_ALL) |
807 | + kdb_cmd_enabled = KDB_ENABLE_MASK & ~KDB_ENABLE_ALL; |
808 | + |
809 | + if (need_to_lockdown_write) |
810 | + kdb_cmd_enabled &= ~write_flags; |
811 | + |
812 | + if (need_to_lockdown_read) |
813 | + kdb_cmd_enabled &= ~read_flags; |
814 | +} |
815 | + |
816 | +/* |
817 | + * Check whether the flags of the current command, the permissions of the kdb |
818 | + * console and the lockdown state allow a command to be run. |
819 | */ |
820 | -static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions, |
821 | +static bool kdb_check_flags(kdb_cmdflags_t flags, int permissions, |
822 | bool no_args) |
823 | { |
824 | /* permissions comes from userspace so needs massaging slightly */ |
825 | @@ -1188,6 +1241,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs, |
826 | kdb_curr_task(raw_smp_processor_id()); |
827 | |
828 | KDB_DEBUG_STATE("kdb_local 1", reason); |
829 | + |
830 | + kdb_check_for_lockdown(); |
831 | + |
832 | kdb_go_count = 0; |
833 | if (reason == KDB_REASON_DEBUG) { |
834 | /* special case below */ |
835 | diff --git a/lib/assoc_array.c b/lib/assoc_array.c |
836 | index 6f4bcf5245547..b537a83678e11 100644 |
837 | --- a/lib/assoc_array.c |
838 | +++ b/lib/assoc_array.c |
839 | @@ -1462,6 +1462,7 @@ int assoc_array_gc(struct assoc_array *array, |
840 | struct assoc_array_ptr *cursor, *ptr; |
841 | struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp; |
842 | unsigned long nr_leaves_on_tree; |
843 | + bool retained; |
844 | int keylen, slot, nr_free, next_slot, i; |
845 | |
846 | pr_devel("-->%s()\n", __func__); |
847 | @@ -1538,6 +1539,7 @@ continue_node: |
848 | goto descend; |
849 | } |
850 | |
851 | +retry_compress: |
852 | pr_devel("-- compress node %p --\n", new_n); |
853 | |
854 | /* Count up the number of empty slots in this node and work out the |
855 | @@ -1555,6 +1557,7 @@ continue_node: |
856 | pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch); |
857 | |
858 | /* See what we can fold in */ |
859 | + retained = false; |
860 | next_slot = 0; |
861 | for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) { |
862 | struct assoc_array_shortcut *s; |
863 | @@ -1604,9 +1607,14 @@ continue_node: |
864 | pr_devel("[%d] retain node %lu/%d [nx %d]\n", |
865 | slot, child->nr_leaves_on_branch, nr_free + 1, |
866 | next_slot); |
867 | + retained = true; |
868 | } |
869 | } |
870 | |
871 | + if (retained && new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) { |
872 | + pr_devel("internal nodes remain despite enough space, retrying\n"); |
873 | + goto retry_compress; |
874 | + } |
875 | pr_devel("after: %lu\n", new_n->nr_leaves_on_branch); |
876 | |
877 | nr_leaves_on_tree = new_n->nr_leaves_on_branch; |
878 | diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c |
879 | index 490e5f3ae614a..6b100f02ee431 100644 |
880 | --- a/mm/zsmalloc.c |
881 | +++ b/mm/zsmalloc.c |
882 | @@ -1748,11 +1748,40 @@ static enum fullness_group putback_zspage(struct size_class *class, |
883 | */ |
884 | static void lock_zspage(struct zspage *zspage) |
885 | { |
886 | - struct page *page = get_first_page(zspage); |
887 | + struct page *curr_page, *page; |
888 | |
889 | - do { |
890 | - lock_page(page); |
891 | - } while ((page = get_next_page(page)) != NULL); |
892 | + /* |
893 | + * Pages we haven't locked yet can be migrated off the list while we're |
894 | + * trying to lock them, so we need to be careful and only attempt to |
895 | + * lock each page under migrate_read_lock(). Otherwise, the page we lock |
896 | + * may no longer belong to the zspage. This means that we may wait for |
897 | + * the wrong page to unlock, so we must take a reference to the page |
898 | + * prior to waiting for it to unlock outside migrate_read_lock(). |
899 | + */ |
900 | + while (1) { |
901 | + migrate_read_lock(zspage); |
902 | + page = get_first_page(zspage); |
903 | + if (trylock_page(page)) |
904 | + break; |
905 | + get_page(page); |
906 | + migrate_read_unlock(zspage); |
907 | + wait_on_page_locked(page); |
908 | + put_page(page); |
909 | + } |
910 | + |
911 | + curr_page = page; |
912 | + while ((page = get_next_page(curr_page))) { |
913 | + if (trylock_page(page)) { |
914 | + curr_page = page; |
915 | + } else { |
916 | + get_page(page); |
917 | + migrate_read_unlock(zspage); |
918 | + wait_on_page_locked(page); |
919 | + put_page(page); |
920 | + migrate_read_lock(zspage); |
921 | + } |
922 | + } |
923 | + migrate_read_unlock(zspage); |
924 | } |
925 | |
926 | static int zs_init_fs_context(struct fs_context *fc) |
927 | diff --git a/net/core/filter.c b/net/core/filter.c |
928 | index e16b2b5cda981..b0df4ddbe30c3 100644 |
929 | --- a/net/core/filter.c |
930 | +++ b/net/core/filter.c |
931 | @@ -1668,7 +1668,7 @@ BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset, |
932 | |
933 | if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH))) |
934 | return -EINVAL; |
935 | - if (unlikely(offset > 0xffff)) |
936 | + if (unlikely(offset > INT_MAX)) |
937 | return -EFAULT; |
938 | if (unlikely(bpf_try_make_writable(skb, offset + len))) |
939 | return -EFAULT; |
940 | @@ -1703,7 +1703,7 @@ BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset, |
941 | { |
942 | void *ptr; |
943 | |
944 | - if (unlikely(offset > 0xffff)) |
945 | + if (unlikely(offset > INT_MAX)) |
946 | goto err_clear; |
947 | |
948 | ptr = skb_header_pointer(skb, offset, len, to); |
949 | diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c |
950 | index 2f9796a1a63ff..a1867c65ac632 100644 |
951 | --- a/net/core/secure_seq.c |
952 | +++ b/net/core/secure_seq.c |
953 | @@ -97,7 +97,7 @@ u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr, |
954 | } |
955 | EXPORT_SYMBOL(secure_tcpv6_seq); |
956 | |
957 | -u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, |
958 | +u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, |
959 | __be16 dport) |
960 | { |
961 | const struct { |
962 | @@ -147,7 +147,7 @@ u32 secure_tcp_seq(__be32 saddr, __be32 daddr, |
963 | } |
964 | EXPORT_SYMBOL_GPL(secure_tcp_seq); |
965 | |
966 | -u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) |
967 | +u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) |
968 | { |
969 | net_secret_init(); |
970 | return siphash_4u32((__force u32)saddr, (__force u32)daddr, |
971 | diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c |
972 | index cbbeb0eea0c35..959f4f0c85460 100644 |
973 | --- a/net/ipv4/inet_hashtables.c |
974 | +++ b/net/ipv4/inet_hashtables.c |
975 | @@ -464,7 +464,7 @@ not_unique: |
976 | return -EADDRNOTAVAIL; |
977 | } |
978 | |
979 | -static u32 inet_sk_port_offset(const struct sock *sk) |
980 | +static u64 inet_sk_port_offset(const struct sock *sk) |
981 | { |
982 | const struct inet_sock *inet = inet_sk(sk); |
983 | |
984 | @@ -671,8 +671,19 @@ unlock: |
985 | } |
986 | EXPORT_SYMBOL_GPL(inet_unhash); |
987 | |
988 | +/* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm |
989 | + * Note that we use 32bit integers (vs RFC 'short integers') |
990 | + * because 2^16 is not a multiple of num_ephemeral and this |
991 | + * property might be used by clever attacker. |
992 | + * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, |
993 | + * we use 256 instead to really give more isolation and |
994 | + * privacy, this only consumes 1 KB of kernel memory. |
995 | + */ |
996 | +#define INET_TABLE_PERTURB_SHIFT 8 |
997 | +static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT]; |
998 | + |
999 | int __inet_hash_connect(struct inet_timewait_death_row *death_row, |
1000 | - struct sock *sk, u32 port_offset, |
1001 | + struct sock *sk, u64 port_offset, |
1002 | int (*check_established)(struct inet_timewait_death_row *, |
1003 | struct sock *, __u16, struct inet_timewait_sock **)) |
1004 | { |
1005 | @@ -684,8 +695,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, |
1006 | struct inet_bind_bucket *tb; |
1007 | u32 remaining, offset; |
1008 | int ret, i, low, high; |
1009 | - static u32 hint; |
1010 | int l3mdev; |
1011 | + u32 index; |
1012 | |
1013 | if (port) { |
1014 | head = &hinfo->bhash[inet_bhashfn(net, port, |
1015 | @@ -712,7 +723,12 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, |
1016 | if (likely(remaining > 1)) |
1017 | remaining &= ~1U; |
1018 | |
1019 | - offset = (hint + port_offset) % remaining; |
1020 | + net_get_random_once(table_perturb, sizeof(table_perturb)); |
1021 | + index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT); |
1022 | + |
1023 | + offset = READ_ONCE(table_perturb[index]) + port_offset; |
1024 | + offset %= remaining; |
1025 | + |
1026 | /* In first pass we try ports of @low parity. |
1027 | * inet_csk_get_port() does the opposite choice. |
1028 | */ |
1029 | @@ -766,7 +782,7 @@ next_port: |
1030 | return -EADDRNOTAVAIL; |
1031 | |
1032 | ok: |
1033 | - hint += i + 2; |
1034 | + WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2); |
1035 | |
1036 | /* Head lock still held and bh's disabled */ |
1037 | inet_bind_hash(sk, tb, port); |
1038 | @@ -789,7 +805,7 @@ ok: |
1039 | int inet_hash_connect(struct inet_timewait_death_row *death_row, |
1040 | struct sock *sk) |
1041 | { |
1042 | - u32 port_offset = 0; |
1043 | + u64 port_offset = 0; |
1044 | |
1045 | if (!inet_sk(sk)->inet_num) |
1046 | port_offset = inet_sk_port_offset(sk); |
1047 | diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c |
1048 | index ab12e00f6bfff..528c78bc920e0 100644 |
1049 | --- a/net/ipv6/inet6_hashtables.c |
1050 | +++ b/net/ipv6/inet6_hashtables.c |
1051 | @@ -262,7 +262,7 @@ not_unique: |
1052 | return -EADDRNOTAVAIL; |
1053 | } |
1054 | |
1055 | -static u32 inet6_sk_port_offset(const struct sock *sk) |
1056 | +static u64 inet6_sk_port_offset(const struct sock *sk) |
1057 | { |
1058 | const struct inet_sock *inet = inet_sk(sk); |
1059 | |
1060 | @@ -274,7 +274,7 @@ static u32 inet6_sk_port_offset(const struct sock *sk) |
1061 | int inet6_hash_connect(struct inet_timewait_death_row *death_row, |
1062 | struct sock *sk) |
1063 | { |
1064 | - u32 port_offset = 0; |
1065 | + u64 port_offset = 0; |
1066 | |
1067 | if (!inet_sk(sk)->inet_num) |
1068 | port_offset = inet6_sk_port_offset(sk); |
1069 | diff --git a/net/key/af_key.c b/net/key/af_key.c |
1070 | index f67d3ba72c496..dd064d5eff6ed 100644 |
1071 | --- a/net/key/af_key.c |
1072 | +++ b/net/key/af_key.c |
1073 | @@ -2904,7 +2904,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t) |
1074 | break; |
1075 | if (!aalg->pfkey_supported) |
1076 | continue; |
1077 | - if (aalg_tmpl_set(t, aalg)) |
1078 | + if (aalg_tmpl_set(t, aalg) && aalg->available) |
1079 | sz += sizeof(struct sadb_comb); |
1080 | } |
1081 | return sz + sizeof(struct sadb_prop); |
1082 | @@ -2922,7 +2922,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t) |
1083 | if (!ealg->pfkey_supported) |
1084 | continue; |
1085 | |
1086 | - if (!(ealg_tmpl_set(t, ealg))) |
1087 | + if (!(ealg_tmpl_set(t, ealg) && ealg->available)) |
1088 | continue; |
1089 | |
1090 | for (k = 1; ; k++) { |
1091 | @@ -2933,7 +2933,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t) |
1092 | if (!aalg->pfkey_supported) |
1093 | continue; |
1094 | |
1095 | - if (aalg_tmpl_set(t, aalg)) |
1096 | + if (aalg_tmpl_set(t, aalg) && aalg->available) |
1097 | sz += sizeof(struct sadb_comb); |
1098 | } |
1099 | } |
1100 | diff --git a/net/wireless/core.c b/net/wireless/core.c |
1101 | index 5d151e8f89320..f7228afd81ebd 100644 |
1102 | --- a/net/wireless/core.c |
1103 | +++ b/net/wireless/core.c |
1104 | @@ -5,7 +5,7 @@ |
1105 | * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> |
1106 | * Copyright 2013-2014 Intel Mobile Communications GmbH |
1107 | * Copyright 2015-2017 Intel Deutschland GmbH |
1108 | - * Copyright (C) 2018-2019 Intel Corporation |
1109 | + * Copyright (C) 2018-2021 Intel Corporation |
1110 | */ |
1111 | |
1112 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
1113 | @@ -891,9 +891,6 @@ int wiphy_register(struct wiphy *wiphy) |
1114 | return res; |
1115 | } |
1116 | |
1117 | - /* set up regulatory info */ |
1118 | - wiphy_regulatory_register(wiphy); |
1119 | - |
1120 | list_add_rcu(&rdev->list, &cfg80211_rdev_list); |
1121 | cfg80211_rdev_list_generation++; |
1122 | |
1123 | @@ -904,6 +901,9 @@ int wiphy_register(struct wiphy *wiphy) |
1124 | cfg80211_debugfs_rdev_add(rdev); |
1125 | nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); |
1126 | |
1127 | + /* set up regulatory info */ |
1128 | + wiphy_regulatory_register(wiphy); |
1129 | + |
1130 | if (wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) { |
1131 | struct regulatory_request request; |
1132 | |
1133 | diff --git a/net/wireless/reg.c b/net/wireless/reg.c |
1134 | index 0f3b57a73670b..74caece779633 100644 |
1135 | --- a/net/wireless/reg.c |
1136 | +++ b/net/wireless/reg.c |
1137 | @@ -3790,6 +3790,7 @@ void wiphy_regulatory_register(struct wiphy *wiphy) |
1138 | |
1139 | wiphy_update_regulatory(wiphy, lr->initiator); |
1140 | wiphy_all_share_dfs_chan_state(wiphy); |
1141 | + reg_process_self_managed_hints(); |
1142 | } |
1143 | |
1144 | void wiphy_regulatory_deregister(struct wiphy *wiphy) |
1145 | diff --git a/security/lockdown/lockdown.c b/security/lockdown/lockdown.c |
1146 | index 3f38583bed06f..655a6edb5d7f9 100644 |
1147 | --- a/security/lockdown/lockdown.c |
1148 | +++ b/security/lockdown/lockdown.c |
1149 | @@ -33,10 +33,12 @@ static const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = { |
1150 | [LOCKDOWN_MMIOTRACE] = "unsafe mmio", |
1151 | [LOCKDOWN_DEBUGFS] = "debugfs access", |
1152 | [LOCKDOWN_XMON_WR] = "xmon write access", |
1153 | + [LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM", |
1154 | [LOCKDOWN_INTEGRITY_MAX] = "integrity", |
1155 | [LOCKDOWN_KCORE] = "/proc/kcore access", |
1156 | [LOCKDOWN_KPROBES] = "use of kprobes", |
1157 | [LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM", |
1158 | + [LOCKDOWN_DBG_READ_KERNEL] = "use of kgdb/kdb to read kernel RAM", |
1159 | [LOCKDOWN_PERF] = "unsafe use of perf", |
1160 | [LOCKDOWN_TRACEFS] = "use of tracefs", |
1161 | [LOCKDOWN_XMON_RW] = "xmon read and write access", |