Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0204-5.4.105-all-fixes.patch



Revision 3635
Mon Oct 24 12:34:12 2022 UTC by niro
File size: 45213 byte(s)
-sync kernel patches
1 niro 3635 diff --git a/Makefile b/Makefile
2     index e94dcf2d77f55..e27d031f3241e 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 5
8     PATCHLEVEL = 4
9     -SUBLEVEL = 104
10     +SUBLEVEL = 105
11     EXTRAVERSION =
12     NAME = Kleptomaniac Octopus
13    
14     diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
15     index 8def0e3d690fd..b0b9bb31c3364 100644
16     --- a/drivers/acpi/acpica/acobject.h
17     +++ b/drivers/acpi/acpica/acobject.h
18     @@ -283,6 +283,7 @@ struct acpi_object_addr_handler {
19     acpi_adr_space_handler handler;
20     struct acpi_namespace_node *node; /* Parent device */
21     void *context;
22     + acpi_mutex context_mutex;
23     acpi_adr_space_setup setup;
24     union acpi_operand_object *region_list; /* Regions using this handler */
25     union acpi_operand_object *next;
26     diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
27     index 3ef4e27995f0d..78550f5004c9e 100644
28     --- a/drivers/acpi/acpica/evhandler.c
29     +++ b/drivers/acpi/acpica/evhandler.c
30     @@ -489,6 +489,13 @@ acpi_ev_install_space_handler(struct acpi_namespace_node *node,
31    
32     /* Init handler obj */
33    
34     + status =
35     + acpi_os_create_mutex(&handler_obj->address_space.context_mutex);
36     + if (ACPI_FAILURE(status)) {
37     + acpi_ut_remove_reference(handler_obj);
38     + goto unlock_and_exit;
39     + }
40     +
41     handler_obj->address_space.space_id = (u8)space_id;
42     handler_obj->address_space.handler_flags = flags;
43     handler_obj->address_space.region_list = NULL;
44     diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
45     index 45dc797df05de..50782033012b2 100644
46     --- a/drivers/acpi/acpica/evregion.c
47     +++ b/drivers/acpi/acpica/evregion.c
48     @@ -111,6 +111,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
49     union acpi_operand_object *region_obj2;
50     void *region_context = NULL;
51     struct acpi_connection_info *context;
52     + acpi_mutex context_mutex;
53     + u8 context_locked;
54     acpi_physical_address address;
55    
56     ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
57     @@ -135,6 +137,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
58     }
59    
60     context = handler_desc->address_space.context;
61     + context_mutex = handler_desc->address_space.context_mutex;
62     + context_locked = FALSE;
63    
64     /*
65     * It may be the case that the region has never been initialized.
66     @@ -203,6 +207,23 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
67     handler = handler_desc->address_space.handler;
68     address = (region_obj->region.address + region_offset);
69    
70     + ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
71     + "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
72     + &region_obj->region.handler->address_space, handler,
73     + ACPI_FORMAT_UINT64(address),
74     + acpi_ut_get_region_name(region_obj->region.
75     + space_id)));
76     +
77     + if (!(handler_desc->address_space.handler_flags &
78     + ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
79     + /*
80     + * For handlers other than the default (supplied) handlers, we must
81     + * exit the interpreter because the handler *might* block -- we don't
82     + * know what it will do, so we can't hold the lock on the interpreter.
83     + */
84     + acpi_ex_exit_interpreter();
85     + }
86     +
87     /*
88     * Special handling for generic_serial_bus and general_purpose_io:
89     * There are three extra parameters that must be passed to the
90     @@ -211,6 +232,11 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
91     * 2) Length of the above buffer
92     * 3) Actual access length from the access_as() op
93     *
94     + * Since we pass these extra parameters via the context, which is
95     + * shared between threads, we must lock the context to avoid these
96     + * parameters being changed from another thread before the handler
97     + * has completed running.
98     + *
99     * In addition, for general_purpose_io, the Address and bit_width fields
100     * are defined as follows:
101     * 1) Address is the pin number index of the field (bit offset from
102     @@ -220,6 +246,14 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
103     if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) &&
104     context && field_obj) {
105    
106     + status =
107     + acpi_os_acquire_mutex(context_mutex, ACPI_WAIT_FOREVER);
108     + if (ACPI_FAILURE(status)) {
109     + goto re_enter_interpreter;
110     + }
111     +
112     + context_locked = TRUE;
113     +
114     /* Get the Connection (resource_template) buffer */
115    
116     context->connection = field_obj->field.resource_buffer;
117     @@ -229,6 +263,14 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
118     if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) &&
119     context && field_obj) {
120    
121     + status =
122     + acpi_os_acquire_mutex(context_mutex, ACPI_WAIT_FOREVER);
123     + if (ACPI_FAILURE(status)) {
124     + goto re_enter_interpreter;
125     + }
126     +
127     + context_locked = TRUE;
128     +
129     /* Get the Connection (resource_template) buffer */
130    
131     context->connection = field_obj->field.resource_buffer;
132     @@ -238,28 +280,15 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
133     bit_width = field_obj->field.bit_length;
134     }
135    
136     - ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
137     - "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
138     - &region_obj->region.handler->address_space, handler,
139     - ACPI_FORMAT_UINT64(address),
140     - acpi_ut_get_region_name(region_obj->region.
141     - space_id)));
142     -
143     - if (!(handler_desc->address_space.handler_flags &
144     - ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
145     - /*
146     - * For handlers other than the default (supplied) handlers, we must
147     - * exit the interpreter because the handler *might* block -- we don't
148     - * know what it will do, so we can't hold the lock on the interpreter.
149     - */
150     - acpi_ex_exit_interpreter();
151     - }
152     -
153     /* Call the handler */
154    
155     status = handler(function, address, bit_width, value, context,
156     region_obj2->extra.region_context);
157    
158     + if (context_locked) {
159     + acpi_os_release_mutex(context_mutex);
160     + }
161     +
162     if (ACPI_FAILURE(status)) {
163     ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
164     acpi_ut_get_region_name(region_obj->region.
165     @@ -276,6 +305,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
166     }
167     }
168    
169     +re_enter_interpreter:
170     if (!(handler_desc->address_space.handler_flags &
171     ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
172     /*
173     diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
174     index 47265b073e6ff..6e0d2a98c4ade 100644
175     --- a/drivers/acpi/acpica/evxfregn.c
176     +++ b/drivers/acpi/acpica/evxfregn.c
177     @@ -201,6 +201,8 @@ acpi_remove_address_space_handler(acpi_handle device,
178    
179     /* Now we can delete the handler object */
180    
181     + acpi_os_release_mutex(handler_obj->address_space.
182     + context_mutex);
183     acpi_ut_remove_reference(handler_obj);
184     goto unlock_and_exit;
185     }
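
The ACPICA hunks above add a per-handler context_mutex and hold it across the handler call whenever the shared connection parameters are written into the handler context. The following standalone C sketch is not part of the patch; the pthread mutex and all names are hypothetical stand-ins for the ACPI_OS layer, and the GSBUS/GPIO special-casing is omitted. It only illustrates the pattern: fill the shared context under the mutex, invoke the opaque handler, then release.

#include <pthread.h>
#include <stdio.h>

struct handler_context {
	const void *connection;     /* per-call resource buffer */
	unsigned int access_length; /* per-call access length */
};

struct space_handler {
	pthread_mutex_t context_mutex; /* what the patch adds: serializes context users */
	struct handler_context context;
	int (*handler)(const struct handler_context *ctx);
};

/* Mirrors the dispatch path: write the shared context under the mutex,
 * call the handler, release the mutex only after the handler returns. */
static int dispatch(struct space_handler *h,
		    const void *connection, unsigned int access_length)
{
	int status;

	pthread_mutex_lock(&h->context_mutex);
	h->context.connection = connection;
	h->context.access_length = access_length;

	status = h->handler(&h->context);

	pthread_mutex_unlock(&h->context_mutex);
	return status;
}

static int demo_handler(const struct handler_context *ctx)
{
	printf("handler saw access_length=%u\n", ctx->access_length);
	return 0;
}

int main(void)
{
	struct space_handler h = {
		.context_mutex = PTHREAD_MUTEX_INITIALIZER,
		.handler = demo_handler,
	};

	return dispatch(&h, "resource-template", 4);
}
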
186     diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
187     index 55af78b55c513..301ffe5b8feb0 100644
188     --- a/drivers/acpi/video_detect.c
189     +++ b/drivers/acpi/video_detect.c
190     @@ -143,6 +143,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
191     },
192     {
193     .callback = video_detect_force_vendor,
194     + .ident = "GIGABYTE GB-BXBT-2807",
195     + .matches = {
196     + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
197     + DMI_MATCH(DMI_PRODUCT_NAME, "GB-BXBT-2807"),
198     + },
199     + },
200     + {
201     .ident = "Sony VPCEH3U1E",
202     .matches = {
203     DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
204     diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
205     index c8fb21cc0d6ff..f84049119f1c1 100644
206     --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
207     +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
208     @@ -581,8 +581,6 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
209     if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
210     gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
211    
212     - gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
213     -
214     /* Enable USE_RETENTION_FLOPS */
215     gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
216    
217     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
218     index 33183933337af..d004f5645b30c 100644
219     --- a/drivers/hid/hid-ids.h
220     +++ b/drivers/hid/hid-ids.h
221     @@ -365,6 +365,7 @@
222     #define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR 0x1803
223     #define USB_DEVICE_ID_DRAGONRISE_GAMECUBE1 0x1843
224     #define USB_DEVICE_ID_DRAGONRISE_GAMECUBE2 0x1844
225     +#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE3 0x1846
226    
227     #define USB_VENDOR_ID_DWAV 0x0eef
228     #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
229     @@ -640,6 +641,8 @@
230     #define USB_DEVICE_ID_INNEX_GENESIS_ATARI 0x4745
231    
232     #define USB_VENDOR_ID_ITE 0x048d
233     +#define I2C_VENDOR_ID_ITE 0x103c
234     +#define I2C_DEVICE_ID_ITE_VOYO_WINPAD_A15 0x184f
235     #define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386
236     #define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350
237     #define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a
238     diff --git a/drivers/hid/hid-mf.c b/drivers/hid/hid-mf.c
239     index fc75f30f537c9..92d7ecd41a78f 100644
240     --- a/drivers/hid/hid-mf.c
241     +++ b/drivers/hid/hid-mf.c
242     @@ -153,6 +153,8 @@ static const struct hid_device_id mf_devices[] = {
243     .driver_data = HID_QUIRK_MULTI_INPUT },
244     { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2),
245     .driver_data = 0 }, /* No quirk required */
246     + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3),
247     + .driver_data = HID_QUIRK_MULTI_INPUT },
248     { }
249     };
250     MODULE_DEVICE_TABLE(hid, mf_devices);
251     diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
252     index 60d188a704e5e..f35d919c4ebab 100644
253     --- a/drivers/hid/hid-quirks.c
254     +++ b/drivers/hid/hid-quirks.c
255     @@ -72,6 +72,7 @@ static const struct hid_device_id hid_quirks[] = {
256     { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_REDRAGON_SEYMUR2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
257     { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR), HID_QUIRK_MULTI_INPUT },
258     { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1), HID_QUIRK_MULTI_INPUT },
259     + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3), HID_QUIRK_MULTI_INPUT },
260     { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), HID_QUIRK_MULTI_INPUT },
261     { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU), HID_QUIRK_MULTI_INPUT },
262     { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER), HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
263     @@ -491,6 +492,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
264     { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
265     { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
266     { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
267     + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3) },
268     #endif
269     #if IS_ENABLED(CONFIG_HID_MICROSOFT)
270     { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
271     diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
272     index 592176aff0270..96898983db990 100644
273     --- a/drivers/hid/i2c-hid/i2c-hid-core.c
274     +++ b/drivers/hid/i2c-hid/i2c-hid-core.c
275     @@ -173,6 +173,8 @@ static const struct i2c_hid_quirks {
276     I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
277     { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
278     I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
279     + { I2C_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_VOYO_WINPAD_A15,
280     + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
281     { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118,
282     I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
283     { USB_VENDOR_ID_ELAN, HID_ANY_ID,
284     diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
285     index 7b724f7b27a99..c392930253a30 100644
286     --- a/drivers/iommu/amd_iommu.c
287     +++ b/drivers/iommu/amd_iommu.c
288     @@ -1469,25 +1469,27 @@ static bool increase_address_space(struct protection_domain *domain,
289     bool ret = false;
290     u64 *pte;
291    
292     + pte = (void *)get_zeroed_page(gfp);
293     + if (!pte)
294     + return false;
295     +
296     spin_lock_irqsave(&domain->lock, flags);
297    
298     if (address <= PM_LEVEL_SIZE(domain->mode) ||
299     WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
300     goto out;
301    
302     - pte = (void *)get_zeroed_page(gfp);
303     - if (!pte)
304     - goto out;
305     -
306     *pte = PM_LEVEL_PDE(domain->mode,
307     iommu_virt_to_phys(domain->pt_root));
308     domain->pt_root = pte;
309     domain->mode += 1;
310    
311     + pte = NULL;
312     ret = true;
313    
314     out:
315     spin_unlock_irqrestore(&domain->lock, flags);
316     + free_page((unsigned long)pte);
317    
318     return ret;
319     }
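
The increase_address_space() hunk above moves the page allocation in front of the spin lock, publishes the page under the lock, and frees it after unlocking if it was never consumed. A standalone sketch of that allocate-before-lock pattern follows; it is not part of the patch, and calloc plus a pthread spinlock are userspace assumptions standing in for get_zeroed_page() and the domain lock.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct domain {
	pthread_spinlock_t lock;
	void *pt_root;
	int mode;
};

static bool increase_address_space(struct domain *d, int wanted_mode)
{
	bool ret = false;
	void *pte = calloc(1, 4096);	/* allocate with no lock held */

	if (!pte)
		return false;

	pthread_spin_lock(&d->lock);

	if (d->mode >= wanted_mode)	/* someone else already grew the table */
		goto out;

	d->pt_root = pte;		/* publish the new top level */
	d->mode += 1;
	pte = NULL;			/* ownership transferred, don't free below */
	ret = true;

out:
	pthread_spin_unlock(&d->lock);
	free(pte);			/* no-op when the page was published */
	return ret;
}

int main(void)
{
	struct domain d = { .pt_root = NULL, .mode = 1 };

	pthread_spin_init(&d.lock, PTHREAD_PROCESS_PRIVATE);
	return increase_address_space(&d, 2) ? 0 : 1;
}
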
320     diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
321     index 98be040cf958c..06b382304d926 100644
322     --- a/drivers/md/dm-table.c
323     +++ b/drivers/md/dm-table.c
324     @@ -888,24 +888,24 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
325     EXPORT_SYMBOL_GPL(dm_table_set_type);
326    
327     /* validate the dax capability of the target device span */
328     -int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
329     +int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
330     sector_t start, sector_t len, void *data)
331     {
332     int blocksize = *(int *) data, id;
333     bool rc;
334    
335     id = dax_read_lock();
336     - rc = dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
337     + rc = !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
338     dax_read_unlock(id);
339    
340     return rc;
341     }
342    
343     /* Check devices support synchronous DAX */
344     -static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev,
345     - sector_t start, sector_t len, void *data)
346     +static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
347     + sector_t start, sector_t len, void *data)
348     {
349     - return dev->dax_dev && dax_synchronous(dev->dax_dev);
350     + return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
351     }
352    
353     bool dm_table_supports_dax(struct dm_table *t,
354     @@ -922,7 +922,7 @@ bool dm_table_supports_dax(struct dm_table *t,
355     return false;
356    
357     if (!ti->type->iterate_devices ||
358     - !ti->type->iterate_devices(ti, iterate_fn, blocksize))
359     + ti->type->iterate_devices(ti, iterate_fn, blocksize))
360     return false;
361     }
362    
363     @@ -996,7 +996,7 @@ static int dm_table_determine_type(struct dm_table *t)
364     verify_bio_based:
365     /* We must use this table as bio-based */
366     t->type = DM_TYPE_BIO_BASED;
367     - if (dm_table_supports_dax(t, device_supports_dax, &page_size) ||
368     + if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
369     (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
370     t->type = DM_TYPE_DAX_BIO_BASED;
371     } else {
372     @@ -1376,6 +1376,46 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
373     return &t->targets[(KEYS_PER_NODE * n) + k];
374     }
375    
376     +/*
377     + * type->iterate_devices() should be called when the sanity check needs to
378     + * iterate and check all underlying data devices. iterate_devices() will
379     + * iterate all underlying data devices until it encounters a non-zero return
380     + * code, returned by whether the input iterate_devices_callout_fn, or
381     + * iterate_devices() itself internally.
382     + *
383     + * For some target type (e.g. dm-stripe), one call of iterate_devices() may
384     + * iterate multiple underlying devices internally, in which case a non-zero
385     + * return code returned by iterate_devices_callout_fn will stop the iteration
386     + * in advance.
387     + *
388     + * Cases requiring _any_ underlying device supporting some kind of attribute,
389     + * should use the iteration structure like dm_table_any_dev_attr(), or call
390     + * it directly. @func should handle semantics of positive examples, e.g.
391     + * capable of something.
392     + *
393     + * Cases requiring _all_ underlying devices supporting some kind of attribute,
394     + * should use the iteration structure like dm_table_supports_nowait() or
395     + * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
396     + * uses an @anti_func that handle semantics of counter examples, e.g. not
397     + * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
398     + */
399     +static bool dm_table_any_dev_attr(struct dm_table *t,
400     + iterate_devices_callout_fn func, void *data)
401     +{
402     + struct dm_target *ti;
403     + unsigned int i;
404     +
405     + for (i = 0; i < dm_table_get_num_targets(t); i++) {
406     + ti = dm_table_get_target(t, i);
407     +
408     + if (ti->type->iterate_devices &&
409     + ti->type->iterate_devices(ti, func, data))
410     + return true;
411     + }
412     +
413     + return false;
414     +}
415     +
416     static int count_device(struct dm_target *ti, struct dm_dev *dev,
417     sector_t start, sector_t len, void *data)
418     {
419     @@ -1412,13 +1452,13 @@ bool dm_table_has_no_data_devices(struct dm_table *table)
420     return true;
421     }
422    
423     -static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
424     - sector_t start, sector_t len, void *data)
425     +static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
426     + sector_t start, sector_t len, void *data)
427     {
428     struct request_queue *q = bdev_get_queue(dev->bdev);
429     enum blk_zoned_model *zoned_model = data;
430    
431     - return q && blk_queue_zoned_model(q) == *zoned_model;
432     + return !q || blk_queue_zoned_model(q) != *zoned_model;
433     }
434    
435     static bool dm_table_supports_zoned_model(struct dm_table *t,
436     @@ -1435,37 +1475,20 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
437     return false;
438    
439     if (!ti->type->iterate_devices ||
440     - !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
441     + ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model))
442     return false;
443     }
444    
445     return true;
446     }
447    
448     -static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
449     - sector_t start, sector_t len, void *data)
450     +static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
451     + sector_t start, sector_t len, void *data)
452     {
453     struct request_queue *q = bdev_get_queue(dev->bdev);
454     unsigned int *zone_sectors = data;
455    
456     - return q && blk_queue_zone_sectors(q) == *zone_sectors;
457     -}
458     -
459     -static bool dm_table_matches_zone_sectors(struct dm_table *t,
460     - unsigned int zone_sectors)
461     -{
462     - struct dm_target *ti;
463     - unsigned i;
464     -
465     - for (i = 0; i < dm_table_get_num_targets(t); i++) {
466     - ti = dm_table_get_target(t, i);
467     -
468     - if (!ti->type->iterate_devices ||
469     - !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
470     - return false;
471     - }
472     -
473     - return true;
474     + return !q || blk_queue_zone_sectors(q) != *zone_sectors;
475     }
476    
477     static int validate_hardware_zoned_model(struct dm_table *table,
478     @@ -1485,7 +1508,7 @@ static int validate_hardware_zoned_model(struct dm_table *table,
479     if (!zone_sectors || !is_power_of_2(zone_sectors))
480     return -EINVAL;
481    
482     - if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
483     + if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
484     DMERR("%s: zone sectors is not consistent across all devices",
485     dm_device_name(table->md));
486     return -EINVAL;
487     @@ -1675,29 +1698,12 @@ static int device_dax_write_cache_enabled(struct dm_target *ti,
488     return false;
489     }
490    
491     -static int dm_table_supports_dax_write_cache(struct dm_table *t)
492     -{
493     - struct dm_target *ti;
494     - unsigned i;
495     -
496     - for (i = 0; i < dm_table_get_num_targets(t); i++) {
497     - ti = dm_table_get_target(t, i);
498     -
499     - if (ti->type->iterate_devices &&
500     - ti->type->iterate_devices(ti,
501     - device_dax_write_cache_enabled, NULL))
502     - return true;
503     - }
504     -
505     - return false;
506     -}
507     -
508     -static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
509     - sector_t start, sector_t len, void *data)
510     +static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
511     + sector_t start, sector_t len, void *data)
512     {
513     struct request_queue *q = bdev_get_queue(dev->bdev);
514    
515     - return q && blk_queue_nonrot(q);
516     + return q && !blk_queue_nonrot(q);
517     }
518    
519     static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
520     @@ -1708,35 +1714,18 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
521     return q && !blk_queue_add_random(q);
522     }
523    
524     -static bool dm_table_all_devices_attribute(struct dm_table *t,
525     - iterate_devices_callout_fn func)
526     -{
527     - struct dm_target *ti;
528     - unsigned i;
529     -
530     - for (i = 0; i < dm_table_get_num_targets(t); i++) {
531     - ti = dm_table_get_target(t, i);
532     -
533     - if (!ti->type->iterate_devices ||
534     - !ti->type->iterate_devices(ti, func, NULL))
535     - return false;
536     - }
537     -
538     - return true;
539     -}
540     -
541     -static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
542     +static int device_is_partial_completion(struct dm_target *ti, struct dm_dev *dev,
543     sector_t start, sector_t len, void *data)
544     {
545     char b[BDEVNAME_SIZE];
546    
547     /* For now, NVMe devices are the only devices of this class */
548     - return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
549     + return (strncmp(bdevname(dev->bdev, b), "nvme", 4) != 0);
550     }
551    
552     static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
553     {
554     - return dm_table_all_devices_attribute(t, device_no_partial_completion);
555     + return !dm_table_any_dev_attr(t, device_is_partial_completion, NULL);
556     }
557    
558     static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
559     @@ -1863,27 +1852,6 @@ static int device_requires_stable_pages(struct dm_target *ti,
560     return q && bdi_cap_stable_pages_required(q->backing_dev_info);
561     }
562    
563     -/*
564     - * If any underlying device requires stable pages, a table must require
565     - * them as well. Only targets that support iterate_devices are considered:
566     - * don't want error, zero, etc to require stable pages.
567     - */
568     -static bool dm_table_requires_stable_pages(struct dm_table *t)
569     -{
570     - struct dm_target *ti;
571     - unsigned i;
572     -
573     - for (i = 0; i < dm_table_get_num_targets(t); i++) {
574     - ti = dm_table_get_target(t, i);
575     -
576     - if (ti->type->iterate_devices &&
577     - ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
578     - return true;
579     - }
580     -
581     - return false;
582     -}
583     -
584     void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
585     struct queue_limits *limits)
586     {
587     @@ -1916,22 +1884,22 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
588     }
589     blk_queue_write_cache(q, wc, fua);
590    
591     - if (dm_table_supports_dax(t, device_supports_dax, &page_size)) {
592     + if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) {
593     blk_queue_flag_set(QUEUE_FLAG_DAX, q);
594     - if (dm_table_supports_dax(t, device_dax_synchronous, NULL))
595     + if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL))
596     set_dax_synchronous(t->md->dax_dev);
597     }
598     else
599     blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
600    
601     - if (dm_table_supports_dax_write_cache(t))
602     + if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
603     dax_write_cache(t->md->dax_dev, true);
604    
605     /* Ensure that all underlying devices are non-rotational. */
606     - if (dm_table_all_devices_attribute(t, device_is_nonrot))
607     - blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
608     - else
609     + if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
610     blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
611     + else
612     + blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
613    
614     if (!dm_table_supports_write_same(t))
615     q->limits.max_write_same_sectors = 0;
616     @@ -1943,8 +1911,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
617     /*
618     * Some devices don't use blk_integrity but still want stable pages
619     * because they do their own checksumming.
620     + * If any underlying device requires stable pages, a table must require
621     + * them as well. Only targets that support iterate_devices are considered:
622     + * don't want error, zero, etc to require stable pages.
623     */
624     - if (dm_table_requires_stable_pages(t))
625     + if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
626     q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
627     else
628     q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
629     @@ -1955,7 +1926,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
630     * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
631     * have it set.
632     */
633     - if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
634     + if (blk_queue_add_random(q) &&
635     + dm_table_any_dev_attr(t, device_is_not_random, NULL))
636     blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
637    
638     /*
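
The dm-table.c refactor above replaces several "all devices support X" walkers with a single dm_table_any_dev_attr() helper plus negated per-device callouts, relying on the equivalence "all devices capable" == "no device incapable". A standalone sketch of that inversion is shown below; it is not part of the patch, and the device array and predicates are hypothetical stand-ins for the iterate_devices() callouts.

#include <stdbool.h>
#include <stdio.h>

struct dev {
	bool rotational;
	bool dax_capable;
};

typedef bool (*dev_pred_fn)(const struct dev *d);

/* Counterpart of dm_table_any_dev_attr(): true if any device matches. */
static bool any_dev_attr(const struct dev *devs, int n, dev_pred_fn pred)
{
	for (int i = 0; i < n; i++)
		if (pred(&devs[i]))
			return true;
	return false;
}

/* Negated predicates, mirroring device_is_rotational()/device_not_dax_capable(). */
static bool is_rotational(const struct dev *d)   { return d->rotational; }
static bool not_dax_capable(const struct dev *d) { return !d->dax_capable; }

int main(void)
{
	const struct dev devs[] = {
		{ .rotational = false, .dax_capable = true },
		{ .rotational = false, .dax_capable = false },
	};
	const int n = sizeof(devs) / sizeof(devs[0]);

	/* "All devices are non-rotational" == "no device is rotational". */
	bool all_nonrot = !any_dev_attr(devs, n, is_rotational);
	/* "All devices support DAX" == "no device lacks DAX". */
	bool all_dax = !any_dev_attr(devs, n, not_dax_capable);

	printf("non-rotational: %d, dax: %d\n", all_nonrot, all_dax);
	return 0;
}
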
639     diff --git a/drivers/md/dm.c b/drivers/md/dm.c
640     index de32f8553735f..530c0fe142291 100644
641     --- a/drivers/md/dm.c
642     +++ b/drivers/md/dm.c
643     @@ -1139,7 +1139,7 @@ static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bd
644     if (!map)
645     goto out;
646    
647     - ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);
648     + ret = dm_table_supports_dax(map, device_not_dax_capable, &blocksize);
649    
650     out:
651     dm_put_live_table(md, srcu_idx);
652     diff --git a/drivers/md/dm.h b/drivers/md/dm.h
653     index d7c4f6606b5fc..9fbf87e04019c 100644
654     --- a/drivers/md/dm.h
655     +++ b/drivers/md/dm.h
656     @@ -74,7 +74,7 @@ void dm_table_free_md_mempools(struct dm_table *t);
657     struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
658     bool dm_table_supports_dax(struct dm_table *t, iterate_devices_callout_fn fn,
659     int *blocksize);
660     -int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
661     +int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
662     sector_t start, sector_t len, void *data);
663    
664     void dm_lock_md_type(struct mapped_device *md);
665     diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
666     index 7e0b0b7cc2a35..ead0acb7807c8 100644
667     --- a/drivers/media/pci/cx23885/cx23885-core.c
668     +++ b/drivers/media/pci/cx23885/cx23885-core.c
669     @@ -2074,6 +2074,10 @@ static struct {
670     * 0x1451 is PCI ID for the IOMMU found on Ryzen
671     */
672     { PCI_VENDOR_ID_AMD, 0x1451 },
673     + /* According to sudo lspci -nn,
674     + * 0x1423 is the PCI ID for the IOMMU found on Kaveri
675     + */
676     + { PCI_VENDOR_ID_AMD, 0x1423 },
677     };
678    
679     static bool cx23885_does_need_dma_reset(void)
680     diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
681     index 414dcbd3c3c25..8b355fc0607b7 100644
682     --- a/drivers/misc/eeprom/eeprom_93xx46.c
683     +++ b/drivers/misc/eeprom/eeprom_93xx46.c
684     @@ -35,6 +35,10 @@ static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = {
685     EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
686     };
687    
688     +static const struct eeprom_93xx46_devtype_data microchip_93lc46b_data = {
689     + .quirks = EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE,
690     +};
691     +
692     struct eeprom_93xx46_dev {
693     struct spi_device *spi;
694     struct eeprom_93xx46_platform_data *pdata;
695     @@ -55,6 +59,11 @@ static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev)
696     return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH;
697     }
698    
699     +static inline bool has_quirk_extra_read_cycle(struct eeprom_93xx46_dev *edev)
700     +{
701     + return edev->pdata->quirks & EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE;
702     +}
703     +
704     static int eeprom_93xx46_read(void *priv, unsigned int off,
705     void *val, size_t count)
706     {
707     @@ -96,6 +105,11 @@ static int eeprom_93xx46_read(void *priv, unsigned int off,
708     dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
709     cmd_addr, edev->spi->max_speed_hz);
710    
711     + if (has_quirk_extra_read_cycle(edev)) {
712     + cmd_addr <<= 1;
713     + bits += 1;
714     + }
715     +
716     spi_message_init(&m);
717    
718     t[0].tx_buf = (char *)&cmd_addr;
719     @@ -363,6 +377,7 @@ static void select_deassert(void *context)
720     static const struct of_device_id eeprom_93xx46_of_table[] = {
721     { .compatible = "eeprom-93xx46", },
722     { .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, },
723     + { .compatible = "microchip,93lc46b", .data = &microchip_93lc46b_data, },
724     {}
725     };
726     MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
727     diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
728     index a5137845a1c78..6793fb8fe976b 100644
729     --- a/drivers/mmc/host/sdhci-of-dwcmshc.c
730     +++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
731     @@ -58,6 +58,7 @@ static const struct sdhci_ops sdhci_dwcmshc_ops = {
732     static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
733     .ops = &sdhci_dwcmshc_ops,
734     .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
735     + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
736     };
737    
738     static int dwcmshc_probe(struct platform_device *pdev)
739     diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
740     index fc1706d0647d7..58c9623c3a916 100644
741     --- a/drivers/net/wireless/marvell/mwifiex/pcie.c
742     +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
743     @@ -377,6 +377,8 @@ static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev)
744     clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
745     clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
746     mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
747     +
748     + card->pci_reset_ongoing = true;
749     }
750    
751     /*
752     @@ -405,6 +407,8 @@ static void mwifiex_pcie_reset_done(struct pci_dev *pdev)
753     dev_err(&pdev->dev, "reinit failed: %d\n", ret);
754     else
755     mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
756     +
757     + card->pci_reset_ongoing = false;
758     }
759    
760     static const struct pci_error_handlers mwifiex_pcie_err_handler = {
761     @@ -2995,7 +2999,19 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
762     int ret;
763     u32 fw_status;
764    
765     - cancel_work_sync(&card->work);
766     + /* Perform the cancel_work_sync() only when we're not resetting
767     + * the card. It's because that function never returns if we're
768     + * in reset path. If we're here when resetting the card, it means
769     + * that we failed to reset the card (reset failure path).
770     + */
771     + if (!card->pci_reset_ongoing) {
772     + mwifiex_dbg(adapter, MSG, "performing cancel_work_sync()...\n");
773     + cancel_work_sync(&card->work);
774     + mwifiex_dbg(adapter, MSG, "cancel_work_sync() done\n");
775     + } else {
776     + mwifiex_dbg(adapter, MSG,
777     + "skipped cancel_work_sync() because we're in card reset failure path\n");
778     + }
779    
780     ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
781     if (fw_status == FIRMWARE_READY_PCIE) {
782     diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
783     index f7ce9b6db6b41..72d0c01ff3592 100644
784     --- a/drivers/net/wireless/marvell/mwifiex/pcie.h
785     +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
786     @@ -391,6 +391,8 @@ struct pcie_service_card {
787     struct mwifiex_msix_context share_irq_ctx;
788     struct work_struct work;
789     unsigned long work_flags;
790     +
791     + bool pci_reset_ongoing;
792     };
793    
794     static inline int
795     diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
796     index abc342db3b337..fc18738dcf8ff 100644
797     --- a/drivers/nvme/host/pci.c
798     +++ b/drivers/nvme/host/pci.c
799     @@ -3164,7 +3164,8 @@ static const struct pci_device_id nvme_id_table[] = {
800     { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
801     .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
802     { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
803     - .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
804     + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
805     + NVME_QUIRK_NO_NS_DESC_LIST, },
806     { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
807     .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
808     { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */
809     @@ -3178,6 +3179,9 @@ static const struct pci_device_id nvme_id_table[] = {
810     NVME_QUIRK_IGNORE_DEV_SUBNQN, },
811     { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
812     .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
813     + { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
814     + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
815     + NVME_QUIRK_IGNORE_DEV_SUBNQN, },
816     { PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
817     .driver_data = NVME_QUIRK_LIGHTNVM, },
818     { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
819     diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
820     index c98067579e9f3..53376bcda1f3f 100644
821     --- a/drivers/pci/quirks.c
822     +++ b/drivers/pci/quirks.c
823     @@ -4055,6 +4055,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9183,
824     /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
825     DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
826     quirk_dma_func1_alias);
827     +/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c135 */
828     +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9215,
829     + quirk_dma_func1_alias);
830     /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */
831     DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
832     quirk_dma_func1_alias);
833     diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
834     index 7fa27e7536917..d27a564389a47 100644
835     --- a/drivers/platform/x86/acer-wmi.c
836     +++ b/drivers/platform/x86/acer-wmi.c
837     @@ -30,6 +30,7 @@
838     #include <linux/input/sparse-keymap.h>
839     #include <acpi/video.h>
840    
841     +ACPI_MODULE_NAME(KBUILD_MODNAME);
842     MODULE_AUTHOR("Carlos Corbacho");
843     MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver");
844     MODULE_LICENSE("GPL");
845     @@ -80,7 +81,7 @@ MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026");
846    
847     enum acer_wmi_event_ids {
848     WMID_HOTKEY_EVENT = 0x1,
849     - WMID_ACCEL_EVENT = 0x5,
850     + WMID_ACCEL_OR_KBD_DOCK_EVENT = 0x5,
851     };
852    
853     static const struct key_entry acer_wmi_keymap[] __initconst = {
854     @@ -128,7 +129,9 @@ struct event_return_value {
855     u8 function;
856     u8 key_num;
857     u16 device_state;
858     - u32 reserved;
859     + u16 reserved1;
860     + u8 kbd_dock_state;
861     + u8 reserved2;
862     } __attribute__((packed));
863    
864     /*
865     @@ -206,14 +209,13 @@ struct hotkey_function_type_aa {
866     /*
867     * Interface capability flags
868     */
869     -#define ACER_CAP_MAILLED (1<<0)
870     -#define ACER_CAP_WIRELESS (1<<1)
871     -#define ACER_CAP_BLUETOOTH (1<<2)
872     -#define ACER_CAP_BRIGHTNESS (1<<3)
873     -#define ACER_CAP_THREEG (1<<4)
874     -#define ACER_CAP_ACCEL (1<<5)
875     -#define ACER_CAP_RFBTN (1<<6)
876     -#define ACER_CAP_ANY (0xFFFFFFFF)
877     +#define ACER_CAP_MAILLED BIT(0)
878     +#define ACER_CAP_WIRELESS BIT(1)
879     +#define ACER_CAP_BLUETOOTH BIT(2)
880     +#define ACER_CAP_BRIGHTNESS BIT(3)
881     +#define ACER_CAP_THREEG BIT(4)
882     +#define ACER_CAP_SET_FUNCTION_MODE BIT(5)
883     +#define ACER_CAP_KBD_DOCK BIT(6)
884    
885     /*
886     * Interface type flags
887     @@ -236,6 +238,7 @@ static int mailled = -1;
888     static int brightness = -1;
889     static int threeg = -1;
890     static int force_series;
891     +static int force_caps = -1;
892     static bool ec_raw_mode;
893     static bool has_type_aa;
894     static u16 commun_func_bitmap;
895     @@ -245,11 +248,13 @@ module_param(mailled, int, 0444);
896     module_param(brightness, int, 0444);
897     module_param(threeg, int, 0444);
898     module_param(force_series, int, 0444);
899     +module_param(force_caps, int, 0444);
900     module_param(ec_raw_mode, bool, 0444);
901     MODULE_PARM_DESC(mailled, "Set initial state of Mail LED");
902     MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness");
903     MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware");
904     MODULE_PARM_DESC(force_series, "Force a different laptop series");
905     +MODULE_PARM_DESC(force_caps, "Force the capability bitmask to this value");
906     MODULE_PARM_DESC(ec_raw_mode, "Enable EC raw mode");
907    
908     struct acer_data {
909     @@ -319,6 +324,15 @@ static int __init dmi_matched(const struct dmi_system_id *dmi)
910     return 1;
911     }
912    
913     +static int __init set_force_caps(const struct dmi_system_id *dmi)
914     +{
915     + if (force_caps == -1) {
916     + force_caps = (uintptr_t)dmi->driver_data;
917     + pr_info("Found %s, set force_caps to 0x%x\n", dmi->ident, force_caps);
918     + }
919     + return 1;
920     +}
921     +
922     static struct quirk_entry quirk_unknown = {
923     };
924    
925     @@ -497,6 +511,33 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
926     },
927     .driver_data = &quirk_acer_travelmate_2490,
928     },
929     + {
930     + .callback = set_force_caps,
931     + .ident = "Acer Aspire Switch 10E SW3-016",
932     + .matches = {
933     + DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
934     + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW3-016"),
935     + },
936     + .driver_data = (void *)ACER_CAP_KBD_DOCK,
937     + },
938     + {
939     + .callback = set_force_caps,
940     + .ident = "Acer Aspire Switch 10 SW5-012",
941     + .matches = {
942     + DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
943     + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
944     + },
945     + .driver_data = (void *)ACER_CAP_KBD_DOCK,
946     + },
947     + {
948     + .callback = set_force_caps,
949     + .ident = "Acer One 10 (S1003)",
950     + .matches = {
951     + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
952     + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
953     + },
954     + .driver_data = (void *)ACER_CAP_KBD_DOCK,
955     + },
956     {}
957     };
958    
959     @@ -1253,10 +1294,8 @@ static void __init type_aa_dmi_decode(const struct dmi_header *header, void *d)
960     interface->capability |= ACER_CAP_THREEG;
961     if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_BLUETOOTH)
962     interface->capability |= ACER_CAP_BLUETOOTH;
963     - if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_RFBTN) {
964     - interface->capability |= ACER_CAP_RFBTN;
965     + if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_RFBTN)
966     commun_func_bitmap &= ~ACER_WMID3_GDS_RFBTN;
967     - }
968    
969     commun_fn_key_number = type_aa->commun_fn_key_number;
970     }
971     @@ -1520,7 +1559,7 @@ static int acer_gsensor_event(void)
972     struct acpi_buffer output;
973     union acpi_object out_obj[5];
974    
975     - if (!has_cap(ACER_CAP_ACCEL))
976     + if (!acer_wmi_accel_dev)
977     return -1;
978    
979     output.length = sizeof(out_obj);
980     @@ -1543,6 +1582,71 @@ static int acer_gsensor_event(void)
981     return 0;
982     }
983    
984     +/*
985     + * Switch series keyboard dock status
986     + */
987     +static int acer_kbd_dock_state_to_sw_tablet_mode(u8 kbd_dock_state)
988     +{
989     + switch (kbd_dock_state) {
990     + case 0x01: /* Docked, traditional clamshell laptop mode */
991     + return 0;
992     + case 0x04: /* Stand-alone tablet */
993     + case 0x40: /* Docked, tent mode, keyboard not usable */
994     + return 1;
995     + default:
996     + pr_warn("Unknown kbd_dock_state 0x%02x\n", kbd_dock_state);
997     + }
998     +
999     + return 0;
1000     +}
1001     +
1002     +static void acer_kbd_dock_get_initial_state(void)
1003     +{
1004     + u8 *output, input[8] = { 0x05, 0x00, };
1005     + struct acpi_buffer input_buf = { sizeof(input), input };
1006     + struct acpi_buffer output_buf = { ACPI_ALLOCATE_BUFFER, NULL };
1007     + union acpi_object *obj;
1008     + acpi_status status;
1009     + int sw_tablet_mode;
1010     +
1011     + status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input_buf, &output_buf);
1012     + if (ACPI_FAILURE(status)) {
1013     + ACPI_EXCEPTION((AE_INFO, status, "Error getting keyboard-dock initial status"));
1014     + return;
1015     + }
1016     +
1017     + obj = output_buf.pointer;
1018     + if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length != 8) {
1019     + pr_err("Unexpected output format getting keyboard-dock initial status\n");
1020     + goto out_free_obj;
1021     + }
1022     +
1023     + output = obj->buffer.pointer;
1024     + if (output[0] != 0x00 || (output[3] != 0x05 && output[3] != 0x45)) {
1025     + pr_err("Unexpected output [0]=0x%02x [3]=0x%02x getting keyboard-dock initial status\n",
1026     + output[0], output[3]);
1027     + goto out_free_obj;
1028     + }
1029     +
1030     + sw_tablet_mode = acer_kbd_dock_state_to_sw_tablet_mode(output[4]);
1031     + input_report_switch(acer_wmi_input_dev, SW_TABLET_MODE, sw_tablet_mode);
1032     +
1033     +out_free_obj:
1034     + kfree(obj);
1035     +}
1036     +
1037     +static void acer_kbd_dock_event(const struct event_return_value *event)
1038     +{
1039     + int sw_tablet_mode;
1040     +
1041     + if (!has_cap(ACER_CAP_KBD_DOCK))
1042     + return;
1043     +
1044     + sw_tablet_mode = acer_kbd_dock_state_to_sw_tablet_mode(event->kbd_dock_state);
1045     + input_report_switch(acer_wmi_input_dev, SW_TABLET_MODE, sw_tablet_mode);
1046     + input_sync(acer_wmi_input_dev);
1047     +}
1048     +
1049     /*
1050     * Rfkill devices
1051     */
1052     @@ -1770,8 +1874,9 @@ static void acer_wmi_notify(u32 value, void *context)
1053     sparse_keymap_report_event(acer_wmi_input_dev, scancode, 1, true);
1054     }
1055     break;
1056     - case WMID_ACCEL_EVENT:
1057     + case WMID_ACCEL_OR_KBD_DOCK_EVENT:
1058     acer_gsensor_event();
1059     + acer_kbd_dock_event(&return_value);
1060     break;
1061     default:
1062     pr_warn("Unknown function number - %d - %d\n",
1063     @@ -1894,8 +1999,6 @@ static int __init acer_wmi_accel_setup(void)
1064     gsensor_handle = acpi_device_handle(adev);
1065     acpi_dev_put(adev);
1066    
1067     - interface->capability |= ACER_CAP_ACCEL;
1068     -
1069     acer_wmi_accel_dev = input_allocate_device();
1070     if (!acer_wmi_accel_dev)
1071     return -ENOMEM;
1072     @@ -1921,11 +2024,6 @@ err_free_dev:
1073     return err;
1074     }
1075    
1076     -static void acer_wmi_accel_destroy(void)
1077     -{
1078     - input_unregister_device(acer_wmi_accel_dev);
1079     -}
1080     -
1081     static int __init acer_wmi_input_setup(void)
1082     {
1083     acpi_status status;
1084     @@ -1943,6 +2041,9 @@ static int __init acer_wmi_input_setup(void)
1085     if (err)
1086     goto err_free_dev;
1087    
1088     + if (has_cap(ACER_CAP_KBD_DOCK))
1089     + input_set_capability(acer_wmi_input_dev, EV_SW, SW_TABLET_MODE);
1090     +
1091     status = wmi_install_notify_handler(ACERWMID_EVENT_GUID,
1092     acer_wmi_notify, NULL);
1093     if (ACPI_FAILURE(status)) {
1094     @@ -1950,6 +2051,9 @@ static int __init acer_wmi_input_setup(void)
1095     goto err_free_dev;
1096     }
1097    
1098     + if (has_cap(ACER_CAP_KBD_DOCK))
1099     + acer_kbd_dock_get_initial_state();
1100     +
1101     err = input_register_device(acer_wmi_input_dev);
1102     if (err)
1103     goto err_uninstall_notifier;
1104     @@ -2080,7 +2184,7 @@ static int acer_resume(struct device *dev)
1105     if (has_cap(ACER_CAP_BRIGHTNESS))
1106     set_u32(data->brightness, ACER_CAP_BRIGHTNESS);
1107    
1108     - if (has_cap(ACER_CAP_ACCEL))
1109     + if (acer_wmi_accel_dev)
1110     acer_gsensor_init();
1111    
1112     return 0;
1113     @@ -2181,7 +2285,7 @@ static int __init acer_wmi_init(void)
1114     }
1115     /* WMID always provides brightness methods */
1116     interface->capability |= ACER_CAP_BRIGHTNESS;
1117     - } else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa) {
1118     + } else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa && force_caps == -1) {
1119     pr_err("No WMID device detection method found\n");
1120     return -ENODEV;
1121     }
1122     @@ -2211,7 +2315,14 @@ static int __init acer_wmi_init(void)
1123     if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
1124     interface->capability &= ~ACER_CAP_BRIGHTNESS;
1125    
1126     - if (wmi_has_guid(WMID_GUID3)) {
1127     + if (wmi_has_guid(WMID_GUID3))
1128     + interface->capability |= ACER_CAP_SET_FUNCTION_MODE;
1129     +
1130     + if (force_caps != -1)
1131     + interface->capability = force_caps;
1132     +
1133     + if (wmi_has_guid(WMID_GUID3) &&
1134     + (interface->capability & ACER_CAP_SET_FUNCTION_MODE)) {
1135     if (ACPI_FAILURE(acer_wmi_enable_rf_button()))
1136     pr_warn("Cannot enable RF Button Driver\n");
1137    
1138     @@ -2270,8 +2381,8 @@ error_device_alloc:
1139     error_platform_register:
1140     if (wmi_has_guid(ACERWMID_EVENT_GUID))
1141     acer_wmi_input_destroy();
1142     - if (has_cap(ACER_CAP_ACCEL))
1143     - acer_wmi_accel_destroy();
1144     + if (acer_wmi_accel_dev)
1145     + input_unregister_device(acer_wmi_accel_dev);
1146    
1147     return err;
1148     }
1149     @@ -2281,8 +2392,8 @@ static void __exit acer_wmi_exit(void)
1150     if (wmi_has_guid(ACERWMID_EVENT_GUID))
1151     acer_wmi_input_destroy();
1152    
1153     - if (has_cap(ACER_CAP_ACCEL))
1154     - acer_wmi_accel_destroy();
1155     + if (acer_wmi_accel_dev)
1156     + input_unregister_device(acer_wmi_accel_dev);
1157    
1158     remove_debugfs();
1159     platform_device_unregister(acer_platform_device);
1160     diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
1161     index eec7928ff8fe0..99580c22f91a4 100644
1162     --- a/include/linux/eeprom_93xx46.h
1163     +++ b/include/linux/eeprom_93xx46.h
1164     @@ -16,6 +16,8 @@ struct eeprom_93xx46_platform_data {
1165     #define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0)
1166     /* Instructions such as EWEN are (addrlen + 2) in length. */
1167     #define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1)
1168     +/* Add extra cycle after address during a read */
1169     +#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2)
1170    
1171     /*
1172     * optional hooks to control additional logic
1173     diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
1174     index 29e2bd5cc5af7..7dce11ab28062 100644
1175     --- a/net/dsa/Kconfig
1176     +++ b/net/dsa/Kconfig
1177     @@ -9,6 +9,7 @@ menuconfig NET_DSA
1178     tristate "Distributed Switch Architecture"
1179     depends on HAVE_NET_DSA
1180     depends on BRIDGE || BRIDGE=n
1181     + select GRO_CELLS
1182     select NET_SWITCHDEV
1183     select PHYLINK
1184     select NET_DEVLINK
1185     diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
1186     index 43120a3fb06f3..ca80f86995e68 100644
1187     --- a/net/dsa/dsa.c
1188     +++ b/net/dsa/dsa.c
1189     @@ -238,7 +238,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
1190     if (dsa_skb_defer_rx_timestamp(p, skb))
1191     return 0;
1192    
1193     - netif_receive_skb(skb);
1194     + gro_cells_receive(&p->gcells, skb);
1195    
1196     return 0;
1197     }
1198     diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
1199     index bf9947c577b6d..d8e850724d13c 100644
1200     --- a/net/dsa/dsa_priv.h
1201     +++ b/net/dsa/dsa_priv.h
1202     @@ -11,6 +11,7 @@
1203     #include <linux/netdevice.h>
1204     #include <linux/netpoll.h>
1205     #include <net/dsa.h>
1206     +#include <net/gro_cells.h>
1207    
1208     enum {
1209     DSA_NOTIFIER_AGEING_TIME,
1210     @@ -68,6 +69,8 @@ struct dsa_slave_priv {
1211    
1212     struct pcpu_sw_netstats *stats64;
1213    
1214     + struct gro_cells gcells;
1215     +
1216     /* DSA port data, such as switch, port index, etc. */
1217     struct dsa_port *dp;
1218    
1219     diff --git a/net/dsa/slave.c b/net/dsa/slave.c
1220     index f734ce0bcb56e..06f8874d53eea 100644
1221     --- a/net/dsa/slave.c
1222     +++ b/net/dsa/slave.c
1223     @@ -1431,6 +1431,11 @@ int dsa_slave_create(struct dsa_port *port)
1224     free_netdev(slave_dev);
1225     return -ENOMEM;
1226     }
1227     +
1228     + ret = gro_cells_init(&p->gcells, slave_dev);
1229     + if (ret)
1230     + goto out_free;
1231     +
1232     p->dp = port;
1233     INIT_LIST_HEAD(&p->mall_tc_list);
1234     INIT_WORK(&port->xmit_work, dsa_port_xmit_work);
1235     @@ -1443,7 +1448,7 @@ int dsa_slave_create(struct dsa_port *port)
1236     ret = dsa_slave_phy_setup(slave_dev);
1237     if (ret) {
1238     netdev_err(master, "error %d setting up slave phy\n", ret);
1239     - goto out_free;
1240     + goto out_gcells;
1241     }
1242    
1243     dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);
1244     @@ -1462,6 +1467,8 @@ out_phy:
1245     phylink_disconnect_phy(p->dp->pl);
1246     rtnl_unlock();
1247     phylink_destroy(p->dp->pl);
1248     +out_gcells:
1249     + gro_cells_destroy(&p->gcells);
1250     out_free:
1251     free_percpu(p->stats64);
1252     free_netdev(slave_dev);
1253     @@ -1482,6 +1489,7 @@ void dsa_slave_destroy(struct net_device *slave_dev)
1254     dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
1255     unregister_netdev(slave_dev);
1256     phylink_destroy(dp->pl);
1257     + gro_cells_destroy(&p->gcells);
1258     free_percpu(p->stats64);
1259     free_netdev(slave_dev);
1260     }
1261     diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
1262     index 9ee610504bac9..cfd3077174731 100644
1263     --- a/sound/soc/intel/boards/bytcr_rt5640.c
1264     +++ b/sound/soc/intel/boards/bytcr_rt5640.c
1265     @@ -435,6 +435,18 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
1266     BYT_RT5640_SSP0_AIF1 |
1267     BYT_RT5640_MCLK_EN),
1268     },
1269     + {
1270     + .matches = {
1271     + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"),
1272     + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ARCHOS 140 CESIUM"),
1273     + },
1274     + .driver_data = (void *)(BYT_RT5640_IN1_MAP |
1275     + BYT_RT5640_JD_SRC_JD2_IN4N |
1276     + BYT_RT5640_OVCD_TH_2000UA |
1277     + BYT_RT5640_OVCD_SF_0P75 |
1278     + BYT_RT5640_SSP0_AIF1 |
1279     + BYT_RT5640_MCLK_EN),
1280     + },
1281     {
1282     .matches = {
1283     DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),