Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.17/0104-4.17.5-all-fixes.patch



Revision 3151
Mon Jul 9 08:30:54 2018 UTC by niro
File size: 62369 bytes
Log message: -linux-4.17.5
1 niro 3151 diff --git a/Makefile b/Makefile
2     index 1d740dbe676d..e4ddbad49636 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 17
9     -SUBLEVEL = 4
10     +SUBLEVEL = 5
11     EXTRAVERSION =
12     NAME = Merciless Moray
13    
14     diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
15     index ae7b3f107893..5185300cc11f 100644
16     --- a/arch/arm/boot/dts/imx6q.dtsi
17     +++ b/arch/arm/boot/dts/imx6q.dtsi
18     @@ -96,7 +96,7 @@
19     clocks = <&clks IMX6Q_CLK_ECSPI5>,
20     <&clks IMX6Q_CLK_ECSPI5>;
21     clock-names = "ipg", "per";
22     - dmas = <&sdma 11 7 1>, <&sdma 12 7 2>;
23     + dmas = <&sdma 11 8 1>, <&sdma 12 8 2>;
24     dma-names = "rx", "tx";
25     status = "disabled";
26     };
27     diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
28     index 0cfd701809de..a1b31013ab6e 100644
29     --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
30     +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
31     @@ -189,3 +189,10 @@
32     &usb0 {
33     status = "okay";
34     };
35     +
36     +&usb2_phy0 {
37     + /*
38     + * HDMI_5V is also used as supply for the USB VBUS.
39     + */
40     + phy-supply = <&hdmi_5v>;
41     +};
42     diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
43     index 263c142a6a6c..f65e9e1cea4c 100644
44     --- a/arch/x86/include/asm/pgalloc.h
45     +++ b/arch/x86/include/asm/pgalloc.h
46     @@ -184,6 +184,9 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
47    
48     static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
49     {
50     + if (!pgtable_l5_enabled)
51     + return;
52     +
53     BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
54     free_page((unsigned long)p4d);
55     }
56     diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
57     index 7ca41bf023c9..8df9abfa947b 100644
58     --- a/drivers/acpi/osl.c
59     +++ b/drivers/acpi/osl.c
60     @@ -45,6 +45,8 @@
61     #include <linux/uaccess.h>
62     #include <linux/io-64-nonatomic-lo-hi.h>
63    
64     +#include "acpica/accommon.h"
65     +#include "acpica/acnamesp.h"
66     #include "internal.h"
67    
68     #define _COMPONENT ACPI_OS_SERVICES
69     @@ -1490,6 +1492,76 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
70     }
71     EXPORT_SYMBOL(acpi_check_region);
72    
73     +static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
74     + void *_res, void **return_value)
75     +{
76     + struct acpi_mem_space_context **mem_ctx;
77     + union acpi_operand_object *handler_obj;
78     + union acpi_operand_object *region_obj2;
79     + union acpi_operand_object *region_obj;
80     + struct resource *res = _res;
81     + acpi_status status;
82     +
83     + region_obj = acpi_ns_get_attached_object(handle);
84     + if (!region_obj)
85     + return AE_OK;
86     +
87     + handler_obj = region_obj->region.handler;
88     + if (!handler_obj)
89     + return AE_OK;
90     +
91     + if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
92     + return AE_OK;
93     +
94     + if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
95     + return AE_OK;
96     +
97     + region_obj2 = acpi_ns_get_secondary_object(region_obj);
98     + if (!region_obj2)
99     + return AE_OK;
100     +
101     + mem_ctx = (void *)&region_obj2->extra.region_context;
102     +
103     + if (!(mem_ctx[0]->address >= res->start &&
104     + mem_ctx[0]->address < res->end))
105     + return AE_OK;
106     +
107     + status = handler_obj->address_space.setup(region_obj,
108     + ACPI_REGION_DEACTIVATE,
109     + NULL, (void **)mem_ctx);
110     + if (ACPI_SUCCESS(status))
111     + region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);
112     +
113     + return status;
114     +}
115     +
116     +/**
117     + * acpi_release_memory - Release any mappings done to a memory region
118     + * @handle: Handle to namespace node
119     + * @res: Memory resource
120     + * @level: A level that terminates the search
121     + *
122     + * Walks through @handle and unmaps all SystemMemory Operation Regions that
123     + * overlap with @res and that have already been activated (mapped).
124     + *
125     + * This is a helper that allows drivers to place special requirements on memory
126     + * region that may overlap with operation regions, primarily allowing them to
127     + * safely map the region as non-cached memory.
128     + *
129     + * The unmapped Operation Regions will be automatically remapped next time they
130     + * are called, so the drivers do not need to do anything else.
131     + */
132     +acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
133     + u32 level)
134     +{
135     + if (!(res->flags & IORESOURCE_MEM))
136     + return AE_TYPE;
137     +
138     + return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
139     + acpi_deactivate_mem_region, NULL, res, NULL);
140     +}
141     +EXPORT_SYMBOL_GPL(acpi_release_memory);
142     +
143     /*
144     * Let drivers know whether the resource checks are effective
145     */
146     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
147     index 34af664b9f93..6fcc537d7779 100644
148     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
149     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
150     @@ -2080,10 +2080,18 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
151     switch (asic_type) {
152     #if defined(CONFIG_DRM_AMD_DC)
153     case CHIP_BONAIRE:
154     - case CHIP_HAWAII:
155     case CHIP_KAVERI:
156     case CHIP_KABINI:
157     case CHIP_MULLINS:
158     + /*
159     + * We have systems in the wild with these ASICs that require
160     + * LVDS and VGA support which is not supported with DC.
161     + *
162     + * Fallback to the non-DC driver here by default so as not to
163     + * cause regressions.
164     + */
165     + return amdgpu_dc > 0;
166     + case CHIP_HAWAII:
167     case CHIP_CARRIZO:
168     case CHIP_STONEY:
169     case CHIP_POLARIS11:
170     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
171     index 6d08cde8443c..b52f26e7db98 100644
172     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
173     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
174     @@ -749,8 +749,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
175     domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
176     if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
177     adev->vram_pin_size += amdgpu_bo_size(bo);
178     - if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
179     - adev->invisible_pin_size += amdgpu_bo_size(bo);
180     + adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
181     } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
182     adev->gart_pin_size += amdgpu_bo_size(bo);
183     }
184     @@ -777,25 +776,22 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
185     bo->pin_count--;
186     if (bo->pin_count)
187     return 0;
188     - for (i = 0; i < bo->placement.num_placement; i++) {
189     - bo->placements[i].lpfn = 0;
190     - bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
191     - }
192     - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
193     - if (unlikely(r)) {
194     - dev_err(adev->dev, "%p validate failed for unpin\n", bo);
195     - goto error;
196     - }
197    
198     if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
199     adev->vram_pin_size -= amdgpu_bo_size(bo);
200     - if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
201     - adev->invisible_pin_size -= amdgpu_bo_size(bo);
202     + adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
203     } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
204     adev->gart_pin_size -= amdgpu_bo_size(bo);
205     }
206    
207     -error:
208     + for (i = 0; i < bo->placement.num_placement; i++) {
209     + bo->placements[i].lpfn = 0;
210     + bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
211     + }
212     + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
213     + if (unlikely(r))
214     + dev_err(adev->dev, "%p validate failed for unpin\n", bo);
215     +
216     return r;
217     }
218    
219     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
220     index 6ea7de863041..379e9ff173f1 100644
221     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
222     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
223     @@ -73,6 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
224     uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
225     int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
226    
227     +u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
228     uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
229     uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
230    
231     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
232     index 58e495330b38..87e89cc12397 100644
233     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
234     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
235     @@ -84,6 +84,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
236     }
237    
238     hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
239     + adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
240     family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
241     version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
242     version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
243     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
244     index da55a78d7380..11aa36aa304b 100644
245     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
246     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
247     @@ -1442,7 +1442,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
248     uint64_t count;
249    
250     max_entries = min(max_entries, 16ull * 1024ull);
251     - for (count = 1; count < max_entries; ++count) {
252     + for (count = 1;
253     + count < max_entries / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
254     + ++count) {
255     uint64_t idx = pfn + count;
256    
257     if (pages_addr[idx] !=
258     @@ -1455,7 +1457,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
259     dma_addr = pages_addr;
260     } else {
261     addr = pages_addr[pfn];
262     - max_entries = count;
263     + max_entries = count * (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
264     }
265    
266     } else if (flags & AMDGPU_PTE_VALID) {
267     @@ -1470,7 +1472,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
268     if (r)
269     return r;
270    
271     - pfn += last - start + 1;
272     + pfn += (last - start + 1) / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
273     if (nodes && nodes->size == pfn) {
274     pfn = 0;
275     ++nodes;
276     @@ -2112,7 +2114,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
277     before->last = saddr - 1;
278     before->offset = tmp->offset;
279     before->flags = tmp->flags;
280     - list_add(&before->list, &tmp->list);
281     + before->bo_va = tmp->bo_va;
282     + list_add(&before->list, &tmp->bo_va->invalids);
283     }
284    
285     /* Remember mapping split at the end */
286     @@ -2122,7 +2125,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
287     after->offset = tmp->offset;
288     after->offset += after->start - tmp->start;
289     after->flags = tmp->flags;
290     - list_add(&after->list, &tmp->list);
291     + after->bo_va = tmp->bo_va;
292     + list_add(&after->list, &tmp->bo_va->invalids);
293     }
294    
295     list_del(&tmp->list);
296     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
297     index 9aca653bec07..b6333f92ba45 100644
298     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
299     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
300     @@ -96,6 +96,38 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
301     adev->gmc.visible_vram_size : end) - start;
302     }
303    
304     +/**
305     + * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
306     + *
307     + * @bo: &amdgpu_bo buffer object (must be in VRAM)
308     + *
309     + * Returns:
310     + * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
311     + */
312     +u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
313     +{
314     + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
315     + struct ttm_mem_reg *mem = &bo->tbo.mem;
316     + struct drm_mm_node *nodes = mem->mm_node;
317     + unsigned pages = mem->num_pages;
318     + u64 usage = 0;
319     +
320     + if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
321     + return 0;
322     +
323     + if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
324     + return amdgpu_bo_size(bo);
325     +
326     + while (nodes && pages) {
327     + usage += nodes->size << PAGE_SHIFT;
328     + usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
329     + pages -= nodes->size;
330     + ++nodes;
331     + }
332     +
333     + return usage;
334     +}
335     +
336     /**
337     * amdgpu_vram_mgr_new - allocate new ranges
338     *
339     @@ -135,7 +167,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
340     num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
341     }
342    
343     - nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
344     + nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
345     + GFP_KERNEL | __GFP_ZERO);
346     if (!nodes)
347     return -ENOMEM;
348    
349     @@ -190,7 +223,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
350     drm_mm_remove_node(&nodes[i]);
351     spin_unlock(&mgr->lock);
352    
353     - kfree(nodes);
354     + kvfree(nodes);
355     return r == -ENOSPC ? 0 : r;
356     }
357    
358     @@ -229,7 +262,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
359     atomic64_sub(usage, &mgr->usage);
360     atomic64_sub(vis_usage, &mgr->vis_usage);
361    
362     - kfree(mem->mm_node);
363     + kvfree(mem->mm_node);
364     mem->mm_node = NULL;
365     }
366    
367     diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
368     index 428d1928e44e..ac9617269a2f 100644
369     --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
370     +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
371     @@ -467,8 +467,8 @@ static int vce_v3_0_hw_init(void *handle)
372     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
373    
374     vce_v3_0_override_vce_clock_gating(adev, true);
375     - if (!(adev->flags & AMD_IS_APU))
376     - amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
377     +
378     + amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
379    
380     for (i = 0; i < adev->vce.num_rings; i++)
381     adev->vce.ring[i].ready = false;
382     diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
383     index 126f1276d347..9ae350dad235 100644
384     --- a/drivers/gpu/drm/amd/amdgpu/vi.c
385     +++ b/drivers/gpu/drm/amd/amdgpu/vi.c
386     @@ -728,33 +728,59 @@ static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
387     return r;
388    
389     tmp = RREG32_SMC(cntl_reg);
390     - tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
391     - CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
392     +
393     + if (adev->flags & AMD_IS_APU)
394     + tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
395     + else
396     + tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
397     + CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
398     tmp |= dividers.post_divider;
399     WREG32_SMC(cntl_reg, tmp);
400    
401     for (i = 0; i < 100; i++) {
402     - if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
403     - break;
404     + tmp = RREG32_SMC(status_reg);
405     + if (adev->flags & AMD_IS_APU) {
406     + if (tmp & 0x10000)
407     + break;
408     + } else {
409     + if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
410     + break;
411     + }
412     mdelay(10);
413     }
414     if (i == 100)
415     return -ETIMEDOUT;
416     -
417     return 0;
418     }
419    
420     +#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
421     +#define ixGNB_CLK1_STATUS 0xD822010C
422     +#define ixGNB_CLK2_DFS_CNTL 0xD8220110
423     +#define ixGNB_CLK2_STATUS 0xD822012C
424     +#define ixGNB_CLK3_DFS_CNTL 0xD8220130
425     +#define ixGNB_CLK3_STATUS 0xD822014C
426     +
427     static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
428     {
429     int r;
430    
431     - r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
432     - if (r)
433     - return r;
434     + if (adev->flags & AMD_IS_APU) {
435     + r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
436     + if (r)
437     + return r;
438    
439     - r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
440     - if (r)
441     - return r;
442     + r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
443     + if (r)
444     + return r;
445     + } else {
446     + r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
447     + if (r)
448     + return r;
449     +
450     + r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
451     + if (r)
452     + return r;
453     + }
454    
455     return 0;
456     }
457     @@ -764,6 +790,22 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
458     int r, i;
459     struct atom_clock_dividers dividers;
460     u32 tmp;
461     + u32 reg_ctrl;
462     + u32 reg_status;
463     + u32 status_mask;
464     + u32 reg_mask;
465     +
466     + if (adev->flags & AMD_IS_APU) {
467     + reg_ctrl = ixGNB_CLK3_DFS_CNTL;
468     + reg_status = ixGNB_CLK3_STATUS;
469     + status_mask = 0x00010000;
470     + reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
471     + } else {
472     + reg_ctrl = ixCG_ECLK_CNTL;
473     + reg_status = ixCG_ECLK_STATUS;
474     + status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
475     + reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
476     + }
477    
478     r = amdgpu_atombios_get_clock_dividers(adev,
479     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
480     @@ -772,24 +814,25 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
481     return r;
482    
483     for (i = 0; i < 100; i++) {
484     - if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
485     + if (RREG32_SMC(reg_status) & status_mask)
486     break;
487     mdelay(10);
488     }
489     +
490     if (i == 100)
491     return -ETIMEDOUT;
492    
493     - tmp = RREG32_SMC(ixCG_ECLK_CNTL);
494     - tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
495     - CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
496     + tmp = RREG32_SMC(reg_ctrl);
497     + tmp &= ~reg_mask;
498     tmp |= dividers.post_divider;
499     - WREG32_SMC(ixCG_ECLK_CNTL, tmp);
500     + WREG32_SMC(reg_ctrl, tmp);
501    
502     for (i = 0; i < 100; i++) {
503     - if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
504     + if (RREG32_SMC(reg_status) & status_mask)
505     break;
506     mdelay(10);
507     }
508     +
509     if (i == 100)
510     return -ETIMEDOUT;
511    
512     diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
513     index 27579443cdc5..79afffa00772 100644
514     --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
515     +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
516     @@ -46,6 +46,7 @@
517     #include <linux/moduleparam.h>
518     #include <linux/version.h>
519     #include <linux/types.h>
520     +#include <linux/pm_runtime.h>
521    
522     #include <drm/drmP.h>
523     #include <drm/drm_atomic.h>
524     @@ -927,6 +928,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
525     drm_mode_connector_update_edid_property(connector, NULL);
526     aconnector->num_modes = 0;
527     aconnector->dc_sink = NULL;
528     + aconnector->edid = NULL;
529     }
530    
531     mutex_unlock(&dev->mode_config.mutex);
532     @@ -3965,10 +3967,11 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
533     if (acrtc->base.state->event)
534     prepare_flip_isr(acrtc);
535    
536     + spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
537     +
538     surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
539     surface_updates->flip_addr = &addr;
540    
541     -
542     dc_commit_updates_for_stream(adev->dm.dc,
543     surface_updates,
544     1,
545     @@ -3981,9 +3984,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
546     __func__,
547     addr.address.grph.addr.high_part,
548     addr.address.grph.addr.low_part);
549     -
550     -
551     - spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
552     }
553    
554     static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
555     @@ -4149,6 +4149,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
556     struct drm_connector *connector;
557     struct drm_connector_state *old_con_state, *new_con_state;
558     struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
559     + int crtc_disable_count = 0;
560    
561     drm_atomic_helper_update_legacy_modeset_state(dev, state);
562    
563     @@ -4211,6 +4212,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
564     if (dm_old_crtc_state->stream)
565     remove_stream(adev, acrtc, dm_old_crtc_state->stream);
566    
567     + pm_runtime_get_noresume(dev->dev);
568     +
569     acrtc->enabled = true;
570     acrtc->hw_mode = new_crtc_state->mode;
571     crtc->hwmode = new_crtc_state->mode;
572     @@ -4348,6 +4351,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
573     struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
574     bool modeset_needed;
575    
576     + if (old_crtc_state->active && !new_crtc_state->active)
577     + crtc_disable_count++;
578     +
579     dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
580     dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
581     modeset_needed = modeset_required(
582     @@ -4396,6 +4402,14 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
583     drm_atomic_helper_wait_for_flip_done(dev, state);
584    
585     drm_atomic_helper_cleanup_planes(dev, state);
586     +
587     + /* Finally, drop a runtime PM reference for each newly disabled CRTC,
588     + * so we can put the GPU into runtime suspend if we're not driving any
589     + * displays anymore
590     + */
591     + for (i = 0; i < crtc_disable_count; i++)
592     + pm_runtime_put_autosuspend(dev->dev);
593     + pm_runtime_mark_last_busy(dev->dev);
594     }
595    
596    
597     diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
598     index e18800ed7cd1..7b8191eae68a 100644
599     --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
600     +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
601     @@ -875,7 +875,7 @@ static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
602     drm_object_attach_property(&plane->base.base,
603     props->alpha, 255);
604    
605     - if (desc->layout.xstride && desc->layout.pstride) {
606     + if (desc->layout.xstride[0] && desc->layout.pstride[0]) {
607     int ret;
608    
609     ret = drm_plane_create_rotation_property(&plane->base,
610     diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
611     index 633c18785c1e..b25cc5aa8fbe 100644
612     --- a/drivers/gpu/drm/i915/i915_irq.c
613     +++ b/drivers/gpu/drm/i915/i915_irq.c
614     @@ -1862,9 +1862,17 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
615    
616     /*
617     * Clear the PIPE*STAT regs before the IIR
618     + *
619     + * Toggle the enable bits to make sure we get an
620     + * edge in the ISR pipe event bit if we don't clear
621     + * all the enabled status bits. Otherwise the edge
622     + * triggered IIR on i965/g4x wouldn't notice that
623     + * an interrupt is still pending.
624     */
625     - if (pipe_stats[pipe])
626     - I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
627     + if (pipe_stats[pipe]) {
628     + I915_WRITE(reg, pipe_stats[pipe]);
629     + I915_WRITE(reg, enable_mask);
630     + }
631     }
632     spin_unlock(&dev_priv->irq_lock);
633     }
634     diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
635     index 8a69a9275e28..29dc0a57e466 100644
636     --- a/drivers/gpu/drm/i915/i915_reg.h
637     +++ b/drivers/gpu/drm/i915/i915_reg.h
638     @@ -2565,12 +2565,17 @@ enum i915_power_well_id {
639     #define _3D_CHICKEN _MMIO(0x2084)
640     #define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
641     #define _3D_CHICKEN2 _MMIO(0x208c)
642     +
643     +#define FF_SLICE_CHICKEN _MMIO(0x2088)
644     +#define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1)
645     +
646     /* Disables pipelining of read flushes past the SF-WIZ interface.
647     * Required on all Ironlake steppings according to the B-Spec, but the
648     * particular danger of not doing so is not specified.
649     */
650     # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
651     #define _3D_CHICKEN3 _MMIO(0x2090)
652     +#define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12)
653     #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
654     #define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
655     #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
656     diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
657     index c0a8805b277f..d26827c44fb0 100644
658     --- a/drivers/gpu/drm/i915/intel_crt.c
659     +++ b/drivers/gpu/drm/i915/intel_crt.c
660     @@ -304,6 +304,9 @@ intel_crt_mode_valid(struct drm_connector *connector,
661     int max_dotclk = dev_priv->max_dotclk_freq;
662     int max_clock;
663    
664     + if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
665     + return MODE_NO_DBLESCAN;
666     +
667     if (mode->clock < 25000)
668     return MODE_CLOCK_LOW;
669    
670     @@ -337,6 +340,12 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
671     struct intel_crtc_state *pipe_config,
672     struct drm_connector_state *conn_state)
673     {
674     + struct drm_display_mode *adjusted_mode =
675     + &pipe_config->base.adjusted_mode;
676     +
677     + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
678     + return false;
679     +
680     return true;
681     }
682    
683     @@ -344,6 +353,12 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
684     struct intel_crtc_state *pipe_config,
685     struct drm_connector_state *conn_state)
686     {
687     + struct drm_display_mode *adjusted_mode =
688     + &pipe_config->base.adjusted_mode;
689     +
690     + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
691     + return false;
692     +
693     pipe_config->has_pch_encoder = true;
694    
695     return true;
696     @@ -354,6 +369,11 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
697     struct drm_connector_state *conn_state)
698     {
699     struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
700     + struct drm_display_mode *adjusted_mode =
701     + &pipe_config->base.adjusted_mode;
702     +
703     + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
704     + return false;
705    
706     pipe_config->has_pch_encoder = true;
707    
708     diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
709     index 8c2d778560f0..1d14ebc7480d 100644
710     --- a/drivers/gpu/drm/i915/intel_ddi.c
711     +++ b/drivers/gpu/drm/i915/intel_ddi.c
712     @@ -2205,7 +2205,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
713     intel_prepare_dp_ddi_buffers(encoder, crtc_state);
714    
715     intel_ddi_init_dp_buf_reg(encoder);
716     - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
717     + if (!is_mst)
718     + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
719     intel_dp_start_link_train(intel_dp);
720     if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
721     intel_dp_stop_link_train(intel_dp);
722     @@ -2303,12 +2304,15 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
723     struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
724     struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
725     struct intel_dp *intel_dp = &dig_port->dp;
726     + bool is_mst = intel_crtc_has_type(old_crtc_state,
727     + INTEL_OUTPUT_DP_MST);
728    
729     /*
730     * Power down sink before disabling the port, otherwise we end
731     * up getting interrupts from the sink on detecting link loss.
732     */
733     - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
734     + if (!is_mst)
735     + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
736    
737     intel_disable_ddi_buf(encoder);
738    
739     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
740     index 56004ffbd8bb..84011e08adc3 100644
741     --- a/drivers/gpu/drm/i915/intel_display.c
742     +++ b/drivers/gpu/drm/i915/intel_display.c
743     @@ -14211,12 +14211,22 @@ static enum drm_mode_status
744     intel_mode_valid(struct drm_device *dev,
745     const struct drm_display_mode *mode)
746     {
747     + /*
748     + * Can't reject DBLSCAN here because Xorg ddxen can add piles
749     + * of DBLSCAN modes to the output's mode list when they detect
750     + * the scaling mode property on the connector. And they don't
751     + * ask the kernel to validate those modes in any way until
752     + * modeset time at which point the client gets a protocol error.
753     + * So in order to not upset those clients we silently ignore the
754     + * DBLSCAN flag on such connectors. For other connectors we will
755     + * reject modes with the DBLSCAN flag in encoder->compute_config().
756     + * And we always reject DBLSCAN modes in connector->mode_valid()
757     + * as we never want such modes on the connector's mode list.
758     + */
759     +
760     if (mode->vscan > 1)
761     return MODE_NO_VSCAN;
762    
763     - if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
764     - return MODE_NO_DBLESCAN;
765     -
766     if (mode->flags & DRM_MODE_FLAG_HSKEW)
767     return MODE_H_ILLEGAL;
768    
769     diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
770     index b7b4cfdeb974..cd6e87756509 100644
771     --- a/drivers/gpu/drm/i915/intel_dp.c
772     +++ b/drivers/gpu/drm/i915/intel_dp.c
773     @@ -423,6 +423,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
774     int max_rate, mode_rate, max_lanes, max_link_clock;
775     int max_dotclk;
776    
777     + if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
778     + return MODE_NO_DBLESCAN;
779     +
780     max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
781    
782     if (intel_dp_is_edp(intel_dp) && fixed_mode) {
783     @@ -1760,7 +1763,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
784     conn_state->scaling_mode);
785     }
786    
787     - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
788     + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
789     + return false;
790     +
791     + if (HAS_GMCH_DISPLAY(dev_priv) &&
792     adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
793     return false;
794    
795     @@ -2759,16 +2765,6 @@ static void intel_disable_dp(struct intel_encoder *encoder,
796     static void g4x_disable_dp(struct intel_encoder *encoder,
797     const struct intel_crtc_state *old_crtc_state,
798     const struct drm_connector_state *old_conn_state)
799     -{
800     - intel_disable_dp(encoder, old_crtc_state, old_conn_state);
801     -
802     - /* disable the port before the pipe on g4x */
803     - intel_dp_link_down(encoder, old_crtc_state);
804     -}
805     -
806     -static void ilk_disable_dp(struct intel_encoder *encoder,
807     - const struct intel_crtc_state *old_crtc_state,
808     - const struct drm_connector_state *old_conn_state)
809     {
810     intel_disable_dp(encoder, old_crtc_state, old_conn_state);
811     }
812     @@ -2784,13 +2780,19 @@ static void vlv_disable_dp(struct intel_encoder *encoder,
813     intel_disable_dp(encoder, old_crtc_state, old_conn_state);
814     }
815    
816     -static void ilk_post_disable_dp(struct intel_encoder *encoder,
817     +static void g4x_post_disable_dp(struct intel_encoder *encoder,
818     const struct intel_crtc_state *old_crtc_state,
819     const struct drm_connector_state *old_conn_state)
820     {
821     struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
822     enum port port = encoder->port;
823    
824     + /*
825     + * Bspec does not list a specific disable sequence for g4x DP.
826     + * Follow the ilk+ sequence (disable pipe before the port) for
827     + * g4x DP as it does not suffer from underruns like the normal
828     + * g4x modeset sequence (disable pipe after the port).
829     + */
830     intel_dp_link_down(encoder, old_crtc_state);
831    
832     /* Only ilk+ has port A */
833     @@ -6327,7 +6329,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
834     drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
835     drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
836    
837     - if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
838     + if (!HAS_GMCH_DISPLAY(dev_priv))
839     connector->interlace_allowed = true;
840     connector->doublescan_allowed = 0;
841    
842     @@ -6426,15 +6428,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
843     intel_encoder->enable = vlv_enable_dp;
844     intel_encoder->disable = vlv_disable_dp;
845     intel_encoder->post_disable = vlv_post_disable_dp;
846     - } else if (INTEL_GEN(dev_priv) >= 5) {
847     - intel_encoder->pre_enable = g4x_pre_enable_dp;
848     - intel_encoder->enable = g4x_enable_dp;
849     - intel_encoder->disable = ilk_disable_dp;
850     - intel_encoder->post_disable = ilk_post_disable_dp;
851     } else {
852     intel_encoder->pre_enable = g4x_pre_enable_dp;
853     intel_encoder->enable = g4x_enable_dp;
854     intel_encoder->disable = g4x_disable_dp;
855     + intel_encoder->post_disable = g4x_post_disable_dp;
856     }
857    
858     intel_dig_port->dp.output_reg = output_reg;
859     diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
860     index c3de0918ee13..5890500a3a8b 100644
861     --- a/drivers/gpu/drm/i915/intel_dp_mst.c
862     +++ b/drivers/gpu/drm/i915/intel_dp_mst.c
863     @@ -48,6 +48,9 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
864     bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
865     DP_DPCD_QUIRK_LIMITED_M_N);
866    
867     + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
868     + return false;
869     +
870     pipe_config->has_pch_encoder = false;
871     bpp = 24;
872     if (intel_dp->compliance.test_data.bpc) {
873     @@ -180,9 +183,11 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
874     intel_dp->active_mst_links--;
875    
876     intel_mst->connector = NULL;
877     - if (intel_dp->active_mst_links == 0)
878     + if (intel_dp->active_mst_links == 0) {
879     + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
880     intel_dig_port->base.post_disable(&intel_dig_port->base,
881     old_crtc_state, NULL);
882     + }
883    
884     DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
885     }
886     @@ -223,7 +228,11 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
887    
888     DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
889    
890     + if (intel_dp->active_mst_links == 0)
891     + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
892     +
893     drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
894     +
895     if (intel_dp->active_mst_links == 0)
896     intel_dig_port->base.pre_enable(&intel_dig_port->base,
897     pipe_config, NULL);
898     @@ -360,6 +369,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
899     if (!intel_dp)
900     return MODE_ERROR;
901    
902     + if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
903     + return MODE_NO_DBLESCAN;
904     +
905     max_link_clock = intel_dp_max_link_rate(intel_dp);
906     max_lanes = intel_dp_max_lane_count(intel_dp);
907    
908     diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
909     index 51a1d6868b1e..384b37e2da70 100644
910     --- a/drivers/gpu/drm/i915/intel_dsi.c
911     +++ b/drivers/gpu/drm/i915/intel_dsi.c
912     @@ -326,6 +326,9 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
913     conn_state->scaling_mode);
914     }
915    
916     + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
917     + return false;
918     +
919     /* DSI uses short packets for sync events, so clear mode flags for DSI */
920     adjusted_mode->flags = 0;
921    
922     @@ -1266,6 +1269,9 @@ intel_dsi_mode_valid(struct drm_connector *connector,
923    
924     DRM_DEBUG_KMS("\n");
925    
926     + if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
927     + return MODE_NO_DBLESCAN;
928     +
929     if (fixed_mode) {
930     if (mode->hdisplay > fixed_mode->hdisplay)
931     return MODE_PANEL;
932     diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
933     index eb0c559b2715..6604806f89d5 100644
934     --- a/drivers/gpu/drm/i915/intel_dvo.c
935     +++ b/drivers/gpu/drm/i915/intel_dvo.c
936     @@ -219,6 +219,9 @@ intel_dvo_mode_valid(struct drm_connector *connector,
937     int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
938     int target_clock = mode->clock;
939    
940     + if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
941     + return MODE_NO_DBLESCAN;
942     +
943     /* XXX: Validate clock range */
944    
945     if (fixed_mode) {
946     @@ -254,6 +257,9 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
947     if (fixed_mode)
948     intel_fixed_panel_mode(fixed_mode, adjusted_mode);
949    
950     + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
951     + return false;
952     +
953     return true;
954     }
955    
956     diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
957     index 1baef4ac7ecb..383f9df4145e 100644
958     --- a/drivers/gpu/drm/i915/intel_hdmi.c
959     +++ b/drivers/gpu/drm/i915/intel_hdmi.c
960     @@ -1557,6 +1557,9 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
961     bool force_dvi =
962     READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
963    
964     + if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
965     + return MODE_NO_DBLESCAN;
966     +
967     clock = mode->clock;
968    
969     if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
970     @@ -1677,6 +1680,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
971     int desired_bpp;
972     bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
973    
974     + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
975     + return false;
976     +
977     pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
978    
979     if (pipe_config->has_hdmi_sink)
980     diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
981     index 8704f7f8d072..df5ba1de8aea 100644
982     --- a/drivers/gpu/drm/i915/intel_lrc.c
983     +++ b/drivers/gpu/drm/i915/intel_lrc.c
984     @@ -1386,11 +1386,21 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
985     /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
986     batch = gen8_emit_flush_coherentl3_wa(engine, batch);
987    
988     + *batch++ = MI_LOAD_REGISTER_IMM(3);
989     +
990     /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
991     - *batch++ = MI_LOAD_REGISTER_IMM(1);
992     *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
993     *batch++ = _MASKED_BIT_DISABLE(
994     GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
995     +
996     + /* BSpec: 11391 */
997     + *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN);
998     + *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX);
999     +
1000     + /* BSpec: 11299 */
1001     + *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3);
1002     + *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX);
1003     +
1004     *batch++ = MI_NOOP;
1005    
1006     /* WaClearSlmSpaceAtContextSwitch:kbl */
1007     diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
1008     index e125d16a1aa7..34dd1e5233ac 100644
1009     --- a/drivers/gpu/drm/i915/intel_lvds.c
1010     +++ b/drivers/gpu/drm/i915/intel_lvds.c
1011     @@ -380,6 +380,8 @@ intel_lvds_mode_valid(struct drm_connector *connector,
1012     struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
1013     int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
1014    
1015     + if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1016     + return MODE_NO_DBLESCAN;
1017     if (mode->hdisplay > fixed_mode->hdisplay)
1018     return MODE_PANEL;
1019     if (mode->vdisplay > fixed_mode->vdisplay)
1020     @@ -429,6 +431,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
1021     intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1022     adjusted_mode);
1023    
1024     + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
1025     + return false;
1026     +
1027     if (HAS_PCH_SPLIT(dev_priv)) {
1028     pipe_config->has_pch_encoder = true;
1029    
1030     diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
1031     index 96e213ec202d..d253e3a06e30 100644
1032     --- a/drivers/gpu/drm/i915/intel_sdvo.c
1033     +++ b/drivers/gpu/drm/i915/intel_sdvo.c
1034     @@ -1160,6 +1160,9 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1035     adjusted_mode);
1036     }
1037    
1038     + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
1039     + return false;
1040     +
1041     /*
1042     * Make the CRTC code factor in the SDVO pixel multiplier. The
1043     * SDVO device will factor out the multiplier during mode_set.
1044     @@ -1621,6 +1624,9 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
1045     struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1046     int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
1047    
1048     + if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1049     + return MODE_NO_DBLESCAN;
1050     +
1051     if (intel_sdvo->pixel_clock_min > mode->clock)
1052     return MODE_CLOCK_LOW;
1053    
1054     diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
1055     index 885fc3809f7f..b55b5c157e38 100644
1056     --- a/drivers/gpu/drm/i915/intel_tv.c
1057     +++ b/drivers/gpu/drm/i915/intel_tv.c
1058     @@ -850,6 +850,9 @@ intel_tv_mode_valid(struct drm_connector *connector,
1059     const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
1060     int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
1061    
1062     + if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1063     + return MODE_NO_DBLESCAN;
1064     +
1065     if (mode->clock > max_dotclk)
1066     return MODE_CLOCK_HIGH;
1067    
1068     @@ -877,16 +880,21 @@ intel_tv_compute_config(struct intel_encoder *encoder,
1069     struct drm_connector_state *conn_state)
1070     {
1071     const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
1072     + struct drm_display_mode *adjusted_mode =
1073     + &pipe_config->base.adjusted_mode;
1074    
1075     if (!tv_mode)
1076     return false;
1077    
1078     - pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock;
1079     + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
1080     + return false;
1081     +
1082     + adjusted_mode->crtc_clock = tv_mode->clock;
1083     DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
1084     pipe_config->pipe_bpp = 8*3;
1085    
1086     /* TV has it's own notion of sync and other mode flags, so clear them. */
1087     - pipe_config->base.adjusted_mode.flags = 0;
1088     + adjusted_mode->flags = 0;
1089    
1090     /*
1091     * FIXME: We don't check whether the input mode is actually what we want
1092     diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
1093     index ecb35ed0eac8..61e51516fec5 100644
1094     --- a/drivers/gpu/drm/qxl/qxl_display.c
1095     +++ b/drivers/gpu/drm/qxl/qxl_display.c
1096     @@ -630,7 +630,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
1097     struct qxl_cursor_cmd *cmd;
1098     struct qxl_cursor *cursor;
1099     struct drm_gem_object *obj;
1100     - struct qxl_bo *cursor_bo = NULL, *user_bo = NULL;
1101     + struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
1102     int ret;
1103     void *user_ptr;
1104     int size = 64*64*4;
1105     @@ -684,7 +684,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
1106     cursor_bo, 0);
1107     cmd->type = QXL_CURSOR_SET;
1108    
1109     - qxl_bo_unref(&qcrtc->cursor_bo);
1110     + old_cursor_bo = qcrtc->cursor_bo;
1111     qcrtc->cursor_bo = cursor_bo;
1112     cursor_bo = NULL;
1113     } else {
1114     @@ -704,6 +704,9 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
1115     qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
1116     qxl_release_fence_buffer_objects(release);
1117    
1118     + if (old_cursor_bo)
1119     + qxl_bo_unref(&old_cursor_bo);
1120     +
1121     qxl_bo_unref(&cursor_bo);
1122    
1123     return;
1124     diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
1125     index cca4b3c9aeb5..1963cc1b1cc5 100644
1126     --- a/drivers/gpu/drm/sti/Kconfig
1127     +++ b/drivers/gpu/drm/sti/Kconfig
1128     @@ -1,6 +1,6 @@
1129     config DRM_STI
1130     tristate "DRM Support for STMicroelectronics SoC stiH4xx Series"
1131     - depends on DRM && (ARCH_STI || ARCH_MULTIPLATFORM)
1132     + depends on OF && DRM && (ARCH_STI || ARCH_MULTIPLATFORM)
1133     select RESET_CONTROLLER
1134     select DRM_KMS_HELPER
1135     select DRM_GEM_CMA_HELPER
1136     @@ -8,6 +8,5 @@ config DRM_STI
1137     select DRM_PANEL
1138     select FW_LOADER
1139     select SND_SOC_HDMI_CODEC if SND_SOC
1140     - select OF
1141     help
1142     Choose this option to enable DRM on STM stiH4xx chipset
1143     diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
1144     index c3d92d537240..8045871335b5 100644
1145     --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
1146     +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
1147     @@ -17,7 +17,6 @@
1148     #include <drm/drm_encoder.h>
1149     #include <drm/drm_modes.h>
1150     #include <drm/drm_of.h>
1151     -#include <drm/drm_panel.h>
1152    
1153     #include <uapi/drm/drm_mode.h>
1154    
1155     @@ -350,9 +349,6 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
1156     static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
1157     const struct drm_display_mode *mode)
1158     {
1159     - struct drm_panel *panel = tcon->panel;
1160     - struct drm_connector *connector = panel->connector;
1161     - struct drm_display_info display_info = connector->display_info;
1162     unsigned int bp, hsync, vsync;
1163     u8 clk_delay;
1164     u32 val = 0;
1165     @@ -410,27 +406,6 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
1166     if (mode->flags & DRM_MODE_FLAG_PVSYNC)
1167     val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
1168    
1169     - /*
1170     - * On A20 and similar SoCs, the only way to achieve Positive Edge
1171     - * (Rising Edge), is setting dclk clock phase to 2/3(240°).
1172     - * By default TCON works in Negative Edge(Falling Edge),
1173     - * this is why phase is set to 0 in that case.
1174     - * Unfortunately there's no way to logically invert dclk through
1175     - * IO_POL register.
1176     - * The only acceptable way to work, triple checked with scope,
1177     - * is using clock phase set to 0° for Negative Edge and set to 240°
1178     - * for Positive Edge.
1179     - * On A33 and similar SoCs there would be a 90° phase option,
1180     - * but it divides also dclk by 2.
1181     - * Following code is a way to avoid quirks all around TCON
1182     - * and DOTCLOCK drivers.
1183     - */
1184     - if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
1185     - clk_set_phase(tcon->dclk, 240);
1186     -
1187     - if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
1188     - clk_set_phase(tcon->dclk, 0);
1189     -
1190     regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
1191     SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
1192     val);
1193     diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
1194     index 7a2da7f9d4dc..5485b35fe553 100644
1195     --- a/drivers/iio/accel/mma8452.c
1196     +++ b/drivers/iio/accel/mma8452.c
1197     @@ -1034,7 +1034,7 @@ static irqreturn_t mma8452_interrupt(int irq, void *p)
1198     if (src < 0)
1199     return IRQ_NONE;
1200    
1201     - if (!(src & data->chip_info->enabled_events))
1202     + if (!(src & (data->chip_info->enabled_events | MMA8452_INT_DRDY)))
1203     return IRQ_NONE;
1204    
1205     if (src & MMA8452_INT_DRDY) {
1206     diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
1207     index 772dad65396e..f32c12439eee 100644
1208     --- a/drivers/staging/android/ion/ion_heap.c
1209     +++ b/drivers/staging/android/ion/ion_heap.c
1210     @@ -29,7 +29,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
1211     struct page **tmp = pages;
1212    
1213     if (!pages)
1214     - return NULL;
1215     + return ERR_PTR(-ENOMEM);
1216    
1217     if (buffer->flags & ION_FLAG_CACHED)
1218     pgprot = PAGE_KERNEL;
1219     diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
1220     index cbe98bc2b998..431742201709 100644
1221     --- a/drivers/tty/n_tty.c
1222     +++ b/drivers/tty/n_tty.c
1223     @@ -124,6 +124,8 @@ struct n_tty_data {
1224     struct mutex output_lock;
1225     };
1226    
1227     +#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
1228     +
1229     static inline size_t read_cnt(struct n_tty_data *ldata)
1230     {
1231     return ldata->read_head - ldata->read_tail;
1232     @@ -141,6 +143,7 @@ static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i)
1233    
1234     static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i)
1235     {
1236     + smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */
1237     return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
1238     }
1239    
1240     @@ -316,9 +319,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
1241     static void reset_buffer_flags(struct n_tty_data *ldata)
1242     {
1243     ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
1244     - ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
1245     ldata->commit_head = 0;
1246     - ldata->echo_mark = 0;
1247     ldata->line_start = 0;
1248    
1249     ldata->erasing = 0;
1250     @@ -617,12 +618,19 @@ static size_t __process_echoes(struct tty_struct *tty)
1251     old_space = space = tty_write_room(tty);
1252    
1253     tail = ldata->echo_tail;
1254     - while (ldata->echo_commit != tail) {
1255     + while (MASK(ldata->echo_commit) != MASK(tail)) {
1256     c = echo_buf(ldata, tail);
1257     if (c == ECHO_OP_START) {
1258     unsigned char op;
1259     int no_space_left = 0;
1260    
1261     + /*
1262     + * Since add_echo_byte() is called without holding
1263     + * output_lock, we might see only portion of multi-byte
1264     + * operation.
1265     + */
1266     + if (MASK(ldata->echo_commit) == MASK(tail + 1))
1267     + goto not_yet_stored;
1268     /*
1269     * If the buffer byte is the start of a multi-byte
1270     * operation, get the next byte, which is either the
1271     @@ -634,6 +642,8 @@ static size_t __process_echoes(struct tty_struct *tty)
1272     unsigned int num_chars, num_bs;
1273    
1274     case ECHO_OP_ERASE_TAB:
1275     + if (MASK(ldata->echo_commit) == MASK(tail + 2))
1276     + goto not_yet_stored;
1277     num_chars = echo_buf(ldata, tail + 2);
1278    
1279     /*
1280     @@ -728,7 +738,8 @@ static size_t __process_echoes(struct tty_struct *tty)
1281     /* If the echo buffer is nearly full (so that the possibility exists
1282     * of echo overrun before the next commit), then discard enough
1283     * data at the tail to prevent a subsequent overrun */
1284     - while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
1285     + while (ldata->echo_commit > tail &&
1286     + ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
1287     if (echo_buf(ldata, tail) == ECHO_OP_START) {
1288     if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
1289     tail += 3;
1290     @@ -738,6 +749,7 @@ static size_t __process_echoes(struct tty_struct *tty)
1291     tail++;
1292     }
1293    
1294     + not_yet_stored:
1295     ldata->echo_tail = tail;
1296     return old_space - space;
1297     }
1298     @@ -748,6 +760,7 @@ static void commit_echoes(struct tty_struct *tty)
1299     size_t nr, old, echoed;
1300     size_t head;
1301    
1302     + mutex_lock(&ldata->output_lock);
1303     head = ldata->echo_head;
1304     ldata->echo_mark = head;
1305     old = ldata->echo_commit - ldata->echo_tail;
1306     @@ -756,10 +769,12 @@ static void commit_echoes(struct tty_struct *tty)
1307     * is over the threshold (and try again each time another
1308     * block is accumulated) */
1309     nr = head - ldata->echo_tail;
1310     - if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK))
1311     + if (nr < ECHO_COMMIT_WATERMARK ||
1312     + (nr % ECHO_BLOCK > old % ECHO_BLOCK)) {
1313     + mutex_unlock(&ldata->output_lock);
1314     return;
1315     + }
1316    
1317     - mutex_lock(&ldata->output_lock);
1318     ldata->echo_commit = head;
1319     echoed = __process_echoes(tty);
1320     mutex_unlock(&ldata->output_lock);
1321     @@ -810,7 +825,9 @@ static void flush_echoes(struct tty_struct *tty)
1322    
1323     static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
1324     {
1325     - *echo_buf_addr(ldata, ldata->echo_head++) = c;
1326     + *echo_buf_addr(ldata, ldata->echo_head) = c;
1327     + smp_wmb(); /* Matches smp_rmb() in echo_buf(). */
1328     + ldata->echo_head++;
1329     }
1330    
1331     /**
1332     @@ -978,14 +995,15 @@ static void eraser(unsigned char c, struct tty_struct *tty)
1333     }
1334    
1335     seen_alnums = 0;
1336     - while (ldata->read_head != ldata->canon_head) {
1337     + while (MASK(ldata->read_head) != MASK(ldata->canon_head)) {
1338     head = ldata->read_head;
1339    
1340     /* erase a single possibly multibyte character */
1341     do {
1342     head--;
1343     c = read_buf(ldata, head);
1344     - } while (is_continuation(c, tty) && head != ldata->canon_head);
1345     + } while (is_continuation(c, tty) &&
1346     + MASK(head) != MASK(ldata->canon_head));
1347    
1348     /* do not partially erase */
1349     if (is_continuation(c, tty))
1350     @@ -1027,7 +1045,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
1351     * This info is used to go back the correct
1352     * number of columns.
1353     */
1354     - while (tail != ldata->canon_head) {
1355     + while (MASK(tail) != MASK(ldata->canon_head)) {
1356     tail--;
1357     c = read_buf(ldata, tail);
1358     if (c == '\t') {
1359     @@ -1302,7 +1320,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
1360     finish_erasing(ldata);
1361     echo_char(c, tty);
1362     echo_char_raw('\n', ldata);
1363     - while (tail != ldata->read_head) {
1364     + while (MASK(tail) != MASK(ldata->read_head)) {
1365     echo_char(read_buf(ldata, tail), tty);
1366     tail++;
1367     }
1368     @@ -1878,30 +1896,21 @@ static int n_tty_open(struct tty_struct *tty)
1369     struct n_tty_data *ldata;
1370    
1371     /* Currently a malloc failure here can panic */
1372     - ldata = vmalloc(sizeof(*ldata));
1373     + ldata = vzalloc(sizeof(*ldata));
1374     if (!ldata)
1375     - goto err;
1376     + return -ENOMEM;
1377    
1378     ldata->overrun_time = jiffies;
1379     mutex_init(&ldata->atomic_read_lock);
1380     mutex_init(&ldata->output_lock);
1381    
1382     tty->disc_data = ldata;
1383     - reset_buffer_flags(tty->disc_data);
1384     - ldata->column = 0;
1385     - ldata->canon_column = 0;
1386     - ldata->num_overrun = 0;
1387     - ldata->no_room = 0;
1388     - ldata->lnext = 0;
1389     tty->closing = 0;
1390     /* indicate buffer work may resume */
1391     clear_bit(TTY_LDISC_HALTED, &tty->flags);
1392     n_tty_set_termios(tty, NULL);
1393     tty_unthrottle(tty);
1394     -
1395     return 0;
1396     -err:
1397     - return -ENOMEM;
1398     }
1399    
1400     static inline int input_available_p(struct tty_struct *tty, int poll)
1401     @@ -2411,7 +2420,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata)
1402     tail = ldata->read_tail;
1403     nr = head - tail;
1404     /* Skip EOF-chars.. */
1405     - while (head != tail) {
1406     + while (MASK(head) != MASK(tail)) {
1407     if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) &&
1408     read_buf(ldata, tail) == __DISABLED_CHAR)
1409     nr--;
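
The n_tty.c changes above share two themes: head/tail positions are compared through MASK(), i.e. by their slot inside the fixed-size buffer rather than by the raw free-running counters, and add_echo_byte() now stores the byte before advancing echo_head, with an smp_wmb() that the patch's own comment says pairs with smp_rmb() in echo_buf(), so a reader never observes the new index ahead of its data. Below is a minimal userspace sketch of that single-producer/single-consumer pattern, using C11 release/acquire atomics in place of the kernel barriers; all names are illustrative and not taken from the driver.

/* spsc_ring.c - illustrative only; mirrors the masked-index plus
 * publish-then-advance pattern of the n_tty echo buffer, not the kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define RING_SIZE 64                      /* must be a power of two */
#define MASK(x)   ((x) & (RING_SIZE - 1)) /* map a free-running index to a slot */

struct ring {
        unsigned char buf[RING_SIZE];
        _Atomic size_t head;              /* advanced by the producer only */
        _Atomic size_t tail;              /* advanced by the consumer only */
};

static struct ring echo_ring;             /* static storage: zero-initialized */

/* Producer: write the data first, then publish the new index with release
 * semantics (the userspace counterpart of "store byte; smp_wmb(); head++"). */
static bool ring_put(struct ring *r, unsigned char c)
{
        size_t head = atomic_load_explicit(&r->head, memory_order_relaxed);
        size_t tail = atomic_load_explicit(&r->tail, memory_order_acquire);

        if (head - tail >= RING_SIZE)     /* full */
                return false;

        r->buf[MASK(head)] = c;           /* data before index ... */
        atomic_store_explicit(&r->head, head + 1, memory_order_release);
        return true;                      /* ... so readers never see index first */
}

/* Consumer: acquire the head index, after which buf[MASK(tail)] is valid. */
static bool ring_get(struct ring *r, unsigned char *c)
{
        size_t tail = atomic_load_explicit(&r->tail, memory_order_relaxed);
        size_t head = atomic_load_explicit(&r->head, memory_order_acquire);

        if (head == tail)                 /* empty */
                return false;

        *c = r->buf[MASK(tail)];
        atomic_store_explicit(&r->tail, tail + 1, memory_order_release);
        return true;
}

int main(void)
{
        unsigned char c;

        ring_put(&echo_ring, 'x');
        if (ring_get(&echo_ring, &c))
                printf("got %c\n", c);
        return 0;
}

The release store on head plays the role of the smp_wmb(): everything written to buf[] before it is visible to any reader that loads head with acquire semantics.
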
1410     diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
1411     index df93b727e984..9e59f4788589 100644
1412     --- a/drivers/tty/serdev/core.c
1413     +++ b/drivers/tty/serdev/core.c
1414     @@ -617,6 +617,7 @@ EXPORT_SYMBOL_GPL(__serdev_device_driver_register);
1415     static void __exit serdev_exit(void)
1416     {
1417     bus_unregister(&serdev_bus_type);
1418     + ida_destroy(&ctrl_ida);
1419     }
1420     module_exit(serdev_exit);
1421    
1422     diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
1423     index 3296a05cda2d..f80a300b5d68 100644
1424     --- a/drivers/tty/serial/8250/8250_pci.c
1425     +++ b/drivers/tty/serial/8250/8250_pci.c
1426     @@ -3339,9 +3339,7 @@ static const struct pci_device_id blacklist[] = {
1427     /* multi-io cards handled by parport_serial */
1428     { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
1429     { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
1430     - { PCI_DEVICE(0x4348, 0x7173), }, /* WCH CH355 4S */
1431     { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
1432     - { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
1433    
1434     /* Moxa Smartio MUE boards handled by 8250_moxa */
1435     { PCI_VDEVICE(MOXA, 0x1024), },
1436     diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
1437     index f97251f39c26..ec17c9fd6470 100644
1438     --- a/drivers/tty/vt/vt.c
1439     +++ b/drivers/tty/vt/vt.c
1440     @@ -784,7 +784,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
1441     if (!*vc->vc_uni_pagedir_loc)
1442     con_set_default_unimap(vc);
1443    
1444     - vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
1445     + vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL);
1446     if (!vc->vc_screenbuf)
1447     goto err_free;
1448    
1449     @@ -871,7 +871,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
1450    
1451     if (new_screen_size > (4 << 20))
1452     return -EINVAL;
1453     - newscreen = kmalloc(new_screen_size, GFP_USER);
1454     + newscreen = kzalloc(new_screen_size, GFP_USER);
1455     if (!newscreen)
1456     return -ENOMEM;
1457    
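
The two vt.c hunks replace kmalloc() allocations of console screen buffers with their zeroing variants, so a buffer that is only partially written before being read back never exposes stale heap contents. The userspace analogue is reaching for calloc() (or an explicit memset()) whenever the whole buffer may be consumed before every byte has been written; a small illustrative sketch with made-up names:

/* zeroed_alloc.c - illustrative: why a zeroing allocator matters when the
 * whole buffer can be read before it has been fully written. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned short *alloc_screen(size_t cells)
{
        /* calloc() returns zero-filled memory, so scanning the whole buffer
         * (e.g. copying it out) never observes leftover heap data; with a
         * bare malloc() the unwritten tail would contain whatever was there. */
        return calloc(cells, sizeof(unsigned short));
}

int main(void)
{
        size_t cells = 80 * 25;
        unsigned short *screen = alloc_screen(cells);

        if (!screen)
                return 1;

        /* Only part of the buffer is written ... */
        memset(screen, 0x20, 10 * sizeof(unsigned short));

        /* ... yet the untouched region reads back as blank, defined values. */
        printf("last cell: %u\n", (unsigned)screen[cells - 1]);

        free(screen);
        return 0;
}
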
1458     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1459     index 7b366a6c0b49..998b32d0167e 100644
1460     --- a/drivers/usb/class/cdc-acm.c
1461     +++ b/drivers/usb/class/cdc-acm.c
1462     @@ -1758,6 +1758,9 @@ static const struct usb_device_id acm_ids[] = {
1463     { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
1464     .driver_info = SINGLE_RX_URB,
1465     },
1466     + { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
1467     + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1468     + },
1469     { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
1470     .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1471     },
1472     diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
1473     index e34ad5e65350..6baa75da7907 100644
1474     --- a/drivers/usb/dwc2/hcd_queue.c
1475     +++ b/drivers/usb/dwc2/hcd_queue.c
1476     @@ -383,7 +383,7 @@ static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
1477     /* Get the map and adjust if this is a multi_tt hub */
1478     map = qh->dwc_tt->periodic_bitmaps;
1479     if (qh->dwc_tt->usb_tt->multi)
1480     - map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
1481     + map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
1482    
1483     return map;
1484     }
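
The dwc2 hunk corrects an off-by-one when selecting the per-port bitmap behind a multi-TT hub: USB port numbers count from 1 while the bitmaps are laid out from index 0, so the offset has to be scaled by (qh->ttport - 1). The same 1-based-identifier to 0-based-slot translation in plain C, with illustrative names and sizes:

/* port_bitmap.c - illustrative: translate a 1-based port number into a
 * 0-based slot before indexing per-port state. */
#include <stdio.h>

#define NUM_PORTS       4
#define WORDS_PER_PORT  2

static unsigned long bitmaps[NUM_PORTS * WORDS_PER_PORT];

static unsigned long *port_bitmap(unsigned int port)   /* port is 1..NUM_PORTS */
{
        if (port < 1 || port > NUM_PORTS)
                return NULL;

        /* Scaling by `port` instead of `port - 1` would skip port 1's slot
         * and run one port past the end of the array for the last port. */
        return bitmaps + WORDS_PER_PORT * (port - 1);
}

int main(void)
{
        unsigned long *map = port_bitmap(1);

        if (map)
                printf("port 1 bitmap starts at offset %ld\n",
                       (long)(map - bitmaps));
        return 0;
}
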
1485     diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
1486     index e5ace8995b3b..99e7547f234f 100644
1487     --- a/drivers/usb/host/xhci-mem.c
1488     +++ b/drivers/usb/host/xhci-mem.c
1489     @@ -878,12 +878,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
1490    
1491     dev = xhci->devs[slot_id];
1492    
1493     - trace_xhci_free_virt_device(dev);
1494     -
1495     xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
1496     if (!dev)
1497     return;
1498    
1499     + trace_xhci_free_virt_device(dev);
1500     +
1501     if (dev->tt_info)
1502     old_active_eps = dev->tt_info->active_eps;
1503    
1504     diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
1505     index 410544ffe78f..88b427434bd8 100644
1506     --- a/drivers/usb/host/xhci-trace.h
1507     +++ b/drivers/usb/host/xhci-trace.h
1508     @@ -171,6 +171,37 @@ DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
1509     TP_ARGS(ring, trb)
1510     );
1511    
1512     +DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
1513     + TP_PROTO(struct xhci_virt_device *vdev),
1514     + TP_ARGS(vdev),
1515     + TP_STRUCT__entry(
1516     + __field(void *, vdev)
1517     + __field(unsigned long long, out_ctx)
1518     + __field(unsigned long long, in_ctx)
1519     + __field(u8, fake_port)
1520     + __field(u8, real_port)
1521     + __field(u16, current_mel)
1522     +
1523     + ),
1524     + TP_fast_assign(
1525     + __entry->vdev = vdev;
1526     + __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
1527     + __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
1528     + __entry->fake_port = (u8) vdev->fake_port;
1529     + __entry->real_port = (u8) vdev->real_port;
1530     + __entry->current_mel = (u16) vdev->current_mel;
1531     + ),
1532     + TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
1533     + __entry->vdev, __entry->in_ctx, __entry->out_ctx,
1534     + __entry->fake_port, __entry->real_port, __entry->current_mel
1535     + )
1536     +);
1537     +
1538     +DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device,
1539     + TP_PROTO(struct xhci_virt_device *vdev),
1540     + TP_ARGS(vdev)
1541     +);
1542     +
1543     DECLARE_EVENT_CLASS(xhci_log_virt_dev,
1544     TP_PROTO(struct xhci_virt_device *vdev),
1545     TP_ARGS(vdev),
1546     @@ -208,11 +239,6 @@ DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
1547     TP_ARGS(vdev)
1548     );
1549    
1550     -DEFINE_EVENT(xhci_log_virt_dev, xhci_free_virt_device,
1551     - TP_PROTO(struct xhci_virt_device *vdev),
1552     - TP_ARGS(vdev)
1553     -);
1554     -
1555     DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
1556     TP_PROTO(struct xhci_virt_device *vdev),
1557     TP_ARGS(vdev)
1558     diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1559     index eb6c26cbe579..ee0cc1d90b51 100644
1560     --- a/drivers/usb/serial/cp210x.c
1561     +++ b/drivers/usb/serial/cp210x.c
1562     @@ -95,6 +95,9 @@ static const struct usb_device_id id_table[] = {
1563     { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
1564     { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
1565     { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
1566     + { USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */
1567     + { USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */
1568     + { USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */
1569     { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
1570     { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
1571     { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
1572     @@ -112,6 +115,9 @@ static const struct usb_device_id id_table[] = {
1573     { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
1574     { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
1575     { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
1576     + { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
1577     + { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
1578     + { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
1579     { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
1580     { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
1581     { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
1582     @@ -124,7 +130,9 @@ static const struct usb_device_id id_table[] = {
1583     { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
1584     { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
1585     { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
1586     + { USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */
1587     { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
1588     + { USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */
1589     { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
1590     { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
1591     { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
1592     @@ -134,17 +142,23 @@ static const struct usb_device_id id_table[] = {
1593     { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
1594     { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
1595     { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
1596     + { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */
1597     + { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */
1598     { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
1599     { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
1600     { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
1601     { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
1602     + { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
1603     { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
1604     { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
1605     { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
1606     { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
1607     { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
1608     + { USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */
1609     { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
1610     { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
1611     + { USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */
1612     + { USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */
1613     { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
1614     { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
1615     { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
1616     diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
1617     index ded49e3bf2b0..9b29b67191bc 100644
1618     --- a/drivers/usb/typec/tcpm.c
1619     +++ b/drivers/usb/typec/tcpm.c
1620     @@ -388,17 +388,18 @@ static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
1621     u64 ts_nsec = local_clock();
1622     unsigned long rem_nsec;
1623    
1624     + mutex_lock(&port->logbuffer_lock);
1625     if (!port->logbuffer[port->logbuffer_head]) {
1626     port->logbuffer[port->logbuffer_head] =
1627     kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
1628     - if (!port->logbuffer[port->logbuffer_head])
1629     + if (!port->logbuffer[port->logbuffer_head]) {
1630     + mutex_unlock(&port->logbuffer_lock);
1631     return;
1632     + }
1633     }
1634    
1635     vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
1636    
1637     - mutex_lock(&port->logbuffer_lock);
1638     -
1639     if (tcpm_log_full(port)) {
1640     port->logbuffer_head = max(port->logbuffer_head - 1, 0);
1641     strcpy(tmpbuffer, "overflow");
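
The tcpm hunk widens the logbuffer_lock critical section so that it covers the check for an unallocated log slot and the allocation itself, not just the later head bookkeeping, and every early return now drops the lock it took. A generic check-then-allocate-under-the-lock sketch with pthreads; the names are illustrative rather than the driver's:

/* lazy_slot.c - illustrative: take the lock before the check-then-allocate
 * step, not only around the later bookkeeping, so check and act form one
 * atomic unit. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NSLOTS    8
#define SLOT_SIZE 128

static char *slots[NSLOTS];
static int head;
static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;

static void log_line(const char *msg)
{
        pthread_mutex_lock(&slots_lock);        /* lock first ... */

        if (!slots[head]) {                     /* ... then check and allocate */
                slots[head] = malloc(SLOT_SIZE);
                if (!slots[head]) {
                        pthread_mutex_unlock(&slots_lock);
                        return;                 /* every early return unlocks */
                }
        }

        snprintf(slots[head], SLOT_SIZE, "%s", msg);
        head = (head + 1) % NSLOTS;

        pthread_mutex_unlock(&slots_lock);
}

int main(void)
{
        log_line("hello");
        printf("%s\n", slots[0]);
        return 0;
}
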
1642     diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
1643     index bd5cca5632b3..8d0a6fe748bd 100644
1644     --- a/drivers/usb/typec/ucsi/ucsi.c
1645     +++ b/drivers/usb/typec/ucsi/ucsi.c
1646     @@ -350,6 +350,19 @@ static void ucsi_connector_change(struct work_struct *work)
1647     }
1648    
1649     if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
1650     + typec_set_pwr_role(con->port, con->status.pwr_dir);
1651     +
1652     + switch (con->status.partner_type) {
1653     + case UCSI_CONSTAT_PARTNER_TYPE_UFP:
1654     + typec_set_data_role(con->port, TYPEC_HOST);
1655     + break;
1656     + case UCSI_CONSTAT_PARTNER_TYPE_DFP:
1657     + typec_set_data_role(con->port, TYPEC_DEVICE);
1658     + break;
1659     + default:
1660     + break;
1661     + }
1662     +
1663     if (con->status.connected)
1664     ucsi_register_partner(con);
1665     else
1666     diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
1667     index 44eb4e1ea817..a18112a83fae 100644
1668     --- a/drivers/usb/typec/ucsi/ucsi_acpi.c
1669     +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
1670     @@ -79,6 +79,11 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
1671     return -ENODEV;
1672     }
1673    
1674     + /* This will make sure we can use ioremap_nocache() */
1675     + status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1);
1676     + if (ACPI_FAILURE(status))
1677     + return -ENOMEM;
1678     +
1679     /*
1680     * NOTE: The memory region for the data structures is used also in an
1681     * operation region, which means ACPI has already reserved it. Therefore
1682     diff --git a/include/linux/acpi.h b/include/linux/acpi.h
1683     index 15bfb15c2fa5..a6a7ae897b40 100644
1684     --- a/include/linux/acpi.h
1685     +++ b/include/linux/acpi.h
1686     @@ -443,6 +443,9 @@ int acpi_check_resource_conflict(const struct resource *res);
1687     int acpi_check_region(resource_size_t start, resource_size_t n,
1688     const char *name);
1689    
1690     +acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
1691     + u32 level);
1692     +
1693     int acpi_resources_are_enforced(void);
1694    
1695     #ifdef CONFIG_HIBERNATION
1696     diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
1697     index d12f511929f5..0fe61ede77c6 100644
1698     --- a/net/ipv6/netfilter/ip6t_rpfilter.c
1699     +++ b/net/ipv6/netfilter/ip6t_rpfilter.c
1700     @@ -48,6 +48,8 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
1701     }
1702    
1703     fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
1704     + if ((flags & XT_RPFILTER_LOOSE) == 0)
1705     + fl6.flowi6_oif = dev->ifindex;
1706    
1707     rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
1708     if (rt->dst.error)
1709     diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
1710     index 40e744572283..32b7896929f3 100644
1711     --- a/net/netfilter/nf_tables_core.c
1712     +++ b/net/netfilter/nf_tables_core.c
1713     @@ -208,7 +208,8 @@ nft_do_chain(struct nft_pktinfo *pkt, void *priv)
1714    
1715     switch (regs.verdict.code) {
1716     case NFT_JUMP:
1717     - BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE);
1718     + if (WARN_ON_ONCE(stackptr >= NFT_JUMP_STACK_SIZE))
1719     + return NF_DROP;
1720     jumpstack[stackptr].chain = chain;
1721     jumpstack[stackptr].rule = rule;
1722     jumpstack[stackptr].rulenum = rulenum;
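
The nf_tables_core hunk turns a jump-stack overflow from a fatal BUG_ON() into a WARN_ON_ONCE() followed by NF_DROP: the condition is reported once and the packet is discarded instead of crashing the kernel. The same shape in ordinary C is a bounds check that warns once and fails the operation, rather than an assert() that aborts; a small illustrative sketch:

/* bounded_stack.c - illustrative: report a would-be overflow and fail the
 * operation instead of aborting the whole process. */
#include <stdbool.h>
#include <stdio.h>

#define STACK_SIZE 16

struct frame { int chain; int rule; };

static struct frame stack[STACK_SIZE];
static int sp;

static bool push_frame(int chain, int rule)
{
        static bool warned;

        if (sp >= STACK_SIZE) {
                if (!warned) {                  /* warn once, like WARN_ON_ONCE() */
                        fprintf(stderr, "jump stack overflow, dropping\n");
                        warned = true;
                }
                return false;                   /* caller treats this as a drop */
        }

        stack[sp].chain = chain;
        stack[sp].rule = rule;
        sp++;
        return true;
}

int main(void)
{
        for (int i = 0; i < 20; i++)
                if (!push_frame(i, 0))
                        break;
        printf("depth reached: %d\n", sp);
        return 0;
}
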
1723     diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
1724     index 94df000abb92..29c38aa7f726 100644
1725     --- a/net/netfilter/xt_connmark.c
1726     +++ b/net/netfilter/xt_connmark.c
1727     @@ -211,7 +211,7 @@ static int __init connmark_mt_init(void)
1728     static void __exit connmark_mt_exit(void)
1729     {
1730     xt_unregister_match(&connmark_mt_reg);
1731     - xt_unregister_target(connmark_tg_reg);
1732     + xt_unregister_targets(connmark_tg_reg, ARRAY_SIZE(connmark_tg_reg));
1733     }
1734    
1735     module_init(connmark_mt_init);