Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0166-4.9.67-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3051 - (hide annotations) (download)
Wed Dec 20 11:49:54 2017 UTC (6 years, 5 months ago) by niro
File size: 37739 byte(s)
-linux-4.9.67
1 niro 3051 diff --git a/Documentation/devicetree/bindings/hwmon/jc42.txt b/Documentation/devicetree/bindings/hwmon/jc42.txt
2     index 07a250498fbb..f569db58f64a 100644
3     --- a/Documentation/devicetree/bindings/hwmon/jc42.txt
4     +++ b/Documentation/devicetree/bindings/hwmon/jc42.txt
5     @@ -34,6 +34,10 @@ Required properties:
6    
7     - reg: I2C address
8    
9     +Optional properties:
10     +- smbus-timeout-disable: When set, the smbus timeout function will be disabled.
11     + This is not supported on all chips.
12     +
13     Example:
14    
15     temp-sensor@1a {
16     diff --git a/Makefile b/Makefile
17     index 8e62f9e2a08c..70546af61a0a 100644
18     --- a/Makefile
19     +++ b/Makefile
20     @@ -1,6 +1,6 @@
21     VERSION = 4
22     PATCHLEVEL = 9
23     -SUBLEVEL = 66
24     +SUBLEVEL = 67
25     EXTRAVERSION =
26     NAME = Roaring Lionus
27    
28     diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
29     index 08cce17a25a0..b4575bbaf085 100644
30     --- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
31     +++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
32     @@ -192,7 +192,7 @@
33     interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>;
34     pinctrl-names = "default";
35     pinctrl-0 = <&mmc1_pins &mmc1_cd>;
36     - cd-gpios = <&gpio4 31 IRQ_TYPE_LEVEL_LOW>; /* gpio127 */
37     + cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>; /* gpio127 */
38     vmmc-supply = <&vmmc1>;
39     bus-width = <4>;
40     cap-power-off-card;
41     @@ -249,9 +249,9 @@
42     OMAP3_CORE1_IOPAD(0x2110, PIN_INPUT | MUX_MODE0) /* cam_xclka.cam_xclka */
43     OMAP3_CORE1_IOPAD(0x2112, PIN_INPUT | MUX_MODE0) /* cam_pclk.cam_pclk */
44    
45     - OMAP3_CORE1_IOPAD(0x2114, PIN_INPUT | MUX_MODE0) /* cam_d0.cam_d0 */
46     - OMAP3_CORE1_IOPAD(0x2116, PIN_INPUT | MUX_MODE0) /* cam_d1.cam_d1 */
47     - OMAP3_CORE1_IOPAD(0x2118, PIN_INPUT | MUX_MODE0) /* cam_d2.cam_d2 */
48     + OMAP3_CORE1_IOPAD(0x2116, PIN_INPUT | MUX_MODE0) /* cam_d0.cam_d0 */
49     + OMAP3_CORE1_IOPAD(0x2118, PIN_INPUT | MUX_MODE0) /* cam_d1.cam_d1 */
50     + OMAP3_CORE1_IOPAD(0x211a, PIN_INPUT | MUX_MODE0) /* cam_d2.cam_d2 */
51     OMAP3_CORE1_IOPAD(0x211c, PIN_INPUT | MUX_MODE0) /* cam_d3.cam_d3 */
52     OMAP3_CORE1_IOPAD(0x211e, PIN_INPUT | MUX_MODE0) /* cam_d4.cam_d4 */
53     OMAP3_CORE1_IOPAD(0x2120, PIN_INPUT | MUX_MODE0) /* cam_d5.cam_d5 */
54     diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
55     index 770216baa737..da310bb779b9 100644
56     --- a/arch/arm/mach-omap2/pdata-quirks.c
57     +++ b/arch/arm/mach-omap2/pdata-quirks.c
58     @@ -162,7 +162,7 @@ static struct ti_st_plat_data wilink7_pdata = {
59     .nshutdown_gpio = 162,
60     .dev_name = "/dev/ttyO1",
61     .flow_cntrl = 1,
62     - .baud_rate = 300000,
63     + .baud_rate = 3000000,
64     };
65    
66     static struct platform_device wl128x_device = {
67     diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
68     index be9df513141e..e7b0e7ff4c58 100644
69     --- a/arch/x86/entry/entry_64.S
70     +++ b/arch/x86/entry/entry_64.S
71     @@ -54,19 +54,15 @@ ENTRY(native_usergs_sysret64)
72     ENDPROC(native_usergs_sysret64)
73     #endif /* CONFIG_PARAVIRT */
74    
75     -.macro TRACE_IRQS_FLAGS flags:req
76     +.macro TRACE_IRQS_IRETQ
77     #ifdef CONFIG_TRACE_IRQFLAGS
78     - bt $9, \flags /* interrupts off? */
79     + bt $9, EFLAGS(%rsp) /* interrupts off? */
80     jnc 1f
81     TRACE_IRQS_ON
82     1:
83     #endif
84     .endm
85    
86     -.macro TRACE_IRQS_IRETQ
87     - TRACE_IRQS_FLAGS EFLAGS(%rsp)
88     -.endm
89     -
90     /*
91     * When dynamic function tracer is enabled it will add a breakpoint
92     * to all locations that it is about to modify, sync CPUs, update
93     @@ -872,13 +868,11 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
94     ENTRY(native_load_gs_index)
95     pushfq
96     DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
97     - TRACE_IRQS_OFF
98     SWAPGS
99     .Lgs_change:
100     movl %edi, %gs
101     2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
102     SWAPGS
103     - TRACE_IRQS_FLAGS (%rsp)
104     popfq
105     ret
106     END(native_load_gs_index)
107     diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
108     index 3f05c044720b..b24b3c6d686e 100644
109     --- a/arch/x86/kvm/lapic.c
110     +++ b/arch/x86/kvm/lapic.c
111     @@ -246,9 +246,14 @@ static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
112     recalculate_apic_map(apic->vcpu->kvm);
113     }
114    
115     +static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
116     +{
117     + return ((id >> 4) << 16) | (1 << (id & 0xf));
118     +}
119     +
120     static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
121     {
122     - u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
123     + u32 ldr = kvm_apic_calc_x2apic_ldr(id);
124    
125     kvm_lapic_set_reg(apic, APIC_ID, id);
126     kvm_lapic_set_reg(apic, APIC_LDR, ldr);
127     @@ -2029,6 +2034,7 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
128     {
129     if (apic_x2apic_mode(vcpu->arch.apic)) {
130     u32 *id = (u32 *)(s->regs + APIC_ID);
131     + u32 *ldr = (u32 *)(s->regs + APIC_LDR);
132    
133     if (vcpu->kvm->arch.x2apic_format) {
134     if (*id != vcpu->vcpu_id)
135     @@ -2039,6 +2045,10 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
136     else
137     *id <<= 24;
138     }
139     +
140     + /* In x2APIC mode, the LDR is fixed and based on the id */
141     + if (set)
142     + *ldr = kvm_apic_calc_x2apic_ldr(*id);
143     }
144    
145     return 0;
146     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
147     index 4fbf0c94f2d1..23f1a6bd7a0d 100644
148     --- a/arch/x86/kvm/svm.c
149     +++ b/arch/x86/kvm/svm.c
150     @@ -2149,6 +2149,8 @@ static int ud_interception(struct vcpu_svm *svm)
151     int er;
152    
153     er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
154     + if (er == EMULATE_USER_EXIT)
155     + return 0;
156     if (er != EMULATE_DONE)
157     kvm_queue_exception(&svm->vcpu, UD_VECTOR);
158     return 1;
159     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
160     index 0f0b27d96f27..f0d3de153e29 100644
161     --- a/arch/x86/kvm/vmx.c
162     +++ b/arch/x86/kvm/vmx.c
163     @@ -5502,6 +5502,8 @@ static int handle_exception(struct kvm_vcpu *vcpu)
164     return 1;
165     }
166     er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
167     + if (er == EMULATE_USER_EXIT)
168     + return 0;
169     if (er != EMULATE_DONE)
170     kvm_queue_exception(vcpu, UD_VECTOR);
171     return 1;
172     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
173     index 595f8149c0d9..02d45296a97c 100644
174     --- a/arch/x86/kvm/x86.c
175     +++ b/arch/x86/kvm/x86.c
176     @@ -1797,6 +1797,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
177     */
178     BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
179    
180     + if (guest_hv_clock.version & 1)
181     + ++guest_hv_clock.version; /* first time write, random junk */
182     +
183     vcpu->hv_clock.version = guest_hv_clock.version + 1;
184     kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
185     &vcpu->hv_clock,
186     @@ -5576,6 +5579,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
187     if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
188     emulation_type))
189     return EMULATE_DONE;
190     + if (ctxt->have_exception && inject_emulated_exception(vcpu))
191     + return EMULATE_DONE;
192     if (emulation_type & EMULTYPE_SKIP)
193     return EMULATE_FAIL;
194     return handle_emulation_failure(vcpu);
195     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
196     index f8fdbd1378a7..26afdffab5a0 100644
197     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
198     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
199     @@ -1788,34 +1788,32 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
200     WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
201     }
202    
203     -/* Atom needs data in little endian format
204     - * so swap as appropriate when copying data to
205     - * or from atom. Note that atom operates on
206     - * dw units.
207     +/* Atom needs data in little endian format so swap as appropriate when copying
208     + * data to or from atom. Note that atom operates on dw units.
209     + *
210     + * Use to_le=true when sending data to atom and provide at least
211     + * ALIGN(num_bytes,4) bytes in the dst buffer.
212     + *
213     + * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
214     + * bytes in the src buffer.
215     */
216     void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
217     {
218     #ifdef __BIG_ENDIAN
219     - u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
220     - u32 *dst32, *src32;
221     + u32 src_tmp[5], dst_tmp[5];
222     int i;
223     + u8 align_num_bytes = ALIGN(num_bytes, 4);
224    
225     - memcpy(src_tmp, src, num_bytes);
226     - src32 = (u32 *)src_tmp;
227     - dst32 = (u32 *)dst_tmp;
228     if (to_le) {
229     - for (i = 0; i < ((num_bytes + 3) / 4); i++)
230     - dst32[i] = cpu_to_le32(src32[i]);
231     - memcpy(dst, dst_tmp, num_bytes);
232     + memcpy(src_tmp, src, num_bytes);
233     + for (i = 0; i < align_num_bytes / 4; i++)
234     + dst_tmp[i] = cpu_to_le32(src_tmp[i]);
235     + memcpy(dst, dst_tmp, align_num_bytes);
236     } else {
237     - u8 dws = num_bytes & ~3;
238     - for (i = 0; i < ((num_bytes + 3) / 4); i++)
239     - dst32[i] = le32_to_cpu(src32[i]);
240     - memcpy(dst, dst_tmp, dws);
241     - if (num_bytes % 4) {
242     - for (i = 0; i < (num_bytes % 4); i++)
243     - dst[dws+i] = dst_tmp[dws+i];
244     - }
245     + memcpy(src_tmp, src, align_num_bytes);
246     + for (i = 0; i < align_num_bytes / 4; i++)
247     + dst_tmp[i] = le32_to_cpu(src_tmp[i]);
248     + memcpy(dst, dst_tmp, num_bytes);
249     }
250     #else
251     memcpy(dst, src, num_bytes);
252     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
253     index 743a12df6971..3bb2b9b5ef9c 100644
254     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
255     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
256     @@ -648,7 +648,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
257     uint32_t allocated = 0;
258     uint32_t tmp, handle = 0;
259     uint32_t *size = &tmp;
260     - int i, r, idx = 0;
261     + int i, r = 0, idx = 0;
262    
263     r = amdgpu_cs_sysvm_access_required(p);
264     if (r)
265     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
266     index 968c4260d7a7..47503759906b 100644
267     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
268     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
269     @@ -744,7 +744,7 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
270     int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
271     struct amdgpu_vm *vm)
272     {
273     - int r;
274     + int r = 0;
275    
276     r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
277     if (r)
278     diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
279     index 4477c55a58e3..a8b59b3decd8 100644
280     --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
281     +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
282     @@ -850,9 +850,9 @@ static int init_over_drive_limits(
283     const ATOM_Tonga_POWERPLAYTABLE *powerplay_table)
284     {
285     hwmgr->platform_descriptor.overdriveLimit.engineClock =
286     - le16_to_cpu(powerplay_table->ulMaxODEngineClock);
287     + le32_to_cpu(powerplay_table->ulMaxODEngineClock);
288     hwmgr->platform_descriptor.overdriveLimit.memoryClock =
289     - le16_to_cpu(powerplay_table->ulMaxODMemoryClock);
290     + le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
291    
292     hwmgr->platform_descriptor.minOverdriveVDDC = 0;
293     hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
294     diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
295     index 7e7a4d43d6b6..0f563c954520 100644
296     --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
297     +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
298     @@ -521,9 +521,12 @@ static void ade_crtc_atomic_begin(struct drm_crtc *crtc,
299     {
300     struct ade_crtc *acrtc = to_ade_crtc(crtc);
301     struct ade_hw_ctx *ctx = acrtc->ctx;
302     + struct drm_display_mode *mode = &crtc->state->mode;
303     + struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode;
304    
305     if (!ctx->power_on)
306     (void)ade_power_up(ctx);
307     + ade_ldi_set_mode(acrtc, mode, adj_mode);
308     }
309    
310     static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
311     diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
312     index 79aab9ad6faa..6769aa1b6922 100644
313     --- a/drivers/gpu/drm/i915/intel_i2c.c
314     +++ b/drivers/gpu/drm/i915/intel_i2c.c
315     @@ -430,7 +430,9 @@ static bool
316     gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
317     {
318     return (i + 1 < num &&
319     - !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
320     + msgs[i].addr == msgs[i + 1].addr &&
321     + !(msgs[i].flags & I2C_M_RD) &&
322     + (msgs[i].len == 1 || msgs[i].len == 2) &&
323     (msgs[i + 1].flags & I2C_M_RD));
324     }
325    
326     diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
327     index 27cb42467b20..6f65846b1783 100644
328     --- a/drivers/gpu/drm/panel/panel-simple.c
329     +++ b/drivers/gpu/drm/panel/panel-simple.c
330     @@ -369,6 +369,7 @@ static int panel_simple_remove(struct device *dev)
331     drm_panel_remove(&panel->base);
332    
333     panel_simple_disable(&panel->base);
334     + panel_simple_unprepare(&panel->base);
335    
336     if (panel->ddc)
337     put_device(&panel->ddc->dev);
338     @@ -384,6 +385,7 @@ static void panel_simple_shutdown(struct device *dev)
339     struct panel_simple *panel = dev_get_drvdata(dev);
340    
341     panel_simple_disable(&panel->base);
342     + panel_simple_unprepare(&panel->base);
343     }
344    
345     static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = {
346     diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
347     index 432cb46f6a34..fd7682bf335d 100644
348     --- a/drivers/gpu/drm/radeon/atombios_dp.c
349     +++ b/drivers/gpu/drm/radeon/atombios_dp.c
350     @@ -45,34 +45,32 @@ static char *pre_emph_names[] = {
351    
352     /***** radeon AUX functions *****/
353    
354     -/* Atom needs data in little endian format
355     - * so swap as appropriate when copying data to
356     - * or from atom. Note that atom operates on
357     - * dw units.
358     +/* Atom needs data in little endian format so swap as appropriate when copying
359     + * data to or from atom. Note that atom operates on dw units.
360     + *
361     + * Use to_le=true when sending data to atom and provide at least
362     + * ALIGN(num_bytes,4) bytes in the dst buffer.
363     + *
364     + * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
365     + * bytes in the src buffer.
366     */
367     void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
368     {
369     #ifdef __BIG_ENDIAN
370     - u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
371     - u32 *dst32, *src32;
372     + u32 src_tmp[5], dst_tmp[5];
373     int i;
374     + u8 align_num_bytes = ALIGN(num_bytes, 4);
375    
376     - memcpy(src_tmp, src, num_bytes);
377     - src32 = (u32 *)src_tmp;
378     - dst32 = (u32 *)dst_tmp;
379     if (to_le) {
380     - for (i = 0; i < ((num_bytes + 3) / 4); i++)
381     - dst32[i] = cpu_to_le32(src32[i]);
382     - memcpy(dst, dst_tmp, num_bytes);
383     + memcpy(src_tmp, src, num_bytes);
384     + for (i = 0; i < align_num_bytes / 4; i++)
385     + dst_tmp[i] = cpu_to_le32(src_tmp[i]);
386     + memcpy(dst, dst_tmp, align_num_bytes);
387     } else {
388     - u8 dws = num_bytes & ~3;
389     - for (i = 0; i < ((num_bytes + 3) / 4); i++)
390     - dst32[i] = le32_to_cpu(src32[i]);
391     - memcpy(dst, dst_tmp, dws);
392     - if (num_bytes % 4) {
393     - for (i = 0; i < (num_bytes % 4); i++)
394     - dst[dws+i] = dst_tmp[dws+i];
395     - }
396     + memcpy(src_tmp, src, align_num_bytes);
397     + for (i = 0; i < align_num_bytes / 4; i++)
398     + dst_tmp[i] = le32_to_cpu(src_tmp[i]);
399     + memcpy(dst, dst_tmp, num_bytes);
400     }
401     #else
402     memcpy(dst, src, num_bytes);
403     diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
404     index 0daad446d2c7..af84705b82ed 100644
405     --- a/drivers/gpu/drm/radeon/radeon_fb.c
406     +++ b/drivers/gpu/drm/radeon/radeon_fb.c
407     @@ -252,7 +252,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
408     }
409    
410     info->par = rfbdev;
411     - info->skip_vt_switch = true;
412    
413     ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
414     if (ret) {
415     diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
416     index bf6e21655c57..7d22f9874d5f 100644
417     --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
418     +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
419     @@ -473,6 +473,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
420     INIT_LIST_HEAD(&fbo->lru);
421     INIT_LIST_HEAD(&fbo->swap);
422     INIT_LIST_HEAD(&fbo->io_reserve_lru);
423     + mutex_init(&fbo->wu_mutex);
424     fbo->moving = NULL;
425     drm_vma_node_reset(&fbo->vma_node);
426     atomic_set(&fbo->cpu_writers, 0);
427     diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
428     index 1bf22eff0b08..0f1f6421845f 100644
429     --- a/drivers/hwmon/jc42.c
430     +++ b/drivers/hwmon/jc42.c
431     @@ -22,6 +22,7 @@
432     * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
433     */
434    
435     +#include <linux/bitops.h>
436     #include <linux/module.h>
437     #include <linux/init.h>
438     #include <linux/slab.h>
439     @@ -45,6 +46,7 @@ static const unsigned short normal_i2c[] = {
440     #define JC42_REG_TEMP 0x05
441     #define JC42_REG_MANID 0x06
442     #define JC42_REG_DEVICEID 0x07
443     +#define JC42_REG_SMBUS 0x22 /* NXP and Atmel, possibly others? */
444    
445     /* Status bits in temperature register */
446     #define JC42_ALARM_CRIT_BIT 15
447     @@ -73,6 +75,9 @@ static const unsigned short normal_i2c[] = {
448     #define ONS_MANID 0x1b09 /* ON Semiconductor */
449     #define STM_MANID 0x104a /* ST Microelectronics */
450    
451     +/* SMBUS register */
452     +#define SMBUS_STMOUT BIT(7) /* SMBus time-out, active low */
453     +
454     /* Supported chips */
455    
456     /* Analog Devices */
457     @@ -476,6 +481,22 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
458    
459     data->extended = !!(cap & JC42_CAP_RANGE);
460    
461     + if (device_property_read_bool(dev, "smbus-timeout-disable")) {
462     + int smbus;
463     +
464     + /*
465     + * Not all chips support this register, but from a
466     + * quick read of various datasheets no chip appears
467     + * incompatible with the below attempt to disable
468     + * the timeout. And the whole thing is opt-in...
469     + */
470     + smbus = i2c_smbus_read_word_swapped(client, JC42_REG_SMBUS);
471     + if (smbus < 0)
472     + return smbus;
473     + i2c_smbus_write_word_swapped(client, JC42_REG_SMBUS,
474     + smbus | SMBUS_STMOUT);
475     + }
476     +
477     config = i2c_smbus_read_word_swapped(client, JC42_REG_CONFIG);
478     if (config < 0)
479     return config;
480     diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
481     index eb3627f35d12..e6fe21a6135b 100644
482     --- a/drivers/i2c/busses/i2c-i801.c
483     +++ b/drivers/i2c/busses/i2c-i801.c
484     @@ -1592,6 +1592,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
485     /* Default timeout in interrupt mode: 200 ms */
486     priv->adapter.timeout = HZ / 5;
487    
488     + if (dev->irq == IRQ_NOTCONNECTED)
489     + priv->features &= ~FEATURE_IRQ;
490     +
491     if (priv->features & FEATURE_IRQ) {
492     u16 pcictl, pcists;
493    
494     diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
495     index 3fba31cea66e..537903bf9add 100644
496     --- a/drivers/md/bcache/alloc.c
497     +++ b/drivers/md/bcache/alloc.c
498     @@ -477,7 +477,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
499     if (b == -1)
500     goto err;
501    
502     - k->ptr[i] = PTR(ca->buckets[b].gen,
503     + k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
504     bucket_to_sector(c, b),
505     ca->sb.nr_this_dev);
506    
507     diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
508     index 243de0bf15cd..4bf15182c4da 100644
509     --- a/drivers/md/bcache/extents.c
510     +++ b/drivers/md/bcache/extents.c
511     @@ -584,7 +584,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
512     return false;
513    
514     for (i = 0; i < KEY_PTRS(l); i++)
515     - if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
516     + if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
517     PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
518     return false;
519    
520     diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
521     index 6925023e12d4..08f20b7cd199 100644
522     --- a/drivers/md/bcache/journal.c
523     +++ b/drivers/md/bcache/journal.c
524     @@ -508,7 +508,7 @@ static void journal_reclaim(struct cache_set *c)
525     continue;
526    
527     ja->cur_idx = next;
528     - k->ptr[n++] = PTR(0,
529     + k->ptr[n++] = MAKE_PTR(0,
530     bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
531     ca->sb.nr_this_dev);
532     }
533     diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
534     index 1beb722f6080..e1e69a480c56 100644
535     --- a/drivers/mfd/twl4030-power.c
536     +++ b/drivers/mfd/twl4030-power.c
537     @@ -701,6 +701,7 @@ static struct twl4030_ins omap3_wrst_seq[] = {
538     TWL_RESOURCE_RESET(RES_MAIN_REF),
539     TWL_RESOURCE_GROUP_RESET(RES_GRP_ALL, RES_TYPE_R0, RES_TYPE2_R2),
540     TWL_RESOURCE_RESET(RES_VUSB_3V1),
541     + TWL_RESOURCE_RESET(RES_VMMC1),
542     TWL_RESOURCE_GROUP_RESET(RES_GRP_ALL, RES_TYPE_R0, RES_TYPE2_R1),
543     TWL_RESOURCE_GROUP_RESET(RES_GRP_RC, RES_TYPE_ALL, RES_TYPE2_R0),
544     TWL_RESOURCE_ON(RES_RESET),
545     diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
546     index 051b14766ef9..19c10dc56513 100644
547     --- a/drivers/misc/eeprom/at24.c
548     +++ b/drivers/misc/eeprom/at24.c
549     @@ -365,7 +365,8 @@ static ssize_t at24_eeprom_read_mac(struct at24_data *at24, char *buf,
550     memset(msg, 0, sizeof(msg));
551     msg[0].addr = client->addr;
552     msg[0].buf = addrbuf;
553     - addrbuf[0] = 0x90 + offset;
554     + /* EUI-48 starts from 0x9a, EUI-64 from 0x98 */
555     + addrbuf[0] = 0xa0 - at24->chip.byte_len + offset;
556     msg[0].len = 1;
557     msg[1].addr = client->addr;
558     msg[1].flags = I2C_M_RD;
559     @@ -506,6 +507,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
560     if (unlikely(!count))
561     return count;
562    
563     + if (off + count > at24->chip.byte_len)
564     + return -EINVAL;
565     +
566     /*
567     * Read data from chip, protecting against concurrent updates
568     * from this host, but not from other I2C masters.
569     @@ -538,6 +542,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
570     if (unlikely(!count))
571     return -EINVAL;
572    
573     + if (off + count > at24->chip.byte_len)
574     + return -EINVAL;
575     +
576     /*
577     * Write data to chip, protecting against concurrent updates
578     * from this host, but not from other I2C masters.
579     @@ -638,6 +645,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
580     dev_warn(&client->dev,
581     "page_size looks suspicious (no power of 2)!\n");
582    
583     + /*
584     + * REVISIT: the size of the EUI-48 byte array is 6 in at24mac402, while
585     + * the call to ilog2() in AT24_DEVICE_MAGIC() rounds it down to 4.
586     + *
587     + * Eventually we'll get rid of the magic values altogether in favor of
588     + * real structs, but for now just manually set the right size.
589     + */
590     + if (chip.flags & AT24_FLAG_MAC && chip.byte_len == 4)
591     + chip.byte_len = 6;
592     +
593     /* Use I2C operations unless we're stuck with SMBus extensions. */
594     if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
595     if (chip.flags & AT24_FLAG_ADDR16)
596     diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
597     index c64266f5a399..60ebe5b4500b 100644
598     --- a/drivers/mmc/core/bus.c
599     +++ b/drivers/mmc/core/bus.c
600     @@ -155,6 +155,9 @@ static int mmc_bus_suspend(struct device *dev)
601     return ret;
602    
603     ret = host->bus_ops->suspend(host);
604     + if (ret)
605     + pm_generic_resume(dev);
606     +
607     return ret;
608     }
609    
610     diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
611     index 4f4a627f6b20..0c6de9f12ee8 100644
612     --- a/drivers/mmc/core/mmc.c
613     +++ b/drivers/mmc/core/mmc.c
614     @@ -752,7 +752,7 @@ MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
615     MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
616     MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
617     MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
618     -MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
619     +MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
620    
621     static ssize_t mmc_fwrev_show(struct device *dev,
622     struct device_attribute *attr,
623     diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
624     index f6f40a1673ae..f09148a4ab55 100644
625     --- a/drivers/mmc/core/sd.c
626     +++ b/drivers/mmc/core/sd.c
627     @@ -683,7 +683,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
628     MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
629     MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
630     MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
631     -MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
632     +MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
633    
634    
635     static ssize_t mmc_dsr_show(struct device *dev,
636     diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
637     index 8edafd8cb8ce..5c52a6182765 100644
638     --- a/drivers/nvme/host/nvme.h
639     +++ b/drivers/nvme/host/nvme.h
640     @@ -84,7 +84,7 @@ enum nvme_quirks {
641     * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
642     * found empirically.
643     */
644     -#define NVME_QUIRK_DELAY_AMOUNT 2000
645     +#define NVME_QUIRK_DELAY_AMOUNT 2300
646    
647     enum nvme_ctrl_state {
648     NVME_CTRL_NEW,
649     diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
650     index 54ea90f89b70..e48ecb9303ca 100644
651     --- a/drivers/nvme/host/pci.c
652     +++ b/drivers/nvme/host/pci.c
653     @@ -2109,6 +2109,8 @@ static const struct pci_device_id nvme_id_table[] = {
654     .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
655     { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
656     .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
657     + { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */
658     + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
659     { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
660     .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
661     { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
662     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
663     index 705bb5f5a87f..c4cff5cc9c93 100644
664     --- a/fs/btrfs/extent-tree.c
665     +++ b/fs/btrfs/extent-tree.c
666     @@ -3397,13 +3397,6 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
667     goto again;
668     }
669    
670     - /* We've already setup this transaction, go ahead and exit */
671     - if (block_group->cache_generation == trans->transid &&
672     - i_size_read(inode)) {
673     - dcs = BTRFS_DC_SETUP;
674     - goto out_put;
675     - }
676     -
677     /*
678     * We want to set the generation to 0, that way if anything goes wrong
679     * from here on out we know not to trust this cache when we load up next
680     @@ -3427,6 +3420,13 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
681     }
682     WARN_ON(ret);
683    
684     + /* We've already setup this transaction, go ahead and exit */
685     + if (block_group->cache_generation == trans->transid &&
686     + i_size_read(inode)) {
687     + dcs = BTRFS_DC_SETUP;
688     + goto out_put;
689     + }
690     +
691     if (i_size_read(inode) > 0) {
692     ret = btrfs_check_trunc_cache_free_space(root,
693     &root->fs_info->global_block_rsv);
694     diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
695     index d04ec3814779..65566d5fcf39 100644
696     --- a/fs/nfs/dir.c
697     +++ b/fs/nfs/dir.c
698     @@ -1292,7 +1292,7 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
699     return 0;
700     }
701    
702     - error = nfs_revalidate_inode(NFS_SERVER(inode), inode);
703     + error = nfs_lookup_verify_inode(inode, flags);
704     dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n",
705     __func__, inode->i_ino, error ? "invalid" : "valid");
706     return !error;
707     @@ -1443,6 +1443,7 @@ static int nfs4_lookup_revalidate(struct dentry *, unsigned int);
708    
709     const struct dentry_operations nfs4_dentry_operations = {
710     .d_revalidate = nfs4_lookup_revalidate,
711     + .d_weak_revalidate = nfs_weak_revalidate,
712     .d_delete = nfs_dentry_delete,
713     .d_iput = nfs_dentry_iput,
714     .d_automount = nfs_d_automount,
715     diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
716     index ec2a69dac536..9ebb2d7c8182 100644
717     --- a/fs/nfsd/nfs4state.c
718     +++ b/fs/nfsd/nfs4state.c
719     @@ -3513,7 +3513,9 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
720     /* ignore lock owners */
721     if (local->st_stateowner->so_is_open_owner == 0)
722     continue;
723     - if (local->st_stateowner == &oo->oo_owner) {
724     + if (local->st_stateowner != &oo->oo_owner)
725     + continue;
726     + if (local->st_stid.sc_type == NFS4_OPEN_STID) {
727     ret = local;
728     atomic_inc(&ret->st_stid.sc_count);
729     break;
730     @@ -3522,6 +3524,52 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
731     return ret;
732     }
733    
734     +static __be32
735     +nfsd4_verify_open_stid(struct nfs4_stid *s)
736     +{
737     + __be32 ret = nfs_ok;
738     +
739     + switch (s->sc_type) {
740     + default:
741     + break;
742     + case NFS4_CLOSED_STID:
743     + case NFS4_CLOSED_DELEG_STID:
744     + ret = nfserr_bad_stateid;
745     + break;
746     + case NFS4_REVOKED_DELEG_STID:
747     + ret = nfserr_deleg_revoked;
748     + }
749     + return ret;
750     +}
751     +
752     +/* Lock the stateid st_mutex, and deal with races with CLOSE */
753     +static __be32
754     +nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
755     +{
756     + __be32 ret;
757     +
758     + mutex_lock(&stp->st_mutex);
759     + ret = nfsd4_verify_open_stid(&stp->st_stid);
760     + if (ret != nfs_ok)
761     + mutex_unlock(&stp->st_mutex);
762     + return ret;
763     +}
764     +
765     +static struct nfs4_ol_stateid *
766     +nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
767     +{
768     + struct nfs4_ol_stateid *stp;
769     + for (;;) {
770     + spin_lock(&fp->fi_lock);
771     + stp = nfsd4_find_existing_open(fp, open);
772     + spin_unlock(&fp->fi_lock);
773     + if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
774     + break;
775     + nfs4_put_stid(&stp->st_stid);
776     + }
777     + return stp;
778     +}
779     +
780     static struct nfs4_openowner *
781     alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
782     struct nfsd4_compound_state *cstate)
783     @@ -3566,6 +3614,7 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
784     mutex_init(&stp->st_mutex);
785     mutex_lock(&stp->st_mutex);
786    
787     +retry:
788     spin_lock(&oo->oo_owner.so_client->cl_lock);
789     spin_lock(&fp->fi_lock);
790    
791     @@ -3590,7 +3639,11 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
792     spin_unlock(&fp->fi_lock);
793     spin_unlock(&oo->oo_owner.so_client->cl_lock);
794     if (retstp) {
795     - mutex_lock(&retstp->st_mutex);
796     + /* Handle races with CLOSE */
797     + if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
798     + nfs4_put_stid(&retstp->st_stid);
799     + goto retry;
800     + }
801     /* To keep mutex tracking happy */
802     mutex_unlock(&stp->st_mutex);
803     stp = retstp;
804     @@ -4400,6 +4453,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
805     struct nfs4_ol_stateid *stp = NULL;
806     struct nfs4_delegation *dp = NULL;
807     __be32 status;
808     + bool new_stp = false;
809    
810     /*
811     * Lookup file; if found, lookup stateid and check open request,
812     @@ -4411,9 +4465,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
813     status = nfs4_check_deleg(cl, open, &dp);
814     if (status)
815     goto out;
816     - spin_lock(&fp->fi_lock);
817     - stp = nfsd4_find_existing_open(fp, open);
818     - spin_unlock(&fp->fi_lock);
819     + stp = nfsd4_find_and_lock_existing_open(fp, open);
820     } else {
821     open->op_file = NULL;
822     status = nfserr_bad_stateid;
823     @@ -4421,35 +4473,31 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
824     goto out;
825     }
826    
827     + if (!stp) {
828     + stp = init_open_stateid(fp, open);
829     + if (!open->op_stp)
830     + new_stp = true;
831     + }
832     +
833     /*
834     * OPEN the file, or upgrade an existing OPEN.
835     * If truncate fails, the OPEN fails.
836     + *
837     + * stp is already locked.
838     */
839     - if (stp) {
840     + if (!new_stp) {
841     /* Stateid was found, this is an OPEN upgrade */
842     - mutex_lock(&stp->st_mutex);
843     status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
844     if (status) {
845     mutex_unlock(&stp->st_mutex);
846     goto out;
847     }
848     } else {
849     - /* stp is returned locked. */
850     - stp = init_open_stateid(fp, open);
851     - /* See if we lost the race to some other thread */
852     - if (stp->st_access_bmap != 0) {
853     - status = nfs4_upgrade_open(rqstp, fp, current_fh,
854     - stp, open);
855     - if (status) {
856     - mutex_unlock(&stp->st_mutex);
857     - goto out;
858     - }
859     - goto upgrade_out;
860     - }
861     status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
862     if (status) {
863     - mutex_unlock(&stp->st_mutex);
864     + stp->st_stid.sc_type = NFS4_CLOSED_STID;
865     release_open_stateid(stp);
866     + mutex_unlock(&stp->st_mutex);
867     goto out;
868     }
869    
870     @@ -4458,7 +4506,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
871     if (stp->st_clnt_odstate == open->op_odstate)
872     open->op_odstate = NULL;
873     }
874     -upgrade_out:
875     +
876     nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
877     mutex_unlock(&stp->st_mutex);
878    
879     @@ -4684,7 +4732,7 @@ nfs4_laundromat(struct nfsd_net *nn)
880     spin_unlock(&nn->blocked_locks_lock);
881    
882     while (!list_empty(&reaplist)) {
883     - nbl = list_first_entry(&nn->blocked_locks_lru,
884     + nbl = list_first_entry(&reaplist,
885     struct nfsd4_blocked_lock, nbl_lru);
886     list_del_init(&nbl->nbl_lru);
887     posix_unblock_lock(&nbl->nbl_lock);
888     @@ -5314,7 +5362,6 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
889     bool unhashed;
890     LIST_HEAD(reaplist);
891    
892     - s->st_stid.sc_type = NFS4_CLOSED_STID;
893     spin_lock(&clp->cl_lock);
894     unhashed = unhash_open_stateid(s, &reaplist);
895    
896     @@ -5353,10 +5400,12 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
897     nfsd4_bump_seqid(cstate, status);
898     if (status)
899     goto out;
900     +
901     + stp->st_stid.sc_type = NFS4_CLOSED_STID;
902     nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
903     - mutex_unlock(&stp->st_mutex);
904    
905     nfsd4_close_open_stateid(stp);
906     + mutex_unlock(&stp->st_mutex);
907    
908     /* put reference from nfs4_preprocess_seqid_op */
909     nfs4_put_stid(&stp->st_stid);
910     @@ -7094,7 +7143,7 @@ nfs4_state_shutdown_net(struct net *net)
911     spin_unlock(&nn->blocked_locks_lock);
912    
913     while (!list_empty(&reaplist)) {
914     - nbl = list_first_entry(&nn->blocked_locks_lru,
915     + nbl = list_first_entry(&reaplist,
916     struct nfsd4_blocked_lock, nbl_lru);
917     list_del_init(&nbl->nbl_lru);
918     posix_unblock_lock(&nbl->nbl_lock);
919     diff --git a/include/linux/mm.h b/include/linux/mm.h
920     index 6c9e1ad12831..2217e2f18247 100644
921     --- a/include/linux/mm.h
922     +++ b/include/linux/mm.h
923     @@ -347,6 +347,7 @@ struct fault_env {
924     struct vm_operations_struct {
925     void (*open)(struct vm_area_struct * area);
926     void (*close)(struct vm_area_struct * area);
927     + int (*split)(struct vm_area_struct * area, unsigned long addr);
928     int (*mremap)(struct vm_area_struct * area);
929     int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
930     int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
931     diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
932     index 22b6ad31c706..8562b1cb776b 100644
933     --- a/include/uapi/linux/bcache.h
934     +++ b/include/uapi/linux/bcache.h
935     @@ -90,7 +90,7 @@ PTR_FIELD(PTR_GEN, 0, 8)
936    
937     #define PTR_CHECK_DEV ((1 << PTR_DEV_BITS) - 1)
938    
939     -#define PTR(gen, offset, dev) \
940     +#define MAKE_PTR(gen, offset, dev) \
941     ((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
942    
943     /* Bkey utility code */
944     diff --git a/mm/huge_memory.c b/mm/huge_memory.c
945     index 8258e9eee806..3cae1dcf069c 100644
946     --- a/mm/huge_memory.c
947     +++ b/mm/huge_memory.c
948     @@ -745,20 +745,15 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
949     EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
950    
951     static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
952     - pmd_t *pmd)
953     + pmd_t *pmd, int flags)
954     {
955     pmd_t _pmd;
956    
957     - /*
958     - * We should set the dirty bit only for FOLL_WRITE but for now
959     - * the dirty bit in the pmd is meaningless. And if the dirty
960     - * bit will become meaningful and we'll only set it with
961     - * FOLL_WRITE, an atomic set_bit will be required on the pmd to
962     - * set the young bit, instead of the current set_pmd_at.
963     - */
964     - _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
965     + _pmd = pmd_mkyoung(*pmd);
966     + if (flags & FOLL_WRITE)
967     + _pmd = pmd_mkdirty(_pmd);
968     if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
969     - pmd, _pmd, 1))
970     + pmd, _pmd, flags & FOLL_WRITE))
971     update_mmu_cache_pmd(vma, addr, pmd);
972     }
973    
974     @@ -787,7 +782,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
975     return NULL;
976    
977     if (flags & FOLL_TOUCH)
978     - touch_pmd(vma, addr, pmd);
979     + touch_pmd(vma, addr, pmd, flags);
980    
981     /*
982     * device mapped pages can only be returned if the
983     @@ -1158,7 +1153,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
984     page = pmd_page(*pmd);
985     VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
986     if (flags & FOLL_TOUCH)
987     - touch_pmd(vma, addr, pmd);
988     + touch_pmd(vma, addr, pmd, flags);
989     if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
990     /*
991     * We don't mlock() pte-mapped THPs. This way we can avoid
992     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
993     index 65c36acf8a6b..6ff65c405243 100644
994     --- a/mm/hugetlb.c
995     +++ b/mm/hugetlb.c
996     @@ -3135,6 +3135,13 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
997     }
998     }
999    
1000     +static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
1001     +{
1002     + if (addr & ~(huge_page_mask(hstate_vma(vma))))
1003     + return -EINVAL;
1004     + return 0;
1005     +}
1006     +
1007     /*
1008     * We cannot handle pagefaults against hugetlb pages at all. They cause
1009     * handle_mm_fault() to try to instantiate regular-sized pages in the
1010     @@ -3151,6 +3158,7 @@ const struct vm_operations_struct hugetlb_vm_ops = {
1011     .fault = hugetlb_vm_op_fault,
1012     .open = hugetlb_vm_op_open,
1013     .close = hugetlb_vm_op_close,
1014     + .split = hugetlb_vm_op_split,
1015     };
1016    
1017     static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
1018     diff --git a/mm/madvise.c b/mm/madvise.c
1019     index 55f30ec32e5b..a49afe08698b 100644
1020     --- a/mm/madvise.c
1021     +++ b/mm/madvise.c
1022     @@ -228,15 +228,14 @@ static long madvise_willneed(struct vm_area_struct *vma,
1023     {
1024     struct file *file = vma->vm_file;
1025    
1026     + *prev = vma;
1027     #ifdef CONFIG_SWAP
1028     if (!file) {
1029     - *prev = vma;
1030     force_swapin_readahead(vma, start, end);
1031     return 0;
1032     }
1033    
1034     if (shmem_mapping(file->f_mapping)) {
1035     - *prev = vma;
1036     force_shm_swapin_readahead(vma, start, end,
1037     file->f_mapping);
1038     return 0;
1039     @@ -251,7 +250,6 @@ static long madvise_willneed(struct vm_area_struct *vma,
1040     return 0;
1041     }
1042    
1043     - *prev = vma;
1044     start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1045     if (end > vma->vm_end)
1046     end = vma->vm_end;
1047     diff --git a/mm/mmap.c b/mm/mmap.c
1048     index 75d263bd8739..5b48adb4aa56 100644
1049     --- a/mm/mmap.c
1050     +++ b/mm/mmap.c
1051     @@ -2538,9 +2538,11 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
1052     struct vm_area_struct *new;
1053     int err;
1054    
1055     - if (is_vm_hugetlb_page(vma) && (addr &
1056     - ~(huge_page_mask(hstate_vma(vma)))))
1057     - return -EINVAL;
1058     + if (vma->vm_ops && vma->vm_ops->split) {
1059     + err = vma->vm_ops->split(vma, addr);
1060     + if (err)
1061     + return err;
1062     + }
1063    
1064     new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
1065     if (!new)
1066     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
1067     index 4a044134ce84..ef5ee56095e8 100644
1068     --- a/mm/page_alloc.c
1069     +++ b/mm/page_alloc.c
1070     @@ -7309,11 +7309,18 @@ int alloc_contig_range(unsigned long start, unsigned long end,
1071    
1072     /*
1073     * In case of -EBUSY, we'd like to know which page causes problem.
1074     - * So, just fall through. We will check it in test_pages_isolated().
1075     + * So, just fall through. test_pages_isolated() has a tracepoint
1076     + * which will report the busy page.
1077     + *
1078     + * It is possible that busy pages could become available before
1079     + * the call to test_pages_isolated, and the range will actually be
1080     + * allocated. So, if we fall through be sure to clear ret so that
1081     + * -EBUSY is not accidentally used or returned to caller.
1082     */
1083     ret = __alloc_contig_migrate_range(&cc, start, end);
1084     if (ret && ret != -EBUSY)
1085     goto done;
1086     + ret =0;
1087    
1088     /*
1089     * Pages from [start, end) are within a MAX_ORDER_NR_PAGES