Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.13/0108-4.13.9-all-fixes.patch

Revision 3012
Wed Oct 25 09:41:48 2017 UTC by niro
File size: 28207 byte(s)
-linux-4.13.9
diff --git a/Makefile b/Makefile
index 66ec023da822..aa0267950444 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 13
-SUBLEVEL = 8
+SUBLEVEL = 9
EXTRAVERSION =
NAME = Fearless Coyote

diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 98b3dd8cf2bf..a7be1b4283a0 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -599,9 +599,14 @@ static const struct x86_cpu_id deadline_match[] = {

static void apic_check_deadline_errata(void)
{
- const struct x86_cpu_id *m = x86_match_cpu(deadline_match);
+ const struct x86_cpu_id *m;
u32 rev;

+ if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) ||
+ boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return;
+
+ m = x86_match_cpu(deadline_match);
if (!m)
return;

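The hunk above reorders apic_check_deadline_errata() so the errata table is only consulted when the TSC-deadline timer is actually present and the kernel runs on bare metal; under a hypervisor the reported microcode revision is not the host's, so checking it would only produce spurious warnings. A condensed sketch of the resulting control flow (the microcode-revision comparison at the end is elided):

	static void apic_check_deadline_errata(void)
	{
		const struct x86_cpu_id *m;

		/* Bail out early: no deadline timer means nothing to
		 * check, and under a hypervisor the visible microcode
		 * revision is meaningless for this errata. */
		if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) ||
		    boot_cpu_has(X86_FEATURE_HYPERVISOR))
			return;

		m = x86_match_cpu(deadline_match);
		if (!m)
			return;

		/* ... compare this CPU's microcode revision against the
		 * minimum recorded in the match entry and warn if old ... */
	}
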
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 3cd60f460b61..8b27211f6c50 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -281,6 +281,7 @@ config HID_ELECOM
Support for ELECOM devices:
- BM084 Bluetooth Mouse
- DEFT Trackball (Wired and wireless)
+ - HUGE Trackball (Wired and wireless)

config HID_ELO
tristate "ELO USB 4000/4500 touchscreen"
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 9017dcc14502..efb3501b4123 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2031,6 +2031,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
#endif
#if IS_ENABLED(CONFIG_HID_ELO)
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index e2c7465df69f..54aeea57d209 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -3,6 +3,7 @@
* Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
* Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
* Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
+ * Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
*/

/*
@@ -32,9 +33,11 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
break;
case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
- /* The DEFT trackball has eight buttons, but its descriptor only
- * reports five, disabling the three Fn buttons on the top of
- * the mouse.
+ case USB_DEVICE_ID_ELECOM_HUGE_WIRED:
+ case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS:
+ /* The DEFT/HUGE trackball has eight buttons, but its descriptor
+ * only reports five, disabling the three Fn buttons on the top
+ * of the mouse.
*
* Apply the following diff to the descriptor:
*
@@ -62,7 +65,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
* End Collection, End Collection,
*/
if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
- hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n");
+ hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n");
rdesc[13] = 8; /* Button/Variable Report Count */
rdesc[21] = 8; /* Button/Variable Usage Maximum */
rdesc[29] = 0; /* Button/Constant Report Count */
@@ -76,6 +79,8 @@ static const struct hid_device_id elecom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
{ }
};
MODULE_DEVICE_TABLE(hid, elecom_devices);
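
In elecom_report_fixup() the HUGE trackball simply joins the DEFT cases: both ship a 213-byte report descriptor that declares only five buttons, and the same three byte patches expose all eight. Read as a standalone sketch (offsets and values exactly as in the hunk above), the fixup does:

	if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
		rdesc[13] = 8;	/* Report Count: 5 -> 8 button bits */
		rdesc[21] = 8;	/* Usage Maximum: buttons 1..8 */
		rdesc[29] = 0;	/* Constant padding: drop the 3 filler bits */
	}

With eight 1-bit button usages and no constant padding, the button byte is fully decoded and the three Fn buttons start generating events.
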
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index c9ba4c6db74c..1333ac5c6597 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -366,6 +366,8 @@
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
#define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe
#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff
+#define USB_DEVICE_ID_ELECOM_HUGE_WIRED 0x010c
+#define USB_DEVICE_ID_ELECOM_HUGE_WIRELESS 0x010d

#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
#define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index e57cc40cb768..be3fccab07fe 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -177,6 +177,11 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
&vmbus_connection.chn_msg_list);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

+ if (newchannel->rescind) {
+ err = -ENODEV;
+ goto error_free_gpadl;
+ }
+
ret = vmbus_post_msg(open_msg,
sizeof(struct vmbus_channel_open_channel), true);

@@ -421,6 +426,11 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,

spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

+ if (channel->rescind) {
+ ret = -ENODEV;
+ goto cleanup;
+ }
+
ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
sizeof(*msginfo), true);
if (ret != 0)
@@ -494,6 +504,10 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
list_add_tail(&info->msglistentry,
&vmbus_connection.chn_msg_list);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ if (channel->rescind)
+ goto post_msg_err;
+
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
true);

@@ -626,6 +640,7 @@ void vmbus_close(struct vmbus_channel *channel)
*/
return;
}
+ mutex_lock(&vmbus_connection.channel_mutex);
/*
* Close all the sub-channels first and then close the
* primary channel.
@@ -634,16 +649,15 @@ void vmbus_close(struct vmbus_channel *channel)
cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
vmbus_close_internal(cur_channel);
if (cur_channel->rescind) {
- mutex_lock(&vmbus_connection.channel_mutex);
- hv_process_channel_removal(cur_channel,
+ hv_process_channel_removal(
cur_channel->offermsg.child_relid);
- mutex_unlock(&vmbus_connection.channel_mutex);
}
}
/*
* Now close the primary.
*/
vmbus_close_internal(channel);
+ mutex_unlock(&vmbus_connection.channel_mutex);
}
EXPORT_SYMBOL_GPL(vmbus_close);

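The vmbus_open()/vmbus_establish_gpadl()/vmbus_teardown_gpadl() hunks all apply one pattern: after the request is queued on vmbus_connection.chn_msg_list (under channelmsg_lock), re-check channel->rescind before posting to the host, since vmbus_rescind_cleanup() now sets that flag under the same lock (see the channel_mgmt.c hunk below). A rescind therefore either lands before the check, in which case we unwind locally, or after our entry is on the list, in which case the rescind cleanup will complete it. Reduced to a sketch (msginfo, msg, and cleanup stand in for the function-specific names):

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&msginfo->msglistentry, &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (channel->rescind) {
		/* host already revoked the channel; don't post */
		ret = -ENODEV;
		goto cleanup;
	}

	ret = vmbus_post_msg(msg, msgsize, true);

vmbus_close() additionally widens channel_mutex to cover the whole close sequence instead of taking it per sub-channel, so a racing rescind cannot remove a sub-channel out from under the list walk.
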
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 037361158074..18c94ed02562 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -159,7 +159,7 @@ static void vmbus_rescind_cleanup(struct vmbus_channel *channel)


spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-
+ channel->rescind = true;
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
msglistentry) {

@@ -381,14 +381,21 @@ static void vmbus_release_relid(u32 relid)
true);
}

-void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
+void hv_process_channel_removal(u32 relid)
{
unsigned long flags;
- struct vmbus_channel *primary_channel;
+ struct vmbus_channel *primary_channel, *channel;

- BUG_ON(!channel->rescind);
BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));

+ /*
+ * Make sure channel is valid as we may have raced.
+ */
+ channel = relid2channel(relid);
+ if (!channel)
+ return;
+
+ BUG_ON(!channel->rescind);
if (channel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(channel->target_cpu,
@@ -451,6 +458,12 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
/* Make sure this is a new offer */
mutex_lock(&vmbus_connection.channel_mutex);

+ /*
+ * Now that we have acquired the channel_mutex,
+ * we can release the potentially racing rescind thread.
+ */
+ atomic_dec(&vmbus_connection.offer_in_progress);
+
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
if (!uuid_le_cmp(channel->offermsg.offer.if_type,
newchannel->offermsg.offer.if_type) &&
@@ -481,7 +494,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
channel->num_sc++;
spin_unlock_irqrestore(&channel->lock, flags);
} else {
- atomic_dec(&vmbus_connection.offer_in_progress);
goto err_free_chan;
}
}
@@ -510,7 +522,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
if (!fnew) {
if (channel->sc_creation_callback != NULL)
channel->sc_creation_callback(newchannel);
- atomic_dec(&vmbus_connection.offer_in_progress);
+ newchannel->probe_done = true;
return;
}

@@ -541,7 +553,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
goto err_deq_chan;
}

- atomic_dec(&vmbus_connection.offer_in_progress);
+ newchannel->probe_done = true;
return;

err_deq_chan:
@@ -839,7 +851,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_rescind_offer *rescind;
struct vmbus_channel *channel;
- unsigned long flags;
struct device *dev;

rescind = (struct vmbus_channel_rescind_offer *)hdr;
@@ -878,15 +889,25 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
return;
}

- spin_lock_irqsave(&channel->lock, flags);
- channel->rescind = true;
- spin_unlock_irqrestore(&channel->lock, flags);
+ /*
+ * Now wait for offer handling to complete.
+ */
+ while (READ_ONCE(channel->probe_done) == false) {
+ /*
+ * We wait here until any channel offer is currently
+ * being processed.
+ */
+ msleep(1);
+ }

- vmbus_rescind_cleanup(channel);
+ /*
+ * At this point, the rescind handling can proceed safely.
+ */

if (channel->device_obj) {
if (channel->chn_rescind_callback) {
channel->chn_rescind_callback(channel);
+ vmbus_rescind_cleanup(channel);
return;
}
/*
@@ -895,6 +916,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
*/
dev = get_device(&channel->device_obj->device);
if (dev) {
+ vmbus_rescind_cleanup(channel);
vmbus_device_unregister(channel->device_obj);
put_device(dev);
}
@@ -907,16 +929,16 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
* 1. Close all sub-channels first
* 2. Then close the primary channel.
*/
+ mutex_lock(&vmbus_connection.channel_mutex);
+ vmbus_rescind_cleanup(channel);
if (channel->state == CHANNEL_OPEN_STATE) {
/*
* The channel is currently not open;
* it is safe for us to cleanup the channel.
*/
- mutex_lock(&vmbus_connection.channel_mutex);
- hv_process_channel_removal(channel,
- channel->offermsg.child_relid);
- mutex_unlock(&vmbus_connection.channel_mutex);
+ hv_process_channel_removal(rescind->child_relid);
}
+ mutex_unlock(&vmbus_connection.channel_mutex);
}
}

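Taken together, the channel_mgmt.c hunks re-order offer and rescind handling: the offer path drops offer_in_progress as soon as it holds channel_mutex and sets probe_done once the device is fully instantiated, while the rescind path polls probe_done before doing any teardown, and hv_process_channel_removal() revalidates the relid itself via relid2channel(). The two sides, reduced to a sketch:

	/* offer side (vmbus_process_offer) */
	mutex_lock(&vmbus_connection.channel_mutex);
	atomic_dec(&vmbus_connection.offer_in_progress);	/* unblock rescind */
	/* ... add the channel, create and register the device ... */
	newchannel->probe_done = true;

	/* rescind side (vmbus_onoffer_rescind) */
	while (READ_ONCE(channel->probe_done) == false)
		msleep(1);		/* offer still being processed */
	vmbus_rescind_cleanup(channel);	/* sets channel->rescind under the lock */
	/* ... unregister device / remove channel under channel_mutex ... */
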
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 1f450c39a9b0..741daa6e2fc7 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -29,6 +29,7 @@
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

@@ -94,30 +95,6 @@ hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
ring_info->ring_buffer->write_index = next_write_location;
}

-/* Get the next read location for the specified ring buffer. */
-static inline u32
-hv_get_next_read_location(const struct hv_ring_buffer_info *ring_info)
-{
- return ring_info->ring_buffer->read_index;
-}
-
-/*
- * Get the next read location + offset for the specified ring buffer.
- * This allows the caller to skip.
- */
-static inline u32
-hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
- u32 offset)
-{
- u32 next = ring_info->ring_buffer->read_index;
-
- next += offset;
- if (next >= ring_info->ring_datasize)
- next -= ring_info->ring_datasize;
-
- return next;
-}
-
/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
@@ -141,29 +118,6 @@ hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
return (u64)ring_info->ring_buffer->write_index << 32;
}

-/*
- * Helper routine to copy to source from ring buffer.
- * Assume there is enough room. Handles wrap-around in src case only!!
- */
-static u32 hv_copyfrom_ringbuffer(
- const struct hv_ring_buffer_info *ring_info,
- void *dest,
- u32 destlen,
- u32 start_read_offset)
-{
- void *ring_buffer = hv_get_ring_buffer(ring_info);
- u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
-
- memcpy(dest, ring_buffer + start_read_offset, destlen);
-
- start_read_offset += destlen;
- if (start_read_offset >= ring_buffer_size)
- start_read_offset -= ring_buffer_size;
-
- return start_read_offset;
-}
-
-
/*
* Helper routine to copy from source to ring buffer.
* Assume there is enough room. Handles wrap-around in dest case only!!
@@ -334,33 +288,22 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
return 0;
}

-static inline void
-init_cached_read_index(struct hv_ring_buffer_info *rbi)
-{
- rbi->cached_read_index = rbi->ring_buffer->read_index;
-}
-
int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
u64 *requestid, bool raw)
{
- u32 bytes_avail_toread;
- u32 next_read_location;
- u64 prev_indices = 0;
- struct vmpacket_descriptor desc;
- u32 offset;
- u32 packetlen;
- struct hv_ring_buffer_info *inring_info = &channel->inbound;
-
- if (buflen <= 0)
+ struct vmpacket_descriptor *desc;
+ u32 packetlen, offset;
+
+ if (unlikely(buflen == 0))
return -EINVAL;

*buffer_actual_len = 0;
*requestid = 0;

- bytes_avail_toread = hv_get_bytes_to_read(inring_info);
/* Make sure there is something to read */
- if (bytes_avail_toread < sizeof(desc)) {
+ desc = hv_pkt_iter_first(channel);
+ if (desc == NULL) {
/*
* No error is set when there is even no header, drivers are
* supposed to analyze buffer_actual_len.
@@ -368,48 +311,22 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
return 0;
}

- init_cached_read_index(inring_info);
-
- next_read_location = hv_get_next_read_location(inring_info);
- next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
- sizeof(desc),
- next_read_location);
-
- offset = raw ? 0 : (desc.offset8 << 3);
- packetlen = (desc.len8 << 3) - offset;
+ offset = raw ? 0 : (desc->offset8 << 3);
+ packetlen = (desc->len8 << 3) - offset;
*buffer_actual_len = packetlen;
- *requestid = desc.trans_id;
-
- if (bytes_avail_toread < packetlen + offset)
- return -EAGAIN;
+ *requestid = desc->trans_id;

- if (packetlen > buflen)
+ if (unlikely(packetlen > buflen))
return -ENOBUFS;

- next_read_location =
- hv_get_next_readlocation_withoffset(inring_info, offset);
+ /* since ring is double mapped, only one copy is necessary */
+ memcpy(buffer, (const char *)desc + offset, packetlen);

- next_read_location = hv_copyfrom_ringbuffer(inring_info,
- buffer,
- packetlen,
- next_read_location);
+ /* Advance ring index to next packet descriptor */
+ __hv_pkt_iter_next(channel, desc);

- next_read_location = hv_copyfrom_ringbuffer(inring_info,
- &prev_indices,
- sizeof(u64),
- next_read_location);
-
- /*
- * Make sure all reads are done before we update the read index since
- * the writer may start writing to the read area once the read index
- * is updated.
- */
- virt_mb();
-
- /* Update the read index */
- hv_set_next_read_location(inring_info, next_read_location);
-
- hv_signal_on_read(channel);
+ /* Notify host of update */
+ hv_pkt_iter_close(channel);

return 0;
}
@@ -441,9 +358,6 @@ struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *rbi = &channel->inbound;

- /* set state for later hv_signal_on_read() */
- init_cached_read_index(rbi);
-
if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
return NULL;

@@ -471,10 +385,7 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
rbi->priv_read_index -= dsize;

/* more data? */
- if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
- return NULL;
- else
- return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
+ return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);

@@ -484,6 +395,7 @@ EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *rbi = &channel->inbound;
+ u32 orig_write_sz = hv_get_bytes_to_write(rbi);

/*
* Make sure all reads are done before we update the read index since
@@ -493,6 +405,40 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
virt_rmb();
rbi->ring_buffer->read_index = rbi->priv_read_index;

- hv_signal_on_read(channel);
+ /*
+ * Issue a full memory barrier before making the signaling decision.
+ * Here is the reason for having this barrier:
+ * If the reading of the pend_sz (in this function)
+ * were to be reordered and read before we commit the new read
+ * index (in the calling function) we could
+ * have a problem. If the host were to set the pending_sz after we
+ * have sampled pending_sz and go to sleep before we commit the
+ * read index, we could miss sending the interrupt. Issue a full
+ * memory barrier to address this.
+ */
+ virt_mb();
+
+ /* If host has disabled notifications then skip */
+ if (rbi->ring_buffer->interrupt_mask)
+ return;
+
+ if (rbi->ring_buffer->feature_bits.feat_pending_send_sz) {
+ u32 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
+
+ /*
+ * If there was space before we began iteration,
+ * then host was not blocked. Also handles case where
+ * pending_sz is zero then host has nothing pending
+ * and does not need to be signaled.
+ */
+ if (orig_write_sz > pending_sz)
+ return;
+
+ /* If pending write will not fit, don't give false hope. */
+ if (hv_get_bytes_to_write(rbi) < pending_sz)
+ return;
+ }
+
+ vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
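
hv_ringbuffer_read() is rebuilt on the packet-iterator API from the same file: because the ring buffer is double-mapped, a descriptor can be read in place through the pointer hv_pkt_iter_first() returns, and __hv_pkt_iter_next() now just re-runs hv_pkt_iter_first() after advancing priv_read_index. A consumer-side sketch using only functions visible in this hunk (handle_packet is a hypothetical callback):

	struct vmpacket_descriptor *desc;

	for (desc = hv_pkt_iter_first(channel);
	     desc != NULL;
	     desc = __hv_pkt_iter_next(channel, desc)) {
		/* payload starts offset8 quadwords into the packet */
		void *data = (char *)desc + (desc->offset8 << 3);
		u32 len = (desc->len8 << 3) - (desc->offset8 << 3);

		handle_packet(data, len);	/* hypothetical handler */
	}

	hv_pkt_iter_close(channel);	/* commit read_index, maybe signal host */

hv_pkt_iter_close() also absorbs the old hv_signal_on_read() logic: by snapshotting the free write space before iteration began (orig_write_sz), it can tell whether the host was genuinely blocked, instead of comparing against a cached read index.
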
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index ed84e96715a0..5ad627044dd1 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -768,8 +768,7 @@ static void vmbus_device_release(struct device *device)
struct vmbus_channel *channel = hv_dev->channel;

mutex_lock(&vmbus_connection.channel_mutex);
- hv_process_channel_removal(channel,
- channel->offermsg.child_relid);
+ hv_process_channel_removal(channel->offermsg.child_relid);
mutex_unlock(&vmbus_connection.channel_mutex);
kfree(hv_dev);

@@ -940,6 +939,9 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
if (channel->offermsg.child_relid != relid)
continue;

+ if (channel->rescind)
+ continue;
+
switch (channel->callback_mode) {
case HV_CALL_ISR:
vmbus_channel_isr(channel);
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index b7d7bbec74e0..3647085dab0a 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -127,7 +127,6 @@ struct hv_ring_buffer_info {
u32 ring_data_startoffset;
u32 priv_write_index;
u32 priv_read_index;
- u32 cached_read_index;
};

/*
@@ -180,19 +179,6 @@ static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
return write;
}

-static inline u32 hv_get_cached_bytes_to_write(
- const struct hv_ring_buffer_info *rbi)
-{
- u32 read_loc, write_loc, dsize, write;
-
- dsize = rbi->ring_datasize;
- read_loc = rbi->cached_read_index;
- write_loc = rbi->ring_buffer->write_index;
-
- write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
- read_loc - write_loc;
- return write;
-}
/*
* VMBUS version is 32 bit entity broken up into
* two 16 bit quantities: major_number. minor_number.
@@ -895,6 +881,8 @@ struct vmbus_channel {
*/
enum hv_numa_policy affinity_policy;

+ bool probe_done;
+
};

static inline bool is_hvsock_channel(const struct vmbus_channel *c)
@@ -1453,7 +1441,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version);

-void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
+void hv_process_channel_removal(u32 relid);

void vmbus_setevent(struct vmbus_channel *channel);
/*
@@ -1473,55 +1461,6 @@ hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
return ring_info->ring_buffer->buffer;
}

-/*
- * To optimize the flow management on the send-side,
- * when the sender is blocked because of lack of
- * sufficient space in the ring buffer, potential the
- * consumer of the ring buffer can signal the producer.
- * This is controlled by the following parameters:
- *
- * 1. pending_send_sz: This is the size in bytes that the
- * producer is trying to send.
- * 2. The feature bit feat_pending_send_sz set to indicate if
- * the consumer of the ring will signal when the ring
- * state transitions from being full to a state where
- * there is room for the producer to send the pending packet.
- */
-
-static inline void hv_signal_on_read(struct vmbus_channel *channel)
-{
- u32 cur_write_sz, cached_write_sz;
- u32 pending_sz;
- struct hv_ring_buffer_info *rbi = &channel->inbound;
-
- /*
- * Issue a full memory barrier before making the signaling decision.
- * Here is the reason for having this barrier:
- * If the reading of the pend_sz (in this function)
- * were to be reordered and read before we commit the new read
- * index (in the calling function) we could
- * have a problem. If the host were to set the pending_sz after we
- * have sampled pending_sz and go to sleep before we commit the
- * read index, we could miss sending the interrupt. Issue a full
- * memory barrier to address this.
- */
- virt_mb();
-
- pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
- /* If the other end is not blocked on write don't bother. */
- if (pending_sz == 0)
- return;
-
- cur_write_sz = hv_get_bytes_to_write(rbi);
-
- if (cur_write_sz < pending_sz)
- return;
-
- cached_write_sz = hv_get_cached_bytes_to_write(rbi);
- if (cached_write_sz < pending_sz)
- vmbus_setevent(channel);
-}
-
/*
* Mask off host interrupt callback notifications
*/
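
With cached_read_index gone, hv_signal_on_read() and hv_get_cached_bytes_to_write() have no callers and are removed; their barrier and pending_send_sz logic now live in hv_pkt_iter_close() above. A worked example of that signaling decision, using the hv_get_bytes_to_write() formula this header keeps (numbers are illustrative only):

	/* ring_datasize = 4096, read_index = 512, write_index = 3584
	 *   free space = 4096 - (3584 - 512) = 1024 bytes
	 *
	 * Host advertises pending_send_sz = 2048 (a blocked writer):
	 *   orig_write_sz = 1024, sampled before we read anything
	 *   we consume 2560 bytes -> read_index = 3072,
	 *   free space = 4096 - (3584 - 3072) = 3584
	 *
	 * In hv_pkt_iter_close():
	 *   orig_write_sz (1024) > pending_sz (2048)?  no -> host was blocked
	 *   free space (3584) < pending_sz (2048)?     no -> it fits now
	 *   => vmbus_setevent(channel) wakes the host.
	 */
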
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 8ec6ba230bb9..6b9311631aa1 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -6,17 +6,6 @@

#include "internal.h"

-static inline bool check_pmd(struct page_vma_mapped_walk *pvmw)
-{
- pmd_t pmde;
- /*
- * Make sure we don't re-load pmd between present and !trans_huge check.
- * We need a consistent view.
- */
- pmde = READ_ONCE(*pvmw->pmd);
- return pmd_present(pmde) && !pmd_trans_huge(pmde);
-}
-
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
page_vma_mapped_walk_done(pvmw);
@@ -106,6 +95,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
+ pmd_t pmde;

/* The only possible pmd mapping has been handled on last iteration */
if (pvmw->pmd && !pvmw->pte)
@@ -138,7 +128,13 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
if (!pud_present(*pud))
return false;
pvmw->pmd = pmd_offset(pud, pvmw->address);
- if (pmd_trans_huge(*pvmw->pmd)) {
+ /*
+ * Make sure the pmd value isn't cached in a register by the
+ * compiler and used as a stale value after we've observed a
+ * subsequent update.
+ */
+ pmde = READ_ONCE(*pvmw->pmd);
+ if (pmd_trans_huge(pmde)) {
pvmw->ptl = pmd_lock(mm, pvmw->pmd);
if (!pmd_present(*pvmw->pmd))
return not_found(pvmw);
@@ -153,9 +149,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
spin_unlock(pvmw->ptl);
pvmw->ptl = NULL;
}
- } else {
- if (!check_pmd(pvmw))
- return false;
+ } else if (!pmd_present(pmde)) {
+ return false;
}
if (!map_pte(pvmw))
goto next_pte;
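
The mm fix folds check_pmd() into the caller so the huge/present decision is made on a single READ_ONCE() snapshot of the pmd. Previously pmd_trans_huge() dereferenced *pvmw->pmd directly while the else-branch loaded it again inside check_pmd(); the compiler was free to reload between the two tests, so a concurrent THP collapse or split could change the value mid-decision. In miniature:

	/* before: two independent loads of *pvmw->pmd */
	if (pmd_trans_huge(*pvmw->pmd))		/* load #1, not READ_ONCE */
		... ;
	else if (!check_pmd(pvmw))		/* load #2 may see a different pmd */
		return false;

	/* after: one snapshot, tested consistently */
	pmde = READ_ONCE(*pvmw->pmd);
	if (pmd_trans_huge(pmde))
		... ;	/* re-validated under pmd_lock() before use */
	else if (!pmd_present(pmde))
		return false;

Note that check_pmd() itself already used READ_ONCE(); the problem was the separate, plain load feeding pmd_trans_huge() in the branch above it.
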
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 01e779b91c8e..2e3ffc3bc483 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -309,10 +309,11 @@ static char *get_config_name(struct list_head *head_terms)
static struct perf_evsel *
__add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr,
- char *name, struct cpu_map *cpus,
+ char *name, struct perf_pmu *pmu,
struct list_head *config_terms)
{
struct perf_evsel *evsel;
+ struct cpu_map *cpus = pmu ? pmu->cpus : NULL;

event_attr_init(attr);

@@ -323,7 +324,7 @@ __add_event(struct list_head *list, int *idx,
(*idx)++;
evsel->cpus = cpu_map__get(cpus);
evsel->own_cpus = cpu_map__get(cpus);
- evsel->system_wide = !!cpus;
+ evsel->system_wide = pmu ? pmu->is_uncore : false;

if (name)
evsel->name = strdup(name);
@@ -1232,7 +1233,7 @@ int parse_events_add_pmu(struct parse_events_evlist *data,

if (!head_config) {
attr.type = pmu->type;
- evsel = __add_event(list, &data->idx, &attr, NULL, pmu->cpus, NULL);
+ evsel = __add_event(list, &data->idx, &attr, NULL, pmu, NULL);
return evsel ? 0 : -ENOMEM;
}

@@ -1253,7 +1254,7 @@ int parse_events_add_pmu(struct parse_events_evlist *data,
return -EINVAL;

evsel = __add_event(list, &data->idx, &attr,
- get_config_name(head_config), pmu->cpus,
+ get_config_name(head_config), pmu,
&config_terms);
if (evsel) {
evsel->unit = info.unit;
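
The behavioral change in __add_event() is the system_wide predicate. Any PMU that advertised a cpu map used to be forced system-wide, which also caught CPU PMUs: on arm/arm64 a CPU PMU exposes a sysfs cpus file, so pmu->cpus is non-NULL (see the pmu.c hunk below). Passing the struct perf_pmu through lets the predicate key off an explicit flag instead:

	/* before: a cpu map alone implied an uncore-style PMU */
	evsel->system_wide = !!cpus;

	/* after: only PMUs with a sysfs "cpumask" file are system-wide */
	evsel->system_wide = pmu ? pmu->is_uncore : false;
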
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index ac16a9db1fb5..1c4d7b4e4fb5 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -470,17 +470,36 @@ static void pmu_read_sysfs(void)
closedir(dir);
}

+static struct cpu_map *__pmu_cpumask(const char *path)
+{
+ FILE *file;
+ struct cpu_map *cpus;
+
+ file = fopen(path, "r");
+ if (!file)
+ return NULL;
+
+ cpus = cpu_map__read(file);
+ fclose(file);
+ return cpus;
+}
+
+/*
+ * Uncore PMUs have a "cpumask" file under sysfs. CPU PMUs (e.g. on arm/arm64)
+ * may have a "cpus" file.
+ */
+#define CPUS_TEMPLATE_UNCORE "%s/bus/event_source/devices/%s/cpumask"
+#define CPUS_TEMPLATE_CPU "%s/bus/event_source/devices/%s/cpus"
+
static struct cpu_map *pmu_cpumask(const char *name)
{
- struct stat st;
char path[PATH_MAX];
- FILE *file;
struct cpu_map *cpus;
const char *sysfs = sysfs__mountpoint();
const char *templates[] = {
- "%s/bus/event_source/devices/%s/cpumask",
- "%s/bus/event_source/devices/%s/cpus",
- NULL
+ CPUS_TEMPLATE_UNCORE,
+ CPUS_TEMPLATE_CPU,
+ NULL
};
const char **template;

@@ -489,20 +508,25 @@ static struct cpu_map *pmu_cpumask(const char *name)

for (template = templates; *template; template++) {
snprintf(path, PATH_MAX, *template, sysfs, name);
- if (stat(path, &st) == 0)
- break;
+ cpus = __pmu_cpumask(path);
+ if (cpus)
+ return cpus;
}

- if (!*template)
- return NULL;
+ return NULL;
+}

- file = fopen(path, "r");
- if (!file)
- return NULL;
+static bool pmu_is_uncore(const char *name)
+{
+ char path[PATH_MAX];
+ struct cpu_map *cpus;
+ const char *sysfs = sysfs__mountpoint();

- cpus = cpu_map__read(file);
- fclose(file);
- return cpus;
+ snprintf(path, PATH_MAX, CPUS_TEMPLATE_UNCORE, sysfs, name);
+ cpus = __pmu_cpumask(path);
+ cpu_map__put(cpus);
+
+ return !!cpus;
}

/*
@@ -617,6 +641,8 @@ static struct perf_pmu *pmu_lookup(const char *name)

pmu->cpus = pmu_cpumask(name);

+ pmu->is_uncore = pmu_is_uncore(name);
+
INIT_LIST_HEAD(&pmu->format);
INIT_LIST_HEAD(&pmu->aliases);
list_splice(&format, &pmu->format);
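
pmu_is_uncore() decides the question purely from sysfs layout: uncore PMUs publish cpumask, CPU PMUs publish cpus, and pmu_cpumask() now tries to parse each template instead of merely stat()ing it. A hedged usage sketch tying the pieces together (perf_pmu__find() is the lookup declared in pmu.h; the PMU name is an example):

	struct perf_pmu *pmu = perf_pmu__find("uncore_imc");	/* example name */

	if (pmu && pmu->is_uncore) {
		/*
		 * Events on this PMU are opened system-wide on the CPUs
		 * listed in .../uncore_imc/cpumask (pmu->cpus).
		 */
	}

One subtlety in pmu_is_uncore(): cpu_map__put(cpus) drops the reference before !!cpus is evaluated, which is safe only because the test reads the local pointer value, not the map it used to refer to.
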
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 389e9729331f..fe0de0502ce2 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -22,6 +22,7 @@ struct perf_pmu {
char *name;
__u32 type;
bool selectable;
+ bool is_uncore;
struct perf_event_attr *default_config;
struct cpu_map *cpus;
struct list_head format; /* HEAD struct perf_pmu_format -> list */