Magellan Linux

Annotation of /trunk/kernel-alx/patches-3.10/0164-3.10.65-all-fixes.patch

Revision 2652
Tue Jul 21 16:20:22 2015 UTC by niro
File size: 51228 bytes
-linux-3.10.65
1 niro 2652 diff --git a/Documentation/ramoops.txt b/Documentation/ramoops.txt
2     index 69b3cac4749d..5d8675615e59 100644
3     --- a/Documentation/ramoops.txt
4     +++ b/Documentation/ramoops.txt
5     @@ -14,11 +14,19 @@ survive after a restart.
6    
7     1. Ramoops concepts
8    
9     -Ramoops uses a predefined memory area to store the dump. The start and size of
10     -the memory area are set using two variables:
11     +Ramoops uses a predefined memory area to store the dump. The start and size
12     +and type of the memory area are set using three variables:
13     * "mem_address" for the start
14     * "mem_size" for the size. The memory size will be rounded down to a
15     power of two.
16     + * "mem_type" to specifiy if the memory type (default is pgprot_writecombine).
17     +
18     +Typically the default value of mem_type=0 should be used as that sets the pstore
19     +mapping to pgprot_writecombine. Setting mem_type=1 attempts to use
20     +pgprot_noncached, which only works on some platforms. This is because pstore
21     +depends on atomic operations. At least on ARM, pgprot_noncached causes the
22     +memory to be mapped strongly ordered, and atomic operations on strongly ordered
23     +memory are implementation defined, and won't work on many ARMs such as omaps.
24    
25     The memory area is divided into "record_size" chunks (also rounded down to
26     power of two) and each oops/panic writes a "record_size" chunk of
27     @@ -55,6 +63,7 @@ Setting the ramoops parameters can be done in 2 different manners:
28     static struct ramoops_platform_data ramoops_data = {
29     .mem_size = <...>,
30     .mem_address = <...>,
31     + .mem_type = <...>,
32     .record_size = <...>,
33     .dump_oops = <...>,
34     .ecc = <...>,
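
The documentation hunk above adds a .mem_type field to the platform-data template. As a rough sketch of how a board file might fill that template in (the numeric values are placeholders, only fields visible in this patch are shown, and the registration boilerplate is assumed rather than taken from the patch):

    static struct ramoops_platform_data ramoops_data = {
            .mem_size    = 0x100000,   /* 1 MiB reserved region (placeholder) */
            .mem_address = 0x8000000,  /* placeholder physical address */
            .mem_type    = 0,          /* 0 = pgprot_writecombine (default) */
            .record_size = 0x4000,
    };

    static struct platform_device ramoops_dev = {
            .name = "ramoops",
            .dev  = { .platform_data = &ramoops_data },
    };
    /* registered from board init via platform_device_register(&ramoops_dev) */

Setting .mem_type = 1 would request pgprot_noncached instead, which the new documentation text warns only works on platforms whose atomic operations tolerate strongly ordered mappings.
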
35     diff --git a/Makefile b/Makefile
36     index e5b63fb3d0e1..7889b38766db 100644
37     --- a/Makefile
38     +++ b/Makefile
39     @@ -1,6 +1,6 @@
40     VERSION = 3
41     PATCHLEVEL = 10
42     -SUBLEVEL = 64
43     +SUBLEVEL = 65
44     EXTRAVERSION =
45     NAME = TOSSUG Baby Fish
46    
47     diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
48     index 8278960066c3..3ee701f1d38e 100644
49     --- a/arch/arm/mach-mvebu/coherency.c
50     +++ b/arch/arm/mach-mvebu/coherency.c
51     @@ -141,6 +141,29 @@ int __init coherency_init(void)
52     {
53     struct device_node *np;
54    
55     + /*
56     + * The coherency fabric is needed:
57     + * - For coherency between processors on Armada XP, so only
58     + * when SMP is enabled.
59     + * - For coherency between the processor and I/O devices, but
60     + * this coherency requires many pre-requisites (write
61     + * allocate cache policy, shareable pages, SMP bit set) that
62     + * are only meant in SMP situations.
63     + *
64     + * Note that this means that on Armada 370, there is currently
65     + * no way to use hardware I/O coherency, because even when
66     + * CONFIG_SMP is enabled, is_smp() returns false due to the
67     + * Armada 370 being a single-core processor. To lift this
68     + * limitation, we would have to find a way to make the cache
69     + * policy set to write-allocate (on all Armada SoCs), and to
70     + * set the shareable attribute in page tables (on all Armada
71     + * SoCs except the Armada 370). Unfortunately, such decisions
72     + * are taken very early in the kernel boot process, at a point
73     + * where we don't know yet on which SoC we are running.
74     + */
75     + if (!is_smp())
76     + return 0;
77     +
78     np = of_find_matching_node(NULL, of_coherency_table);
79     if (np) {
80     pr_info("Initializing Coherency fabric\n");
81     diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
82     index 2a46ca720afc..2874be9aef0a 100644
83     --- a/arch/x86/include/asm/vsyscall.h
84     +++ b/arch/x86/include/asm/vsyscall.h
85     @@ -34,7 +34,7 @@ static inline unsigned int __getcpu(void)
86     native_read_tscp(&p);
87     } else {
88     /* Load per CPU data from GDT */
89     - asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
90     + asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
91     }
92    
93     return p;
94     diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
95     index 8aac56bda7dc..7185af255fb5 100644
96     --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
97     +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
98     @@ -2657,6 +2657,17 @@ static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
99     return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
100     }
101    
102     +/*
103     + * Using uncore_pmu_event_init pmu event_init callback
104     + * as a detection point for uncore events.
105     + */
106     +static int uncore_pmu_event_init(struct perf_event *event);
107     +
108     +static bool is_uncore_event(struct perf_event *event)
109     +{
110     + return event->pmu->event_init == uncore_pmu_event_init;
111     +}
112     +
113     static int
114     uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
115     {
116     @@ -2671,13 +2682,18 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, b
117     return -EINVAL;
118    
119     n = box->n_events;
120     - box->event_list[n] = leader;
121     - n++;
122     +
123     + if (is_uncore_event(leader)) {
124     + box->event_list[n] = leader;
125     + n++;
126     + }
127     +
128     if (!dogrp)
129     return n;
130    
131     list_for_each_entry(event, &leader->sibling_list, group_entry) {
132     - if (event->state <= PERF_EVENT_STATE_OFF)
133     + if (!is_uncore_event(event) ||
134     + event->state <= PERF_EVENT_STATE_OFF)
135     continue;
136    
137     if (n >= max_count)
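
The is_uncore_event() helper added above identifies uncore events purely by comparing the event_init callback pointer, so uncore_collect_events() now skips foreign group members instead of stuffing them into the uncore box. A small userspace model of that identify-by-callback idiom (the struct and names are invented for illustration, not perf internals):

    #include <stdio.h>

    struct pmu_like {
            int (*event_init)(void);
    };

    static int uncore_init(void)   { return 0; }
    static int software_init(void) { return 0; }

    /* Detect the "kind" of an object by which callback it carries. */
    static int is_uncore(const struct pmu_like *p)
    {
            return p->event_init == uncore_init;
    }

    int main(void)
    {
            struct pmu_like a = { .event_init = uncore_init };
            struct pmu_like b = { .event_init = software_init };

            printf("a: %d  b: %d\n", is_uncore(&a), is_uncore(&b)); /* a: 1  b: 0 */
            return 0;
    }
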
138     diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
139     index 431e87544411..ab6ba35a9357 100644
140     --- a/arch/x86/vdso/vma.c
141     +++ b/arch/x86/vdso/vma.c
142     @@ -117,30 +117,45 @@ subsys_initcall(init_vdso);
143    
144     struct linux_binprm;
145    
146     -/* Put the vdso above the (randomized) stack with another randomized offset.
147     - This way there is no hole in the middle of address space.
148     - To save memory make sure it is still in the same PTE as the stack top.
149     - This doesn't give that many random bits */
150     +/*
151     + * Put the vdso above the (randomized) stack with another randomized
152     + * offset. This way there is no hole in the middle of address space.
153     + * To save memory make sure it is still in the same PTE as the stack
154     + * top. This doesn't give that many random bits.
155     + *
156     + * Note that this algorithm is imperfect: the distribution of the vdso
157     + * start address within a PMD is biased toward the end.
158     + *
159     + * Only used for the 64-bit and x32 vdsos.
160     + */
161     static unsigned long vdso_addr(unsigned long start, unsigned len)
162     {
163     unsigned long addr, end;
164     unsigned offset;
165     - end = (start + PMD_SIZE - 1) & PMD_MASK;
166     +
167     + /*
168     + * Round up the start address. It can start out unaligned as a result
169     + * of stack start randomization.
170     + */
171     + start = PAGE_ALIGN(start);
172     +
173     + /* Round the lowest possible end address up to a PMD boundary. */
174     + end = (start + len + PMD_SIZE - 1) & PMD_MASK;
175     if (end >= TASK_SIZE_MAX)
176     end = TASK_SIZE_MAX;
177     end -= len;
178     - /* This loses some more bits than a modulo, but is cheaper */
179     - offset = get_random_int() & (PTRS_PER_PTE - 1);
180     - addr = start + (offset << PAGE_SHIFT);
181     - if (addr >= end)
182     - addr = end;
183     +
184     + if (end > start) {
185     + offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
186     + addr = start + (offset << PAGE_SHIFT);
187     + } else {
188     + addr = start;
189     + }
190    
191     /*
192     - * page-align it here so that get_unmapped_area doesn't
193     - * align it wrongfully again to the next page. addr can come in 4K
194     - * unaligned here as a result of stack start randomization.
195     + * Forcibly align the final address in case we have a hardware
196     + * issue that requires alignment for performance reasons.
197     */
198     - addr = PAGE_ALIGN(addr);
199     addr = align_vdso_addr(addr);
200    
201     return addr;
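
The rewritten vdso_addr() above page-aligns the stack-derived start, rounds the lowest possible end up to a PMD boundary, and then draws a page offset uniformly from [0, (end - start) >> PAGE_SHIFT], instead of masking random bits and clamping (which biased results toward the end of the range). A rough userspace model of the arithmetic, with stand-in constants and RNG (the TASK_SIZE_MAX clamp is omitted):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT    12UL
    #define PAGE_SIZE     (1UL << PAGE_SHIFT)
    #define PMD_SIZE      (1UL << 21)                 /* 2 MiB on x86-64 */
    #define PMD_MASK      (~(PMD_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    static unsigned long model_vdso_addr(unsigned long start, unsigned long len)
    {
            unsigned long end, offset, addr;

            start = PAGE_ALIGN(start);                 /* may arrive 4K-unaligned */
            end = (start + len + PMD_SIZE - 1) & PMD_MASK;
            end -= len;

            if (end > start) {
                    offset = rand() % (((end - start) >> PAGE_SHIFT) + 1);
                    addr = start + (offset << PAGE_SHIFT);
            } else {
                    addr = start;
            }
            return addr;
    }

    int main(void)
    {
            printf("vdso-like address: %#lx\n",
                   model_vdso_addr(0x7ffd12345678UL, 2 * PAGE_SIZE));
            return 0;
    }
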
202     diff --git a/block/genhd.c b/block/genhd.c
203     index e670148c3773..7694dffe9f0e 100644
204     --- a/block/genhd.c
205     +++ b/block/genhd.c
206     @@ -1070,9 +1070,16 @@ int disk_expand_part_tbl(struct gendisk *disk, int partno)
207     struct disk_part_tbl *old_ptbl = disk->part_tbl;
208     struct disk_part_tbl *new_ptbl;
209     int len = old_ptbl ? old_ptbl->len : 0;
210     - int target = partno + 1;
211     + int i, target;
212     size_t size;
213     - int i;
214     +
215     + /*
216     + * check for int overflow, since we can get here from blkpg_ioctl()
217     + * with a user passed 'partno'.
218     + */
219     + target = partno + 1;
220     + if (target < 0)
221     + return -EINVAL;
222    
223     /* disk_max_parts() is zero during initialization, ignore if so */
224     if (disk_max_parts(disk) && target > disk_max_parts(disk))
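
The overflow guard added above matters because partno reaches disk_expand_part_tbl() from userspace via blkpg_ioctl(); with partno == INT_MAX the increment wraps negative and would otherwise sail past the later disk_max_parts() comparison. A self-contained illustration (the wrap is computed through unsigned arithmetic here to keep the demo free of undefined behaviour; the kernel builds with wrapping semantics):

    #include <limits.h>
    #include <stdio.h>

    /* Model of the new check: reject a wrapped target before using it. */
    static int expand_ok(int partno)
    {
            int target = (int)((unsigned int)partno + 1u);

            if (target < 0)
                    return 0;        /* kernel returns -EINVAL here */
            return 1;
    }

    int main(void)
    {
            printf("partno = 15      -> %s\n", expand_ok(15) ? "ok" : "rejected");
            printf("partno = INT_MAX -> %s\n", expand_ok(INT_MAX) ? "ok" : "rejected");
            return 0;
    }
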
225     diff --git a/drivers/base/bus.c b/drivers/base/bus.c
226     index d414331b480e..558d562f4901 100644
227     --- a/drivers/base/bus.c
228     +++ b/drivers/base/bus.c
229     @@ -242,13 +242,15 @@ static ssize_t store_drivers_probe(struct bus_type *bus,
230     const char *buf, size_t count)
231     {
232     struct device *dev;
233     + int err = -EINVAL;
234    
235     dev = bus_find_device_by_name(bus, NULL, buf);
236     if (!dev)
237     return -ENODEV;
238     - if (bus_rescan_devices_helper(dev, NULL) != 0)
239     - return -EINVAL;
240     - return count;
241     + if (bus_rescan_devices_helper(dev, NULL) == 0)
242     + err = count;
243     + put_device(dev);
244     + return err;
245     }
246    
247     static struct device *next_device(struct klist_iter *i)
248     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
249     index 81d0e6e1f754..2bd798a7d9aa 100644
250     --- a/drivers/hid/hid-core.c
251     +++ b/drivers/hid/hid-core.c
252     @@ -1687,6 +1687,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
253     { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
254     { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
255     { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
256     + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
257     { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
258     { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
259     { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
260     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
261     index a1e431f830e3..45c593dbf5cd 100644
262     --- a/drivers/hid/hid-ids.h
263     +++ b/drivers/hid/hid-ids.h
264     @@ -478,6 +478,7 @@
265     #define USB_DEVICE_ID_KYE_GPEN_560 0x5003
266     #define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
267     #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011
268     +#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2 0x501a
269     #define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013
270    
271     #define USB_VENDOR_ID_LABTEC 0x1020
272     diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
273     index 012880a2228c..03a6acffed5d 100644
274     --- a/drivers/hid/hid-input.c
275     +++ b/drivers/hid/hid-input.c
276     @@ -317,6 +317,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
277     USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
278     HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
279     { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
280     + USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
281     + HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
282     + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
283     USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
284     HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
285     {}
286     diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
287     index 843f2dd55200..973eed788cc6 100644
288     --- a/drivers/hid/hid-kye.c
289     +++ b/drivers/hid/hid-kye.c
290     @@ -303,6 +303,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
291     }
292     break;
293     case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
294     + case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
295     if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
296     rdesc = mousepen_i608x_rdesc_fixed;
297     *rsize = sizeof(mousepen_i608x_rdesc_fixed);
298     @@ -383,6 +384,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
299     switch (id->product) {
300     case USB_DEVICE_ID_KYE_EASYPEN_I405X:
301     case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
302     + case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
303     case USB_DEVICE_ID_KYE_EASYPEN_M610X:
304     ret = kye_tablet_enable(hdev);
305     if (ret) {
306     @@ -406,6 +408,8 @@ static const struct hid_device_id kye_devices[] = {
307     { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
308     USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
309     { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
310     + USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
311     + { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
312     USB_DEVICE_ID_KYE_EASYPEN_M610X) },
313     { }
314     };
315     diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
316     index d4f1e3bee590..264ddc4a0118 100644
317     --- a/drivers/hid/hid-roccat-pyra.c
318     +++ b/drivers/hid/hid-roccat-pyra.c
319     @@ -35,6 +35,8 @@ static struct class *pyra_class;
320     static void profile_activated(struct pyra_device *pyra,
321     unsigned int new_profile)
322     {
323     + if (new_profile >= ARRAY_SIZE(pyra->profile_settings))
324     + return;
325     pyra->actual_profile = new_profile;
326     pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi;
327     }
328     @@ -236,9 +238,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
329     if (off != 0 || count != PYRA_SIZE_SETTINGS)
330     return -EINVAL;
331    
332     - mutex_lock(&pyra->pyra_lock);
333     -
334     settings = (struct pyra_settings const *)buf;
335     + if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings))
336     + return -EINVAL;
337     +
338     + mutex_lock(&pyra->pyra_lock);
339    
340     retval = pyra_set_settings(usb_dev, settings);
341     if (retval) {
342     diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
343     index 2b1799a3b212..469daa04dadb 100644
344     --- a/drivers/hid/i2c-hid/i2c-hid.c
345     +++ b/drivers/hid/i2c-hid/i2c-hid.c
346     @@ -134,6 +134,7 @@ struct i2c_hid {
347     * descriptor. */
348     unsigned int bufsize; /* i2c buffer size */
349     char *inbuf; /* Input buffer */
350     + char *rawbuf; /* Raw Input buffer */
351     char *cmdbuf; /* Command buffer */
352     char *argsbuf; /* Command arguments buffer */
353    
354     @@ -340,7 +341,7 @@ static int i2c_hid_hwreset(struct i2c_client *client)
355     static void i2c_hid_get_input(struct i2c_hid *ihid)
356     {
357     int ret, ret_size;
358     - int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
359     + int size = ihid->bufsize;
360    
361     ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
362     if (ret != size) {
363     @@ -471,9 +472,11 @@ static void i2c_hid_find_max_report(struct hid_device *hid, unsigned int type,
364     static void i2c_hid_free_buffers(struct i2c_hid *ihid)
365     {
366     kfree(ihid->inbuf);
367     + kfree(ihid->rawbuf);
368     kfree(ihid->argsbuf);
369     kfree(ihid->cmdbuf);
370     ihid->inbuf = NULL;
371     + ihid->rawbuf = NULL;
372     ihid->cmdbuf = NULL;
373     ihid->argsbuf = NULL;
374     ihid->bufsize = 0;
375     @@ -489,10 +492,11 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
376     report_size; /* report */
377    
378     ihid->inbuf = kzalloc(report_size, GFP_KERNEL);
379     + ihid->rawbuf = kzalloc(report_size, GFP_KERNEL);
380     ihid->argsbuf = kzalloc(args_len, GFP_KERNEL);
381     ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL);
382    
383     - if (!ihid->inbuf || !ihid->argsbuf || !ihid->cmdbuf) {
384     + if (!ihid->inbuf || !ihid->rawbuf || !ihid->argsbuf || !ihid->cmdbuf) {
385     i2c_hid_free_buffers(ihid);
386     return -ENOMEM;
387     }
388     @@ -519,12 +523,12 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
389    
390     ret = i2c_hid_get_report(client,
391     report_type == HID_FEATURE_REPORT ? 0x03 : 0x01,
392     - report_number, ihid->inbuf, ask_count);
393     + report_number, ihid->rawbuf, ask_count);
394    
395     if (ret < 0)
396     return ret;
397    
398     - ret_count = ihid->inbuf[0] | (ihid->inbuf[1] << 8);
399     + ret_count = ihid->rawbuf[0] | (ihid->rawbuf[1] << 8);
400    
401     if (ret_count <= 2)
402     return 0;
403     @@ -533,7 +537,7 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
404    
405     /* The query buffer contains the size, dropping it in the reply */
406     count = min(count, ret_count - 2);
407     - memcpy(buf, ihid->inbuf + 2, count);
408     + memcpy(buf, ihid->rawbuf + 2, count);
409    
410     return count;
411     }
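
The separate rawbuf introduced above keeps raw-report requests from clobbering inbuf, which the interrupt handler is still using; the reply itself begins with a 2-byte little-endian length (which counts itself) that is stripped before copying out. A stand-alone model of that parse-and-clamp step, with the buffer layout taken from the hunk and everything else invented:

    #include <stdio.h>
    #include <string.h>

    /* Reply layout: [len_lo][len_hi][payload...]; the length includes the
     * 2-byte prefix. Returns the number of payload bytes copied to 'out'. */
    static size_t copy_report(const unsigned char *raw, unsigned char *out,
                              size_t out_cap)
    {
            size_t ret_count = raw[0] | (raw[1] << 8);

            if (ret_count <= 2)              /* header only, nothing to copy */
                    return 0;

            ret_count -= 2;                  /* drop the size prefix */
            if (out_cap > ret_count)
                    out_cap = ret_count;
            memcpy(out, raw + 2, out_cap);
            return out_cap;
    }

    int main(void)
    {
            const unsigned char raw[] = { 0x06, 0x00, 'a', 'b', 'c', 'd' };
            unsigned char buf[16];
            size_t n = copy_report(raw, buf, sizeof(buf));

            printf("copied %zu bytes: %.*s\n", n, (int)n, buf);
            return 0;
    }
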
412     diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
413     index 0db9a67278ba..5b46a79dcb1f 100644
414     --- a/drivers/hid/usbhid/hid-quirks.c
415     +++ b/drivers/hid/usbhid/hid-quirks.c
416     @@ -110,6 +110,7 @@ static const struct hid_blacklist {
417     { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
418     { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
419     { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
420     + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT },
421     { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
422     { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
423     { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
424     diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
425     index 6771e3c94801..db4e10d4c7f5 100644
426     --- a/drivers/iommu/intel-iommu.c
427     +++ b/drivers/iommu/intel-iommu.c
428     @@ -1796,7 +1796,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
429     struct dma_pte *first_pte = NULL, *pte = NULL;
430     phys_addr_t uninitialized_var(pteval);
431     int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
432     - unsigned long sg_res;
433     + unsigned long sg_res = 0;
434     unsigned int largepage_lvl = 0;
435     unsigned long lvl_pages = 0;
436    
437     @@ -1807,10 +1807,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
438    
439     prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
440    
441     - if (sg)
442     - sg_res = 0;
443     - else {
444     - sg_res = nr_pages + 1;
445     + if (!sg) {
446     + sg_res = nr_pages;
447     pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
448     }
449    
450     diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
451     index ec2c2dc1c1ca..2a1b6e037e1a 100644
452     --- a/drivers/mtd/ubi/upd.c
453     +++ b/drivers/mtd/ubi/upd.c
454     @@ -133,6 +133,10 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
455     ubi_assert(!vol->updating && !vol->changing_leb);
456     vol->updating = 1;
457    
458     + vol->upd_buf = vmalloc(ubi->leb_size);
459     + if (!vol->upd_buf)
460     + return -ENOMEM;
461     +
462     err = set_update_marker(ubi, vol);
463     if (err)
464     return err;
465     @@ -152,14 +156,12 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
466     err = clear_update_marker(ubi, vol, 0);
467     if (err)
468     return err;
469     +
470     + vfree(vol->upd_buf);
471     vol->updating = 0;
472     return 0;
473     }
474    
475     - vol->upd_buf = vmalloc(ubi->leb_size);
476     - if (!vol->upd_buf)
477     - return -ENOMEM;
478     -
479     vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
480     vol->usable_leb_size);
481     vol->upd_bytes = bytes;
482     diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
483     index c95bfb183c62..49e570abe58b 100644
484     --- a/drivers/mtd/ubi/wl.c
485     +++ b/drivers/mtd/ubi/wl.c
486     @@ -1209,7 +1209,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
487    
488     err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
489     if (err) {
490     - kmem_cache_free(ubi_wl_entry_slab, e1);
491     if (e2)
492     kmem_cache_free(ubi_wl_entry_slab, e2);
493     goto out_ro;
494     @@ -1223,10 +1222,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
495     dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
496     e2->pnum, vol_id, lnum);
497     err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
498     - if (err) {
499     - kmem_cache_free(ubi_wl_entry_slab, e2);
500     + if (err)
501     goto out_ro;
502     - }
503     }
504    
505     dbg_wl("done");
506     @@ -1262,10 +1259,9 @@ out_not_moved:
507    
508     ubi_free_vid_hdr(ubi, vid_hdr);
509     err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
510     - if (err) {
511     - kmem_cache_free(ubi_wl_entry_slab, e2);
512     + if (err)
513     goto out_ro;
514     - }
515     +
516     mutex_unlock(&ubi->move_mutex);
517     return 0;
518    
519     diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
520     index a0f647f92bf5..3a220d2f2ee1 100644
521     --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
522     +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
523     @@ -727,7 +727,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
524     dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
525     if (!dev->cmd_buf) {
526     err = -ENOMEM;
527     - goto lbl_set_intf_data;
528     + goto lbl_free_candev;
529     }
530    
531     dev->udev = usb_dev;
532     @@ -766,7 +766,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
533     err = register_candev(netdev);
534     if (err) {
535     dev_err(&intf->dev, "couldn't register CAN device: %d\n", err);
536     - goto lbl_free_cmd_buf;
537     + goto lbl_restore_intf_data;
538     }
539    
540     if (dev->prev_siblings)
541     @@ -779,14 +779,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
542     if (dev->adapter->dev_init) {
543     err = dev->adapter->dev_init(dev);
544     if (err)
545     - goto lbl_free_cmd_buf;
546     + goto lbl_unregister_candev;
547     }
548    
549     /* set bus off */
550     if (dev->adapter->dev_set_bus) {
551     err = dev->adapter->dev_set_bus(dev, 0);
552     if (err)
553     - goto lbl_free_cmd_buf;
554     + goto lbl_unregister_candev;
555     }
556    
557     /* get device number early */
558     @@ -798,11 +798,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
559    
560     return 0;
561    
562     -lbl_free_cmd_buf:
563     - kfree(dev->cmd_buf);
564     +lbl_unregister_candev:
565     + unregister_candev(netdev);
566    
567     -lbl_set_intf_data:
568     +lbl_restore_intf_data:
569     usb_set_intfdata(intf, dev->prev_siblings);
570     + kfree(dev->cmd_buf);
571     +
572     +lbl_free_candev:
573     free_candev(netdev);
574    
575     return err;
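
The relabelled error path above restores the usual kernel unwind pattern: each label undoes only the steps that had already succeeded, in reverse order (unregister the candev, restore the interface data and free cmd_buf, free the candev). A stripped-down, purely illustrative userspace version of that cascade:

    #include <stdio.h>
    #include <stdlib.h>

    static int  fake_register(void)   { return 0;  }   /* succeeds */
    static void fake_unregister(void) { }
    static int  later_step(void)      { return -1; }   /* forces the error path */

    static int setup(void)
    {
            char *candev = NULL, *cmd_buf = NULL;
            int err = -1;

            candev = malloc(64);
            if (!candev)
                    goto out;

            cmd_buf = malloc(64);
            if (!cmd_buf)
                    goto free_candev;

            if (fake_register())
                    goto free_cmd_buf;

            if (later_step())
                    goto unregister;     /* like dev_init()/dev_set_bus() failing */

            return 0;

    unregister:                          /* unwind strictly in reverse order */
            fake_unregister();
    free_cmd_buf:
            free(cmd_buf);
    free_candev:
            free(candev);
    out:
            return err;
    }

    int main(void)
    {
            printf("setup: %d\n", setup());
            return 0;
    }
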
576     diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
577     index 263dd921edc4..f7f796a2c50b 100644
578     --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
579     +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
580     @@ -333,8 +333,6 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
581     if (!(dev->state & PCAN_USB_STATE_CONNECTED))
582     return 0;
583    
584     - memset(req_addr, '\0', req_size);
585     -
586     req_type = USB_TYPE_VENDOR | USB_RECIP_OTHER;
587    
588     switch (req_id) {
589     @@ -345,6 +343,7 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
590     default:
591     p = usb_rcvctrlpipe(dev->udev, 0);
592     req_type |= USB_DIR_IN;
593     + memset(req_addr, '\0', req_size);
594     break;
595     }
596    
597     diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
598     index 65fe929529a8..3bfd0b88016e 100644
599     --- a/drivers/net/wireless/ath/ath5k/qcu.c
600     +++ b/drivers/net/wireless/ath/ath5k/qcu.c
601     @@ -225,13 +225,7 @@ ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
602     } else {
603     switch (queue_type) {
604     case AR5K_TX_QUEUE_DATA:
605     - for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
606     - ah->ah_txq[queue].tqi_type !=
607     - AR5K_TX_QUEUE_INACTIVE; queue++) {
608     -
609     - if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
610     - return -EINVAL;
611     - }
612     + queue = queue_info->tqi_subtype;
613     break;
614     case AR5K_TX_QUEUE_UAPSD:
615     queue = AR5K_TX_QUEUE_ID_UAPSD;
616     diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
617     index ae3034374bc4..d7d9e311089f 100644
618     --- a/drivers/net/wireless/ath/ath9k/hw.h
619     +++ b/drivers/net/wireless/ath/ath9k/hw.h
620     @@ -215,8 +215,8 @@
621     #define AH_WOW_BEACON_MISS BIT(3)
622    
623     enum ath_hw_txq_subtype {
624     - ATH_TXQ_AC_BE = 0,
625     - ATH_TXQ_AC_BK = 1,
626     + ATH_TXQ_AC_BK = 0,
627     + ATH_TXQ_AC_BE = 1,
628     ATH_TXQ_AC_VI = 2,
629     ATH_TXQ_AC_VO = 3,
630     };
631     diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
632     index 566109a40fb3..941b08b71308 100644
633     --- a/drivers/net/wireless/ath/ath9k/mac.c
634     +++ b/drivers/net/wireless/ath/ath9k/mac.c
635     @@ -311,14 +311,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
636     q = ATH9K_NUM_TX_QUEUES - 3;
637     break;
638     case ATH9K_TX_QUEUE_DATA:
639     - for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
640     - if (ah->txq[q].tqi_type ==
641     - ATH9K_TX_QUEUE_INACTIVE)
642     - break;
643     - if (q == ATH9K_NUM_TX_QUEUES) {
644     - ath_err(common, "No available TX queue\n");
645     - return -1;
646     - }
647     + q = qinfo->tqi_subtype;
648     break;
649     default:
650     ath_err(common, "Invalid TX queue type: %u\n", type);
651     diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
652     index ea37072e8bf2..034a4d2964d6 100644
653     --- a/drivers/pci/probe.c
654     +++ b/drivers/pci/probe.c
655     @@ -210,14 +210,17 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
656     res->flags |= IORESOURCE_SIZEALIGN;
657     if (res->flags & IORESOURCE_IO) {
658     l &= PCI_BASE_ADDRESS_IO_MASK;
659     + sz &= PCI_BASE_ADDRESS_IO_MASK;
660     mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
661     } else {
662     l &= PCI_BASE_ADDRESS_MEM_MASK;
663     + sz &= PCI_BASE_ADDRESS_MEM_MASK;
664     mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
665     }
666     } else {
667     res->flags |= (l & IORESOURCE_ROM_ENABLE);
668     l &= PCI_ROM_ADDRESS_MASK;
669     + sz &= PCI_ROM_ADDRESS_MASK;
670     mask = (u32)PCI_ROM_ADDRESS_MASK;
671     }
672    
673     diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
674     index 0c8a9fa2be6c..b8366b154fb9 100644
675     --- a/drivers/tty/serial/samsung.c
676     +++ b/drivers/tty/serial/samsung.c
677     @@ -534,11 +534,15 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
678     unsigned int old)
679     {
680     struct s3c24xx_uart_port *ourport = to_ourport(port);
681     + int timeout = 10000;
682    
683     ourport->pm_level = level;
684    
685     switch (level) {
686     case 3:
687     + while (--timeout && !s3c24xx_serial_txempty_nofifo(port))
688     + udelay(100);
689     +
690     if (!IS_ERR(ourport->baudclk))
691     clk_disable_unprepare(ourport->baudclk);
692    
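
The samsung UART change above spins, bounded by a 10000-iteration timeout, until the transmitter drains before the clocks are cut, so the tail of the output is not lost when entering low-power state. The bounded-poll idiom itself, in a userspace sketch with an invented condition and usleep() standing in for udelay():

    #include <stdio.h>
    #include <unistd.h>

    static int calls_until_empty = 50;

    /* Stand-in for s3c24xx_serial_txempty_nofifo(): becomes true after a while. */
    static int tx_empty(void)
    {
            return --calls_until_empty <= 0;
    }

    /* Poll with an upper bound so a wedged device cannot hang us forever. */
    static int wait_tx_empty(int timeout)
    {
            while (--timeout && !tx_empty())
                    usleep(100);
            return timeout != 0;         /* nonzero: drained before the limit */
    }

    int main(void)
    {
            printf("drained: %s\n", wait_tx_empty(10000) ? "yes" : "timed out");
            return 0;
    }
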
693     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
694     index 1e71f918eb9f..2800776b2e91 100644
695     --- a/drivers/usb/class/cdc-acm.c
696     +++ b/drivers/usb/class/cdc-acm.c
697     @@ -1087,10 +1087,11 @@ next_desc:
698     } else {
699     control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
700     data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = union_header->bSlaveInterface0));
701     - if (!control_interface || !data_interface) {
702     - dev_dbg(&intf->dev, "no interfaces\n");
703     - return -ENODEV;
704     - }
705     + }
706     +
707     + if (!control_interface || !data_interface) {
708     + dev_dbg(&intf->dev, "no interfaces\n");
709     + return -ENODEV;
710     }
711    
712     if (data_interface_num != call_interface_num)
713     @@ -1365,6 +1366,7 @@ alloc_fail8:
714     &dev_attr_wCountryCodes);
715     device_remove_file(&acm->control->dev,
716     &dev_attr_iCountryCodeRelDate);
717     + kfree(acm->country_codes);
718     }
719     device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
720     alloc_fail7:
721     diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
722     index 1d94316f0ea4..301b08496478 100644
723     --- a/drivers/xen/swiotlb-xen.c
724     +++ b/drivers/xen/swiotlb-xen.c
725     @@ -390,7 +390,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
726    
727     /* NOTE: We use dev_addr here, not paddr! */
728     if (is_xen_swiotlb_buffer(dev_addr)) {
729     - swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
730     + swiotlb_tbl_unmap_single(hwdev, dev_addr, size, dir);
731     return;
732     }
733    
734     diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
735     index f26f38ccd194..019fc5a68a14 100644
736     --- a/fs/btrfs/delayed-inode.c
737     +++ b/fs/btrfs/delayed-inode.c
738     @@ -1843,6 +1843,14 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
739     struct btrfs_delayed_node *delayed_node;
740     int ret = 0;
741    
742     + /*
743     + * we don't do delayed inode updates during log recovery because it
744     + * leads to enospc problems. This means we also can't do
745     + * delayed inode refs
746     + */
747     + if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
748     + return -EAGAIN;
749     +
750     delayed_node = btrfs_get_or_create_delayed_node(inode);
751     if (IS_ERR(delayed_node))
752     return PTR_ERR(delayed_node);
753     diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
754     index 387213ac2608..b44306378193 100644
755     --- a/fs/fs-writeback.c
756     +++ b/fs/fs-writeback.c
757     @@ -470,12 +470,28 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
758     * write_inode()
759     */
760     spin_lock(&inode->i_lock);
761     - /* Clear I_DIRTY_PAGES if we've written out all dirty pages */
762     - if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
763     - inode->i_state &= ~I_DIRTY_PAGES;
764     +
765     dirty = inode->i_state & I_DIRTY;
766     - inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
767     + inode->i_state &= ~I_DIRTY;
768     +
769     + /*
770     + * Paired with smp_mb() in __mark_inode_dirty(). This allows
771     + * __mark_inode_dirty() to test i_state without grabbing i_lock -
772     + * either they see the I_DIRTY bits cleared or we see the dirtied
773     + * inode.
774     + *
775     + * I_DIRTY_PAGES is always cleared together above even if @mapping
776     + * still has dirty pages. The flag is reinstated after smp_mb() if
777     + * necessary. This guarantees that either __mark_inode_dirty()
778     + * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
779     + */
780     + smp_mb();
781     +
782     + if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
783     + inode->i_state |= I_DIRTY_PAGES;
784     +
785     spin_unlock(&inode->i_lock);
786     +
787     /* Don't write the inode if only I_DIRTY_PAGES was set */
788     if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
789     int err = write_inode(inode, wbc);
790     @@ -1146,12 +1162,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
791     }
792    
793     /*
794     - * make sure that changes are seen by all cpus before we test i_state
795     - * -- mikulas
796     + * Paired with smp_mb() in __writeback_single_inode() for the
797     + * following lockless i_state test. See there for details.
798     */
799     smp_mb();
800    
801     - /* avoid the locking if we can */
802     if ((inode->i_state & flags) == flags)
803     return;
804    
805     diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
806     index 836307ae1f08..4a58afa99654 100644
807     --- a/fs/nfsd/nfs4state.c
808     +++ b/fs/nfsd/nfs4state.c
809     @@ -1200,15 +1200,14 @@ static int copy_cred(struct svc_cred *target, struct svc_cred *source)
810     return 0;
811     }
812    
813     -static long long
814     +static int
815     compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
816     {
817     - long long res;
818     -
819     - res = o1->len - o2->len;
820     - if (res)
821     - return res;
822     - return (long long)memcmp(o1->data, o2->data, o1->len);
823     + if (o1->len < o2->len)
824     + return -1;
825     + if (o1->len > o2->len)
826     + return 1;
827     + return memcmp(o1->data, o2->data, o1->len);
828     }
829    
830     static int same_name(const char *n1, const char *n2)
831     @@ -1365,7 +1364,7 @@ add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
832     static struct nfs4_client *
833     find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
834     {
835     - long long cmp;
836     + int cmp;
837     struct rb_node *node = root->rb_node;
838     struct nfs4_client *clp;
839    
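
The compare_blob() rewrite above stops deriving the sign from o1->len - o2->len: the lengths are unsigned, so whenever o1 is shorter the subtraction wraps to a huge positive value before being widened, and the comparator misorders the client rb-tree. A small demonstration of the wrap plus the safe three-way form (the blob type here is assumed, not copied from nfsd):

    #include <stdio.h>
    #include <string.h>

    struct blob {
            unsigned int len;
            const char  *data;
    };

    /* Buggy: unsigned subtraction can never come out negative. */
    static long long cmp_sub(const struct blob *a, const struct blob *b)
    {
            long long res = a->len - b->len;   /* wraps before the widening */
            if (res)
                    return res;
            return memcmp(a->data, b->data, a->len);
    }

    /* Fixed style: explicit three-way comparison. */
    static int cmp_3way(const struct blob *a, const struct blob *b)
    {
            if (a->len < b->len)
                    return -1;
            if (a->len > b->len)
                    return 1;
            return memcmp(a->data, b->data, a->len);
    }

    int main(void)
    {
            struct blob shorter = { 3, "abc" };
            struct blob longer  = { 5, "abcde" };

            printf("buggy: %lld (should be negative)\n", cmp_sub(&shorter, &longer));
            printf("fixed: %d\n", cmp_3way(&shorter, &longer));
            return 0;
    }
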
840     diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
841     index 9b45f0666cfc..acf179d7615f 100644
842     --- a/fs/nfsd/nfs4xdr.c
843     +++ b/fs/nfsd/nfs4xdr.c
844     @@ -1743,6 +1743,9 @@ static __be32 nfsd4_encode_components_esc(char sep, char *components,
845     }
846     else
847     end++;
848     + if (found_esc)
849     + end = next;
850     +
851     str = end;
852     }
853     *pp = p;
854     diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
855     index 2e1372efbb00..587d699bdc2c 100644
856     --- a/fs/nilfs2/inode.c
857     +++ b/fs/nilfs2/inode.c
858     @@ -49,6 +49,8 @@ struct nilfs_iget_args {
859     int for_gc;
860     };
861    
862     +static int nilfs_iget_test(struct inode *inode, void *opaque);
863     +
864     void nilfs_inode_add_blocks(struct inode *inode, int n)
865     {
866     struct nilfs_root *root = NILFS_I(inode)->i_root;
867     @@ -347,6 +349,17 @@ const struct address_space_operations nilfs_aops = {
868     .is_partially_uptodate = block_is_partially_uptodate,
869     };
870    
871     +static int nilfs_insert_inode_locked(struct inode *inode,
872     + struct nilfs_root *root,
873     + unsigned long ino)
874     +{
875     + struct nilfs_iget_args args = {
876     + .ino = ino, .root = root, .cno = 0, .for_gc = 0
877     + };
878     +
879     + return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
880     +}
881     +
882     struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
883     {
884     struct super_block *sb = dir->i_sb;
885     @@ -382,7 +395,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
886     if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
887     err = nilfs_bmap_read(ii->i_bmap, NULL);
888     if (err < 0)
889     - goto failed_bmap;
890     + goto failed_after_creation;
891    
892     set_bit(NILFS_I_BMAP, &ii->i_state);
893     /* No lock is needed; iget() ensures it. */
894     @@ -398,21 +411,24 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
895     spin_lock(&nilfs->ns_next_gen_lock);
896     inode->i_generation = nilfs->ns_next_generation++;
897     spin_unlock(&nilfs->ns_next_gen_lock);
898     - insert_inode_hash(inode);
899     + if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
900     + err = -EIO;
901     + goto failed_after_creation;
902     + }
903    
904     err = nilfs_init_acl(inode, dir);
905     if (unlikely(err))
906     - goto failed_acl; /* never occur. When supporting
907     + goto failed_after_creation; /* never occur. When supporting
908     nilfs_init_acl(), proper cancellation of
909     above jobs should be considered */
910    
911     return inode;
912    
913     - failed_acl:
914     - failed_bmap:
915     + failed_after_creation:
916     clear_nlink(inode);
917     + unlock_new_inode(inode);
918     iput(inode); /* raw_inode will be deleted through
919     - generic_delete_inode() */
920     + nilfs_evict_inode() */
921     goto failed;
922    
923     failed_ifile_create_inode:
924     @@ -460,8 +476,8 @@ int nilfs_read_inode_common(struct inode *inode,
925     inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
926     inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
927     inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
928     - if (inode->i_nlink == 0 && inode->i_mode == 0)
929     - return -EINVAL; /* this inode is deleted */
930     + if (inode->i_nlink == 0)
931     + return -ESTALE; /* this inode is deleted */
932    
933     inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
934     ii->i_flags = le32_to_cpu(raw_inode->i_flags);
935     diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
936     index 9de78f08989e..0f84b257932c 100644
937     --- a/fs/nilfs2/namei.c
938     +++ b/fs/nilfs2/namei.c
939     @@ -51,9 +51,11 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode)
940     int err = nilfs_add_link(dentry, inode);
941     if (!err) {
942     d_instantiate(dentry, inode);
943     + unlock_new_inode(inode);
944     return 0;
945     }
946     inode_dec_link_count(inode);
947     + unlock_new_inode(inode);
948     iput(inode);
949     return err;
950     }
951     @@ -182,6 +184,7 @@ out:
952     out_fail:
953     drop_nlink(inode);
954     nilfs_mark_inode_dirty(inode);
955     + unlock_new_inode(inode);
956     iput(inode);
957     goto out;
958     }
959     @@ -201,11 +204,15 @@ static int nilfs_link(struct dentry *old_dentry, struct inode *dir,
960     inode_inc_link_count(inode);
961     ihold(inode);
962    
963     - err = nilfs_add_nondir(dentry, inode);
964     - if (!err)
965     + err = nilfs_add_link(dentry, inode);
966     + if (!err) {
967     + d_instantiate(dentry, inode);
968     err = nilfs_transaction_commit(dir->i_sb);
969     - else
970     + } else {
971     + inode_dec_link_count(inode);
972     + iput(inode);
973     nilfs_transaction_abort(dir->i_sb);
974     + }
975    
976     return err;
977     }
978     @@ -243,6 +250,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
979    
980     nilfs_mark_inode_dirty(inode);
981     d_instantiate(dentry, inode);
982     + unlock_new_inode(inode);
983     out:
984     if (!err)
985     err = nilfs_transaction_commit(dir->i_sb);
986     @@ -255,6 +263,7 @@ out_fail:
987     drop_nlink(inode);
988     drop_nlink(inode);
989     nilfs_mark_inode_dirty(inode);
990     + unlock_new_inode(inode);
991     iput(inode);
992     out_dir:
993     drop_nlink(dir);
994     diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
995     index 20dfec72e903..f998c6009ad4 100644
996     --- a/fs/ocfs2/aops.c
997     +++ b/fs/ocfs2/aops.c
998     @@ -917,7 +917,7 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
999     }
1000     }
1001    
1002     -static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
1003     +static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
1004     {
1005     int i;
1006    
1007     @@ -938,7 +938,11 @@ static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
1008     page_cache_release(wc->w_target_page);
1009     }
1010     ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
1011     +}
1012    
1013     +static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
1014     +{
1015     + ocfs2_unlock_pages(wc);
1016     brelse(wc->w_di_bh);
1017     kfree(wc);
1018     }
1019     @@ -2060,11 +2064,19 @@ out_write_size:
1020     di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
1021     ocfs2_journal_dirty(handle, wc->w_di_bh);
1022    
1023     + /* unlock pages before dealloc since it needs acquiring j_trans_barrier
1024     + * lock, or it will cause a deadlock since journal commit threads holds
1025     + * this lock and will ask for the page lock when flushing the data.
1026     + * put it here to preserve the unlock order.
1027     + */
1028     + ocfs2_unlock_pages(wc);
1029     +
1030     ocfs2_commit_trans(osb, handle);
1031    
1032     ocfs2_run_deallocs(osb, &wc->w_dealloc);
1033    
1034     - ocfs2_free_write_ctxt(wc);
1035     + brelse(wc->w_di_bh);
1036     + kfree(wc);
1037    
1038     return copied;
1039     }
1040     diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
1041     index 1376e5a8f0d6..42d5911c7e29 100644
1042     --- a/fs/pstore/ram.c
1043     +++ b/fs/pstore/ram.c
1044     @@ -61,6 +61,11 @@ module_param(mem_size, ulong, 0400);
1045     MODULE_PARM_DESC(mem_size,
1046     "size of reserved RAM used to store oops/panic logs");
1047    
1048     +static unsigned int mem_type;
1049     +module_param(mem_type, uint, 0600);
1050     +MODULE_PARM_DESC(mem_type,
1051     + "set to 1 to try to use unbuffered memory (default 0)");
1052     +
1053     static int dump_oops = 1;
1054     module_param(dump_oops, int, 0600);
1055     MODULE_PARM_DESC(dump_oops,
1056     @@ -79,6 +84,7 @@ struct ramoops_context {
1057     struct persistent_ram_zone *fprz;
1058     phys_addr_t phys_addr;
1059     unsigned long size;
1060     + unsigned int memtype;
1061     size_t record_size;
1062     size_t console_size;
1063     size_t ftrace_size;
1064     @@ -331,7 +337,8 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
1065     size_t sz = cxt->record_size;
1066    
1067     cxt->przs[i] = persistent_ram_new(*paddr, sz, 0,
1068     - &cxt->ecc_info);
1069     + &cxt->ecc_info,
1070     + cxt->memtype);
1071     if (IS_ERR(cxt->przs[i])) {
1072     err = PTR_ERR(cxt->przs[i]);
1073     dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
1074     @@ -361,7 +368,7 @@ static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt,
1075     return -ENOMEM;
1076     }
1077    
1078     - *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info);
1079     + *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype);
1080     if (IS_ERR(*prz)) {
1081     int err = PTR_ERR(*prz);
1082    
1083     @@ -411,6 +418,7 @@ static int ramoops_probe(struct platform_device *pdev)
1084     cxt->dump_read_cnt = 0;
1085     cxt->size = pdata->mem_size;
1086     cxt->phys_addr = pdata->mem_address;
1087     + cxt->memtype = pdata->mem_type;
1088     cxt->record_size = pdata->record_size;
1089     cxt->console_size = pdata->console_size;
1090     cxt->ftrace_size = pdata->ftrace_size;
1091     @@ -541,6 +549,7 @@ static void ramoops_register_dummy(void)
1092    
1093     dummy_data->mem_size = mem_size;
1094     dummy_data->mem_address = mem_address;
1095     + dummy_data->mem_type = 0;
1096     dummy_data->record_size = record_size;
1097     dummy_data->console_size = ramoops_console_size;
1098     dummy_data->ftrace_size = ramoops_ftrace_size;
1099     diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
1100     index 59337326e288..6ff97553331b 100644
1101     --- a/fs/pstore/ram_core.c
1102     +++ b/fs/pstore/ram_core.c
1103     @@ -333,7 +333,8 @@ void persistent_ram_zap(struct persistent_ram_zone *prz)
1104     persistent_ram_update_header_ecc(prz);
1105     }
1106    
1107     -static void *persistent_ram_vmap(phys_addr_t start, size_t size)
1108     +static void *persistent_ram_vmap(phys_addr_t start, size_t size,
1109     + unsigned int memtype)
1110     {
1111     struct page **pages;
1112     phys_addr_t page_start;
1113     @@ -345,7 +346,10 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size)
1114     page_start = start - offset_in_page(start);
1115     page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
1116    
1117     - prot = pgprot_noncached(PAGE_KERNEL);
1118     + if (memtype)
1119     + prot = pgprot_noncached(PAGE_KERNEL);
1120     + else
1121     + prot = pgprot_writecombine(PAGE_KERNEL);
1122    
1123     pages = kmalloc(sizeof(struct page *) * page_count, GFP_KERNEL);
1124     if (!pages) {
1125     @@ -364,27 +368,35 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size)
1126     return vaddr;
1127     }
1128    
1129     -static void *persistent_ram_iomap(phys_addr_t start, size_t size)
1130     +static void *persistent_ram_iomap(phys_addr_t start, size_t size,
1131     + unsigned int memtype)
1132     {
1133     + void *va;
1134     +
1135     if (!request_mem_region(start, size, "persistent_ram")) {
1136     pr_err("request mem region (0x%llx@0x%llx) failed\n",
1137     (unsigned long long)size, (unsigned long long)start);
1138     return NULL;
1139     }
1140    
1141     - return ioremap(start, size);
1142     + if (memtype)
1143     + va = ioremap(start, size);
1144     + else
1145     + va = ioremap_wc(start, size);
1146     +
1147     + return va;
1148     }
1149    
1150     static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
1151     - struct persistent_ram_zone *prz)
1152     + struct persistent_ram_zone *prz, int memtype)
1153     {
1154     prz->paddr = start;
1155     prz->size = size;
1156    
1157     if (pfn_valid(start >> PAGE_SHIFT))
1158     - prz->vaddr = persistent_ram_vmap(start, size);
1159     + prz->vaddr = persistent_ram_vmap(start, size, memtype);
1160     else
1161     - prz->vaddr = persistent_ram_iomap(start, size);
1162     + prz->vaddr = persistent_ram_iomap(start, size, memtype);
1163    
1164     if (!prz->vaddr) {
1165     pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
1166     @@ -452,7 +464,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
1167     }
1168    
1169     struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
1170     - u32 sig, struct persistent_ram_ecc_info *ecc_info)
1171     + u32 sig, struct persistent_ram_ecc_info *ecc_info,
1172     + unsigned int memtype)
1173     {
1174     struct persistent_ram_zone *prz;
1175     int ret = -ENOMEM;
1176     @@ -463,7 +476,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
1177     goto err;
1178     }
1179    
1180     - ret = persistent_ram_buffer_map(start, size, prz);
1181     + ret = persistent_ram_buffer_map(start, size, prz, memtype);
1182     if (ret)
1183     goto err;
1184    
1185     diff --git a/include/linux/mm.h b/include/linux/mm.h
1186     index d4cdac903468..c4085192c2b6 100644
1187     --- a/include/linux/mm.h
1188     +++ b/include/linux/mm.h
1189     @@ -1630,7 +1630,7 @@ extern int expand_downwards(struct vm_area_struct *vma,
1190     #if VM_GROWSUP
1191     extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
1192     #else
1193     - #define expand_upwards(vma, address) do { } while (0)
1194     + #define expand_upwards(vma, address) (0)
1195     #endif
1196    
1197     /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
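
The expand_upwards() stub change above is needed because check_stack_guard_page() (in the mm/memory.c hunk further down) now writes "return expand_upwards(...)": a do { } while (0) body is a statement and cannot sit inside a return expression, while (0) can. A compilable sketch of the distinction, with invented names:

    #include <stdio.h>

    /* Statement-style stub: fine on its own line, unusable inside 'return'. */
    #define STUB_STMT(x)  do { (void)(x); } while (0)

    /* Expression-style stub: evaluates to 0, so 'return STUB_EXPR(x);' works. */
    #define STUB_EXPR(x)  ((void)(x), 0)

    static int caller(int v)
    {
            /* return STUB_STMT(v);   <- would not compile */
            return STUB_EXPR(v);
    }

    int main(void)
    {
            printf("%d\n", caller(42));
            return 0;
    }
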
1198     diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
1199     index 9974975d40db..4af3fdc85b01 100644
1200     --- a/include/linux/pstore_ram.h
1201     +++ b/include/linux/pstore_ram.h
1202     @@ -53,7 +53,8 @@ struct persistent_ram_zone {
1203     };
1204    
1205     struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
1206     - u32 sig, struct persistent_ram_ecc_info *ecc_info);
1207     + u32 sig, struct persistent_ram_ecc_info *ecc_info,
1208     + unsigned int memtype);
1209     void persistent_ram_free(struct persistent_ram_zone *prz);
1210     void persistent_ram_zap(struct persistent_ram_zone *prz);
1211    
1212     @@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
1213     struct ramoops_platform_data {
1214     unsigned long mem_size;
1215     unsigned long mem_address;
1216     + unsigned int mem_type;
1217     unsigned long record_size;
1218     unsigned long console_size;
1219     unsigned long ftrace_size;
1220     diff --git a/kernel/events/core.c b/kernel/events/core.c
1221     index 3f63ea6464ca..7bf4d519c20f 100644
1222     --- a/kernel/events/core.c
1223     +++ b/kernel/events/core.c
1224     @@ -6887,11 +6887,11 @@ SYSCALL_DEFINE5(perf_event_open,
1225    
1226     if (move_group) {
1227     synchronize_rcu();
1228     - perf_install_in_context(ctx, group_leader, event->cpu);
1229     + perf_install_in_context(ctx, group_leader, group_leader->cpu);
1230     get_ctx(ctx);
1231     list_for_each_entry(sibling, &group_leader->sibling_list,
1232     group_entry) {
1233     - perf_install_in_context(ctx, sibling, event->cpu);
1234     + perf_install_in_context(ctx, sibling, sibling->cpu);
1235     get_ctx(ctx);
1236     }
1237     }
1238     diff --git a/mm/memory.c b/mm/memory.c
1239     index 0926ccd04d7a..8b2d75f61b32 100644
1240     --- a/mm/memory.c
1241     +++ b/mm/memory.c
1242     @@ -3200,7 +3200,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
1243     if (prev && prev->vm_end == address)
1244     return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
1245    
1246     - expand_downwards(vma, address - PAGE_SIZE);
1247     + return expand_downwards(vma, address - PAGE_SIZE);
1248     }
1249     if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
1250     struct vm_area_struct *next = vma->vm_next;
1251     @@ -3209,7 +3209,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
1252     if (next && next->vm_start == address + PAGE_SIZE)
1253     return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
1254    
1255     - expand_upwards(vma, address + PAGE_SIZE);
1256     + return expand_upwards(vma, address + PAGE_SIZE);
1257     }
1258     return 0;
1259     }
1260     diff --git a/mm/mmap.c b/mm/mmap.c
1261     index 8f87b14c7968..43a7089c6a7c 100644
1262     --- a/mm/mmap.c
1263     +++ b/mm/mmap.c
1264     @@ -2056,14 +2056,17 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
1265     {
1266     struct mm_struct *mm = vma->vm_mm;
1267     struct rlimit *rlim = current->signal->rlim;
1268     - unsigned long new_start;
1269     + unsigned long new_start, actual_size;
1270    
1271     /* address space limit tests */
1272     if (!may_expand_vm(mm, grow))
1273     return -ENOMEM;
1274    
1275     /* Stack limit test */
1276     - if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
1277     + actual_size = size;
1278     + if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
1279     + actual_size -= PAGE_SIZE;
1280     + if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
1281     return -ENOMEM;
1282    
1283     /* mlock limit tests */
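
The acct_stack_growth() change above stops charging the guard page against RLIMIT_STACK: for a growable mapping the requested size includes one extra page that exists only as the guard, so the limit check now compares size - PAGE_SIZE. A toy model of the adjusted check (constants are illustrative):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Does a stack of 'size' bytes (guard page included when growable)
     * fit under an RLIMIT_STACK of 'limit' bytes? */
    static int stack_fits(unsigned long size, int growable, unsigned long limit)
    {
            unsigned long actual_size = size;

            if (size && growable)
                    actual_size -= PAGE_SIZE;   /* don't count the guard page */

            return actual_size <= limit;
    }

    int main(void)
    {
            unsigned long limit = 8UL * 1024 * 1024;    /* 8 MiB rlimit */

            /* Request sitting exactly at the limit plus the guard page:
             * the old check rejected it, the adjusted one accepts it. */
            printf("old check passes: %d\n", limit + PAGE_SIZE <= limit);
            printf("new check passes: %d\n", stack_fits(limit + PAGE_SIZE, 1, limit));
            return 0;
    }
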
1284     diff --git a/mm/vmscan.c b/mm/vmscan.c
1285     index 4e89500391dc..a2fd7e759cb7 100644
1286     --- a/mm/vmscan.c
1287     +++ b/mm/vmscan.c
1288     @@ -2631,18 +2631,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
1289     return false;
1290    
1291     /*
1292     - * There is a potential race between when kswapd checks its watermarks
1293     - * and a process gets throttled. There is also a potential race if
1294     - * processes get throttled, kswapd wakes, a large process exits therby
1295     - * balancing the zones that causes kswapd to miss a wakeup. If kswapd
1296     - * is going to sleep, no process should be sleeping on pfmemalloc_wait
1297     - * so wake them now if necessary. If necessary, processes will wake
1298     - * kswapd and get throttled again
1299     + * The throttled processes are normally woken up in balance_pgdat() as
1300     + * soon as pfmemalloc_watermark_ok() is true. But there is a potential
1301     + * race between when kswapd checks the watermarks and a process gets
1302     + * throttled. There is also a potential race if processes get
1303     + * throttled, kswapd wakes, a large process exits thereby balancing the
1304     + * zones, which causes kswapd to exit balance_pgdat() before reaching
1305     + * the wake up checks. If kswapd is going to sleep, no process should
1306     + * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
1307     + * the wake up is premature, processes will wake kswapd and get
1308     + * throttled again. The difference from wake ups in balance_pgdat() is
1309     + * that here we are under prepare_to_wait().
1310     */
1311     - if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
1312     - wake_up(&pgdat->pfmemalloc_wait);
1313     - return false;
1314     - }
1315     + if (waitqueue_active(&pgdat->pfmemalloc_wait))
1316     + wake_up_all(&pgdat->pfmemalloc_wait);
1317    
1318     return pgdat_balanced(pgdat, order, classzone_idx);
1319     }
1320     diff --git a/scripts/kernel-doc b/scripts/kernel-doc
1321     index 4305b2f2ec5e..8c0e07b7a70b 100755
1322     --- a/scripts/kernel-doc
1323     +++ b/scripts/kernel-doc
1324     @@ -1750,7 +1750,7 @@ sub dump_struct($$) {
1325     # strip kmemcheck_bitfield_{begin,end}.*;
1326     $members =~ s/kmemcheck_bitfield_.*?;//gos;
1327     # strip attributes
1328     - $members =~ s/__aligned\s*\(.+\)//gos;
1329     + $members =~ s/__aligned\s*\([^;]*\)//gos;
1330    
1331     create_parameterlist($members, ';', $file);
1332     check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested);
1333     diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
1334     index aeefec74a061..83a0f9b4452b 100644
1335     --- a/sound/pci/hda/hda_codec.c
1336     +++ b/sound/pci/hda/hda_codec.c
1337     @@ -327,8 +327,10 @@ int snd_hda_get_sub_nodes(struct hda_codec *codec, hda_nid_t nid,
1338     unsigned int parm;
1339    
1340     parm = snd_hda_param_read(codec, nid, AC_PAR_NODE_COUNT);
1341     - if (parm == -1)
1342     + if (parm == -1) {
1343     + *start_id = 0;
1344     return 0;
1345     + }
1346     *start_id = (parm >> 16) & 0x7fff;
1347     return (int)(parm & 0x7fff);
1348     }
1349     diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
1350     index 5dd4c4af9c9f..4ae5767a2cf5 100644
1351     --- a/sound/pci/hda/patch_sigmatel.c
1352     +++ b/sound/pci/hda/patch_sigmatel.c
1353     @@ -573,9 +573,9 @@ static void stac_store_hints(struct hda_codec *codec)
1354     spec->gpio_mask;
1355     }
1356     if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir))
1357     - spec->gpio_mask &= spec->gpio_mask;
1358     - if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
1359     spec->gpio_dir &= spec->gpio_mask;
1360     + if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
1361     + spec->gpio_data &= spec->gpio_mask;
1362     if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask))
1363     spec->eapd_mask &= spec->gpio_mask;
1364     if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute))
1365     diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
1366     index 76bfeb3c3e30..be8de7ce1cda 100644
1367     --- a/sound/soc/codecs/max98090.c
1368     +++ b/sound/soc/codecs/max98090.c
1369     @@ -1364,8 +1364,8 @@ static const struct snd_soc_dapm_route max98090_dapm_routes[] = {
1370     {"STENL Mux", "Sidetone Left", "DMICL"},
1371     {"STENR Mux", "Sidetone Right", "ADCR"},
1372     {"STENR Mux", "Sidetone Right", "DMICR"},
1373     - {"DACL", "NULL", "STENL Mux"},
1374     - {"DACR", "NULL", "STENL Mux"},
1375     + {"DACL", NULL, "STENL Mux"},
1376     + {"DACR", NULL, "STENL Mux"},
1377    
1378     {"AIFINL", NULL, "SHDN"},
1379     {"AIFINR", NULL, "SHDN"},
1380     diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
1381     index 4068f2491232..bb3878c9625f 100644
1382     --- a/sound/soc/codecs/sigmadsp.c
1383     +++ b/sound/soc/codecs/sigmadsp.c
1384     @@ -176,6 +176,13 @@ static int _process_sigma_firmware(struct device *dev,
1385     goto done;
1386     }
1387    
1388     + if (ssfw_head->version != 1) {
1389     + dev_err(dev,
1390     + "Failed to load firmware: Invalid version %d. Supported firmware versions: 1\n",
1391     + ssfw_head->version);
1392     + goto done;
1393     + }
1394     +
1395     crc = crc32(0, fw->data + sizeof(*ssfw_head),
1396     fw->size - sizeof(*ssfw_head));
1397     pr_debug("%s: crc=%x\n", __func__, crc);
1398     diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
1399     index 593a3ea12d4c..489a9abf112b 100644
1400     --- a/sound/soc/dwc/designware_i2s.c
1401     +++ b/sound/soc/dwc/designware_i2s.c
1402     @@ -263,6 +263,19 @@ static void dw_i2s_shutdown(struct snd_pcm_substream *substream,
1403     snd_soc_dai_set_dma_data(dai, substream, NULL);
1404     }
1405    
1406     +static int dw_i2s_prepare(struct snd_pcm_substream *substream,
1407     + struct snd_soc_dai *dai)
1408     +{
1409     + struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
1410     +
1411     + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
1412     + i2s_write_reg(dev->i2s_base, TXFFR, 1);
1413     + else
1414     + i2s_write_reg(dev->i2s_base, RXFFR, 1);
1415     +
1416     + return 0;
1417     +}
1418     +
1419     static int dw_i2s_trigger(struct snd_pcm_substream *substream,
1420     int cmd, struct snd_soc_dai *dai)
1421     {
1422     @@ -294,6 +307,7 @@ static struct snd_soc_dai_ops dw_i2s_dai_ops = {
1423     .startup = dw_i2s_startup,
1424     .shutdown = dw_i2s_shutdown,
1425     .hw_params = dw_i2s_hw_params,
1426     + .prepare = dw_i2s_prepare,
1427     .trigger = dw_i2s_trigger,
1428     };
1429    
1430     diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
1431     index 0339d464791a..4df31b0f94a3 100644
1432     --- a/sound/usb/mixer_maps.c
1433     +++ b/sound/usb/mixer_maps.c
1434     @@ -322,8 +322,11 @@ static struct usbmix_name_map hercules_usb51_map[] = {
1435     { 0 } /* terminator */
1436     };
1437    
1438     -static const struct usbmix_name_map kef_x300a_map[] = {
1439     - { 10, NULL }, /* firmware locks up (?) when we try to access this FU */
1440     +/* some (all?) SCMS USB3318 devices are affected by a firmware lock up
1441     + * when anything attempts to access FU 10 (control)
1442     + */
1443     +static const struct usbmix_name_map scms_usb3318_map[] = {
1444     + { 10, NULL },
1445     { 0 }
1446     };
1447    
1448     @@ -415,8 +418,14 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
1449     .map = ebox44_map,
1450     },
1451     {
1452     + /* KEF X300A */
1453     .id = USB_ID(0x27ac, 0x1000),
1454     - .map = kef_x300a_map,
1455     + .map = scms_usb3318_map,
1456     + },
1457     + {
1458     + /* Arcam rPAC */
1459     + .id = USB_ID(0x25c4, 0x0003),
1460     + .map = scms_usb3318_map,
1461     },
1462     { 0 } /* terminator */
1463     };
1464     diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
1465     index 14c2fe20aa62..20764e01df16 100644
1466     --- a/tools/perf/util/hist.h
1467     +++ b/tools/perf/util/hist.h
1468     @@ -34,6 +34,7 @@ struct events_stats {
1469     u32 nr_invalid_chains;
1470     u32 nr_unknown_id;
1471     u32 nr_unprocessable_samples;
1472     + u32 nr_unordered_events;
1473     };
1474    
1475     enum hist_column {
1476     diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
1477     index e392202b96bc..6f593a704ea5 100644
1478     --- a/tools/perf/util/session.c
1479     +++ b/tools/perf/util/session.c
1480     @@ -656,8 +656,7 @@ static int perf_session_queue_event(struct perf_session *s, union perf_event *ev
1481     return -ETIME;
1482    
1483     if (timestamp < s->ordered_samples.last_flush) {
1484     - printf("Warning: Timestamp below last timeslice flush\n");
1485     - return -EINVAL;
1486     + s->stats.nr_unordered_events++;
1487     }
1488    
1489     if (!list_empty(sc)) {
1490     @@ -1057,6 +1056,8 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
1491     "Do you have a KVM guest running and not using 'perf kvm'?\n",
1492     session->stats.nr_unprocessable_samples);
1493     }
1494     + if (session->stats.nr_unordered_events != 0)
1495     + ui__warning("%u out of order events recorded.\n", session->stats.nr_unordered_events);
1496     }
1497    
1498     #define session_done() (*(volatile int *)(&session_done))