Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0234-4.9.135-all-fixes.patch



Revision 3240
Fri Nov 9 13:47:01 2018 UTC by niro
File size: 54580 bytes
-linux-4.9.135
1 niro 3240 diff --git a/Makefile b/Makefile
2     index 46135e4333e6..3678e4d19ebc 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 134
9     +SUBLEVEL = 135
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/arc/Makefile b/arch/arc/Makefile
14     index 8447eed836ef..a3b456008201 100644
15     --- a/arch/arc/Makefile
16     +++ b/arch/arc/Makefile
17     @@ -8,34 +8,12 @@
18    
19     UTS_MACHINE := arc
20    
21     -ifeq ($(CROSS_COMPILE),)
22     -ifndef CONFIG_CPU_BIG_ENDIAN
23     -CROSS_COMPILE := arc-linux-
24     -else
25     -CROSS_COMPILE := arceb-linux-
26     -endif
27     -endif
28     -
29     KBUILD_DEFCONFIG := nsim_700_defconfig
30    
31     cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
32     cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
33     cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs
34    
35     -is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
36     -
37     -ifdef CONFIG_ISA_ARCOMPACT
38     -ifeq ($(is_700), 0)
39     - $(error Toolchain not configured for ARCompact builds)
40     -endif
41     -endif
42     -
43     -ifdef CONFIG_ISA_ARCV2
44     -ifeq ($(is_700), 1)
45     - $(error Toolchain not configured for ARCv2 builds)
46     -endif
47     -endif
48     -
49     ifdef CONFIG_ARC_CURR_IN_REG
50     # For a global register defintion, make sure it gets passed to every file
51     # We had a customer reported bug where some code built in kernel was NOT using
52     @@ -89,7 +67,7 @@ ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
53     # --build-id w/o "-marclinux". Default arc-elf32-ld is OK
54     ldflags-$(upto_gcc44) += -marclinux
55    
56     -LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
57     +LIBGCC = $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
58    
59     # Modules with short calls might break for calls into builtin-kernel
60     KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode
61     diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
62     index 3a2d04134da9..f59b73810630 100644
63     --- a/arch/powerpc/kernel/tm.S
64     +++ b/arch/powerpc/kernel/tm.S
65     @@ -166,13 +166,27 @@ _GLOBAL(tm_reclaim)
66     std r1, PACATMSCRATCH(r13)
67     ld r1, PACAR1(r13)
68    
69     - /* Store the PPR in r11 and reset to decent value */
70     std r11, GPR11(r1) /* Temporary stash */
71    
72     + /*
73     + * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
74     + * clobbered by an exception once we turn on MSR_RI below.
75     + */
76     + ld r11, PACATMSCRATCH(r13)
77     + std r11, GPR1(r1)
78     +
79     + /*
80     + * Store r13 away so we can free up the scratch SPR for the SLB fault
81     + * handler (needed once we start accessing the thread_struct).
82     + */
83     + GET_SCRATCH0(r11)
84     + std r11, GPR13(r1)
85     +
86     /* Reset MSR RI so we can take SLB faults again */
87     li r11, MSR_RI
88     mtmsrd r11, 1
89    
90     + /* Store the PPR in r11 and reset to decent value */
91     mfspr r11, SPRN_PPR
92     HMT_MEDIUM
93    
94     @@ -197,11 +211,11 @@ _GLOBAL(tm_reclaim)
95     SAVE_GPR(8, r7) /* user r8 */
96     SAVE_GPR(9, r7) /* user r9 */
97     SAVE_GPR(10, r7) /* user r10 */
98     - ld r3, PACATMSCRATCH(r13) /* user r1 */
99     + ld r3, GPR1(r1) /* user r1 */
100     ld r4, GPR7(r1) /* user r7 */
101     ld r5, GPR11(r1) /* user r11 */
102     ld r6, GPR12(r1) /* user r12 */
103     - GET_SCRATCH0(8) /* user r13 */
104     + ld r8, GPR13(r1) /* user r13 */
105     std r3, GPR1(r7)
106     std r4, GPR7(r7)
107     std r5, GPR11(r7)
108     diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
109     index 69b23b25ac34..08b9e942a262 100644
110     --- a/arch/s390/appldata/appldata_os.c
111     +++ b/arch/s390/appldata/appldata_os.c
112     @@ -113,21 +113,21 @@ static void appldata_get_os_data(void *data)
113     j = 0;
114     for_each_online_cpu(i) {
115     os_data->os_cpu[j].per_cpu_user =
116     - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
117     + nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
118     os_data->os_cpu[j].per_cpu_nice =
119     - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
120     + nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
121     os_data->os_cpu[j].per_cpu_system =
122     - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
123     + nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
124     os_data->os_cpu[j].per_cpu_idle =
125     - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
126     + nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
127     os_data->os_cpu[j].per_cpu_irq =
128     - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
129     + nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
130     os_data->os_cpu[j].per_cpu_softirq =
131     - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
132     + nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
133     os_data->os_cpu[j].per_cpu_iowait =
134     - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
135     + nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
136     os_data->os_cpu[j].per_cpu_steal =
137     - cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
138     + nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
139     os_data->os_cpu[j].cpu_id = i;
140     j++;
141     }
142     diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
143     index dfdb7e21ba56..93d089c9509a 100644
144     --- a/arch/x86/include/asm/pgtable_types.h
145     +++ b/arch/x86/include/asm/pgtable_types.h
146     @@ -134,7 +134,7 @@
147     */
148     #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
149     _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
150     - _PAGE_SOFT_DIRTY)
151     + _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
152     #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
153    
154     /* The ASID is the lower 12 bits of CR3 */
155     diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
156     index cf5b14e442e4..e9ab92d0c358 100644
157     --- a/drivers/clocksource/timer-ti-32k.c
158     +++ b/drivers/clocksource/timer-ti-32k.c
159     @@ -98,6 +98,9 @@ static int __init ti_32k_timer_init(struct device_node *np)
160     return -ENXIO;
161     }
162    
163     + if (!of_machine_is_compatible("ti,am43"))
164     + ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
165     +
166     ti_32k_timer.counter = ti_32k_timer.base;
167    
168     /*
169     diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
170     index af5eff6835a8..d6d91e8afa9e 100644
171     --- a/drivers/cpufreq/cpufreq.c
172     +++ b/drivers/cpufreq/cpufreq.c
173     @@ -132,7 +132,7 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
174     u64 cur_wall_time;
175     u64 busy_time;
176    
177     - cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
178     + cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
179    
180     busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
181     busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
182     @@ -143,9 +143,9 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
183    
184     idle_time = cur_wall_time - busy_time;
185     if (wall)
186     - *wall = cputime_to_usecs(cur_wall_time);
187     + *wall = div_u64(cur_wall_time, NSEC_PER_USEC);
188    
189     - return cputime_to_usecs(idle_time);
190     + return div_u64(idle_time, NSEC_PER_USEC);
191     }
192    
193     u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
194     diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
195     index 642dd0f183a8..38d1a8216084 100644
196     --- a/drivers/cpufreq/cpufreq_governor.c
197     +++ b/drivers/cpufreq/cpufreq_governor.c
198     @@ -152,7 +152,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
199     if (ignore_nice) {
200     u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
201    
202     - idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
203     + idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
204     j_cdbs->prev_cpu_nice = cur_nice;
205     }
206    
207     diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
208     index 06d3abdffd3a..b084708fd113 100644
209     --- a/drivers/cpufreq/cpufreq_stats.c
210     +++ b/drivers/cpufreq/cpufreq_stats.c
211     @@ -13,7 +13,6 @@
212     #include <linux/cpufreq.h>
213     #include <linux/module.h>
214     #include <linux/slab.h>
215     -#include <linux/cputime.h>
216    
217     static DEFINE_SPINLOCK(cpufreq_stats_lock);
218    
219     diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
220     index 9280358b8f15..59d484736b4e 100644
221     --- a/drivers/gpu/drm/arm/malidp_drv.c
222     +++ b/drivers/gpu/drm/arm/malidp_drv.c
223     @@ -378,6 +378,7 @@ static int malidp_bind(struct device *dev)
224     goto irq_init_fail;
225    
226     ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
227     + drm_crtc_vblank_reset(&malidp->crtc);
228     if (ret < 0) {
229     DRM_ERROR("failed to initialise vblank\n");
230     goto vblank_fail;
231     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
232     index 2248b330c047..70597854397f 100644
233     --- a/drivers/hid/hid-core.c
234     +++ b/drivers/hid/hid-core.c
235     @@ -1853,6 +1853,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
236     { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
237     { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
238     { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
239     + { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
240     + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI) },
241     + { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI) },
242     { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
243     { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
244     { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
245     diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
246     index 5e1fdc8d32ab..2fd0f119a67b 100644
247     --- a/drivers/hv/hv_kvp.c
248     +++ b/drivers/hv/hv_kvp.c
249     @@ -616,21 +616,22 @@ void hv_kvp_onchannelcallback(void *context)
250     NEGO_IN_PROGRESS,
251     NEGO_FINISHED} host_negotiatied = NEGO_NOT_STARTED;
252    
253     - if (host_negotiatied == NEGO_NOT_STARTED &&
254     - kvp_transaction.state < HVUTIL_READY) {
255     + if (kvp_transaction.state < HVUTIL_READY) {
256     /*
257     * If userspace daemon is not connected and host is asking
258     * us to negotiate we need to delay to not lose messages.
259     * This is important for Failover IP setting.
260     */
261     - host_negotiatied = NEGO_IN_PROGRESS;
262     - schedule_delayed_work(&kvp_host_handshake_work,
263     + if (host_negotiatied == NEGO_NOT_STARTED) {
264     + host_negotiatied = NEGO_IN_PROGRESS;
265     + schedule_delayed_work(&kvp_host_handshake_work,
266     HV_UTIL_NEGO_TIMEOUT * HZ);
267     + }
268     return;
269     }
270     if (kvp_transaction.state > HVUTIL_READY)
271     return;
272     -
273     +recheck:
274     vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
275     &requestid);
276    
277     @@ -707,6 +708,8 @@ void hv_kvp_onchannelcallback(void *context)
278     VM_PKT_DATA_INBAND, 0);
279    
280     host_negotiatied = NEGO_FINISHED;
281     +
282     + goto recheck;
283     }
284    
285     }
286     diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c
287     index f1235831283d..fdeda0b0fbd6 100644
288     --- a/drivers/input/keyboard/atakbd.c
289     +++ b/drivers/input/keyboard/atakbd.c
290     @@ -79,8 +79,7 @@ MODULE_LICENSE("GPL");
291     */
292    
293    
294     -static unsigned char atakbd_keycode[0x72] = { /* American layout */
295     - [0] = KEY_GRAVE,
296     +static unsigned char atakbd_keycode[0x73] = { /* American layout */
297     [1] = KEY_ESC,
298     [2] = KEY_1,
299     [3] = KEY_2,
300     @@ -121,9 +120,9 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
301     [38] = KEY_L,
302     [39] = KEY_SEMICOLON,
303     [40] = KEY_APOSTROPHE,
304     - [41] = KEY_BACKSLASH, /* FIXME, '#' */
305     + [41] = KEY_GRAVE,
306     [42] = KEY_LEFTSHIFT,
307     - [43] = KEY_GRAVE, /* FIXME: '~' */
308     + [43] = KEY_BACKSLASH,
309     [44] = KEY_Z,
310     [45] = KEY_X,
311     [46] = KEY_C,
312     @@ -149,45 +148,34 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
313     [66] = KEY_F8,
314     [67] = KEY_F9,
315     [68] = KEY_F10,
316     - [69] = KEY_ESC,
317     - [70] = KEY_DELETE,
318     - [71] = KEY_KP7,
319     - [72] = KEY_KP8,
320     - [73] = KEY_KP9,
321     + [71] = KEY_HOME,
322     + [72] = KEY_UP,
323     [74] = KEY_KPMINUS,
324     - [75] = KEY_KP4,
325     - [76] = KEY_KP5,
326     - [77] = KEY_KP6,
327     + [75] = KEY_LEFT,
328     + [77] = KEY_RIGHT,
329     [78] = KEY_KPPLUS,
330     - [79] = KEY_KP1,
331     - [80] = KEY_KP2,
332     - [81] = KEY_KP3,
333     - [82] = KEY_KP0,
334     - [83] = KEY_KPDOT,
335     - [90] = KEY_KPLEFTPAREN,
336     - [91] = KEY_KPRIGHTPAREN,
337     - [92] = KEY_KPASTERISK, /* FIXME */
338     - [93] = KEY_KPASTERISK,
339     - [94] = KEY_KPPLUS,
340     - [95] = KEY_HELP,
341     + [80] = KEY_DOWN,
342     + [82] = KEY_INSERT,
343     + [83] = KEY_DELETE,
344     [96] = KEY_102ND,
345     - [97] = KEY_KPASTERISK, /* FIXME */
346     - [98] = KEY_KPSLASH,
347     + [97] = KEY_UNDO,
348     + [98] = KEY_HELP,
349     [99] = KEY_KPLEFTPAREN,
350     [100] = KEY_KPRIGHTPAREN,
351     [101] = KEY_KPSLASH,
352     [102] = KEY_KPASTERISK,
353     - [103] = KEY_UP,
354     - [104] = KEY_KPASTERISK, /* FIXME */
355     - [105] = KEY_LEFT,
356     - [106] = KEY_RIGHT,
357     - [107] = KEY_KPASTERISK, /* FIXME */
358     - [108] = KEY_DOWN,
359     - [109] = KEY_KPASTERISK, /* FIXME */
360     - [110] = KEY_KPASTERISK, /* FIXME */
361     - [111] = KEY_KPASTERISK, /* FIXME */
362     - [112] = KEY_KPASTERISK, /* FIXME */
363     - [113] = KEY_KPASTERISK /* FIXME */
364     + [103] = KEY_KP7,
365     + [104] = KEY_KP8,
366     + [105] = KEY_KP9,
367     + [106] = KEY_KP4,
368     + [107] = KEY_KP5,
369     + [108] = KEY_KP6,
370     + [109] = KEY_KP1,
371     + [110] = KEY_KP2,
372     + [111] = KEY_KP3,
373     + [112] = KEY_KP0,
374     + [113] = KEY_KPDOT,
375     + [114] = KEY_KPENTER,
376     };
377    
378     static struct input_dev *atakbd_dev;
379     @@ -195,21 +183,15 @@ static struct input_dev *atakbd_dev;
380     static void atakbd_interrupt(unsigned char scancode, char down)
381     {
382    
383     - if (scancode < 0x72) { /* scancodes < 0xf2 are keys */
384     + if (scancode < 0x73) { /* scancodes < 0xf3 are keys */
385    
386     // report raw events here?
387    
388     scancode = atakbd_keycode[scancode];
389    
390     - if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */
391     - input_report_key(atakbd_dev, scancode, 1);
392     - input_report_key(atakbd_dev, scancode, 0);
393     - input_sync(atakbd_dev);
394     - } else {
395     - input_report_key(atakbd_dev, scancode, down);
396     - input_sync(atakbd_dev);
397     - }
398     - } else /* scancodes >= 0xf2 are mouse data, most likely */
399     + input_report_key(atakbd_dev, scancode, down);
400     + input_sync(atakbd_dev);
401     + } else /* scancodes >= 0xf3 are mouse data, most likely */
402     printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
403    
404     return;
405     diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
406     index 16199b36a11e..bba1b9f2f782 100644
407     --- a/drivers/iommu/amd_iommu.c
408     +++ b/drivers/iommu/amd_iommu.c
409     @@ -288,7 +288,13 @@ static u16 get_alias(struct device *dev)
410    
411     /* The callers make sure that get_device_id() does not fail here */
412     devid = get_device_id(dev);
413     +
414     + /* For ACPI HID devices, we simply return the devid as such */
415     + if (!dev_is_pci(dev))
416     + return devid;
417     +
418     ivrs_alias = amd_iommu_alias_table[devid];
419     +
420     pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
421    
422     if (ivrs_alias == pci_alias)
423     diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
424     index 25852e399ab2..c5aba26c604a 100644
425     --- a/drivers/macintosh/rack-meter.c
426     +++ b/drivers/macintosh/rack-meter.c
427     @@ -52,8 +52,8 @@ struct rackmeter_dma {
428     struct rackmeter_cpu {
429     struct delayed_work sniffer;
430     struct rackmeter *rm;
431     - cputime64_t prev_wall;
432     - cputime64_t prev_idle;
433     + u64 prev_wall;
434     + u64 prev_idle;
435     int zero;
436     } ____cacheline_aligned;
437    
438     @@ -81,7 +81,7 @@ static int rackmeter_ignore_nice;
439     /* This is copied from cpufreq_ondemand, maybe we should put it in
440     * a common header somewhere
441     */
442     -static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
443     +static inline u64 get_cpu_idle_time(unsigned int cpu)
444     {
445     u64 retval;
446    
447     @@ -217,23 +217,23 @@ static void rackmeter_do_timer(struct work_struct *work)
448     container_of(work, struct rackmeter_cpu, sniffer.work);
449     struct rackmeter *rm = rcpu->rm;
450     unsigned int cpu = smp_processor_id();
451     - cputime64_t cur_jiffies, total_idle_ticks;
452     - unsigned int total_ticks, idle_ticks;
453     + u64 cur_nsecs, total_idle_nsecs;
454     + u64 total_nsecs, idle_nsecs;
455     int i, offset, load, cumm, pause;
456    
457     - cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
458     - total_ticks = (unsigned int) (cur_jiffies - rcpu->prev_wall);
459     - rcpu->prev_wall = cur_jiffies;
460     + cur_nsecs = jiffies64_to_nsecs(get_jiffies_64());
461     + total_nsecs = cur_nsecs - rcpu->prev_wall;
462     + rcpu->prev_wall = cur_nsecs;
463    
464     - total_idle_ticks = get_cpu_idle_time(cpu);
465     - idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle);
466     - idle_ticks = min(idle_ticks, total_ticks);
467     - rcpu->prev_idle = total_idle_ticks;
468     + total_idle_nsecs = get_cpu_idle_time(cpu);
469     + idle_nsecs = total_idle_nsecs - rcpu->prev_idle;
470     + idle_nsecs = min(idle_nsecs, total_nsecs);
471     + rcpu->prev_idle = total_idle_nsecs;
472    
473     /* We do a very dumb calculation to update the LEDs for now,
474     * we'll do better once we have actual PWM implemented
475     */
476     - load = (9 * (total_ticks - idle_ticks)) / total_ticks;
477     + load = div64_u64(9 * (total_nsecs - idle_nsecs), total_nsecs);
478    
479     offset = cpu << 3;
480     cumm = 0;
481     @@ -278,7 +278,7 @@ static void rackmeter_init_cpu_sniffer(struct rackmeter *rm)
482     continue;
483     rcpu = &rm->cpu[cpu];
484     rcpu->prev_idle = get_cpu_idle_time(cpu);
485     - rcpu->prev_wall = jiffies64_to_cputime64(get_jiffies_64());
486     + rcpu->prev_wall = jiffies64_to_nsecs(get_jiffies_64());
487     schedule_delayed_work_on(cpu, &rm->cpu[cpu].sniffer,
488     msecs_to_jiffies(CPU_SAMPLING_RATE));
489     }
490     diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
491     index 8961dd732522..64be30d53847 100644
492     --- a/drivers/media/usb/dvb-usb-v2/af9035.c
493     +++ b/drivers/media/usb/dvb-usb-v2/af9035.c
494     @@ -406,8 +406,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
495     msg[0].addr == (state->af9033_i2c_addr[1] >> 1))
496     reg |= 0x100000;
497    
498     - ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
499     - msg[0].len - 3);
500     + ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
501     + &msg[0].buf[3],
502     + msg[0].len - 3)
503     + : -EOPNOTSUPP;
504     } else {
505     /* I2C write */
506     u8 buf[MAX_XFER_SIZE];
507     diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
508     index 0509996957d9..e70d6fe504b8 100644
509     --- a/drivers/net/ethernet/mellanox/mlx4/eq.c
510     +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
511     @@ -240,7 +240,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
512     struct mlx4_dev *dev = &priv->dev;
513     struct mlx4_eq *eq = &priv->eq_table.eq[vec];
514    
515     - if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
516     + if (!cpumask_available(eq->affinity_mask) ||
517     + cpumask_empty(eq->affinity_mask))
518     return;
519    
520     hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
521     diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
522     index f1109661a533..410399070639 100644
523     --- a/drivers/net/ethernet/renesas/ravb.h
524     +++ b/drivers/net/ethernet/renesas/ravb.h
525     @@ -421,6 +421,7 @@ enum EIS_BIT {
526     EIS_CULF1 = 0x00000080,
527     EIS_TFFF = 0x00000100,
528     EIS_QFS = 0x00010000,
529     + EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)),
530     };
531    
532     /* RIC0 */
533     @@ -465,6 +466,7 @@ enum RIS0_BIT {
534     RIS0_FRF15 = 0x00008000,
535     RIS0_FRF16 = 0x00010000,
536     RIS0_FRF17 = 0x00020000,
537     + RIS0_RESERVED = GENMASK(31, 18),
538     };
539    
540     /* RIC1 */
541     @@ -521,6 +523,7 @@ enum RIS2_BIT {
542     RIS2_QFF16 = 0x00010000,
543     RIS2_QFF17 = 0x00020000,
544     RIS2_RFFF = 0x80000000,
545     + RIS2_RESERVED = GENMASK(30, 18),
546     };
547    
548     /* TIC */
549     @@ -537,6 +540,7 @@ enum TIS_BIT {
550     TIS_FTF1 = 0x00000002, /* Undocumented? */
551     TIS_TFUF = 0x00000100,
552     TIS_TFWF = 0x00000200,
553     + TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
554     };
555    
556     /* ISS */
557     @@ -610,6 +614,7 @@ enum GIC_BIT {
558     enum GIS_BIT {
559     GIS_PTCF = 0x00000001, /* Undocumented? */
560     GIS_PTMF = 0x00000004,
561     + GIS_RESERVED = GENMASK(15, 10),
562     };
563    
564     /* GIE (R-Car Gen3 only) */
565     diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
566     index 307ecd500dac..71836a7f56b0 100644
567     --- a/drivers/net/ethernet/renesas/ravb_main.c
568     +++ b/drivers/net/ethernet/renesas/ravb_main.c
569     @@ -717,10 +717,11 @@ static void ravb_error_interrupt(struct net_device *ndev)
570     u32 eis, ris2;
571    
572     eis = ravb_read(ndev, EIS);
573     - ravb_write(ndev, ~EIS_QFS, EIS);
574     + ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
575     if (eis & EIS_QFS) {
576     ris2 = ravb_read(ndev, RIS2);
577     - ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
578     + ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
579     + RIS2);
580    
581     /* Receive Descriptor Empty int */
582     if (ris2 & RIS2_QFF0)
583     @@ -773,7 +774,7 @@ static bool ravb_timestamp_interrupt(struct net_device *ndev)
584     u32 tis = ravb_read(ndev, TIS);
585    
586     if (tis & TIS_TFUF) {
587     - ravb_write(ndev, ~TIS_TFUF, TIS);
588     + ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
589     ravb_get_tx_tstamp(ndev);
590     return true;
591     }
592     @@ -908,7 +909,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
593     /* Processing RX Descriptor Ring */
594     if (ris0 & mask) {
595     /* Clear RX interrupt */
596     - ravb_write(ndev, ~mask, RIS0);
597     + ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
598     if (ravb_rx(ndev, &quota, q))
599     goto out;
600     }
601     @@ -916,7 +917,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
602     if (tis & mask) {
603     spin_lock_irqsave(&priv->lock, flags);
604     /* Clear TX interrupt */
605     - ravb_write(ndev, ~mask, TIS);
606     + ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
607     ravb_tx_free(ndev, q, true);
608     netif_wake_subqueue(ndev, q);
609     mmiowb();
610     diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
611     index eede70ec37f8..9e3222fd69f9 100644
612     --- a/drivers/net/ethernet/renesas/ravb_ptp.c
613     +++ b/drivers/net/ethernet/renesas/ravb_ptp.c
614     @@ -319,7 +319,7 @@ void ravb_ptp_interrupt(struct net_device *ndev)
615     }
616     }
617    
618     - ravb_write(ndev, ~gis, GIS);
619     + ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
620     }
621    
622     void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
623     diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
624     index 04148438d7ec..5ed28111c3c3 100644
625     --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
626     +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
627     @@ -3342,11 +3342,10 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
628     vscsi->dds.window[LOCAL].liobn,
629     vscsi->dds.window[REMOTE].liobn);
630    
631     - strcpy(vscsi->eye, "VSCSI ");
632     - strncat(vscsi->eye, vdev->name, MAX_EYE);
633     + snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);
634    
635     vscsi->dds.unit_id = vdev->unit_address;
636     - strncpy(vscsi->dds.partition_name, partition_name,
637     + strscpy(vscsi->dds.partition_name, partition_name,
638     sizeof(vscsi->dds.partition_name));
639     vscsi->dds.partition_num = partition_number;
640    
641     diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
642     index f8b6bf56c48e..ab999c4444b8 100644
643     --- a/drivers/scsi/sd.c
644     +++ b/drivers/scsi/sd.c
645     @@ -1158,7 +1158,8 @@ static int sd_init_command(struct scsi_cmnd *cmd)
646     case REQ_OP_WRITE:
647     return sd_setup_read_write_cmnd(cmd);
648     default:
649     - BUG();
650     + WARN_ON_ONCE(1);
651     + return BLKPREP_KILL;
652     }
653     }
654    
655     diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
656     index e0cd1e4c8892..2f151e0aa6da 100644
657     --- a/drivers/usb/gadget/function/u_serial.c
658     +++ b/drivers/usb/gadget/function/u_serial.c
659     @@ -537,7 +537,7 @@ static void gs_rx_push(unsigned long _port)
660     }
661    
662     /* push data to (open) tty */
663     - if (req->actual) {
664     + if (req->actual && tty) {
665     char *packet = req->buf;
666     unsigned size = req->actual;
667     unsigned n;
668     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
669     index 43e27d8ec770..567a6c7af677 100644
670     --- a/fs/ext4/ext4.h
671     +++ b/fs/ext4/ext4.h
672     @@ -3038,9 +3038,6 @@ extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
673     extern int ext4_inline_data_fiemap(struct inode *inode,
674     struct fiemap_extent_info *fieinfo,
675     int *has_inline, __u64 start, __u64 len);
676     -extern int ext4_try_to_evict_inline_data(handle_t *handle,
677     - struct inode *inode,
678     - int needed);
679     extern void ext4_inline_data_truncate(struct inode *inode, int *has_inline);
680    
681     extern int ext4_convert_inline_data(struct inode *inode);
682     diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
683     index 211539a7adfc..6779a9f1de3b 100644
684     --- a/fs/ext4/inline.c
685     +++ b/fs/ext4/inline.c
686     @@ -889,11 +889,11 @@ retry_journal:
687     flags |= AOP_FLAG_NOFS;
688    
689     if (ret == -ENOSPC) {
690     + ext4_journal_stop(handle);
691     ret = ext4_da_convert_inline_data_to_extent(mapping,
692     inode,
693     flags,
694     fsdata);
695     - ext4_journal_stop(handle);
696     if (ret == -ENOSPC &&
697     ext4_should_retry_alloc(inode->i_sb, &retries))
698     goto retry_journal;
699     @@ -1865,42 +1865,6 @@ out:
700     return (error < 0 ? error : 0);
701     }
702    
703     -/*
704     - * Called during xattr set, and if we can sparse space 'needed',
705     - * just create the extent tree evict the data to the outer block.
706     - *
707     - * We use jbd2 instead of page cache to move data to the 1st block
708     - * so that the whole transaction can be committed as a whole and
709     - * the data isn't lost because of the delayed page cache write.
710     - */
711     -int ext4_try_to_evict_inline_data(handle_t *handle,
712     - struct inode *inode,
713     - int needed)
714     -{
715     - int error;
716     - struct ext4_xattr_entry *entry;
717     - struct ext4_inode *raw_inode;
718     - struct ext4_iloc iloc;
719     -
720     - error = ext4_get_inode_loc(inode, &iloc);
721     - if (error)
722     - return error;
723     -
724     - raw_inode = ext4_raw_inode(&iloc);
725     - entry = (struct ext4_xattr_entry *)((void *)raw_inode +
726     - EXT4_I(inode)->i_inline_off);
727     - if (EXT4_XATTR_LEN(entry->e_name_len) +
728     - EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)) < needed) {
729     - error = -ENOSPC;
730     - goto out;
731     - }
732     -
733     - error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
734     -out:
735     - brelse(iloc.bh);
736     - return error;
737     -}
738     -
739     void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
740     {
741     handle_t *handle;
742     diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
743     index 7d6da09e637b..38385bcb9148 100644
744     --- a/fs/ext4/xattr.c
745     +++ b/fs/ext4/xattr.c
746     @@ -1086,22 +1086,8 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
747     if (EXT4_I(inode)->i_extra_isize == 0)
748     return -ENOSPC;
749     error = ext4_xattr_set_entry(i, s, inode);
750     - if (error) {
751     - if (error == -ENOSPC &&
752     - ext4_has_inline_data(inode)) {
753     - error = ext4_try_to_evict_inline_data(handle, inode,
754     - EXT4_XATTR_LEN(strlen(i->name) +
755     - EXT4_XATTR_SIZE(i->value_len)));
756     - if (error)
757     - return error;
758     - error = ext4_xattr_ibody_find(inode, i, is);
759     - if (error)
760     - return error;
761     - error = ext4_xattr_set_entry(i, s, inode);
762     - }
763     - if (error)
764     - return error;
765     - }
766     + if (error)
767     + return error;
768     header = IHDR(inode, ext4_raw_inode(&is->iloc));
769     if (!IS_LAST_ENTRY(s->first)) {
770     header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
771     diff --git a/fs/proc/stat.c b/fs/proc/stat.c
772     index d700c42b3572..44475a44cbf1 100644
773     --- a/fs/proc/stat.c
774     +++ b/fs/proc/stat.c
775     @@ -21,23 +21,23 @@
776    
777     #ifdef arch_idle_time
778    
779     -static cputime64_t get_idle_time(int cpu)
780     +static u64 get_idle_time(int cpu)
781     {
782     - cputime64_t idle;
783     + u64 idle;
784    
785     idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
786     if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
787     - idle += arch_idle_time(cpu);
788     + idle += cputime_to_nsecs(arch_idle_time(cpu));
789     return idle;
790     }
791    
792     -static cputime64_t get_iowait_time(int cpu)
793     +static u64 get_iowait_time(int cpu)
794     {
795     - cputime64_t iowait;
796     + u64 iowait;
797    
798     iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
799     if (cpu_online(cpu) && nr_iowait_cpu(cpu))
800     - iowait += arch_idle_time(cpu);
801     + iowait += cputime_to_nsecs(arch_idle_time(cpu));
802     return iowait;
803     }
804    
805     @@ -45,32 +45,32 @@ static cputime64_t get_iowait_time(int cpu)
806    
807     static u64 get_idle_time(int cpu)
808     {
809     - u64 idle, idle_time = -1ULL;
810     + u64 idle, idle_usecs = -1ULL;
811    
812     if (cpu_online(cpu))
813     - idle_time = get_cpu_idle_time_us(cpu, NULL);
814     + idle_usecs = get_cpu_idle_time_us(cpu, NULL);
815    
816     - if (idle_time == -1ULL)
817     + if (idle_usecs == -1ULL)
818     /* !NO_HZ or cpu offline so we can rely on cpustat.idle */
819     idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
820     else
821     - idle = usecs_to_cputime64(idle_time);
822     + idle = idle_usecs * NSEC_PER_USEC;
823    
824     return idle;
825     }
826    
827     static u64 get_iowait_time(int cpu)
828     {
829     - u64 iowait, iowait_time = -1ULL;
830     + u64 iowait, iowait_usecs = -1ULL;
831    
832     if (cpu_online(cpu))
833     - iowait_time = get_cpu_iowait_time_us(cpu, NULL);
834     + iowait_usecs = get_cpu_iowait_time_us(cpu, NULL);
835    
836     - if (iowait_time == -1ULL)
837     + if (iowait_usecs == -1ULL)
838     /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
839     iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
840     else
841     - iowait = usecs_to_cputime64(iowait_time);
842     + iowait = iowait_usecs * NSEC_PER_USEC;
843    
844     return iowait;
845     }
846     @@ -115,16 +115,16 @@ static int show_stat(struct seq_file *p, void *v)
847     }
848     sum += arch_irq_stat();
849    
850     - seq_put_decimal_ull(p, "cpu ", cputime64_to_clock_t(user));
851     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice));
852     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system));
853     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle));
854     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait));
855     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq));
856     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq));
857     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal));
858     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest));
859     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice));
860     + seq_put_decimal_ull(p, "cpu ", nsec_to_clock_t(user));
861     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
862     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
863     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
864     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
865     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
866     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
867     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
868     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
869     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
870     seq_putc(p, '\n');
871    
872     for_each_online_cpu(i) {
873     @@ -140,16 +140,16 @@ static int show_stat(struct seq_file *p, void *v)
874     guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
875     guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
876     seq_printf(p, "cpu%d", i);
877     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(user));
878     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice));
879     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system));
880     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle));
881     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait));
882     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq));
883     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq));
884     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal));
885     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest));
886     - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice));
887     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(user));
888     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
889     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
890     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
891     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
892     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
893     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
894     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
895     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
896     + seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
897     seq_putc(p, '\n');
898     }
899     seq_put_decimal_ull(p, "intr ", (unsigned long long)sum);
900     diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c
901     index 33de567c25af..7981c4ffe787 100644
902     --- a/fs/proc/uptime.c
903     +++ b/fs/proc/uptime.c
904     @@ -5,23 +5,20 @@
905     #include <linux/seq_file.h>
906     #include <linux/time.h>
907     #include <linux/kernel_stat.h>
908     -#include <linux/cputime.h>
909    
910     static int uptime_proc_show(struct seq_file *m, void *v)
911     {
912     struct timespec uptime;
913     struct timespec idle;
914     - u64 idletime;
915     u64 nsec;
916     u32 rem;
917     int i;
918    
919     - idletime = 0;
920     + nsec = 0;
921     for_each_possible_cpu(i)
922     - idletime += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
923     + nsec += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
924    
925     get_monotonic_boottime(&uptime);
926     - nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
927     idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
928     idle.tv_nsec = rem;
929     seq_printf(m, "%lu.%02lu %lu.%02lu\n",
930     diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
931     index e35e6de633b9..9b9f65d99873 100644
932     --- a/include/linux/huge_mm.h
933     +++ b/include/linux/huge_mm.h
934     @@ -22,7 +22,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
935     unsigned char *vec);
936     extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
937     unsigned long new_addr, unsigned long old_end,
938     - pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
939     + pmd_t *old_pmd, pmd_t *new_pmd);
940     extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
941     unsigned long addr, pgprot_t newprot,
942     int prot_numa);
943     diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
944     index bc0b309c3f19..4c882791c10c 100644
945     --- a/kernel/sched/cpuacct.c
946     +++ b/kernel/sched/cpuacct.c
947     @@ -297,7 +297,7 @@ static int cpuacct_stats_show(struct seq_file *sf, void *v)
948     for (stat = 0; stat < CPUACCT_STAT_NSTATS; stat++) {
949     seq_printf(sf, "%s %lld\n",
950     cpuacct_stat_desc[stat],
951     - cputime64_to_clock_t(val[stat]));
952     + nsec_to_clock_t(val[stat]));
953     }
954    
955     return 0;
956     diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
957     index 5ebee3164e64..448d6426fa5f 100644
958     --- a/kernel/sched/cputime.c
959     +++ b/kernel/sched/cputime.c
960     @@ -37,6 +37,18 @@ void disable_sched_clock_irqtime(void)
961     sched_clock_irqtime = 0;
962     }
963    
964     +static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
965     + enum cpu_usage_stat idx)
966     +{
967     + u64 *cpustat = kcpustat_this_cpu->cpustat;
968     +
969     + u64_stats_update_begin(&irqtime->sync);
970     + cpustat[idx] += delta;
971     + irqtime->total += delta;
972     + irqtime->tick_delta += delta;
973     + u64_stats_update_end(&irqtime->sync);
974     +}
975     +
976     /*
977     * Called before incrementing preempt_count on {soft,}irq_enter
978     * and before decrementing preempt_count on {soft,}irq_exit.
979     @@ -54,7 +66,6 @@ void irqtime_account_irq(struct task_struct *curr)
980     delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
981     irqtime->irq_start_time += delta;
982    
983     - u64_stats_update_begin(&irqtime->sync);
984     /*
985     * We do not account for softirq time from ksoftirqd here.
986     * We want to continue accounting softirq time to ksoftirqd thread
987     @@ -62,48 +73,29 @@ void irqtime_account_irq(struct task_struct *curr)
988     * that do not consume any time, but still wants to run.
989     */
990     if (hardirq_count())
991     - irqtime->hardirq_time += delta;
992     + irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
993     else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
994     - irqtime->softirq_time += delta;
995     -
996     - u64_stats_update_end(&irqtime->sync);
997     + irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
998     }
999     EXPORT_SYMBOL_GPL(irqtime_account_irq);
1000    
1001     -static cputime_t irqtime_account_update(u64 irqtime, int idx, cputime_t maxtime)
1002     +static cputime_t irqtime_tick_accounted(cputime_t maxtime)
1003     {
1004     - u64 *cpustat = kcpustat_this_cpu->cpustat;
1005     - cputime_t irq_cputime;
1006     -
1007     - irq_cputime = nsecs_to_cputime64(irqtime) - cpustat[idx];
1008     - irq_cputime = min(irq_cputime, maxtime);
1009     - cpustat[idx] += irq_cputime;
1010     -
1011     - return irq_cputime;
1012     -}
1013     + struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
1014     + cputime_t delta;
1015    
1016     -static cputime_t irqtime_account_hi_update(cputime_t maxtime)
1017     -{
1018     - return irqtime_account_update(__this_cpu_read(cpu_irqtime.hardirq_time),
1019     - CPUTIME_IRQ, maxtime);
1020     -}
1021     + delta = nsecs_to_cputime(irqtime->tick_delta);
1022     + delta = min(delta, maxtime);
1023     + irqtime->tick_delta -= cputime_to_nsecs(delta);
1024    
1025     -static cputime_t irqtime_account_si_update(cputime_t maxtime)
1026     -{
1027     - return irqtime_account_update(__this_cpu_read(cpu_irqtime.softirq_time),
1028     - CPUTIME_SOFTIRQ, maxtime);
1029     + return delta;
1030     }
1031    
1032     #else /* CONFIG_IRQ_TIME_ACCOUNTING */
1033    
1034     #define sched_clock_irqtime (0)
1035    
1036     -static cputime_t irqtime_account_hi_update(cputime_t dummy)
1037     -{
1038     - return 0;
1039     -}
1040     -
1041     -static cputime_t irqtime_account_si_update(cputime_t dummy)
1042     +static cputime_t irqtime_tick_accounted(cputime_t dummy)
1043     {
1044     return 0;
1045     }
1046     @@ -143,7 +135,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
1047     index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
1048    
1049     /* Add user time to cpustat. */
1050     - task_group_account_field(p, index, (__force u64) cputime);
1051     + task_group_account_field(p, index, cputime_to_nsecs(cputime));
1052    
1053     /* Account for user time used */
1054     acct_account_cputime(p);
1055     @@ -168,11 +160,11 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
1056    
1057     /* Add guest time to cpustat. */
1058     if (task_nice(p) > 0) {
1059     - cpustat[CPUTIME_NICE] += (__force u64) cputime;
1060     - cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
1061     + cpustat[CPUTIME_NICE] += cputime_to_nsecs(cputime);
1062     + cpustat[CPUTIME_GUEST_NICE] += cputime_to_nsecs(cputime);
1063     } else {
1064     - cpustat[CPUTIME_USER] += (__force u64) cputime;
1065     - cpustat[CPUTIME_GUEST] += (__force u64) cputime;
1066     + cpustat[CPUTIME_USER] += cputime_to_nsecs(cputime);
1067     + cpustat[CPUTIME_GUEST] += cputime_to_nsecs(cputime);
1068     }
1069     }
1070    
1071     @@ -193,7 +185,7 @@ void __account_system_time(struct task_struct *p, cputime_t cputime,
1072     account_group_system_time(p, cputime);
1073    
1074     /* Add system time to cpustat. */
1075     - task_group_account_field(p, index, (__force u64) cputime);
1076     + task_group_account_field(p, index, cputime_to_nsecs(cputime));
1077    
1078     /* Account for system time used */
1079     acct_account_cputime(p);
1080     @@ -234,7 +226,7 @@ void account_steal_time(cputime_t cputime)
1081     {
1082     u64 *cpustat = kcpustat_this_cpu->cpustat;
1083    
1084     - cpustat[CPUTIME_STEAL] += (__force u64) cputime;
1085     + cpustat[CPUTIME_STEAL] += cputime_to_nsecs(cputime);
1086     }
1087    
1088     /*
1089     @@ -247,9 +239,9 @@ void account_idle_time(cputime_t cputime)
1090     struct rq *rq = this_rq();
1091    
1092     if (atomic_read(&rq->nr_iowait) > 0)
1093     - cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
1094     + cpustat[CPUTIME_IOWAIT] += cputime_to_nsecs(cputime);
1095     else
1096     - cpustat[CPUTIME_IDLE] += (__force u64) cputime;
1097     + cpustat[CPUTIME_IDLE] += cputime_to_nsecs(cputime);
1098     }
1099    
1100     /*
1101     @@ -290,10 +282,7 @@ static inline cputime_t account_other_time(cputime_t max)
1102     accounted = steal_account_process_time(max);
1103    
1104     if (accounted < max)
1105     - accounted += irqtime_account_hi_update(max - accounted);
1106     -
1107     - if (accounted < max)
1108     - accounted += irqtime_account_si_update(max - accounted);
1109     + accounted += irqtime_tick_accounted(max - accounted);
1110    
1111     return accounted;
1112     }
1113     diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
1114     index f564a1d2c9d5..923cc35e8490 100644
1115     --- a/kernel/sched/sched.h
1116     +++ b/kernel/sched/sched.h
1117     @@ -4,6 +4,7 @@
1118     #include <linux/sched/rt.h>
1119     #include <linux/u64_stats_sync.h>
1120     #include <linux/sched/deadline.h>
1121     +#include <linux/kernel_stat.h>
1122     #include <linux/binfmts.h>
1123     #include <linux/mutex.h>
1124     #include <linux/spinlock.h>
1125     @@ -1742,14 +1743,19 @@ static inline void nohz_balance_exit_idle(unsigned int cpu) { }
1126    
1127     #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1128     struct irqtime {
1129     - u64 hardirq_time;
1130     - u64 softirq_time;
1131     + u64 total;
1132     + u64 tick_delta;
1133     u64 irq_start_time;
1134     struct u64_stats_sync sync;
1135     };
1136    
1137     DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
1138    
1139     +/*
1140     + * Returns the irqtime minus the softirq time computed by ksoftirqd.
1141     + * Otherwise ksoftirqd's sum_exec_runtime is substracted its own runtime
1142     + * and never move forward.
1143     + */
1144     static inline u64 irq_time_read(int cpu)
1145     {
1146     struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
1147     @@ -1758,7 +1764,7 @@ static inline u64 irq_time_read(int cpu)
1148    
1149     do {
1150     seq = __u64_stats_fetch_begin(&irqtime->sync);
1151     - total = irqtime->softirq_time + irqtime->hardirq_time;
1152     + total = irqtime->total;
1153     } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
1154    
1155     return total;
1156     diff --git a/mm/huge_memory.c b/mm/huge_memory.c
1157     index e4c6c3edaf6a..9f7bba700e4e 100644
1158     --- a/mm/huge_memory.c
1159     +++ b/mm/huge_memory.c
1160     @@ -1445,7 +1445,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1161    
1162     bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1163     unsigned long new_addr, unsigned long old_end,
1164     - pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
1165     + pmd_t *old_pmd, pmd_t *new_pmd)
1166     {
1167     spinlock_t *old_ptl, *new_ptl;
1168     pmd_t pmd;
1169     @@ -1476,7 +1476,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1170     if (new_ptl != old_ptl)
1171     spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1172     pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1173     - if (pmd_present(pmd) && pmd_dirty(pmd))
1174     + if (pmd_present(pmd))
1175     force_flush = true;
1176     VM_BUG_ON(!pmd_none(*new_pmd));
1177    
1178     @@ -1487,12 +1487,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1179     pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
1180     }
1181     set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1182     - if (new_ptl != old_ptl)
1183     - spin_unlock(new_ptl);
1184     if (force_flush)
1185     flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
1186     - else
1187     - *need_flush = true;
1188     + if (new_ptl != old_ptl)
1189     + spin_unlock(new_ptl);
1190     spin_unlock(old_ptl);
1191     return true;
1192     }
1193     diff --git a/mm/mremap.c b/mm/mremap.c
1194     index 15976716dd40..9e6035969d7b 100644
1195     --- a/mm/mremap.c
1196     +++ b/mm/mremap.c
1197     @@ -104,7 +104,7 @@ static pte_t move_soft_dirty_pte(pte_t pte)
1198     static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
1199     unsigned long old_addr, unsigned long old_end,
1200     struct vm_area_struct *new_vma, pmd_t *new_pmd,
1201     - unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
1202     + unsigned long new_addr, bool need_rmap_locks)
1203     {
1204     struct mm_struct *mm = vma->vm_mm;
1205     pte_t *old_pte, *new_pte, pte;
1206     @@ -152,15 +152,17 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
1207    
1208     pte = ptep_get_and_clear(mm, old_addr, old_pte);
1209     /*
1210     - * If we are remapping a dirty PTE, make sure
1211     + * If we are remapping a valid PTE, make sure
1212     * to flush TLB before we drop the PTL for the
1213     - * old PTE or we may race with page_mkclean().
1214     + * PTE.
1215     *
1216     - * This check has to be done after we removed the
1217     - * old PTE from page tables or another thread may
1218     - * dirty it after the check and before the removal.
1219     + * NOTE! Both old and new PTL matter: the old one
1220     + * for racing with page_mkclean(), the new one to
1221     + * make sure the physical page stays valid until
1222     + * the TLB entry for the old mapping has been
1223     + * flushed.
1224     */
1225     - if (pte_present(pte) && pte_dirty(pte))
1226     + if (pte_present(pte))
1227     force_flush = true;
1228     pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
1229     pte = move_soft_dirty_pte(pte);
1230     @@ -168,13 +170,11 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
1231     }
1232    
1233     arch_leave_lazy_mmu_mode();
1234     + if (force_flush)
1235     + flush_tlb_range(vma, old_end - len, old_end);
1236     if (new_ptl != old_ptl)
1237     spin_unlock(new_ptl);
1238     pte_unmap(new_pte - 1);
1239     - if (force_flush)
1240     - flush_tlb_range(vma, old_end - len, old_end);
1241     - else
1242     - *need_flush = true;
1243     pte_unmap_unlock(old_pte - 1, old_ptl);
1244     if (need_rmap_locks)
1245     drop_rmap_locks(vma);
1246     @@ -189,7 +189,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
1247     {
1248     unsigned long extent, next, old_end;
1249     pmd_t *old_pmd, *new_pmd;
1250     - bool need_flush = false;
1251     unsigned long mmun_start; /* For mmu_notifiers */
1252     unsigned long mmun_end; /* For mmu_notifiers */
1253    
1254     @@ -220,8 +219,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
1255     if (need_rmap_locks)
1256     take_rmap_locks(vma);
1257     moved = move_huge_pmd(vma, old_addr, new_addr,
1258     - old_end, old_pmd, new_pmd,
1259     - &need_flush);
1260     + old_end, old_pmd, new_pmd);
1261     if (need_rmap_locks)
1262     drop_rmap_locks(vma);
1263     if (moved)
1264     @@ -239,10 +237,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
1265     if (extent > LATENCY_LIMIT)
1266     extent = LATENCY_LIMIT;
1267     move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
1268     - new_pmd, new_addr, need_rmap_locks, &need_flush);
1269     + new_pmd, new_addr, need_rmap_locks);
1270     }
1271     - if (need_flush)
1272     - flush_tlb_range(vma, old_end-len, old_addr);
1273    
1274     mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
1275    
1276     diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
1277     index ee08540ce503..5d79004de25c 100644
1278     --- a/net/batman-adv/bat_v_elp.c
1279     +++ b/net/batman-adv/bat_v_elp.c
1280     @@ -243,6 +243,7 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
1281     struct batadv_priv *bat_priv;
1282     struct sk_buff *skb;
1283     u32 elp_interval;
1284     + bool ret;
1285    
1286     bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
1287     hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
1288     @@ -304,8 +305,11 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
1289     * may sleep and that is not allowed in an rcu protected
1290     * context. Therefore schedule a task for that.
1291     */
1292     - queue_work(batadv_event_workqueue,
1293     - &hardif_neigh->bat_v.metric_work);
1294     + ret = queue_work(batadv_event_workqueue,
1295     + &hardif_neigh->bat_v.metric_work);
1296     +
1297     + if (!ret)
1298     + batadv_hardif_neigh_put(hardif_neigh);
1299     }
1300     rcu_read_unlock();
1301    
1302     diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
1303     index 582e27698bf0..8b6f654bc85d 100644
1304     --- a/net/batman-adv/bridge_loop_avoidance.c
1305     +++ b/net/batman-adv/bridge_loop_avoidance.c
1306     @@ -1767,6 +1767,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
1307     {
1308     struct batadv_bla_backbone_gw *backbone_gw;
1309     struct ethhdr *ethhdr;
1310     + bool ret;
1311    
1312     ethhdr = eth_hdr(skb);
1313    
1314     @@ -1790,8 +1791,13 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
1315     if (unlikely(!backbone_gw))
1316     return true;
1317    
1318     - queue_work(batadv_event_workqueue, &backbone_gw->report_work);
1319     - /* backbone_gw is unreferenced in the report work function function */
1320     + ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
1321     +
1322     + /* backbone_gw is unreferenced in the report work function function
1323     + * if queue_work() call was successful
1324     + */
1325     + if (!ret)
1326     + batadv_backbone_gw_put(backbone_gw);
1327    
1328     return true;
1329     }
1330     diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
1331     index e3baf697a35c..a7b5cf08d363 100644
1332     --- a/net/batman-adv/network-coding.c
1333     +++ b/net/batman-adv/network-coding.c
1334     @@ -845,16 +845,27 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
1335     spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
1336     struct list_head *list;
1337    
1338     + /* Select ingoing or outgoing coding node */
1339     + if (in_coding) {
1340     + lock = &orig_neigh_node->in_coding_list_lock;
1341     + list = &orig_neigh_node->in_coding_list;
1342     + } else {
1343     + lock = &orig_neigh_node->out_coding_list_lock;
1344     + list = &orig_neigh_node->out_coding_list;
1345     + }
1346     +
1347     + spin_lock_bh(lock);
1348     +
1349     /* Check if nc_node is already added */
1350     nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
1351    
1352     /* Node found */
1353     if (nc_node)
1354     - return nc_node;
1355     + goto unlock;
1356    
1357     nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
1358     if (!nc_node)
1359     - return NULL;
1360     + goto unlock;
1361    
1362     /* Initialize nc_node */
1363     INIT_LIST_HEAD(&nc_node->list);
1364     @@ -863,22 +874,14 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
1365     kref_get(&orig_neigh_node->refcount);
1366     nc_node->orig_node = orig_neigh_node;
1367    
1368     - /* Select ingoing or outgoing coding node */
1369     - if (in_coding) {
1370     - lock = &orig_neigh_node->in_coding_list_lock;
1371     - list = &orig_neigh_node->in_coding_list;
1372     - } else {
1373     - lock = &orig_neigh_node->out_coding_list_lock;
1374     - list = &orig_neigh_node->out_coding_list;
1375     - }
1376     -
1377     batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
1378     nc_node->addr, nc_node->orig_node->orig);
1379    
1380     /* Add nc_node to orig_node */
1381     - spin_lock_bh(lock);
1382     kref_get(&nc_node->refcount);
1383     list_add_tail_rcu(&nc_node->list, list);
1384     +
1385     +unlock:
1386     spin_unlock_bh(lock);
1387    
1388     return nc_node;
1389     diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
1390     index 84c1b388d9ed..05bc176decf0 100644
1391     --- a/net/batman-adv/soft-interface.c
1392     +++ b/net/batman-adv/soft-interface.c
1393     @@ -565,15 +565,20 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
1394     struct batadv_softif_vlan *vlan;
1395     int err;
1396    
1397     + spin_lock_bh(&bat_priv->softif_vlan_list_lock);
1398     +
1399     vlan = batadv_softif_vlan_get(bat_priv, vid);
1400     if (vlan) {
1401     batadv_softif_vlan_put(vlan);
1402     + spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
1403     return -EEXIST;
1404     }
1405    
1406     vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
1407     - if (!vlan)
1408     + if (!vlan) {
1409     + spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
1410     return -ENOMEM;
1411     + }
1412    
1413     vlan->bat_priv = bat_priv;
1414     vlan->vid = vid;
1415     @@ -581,17 +586,23 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
1416    
1417     atomic_set(&vlan->ap_isolation, 0);
1418    
1419     + kref_get(&vlan->refcount);
1420     + hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
1421     + spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
1422     +
1423     + /* batadv_sysfs_add_vlan cannot be in the spinlock section due to the
1424     + * sleeping behavior of the sysfs functions and the fs_reclaim lock
1425     + */
1426     err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
1427     if (err) {
1428     - kfree(vlan);
1429     + /* ref for the function */
1430     + batadv_softif_vlan_put(vlan);
1431     +
1432     + /* ref for the list */
1433     + batadv_softif_vlan_put(vlan);
1434     return err;
1435     }
1436    
1437     - spin_lock_bh(&bat_priv->softif_vlan_list_lock);
1438     - kref_get(&vlan->refcount);
1439     - hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
1440     - spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
1441     -
1442     /* add a new TT local entry. This one will be marked with the NOPURGE
1443     * flag
1444     */
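
The soft-interface hunk applies the same serialization to VLAN creation and additionally moves batadv_sysfs_add_vlan(), which can sleep, out of the spinlocked section, replacing the old kfree() error path with reference puts. The sketch below mirrors that ordering under stated assumptions: vlan, register_attrs and create_vlan are invented names, a pthread mutex stands in for the spinlock, and the failure rollback also unlinks the entry so the sketch stays self-consistent.

        #include <errno.h>
        #include <pthread.h>
        #include <stdbool.h>
        #include <stdio.h>
        #include <stdlib.h>

        struct vlan {
                unsigned short vid;
                int refcount;
                struct vlan *next;
        };

        static struct vlan *vlan_list;
        static pthread_mutex_t vlan_list_lock = PTHREAD_MUTEX_INITIALIZER;

        static void vlan_put(struct vlan *v)
        {
                if (--v->refcount == 0)
                        free(v);
        }

        static struct vlan *vlan_get(unsigned short vid)
        {
                for (struct vlan *v = vlan_list; v; v = v->next)
                        if (v->vid == vid) {
                                v->refcount++;
                                return v;
                        }
                return NULL;
        }

        static void vlan_unlink(struct vlan *v)
        {
                pthread_mutex_lock(&vlan_list_lock);
                for (struct vlan **p = &vlan_list; *p; p = &(*p)->next)
                        if (*p == v) {
                                *p = v->next;
                                break;
                        }
                pthread_mutex_unlock(&vlan_list_lock);
                vlan_put(v);                    /* the list reference */
        }

        /* Stand-in for batadv_sysfs_add_vlan(): may sleep, may fail. */
        static int register_attrs(struct vlan *v, bool fail)
        {
                (void)v;
                return fail ? -ENOMEM : 0;
        }

        static int create_vlan(unsigned short vid, bool sysfs_fails)
        {
                struct vlan *v;
                int err;

                pthread_mutex_lock(&vlan_list_lock);

                v = vlan_get(vid);
                if (v) {
                        vlan_put(v);
                        pthread_mutex_unlock(&vlan_list_lock);
                        return -EEXIST;
                }

                v = calloc(1, sizeof(*v));
                if (!v) {
                        pthread_mutex_unlock(&vlan_list_lock);
                        return -ENOMEM;
                }
                v->vid = vid;
                v->refcount = 1;                /* this function's reference */

                v->refcount++;                  /* list reference */
                v->next = vlan_list;
                vlan_list = v;
                pthread_mutex_unlock(&vlan_list_lock);

                /* the sleepable registration runs only after the lock is dropped */
                err = register_attrs(v, sysfs_fails);
                if (err) {
                        vlan_unlink(v);         /* drops the list reference */
                        vlan_put(v);            /* drops this function's reference */
                        return err;
                }

                vlan_put(v);                    /* setup done, keep only the list ref */
                return 0;
        }

        int main(void)
        {
                printf("create 10: %d\n", create_vlan(10, false));
                printf("create 10 again: %d\n", create_vlan(10, false));   /* -EEXIST */
                printf("create 20, attrs fail: %d\n", create_vlan(20, true));
                return 0;
        }
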
1445     diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
1446     index 02d96f224c60..31d7e239a1fd 100644
1447     --- a/net/batman-adv/sysfs.c
1448     +++ b/net/batman-adv/sysfs.c
1449     @@ -187,7 +187,8 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
1450     \
1451     return __batadv_store_uint_attr(buff, count, _min, _max, \
1452     _post_func, attr, \
1453     - &bat_priv->_var, net_dev); \
1454     + &bat_priv->_var, net_dev, \
1455     + NULL); \
1456     }
1457    
1458     #define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \
1459     @@ -261,7 +262,9 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
1460     \
1461     length = __batadv_store_uint_attr(buff, count, _min, _max, \
1462     _post_func, attr, \
1463     - &hard_iface->_var, net_dev); \
1464     + &hard_iface->_var, \
1465     + hard_iface->soft_iface, \
1466     + net_dev); \
1467     \
1468     batadv_hardif_put(hard_iface); \
1469     return length; \
1470     @@ -355,10 +358,12 @@ __batadv_store_bool_attr(char *buff, size_t count,
1471    
1472     static int batadv_store_uint_attr(const char *buff, size_t count,
1473     struct net_device *net_dev,
1474     + struct net_device *slave_dev,
1475     const char *attr_name,
1476     unsigned int min, unsigned int max,
1477     atomic_t *attr)
1478     {
1479     + char ifname[IFNAMSIZ + 3] = "";
1480     unsigned long uint_val;
1481     int ret;
1482    
1483     @@ -384,8 +389,11 @@ static int batadv_store_uint_attr(const char *buff, size_t count,
1484     if (atomic_read(attr) == uint_val)
1485     return count;
1486    
1487     - batadv_info(net_dev, "%s: Changing from: %i to: %lu\n",
1488     - attr_name, atomic_read(attr), uint_val);
1489     + if (slave_dev)
1490     + snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name);
1491     +
1492     + batadv_info(net_dev, "%s: %sChanging from: %i to: %lu\n",
1493     + attr_name, ifname, atomic_read(attr), uint_val);
1494    
1495     atomic_set(attr, uint_val);
1496     return count;
1497     @@ -396,12 +404,13 @@ static ssize_t __batadv_store_uint_attr(const char *buff, size_t count,
1498     void (*post_func)(struct net_device *),
1499     const struct attribute *attr,
1500     atomic_t *attr_store,
1501     - struct net_device *net_dev)
1502     + struct net_device *net_dev,
1503     + struct net_device *slave_dev)
1504     {
1505     int ret;
1506    
1507     - ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max,
1508     - attr_store);
1509     + ret = batadv_store_uint_attr(buff, count, net_dev, slave_dev,
1510     + attr->name, min, max, attr_store);
1511     if (post_func && ret)
1512     post_func(net_dev);
1513    
1514     @@ -570,7 +579,7 @@ static ssize_t batadv_store_gw_sel_class(struct kobject *kobj,
1515     return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
1516     batadv_post_gw_reselect, attr,
1517     &bat_priv->gw.sel_class,
1518     - bat_priv->soft_iface);
1519     + bat_priv->soft_iface, NULL);
1520     }
1521    
1522     static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
1523     @@ -1084,8 +1093,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
1524     if (old_tp_override == tp_override)
1525     goto out;
1526    
1527     - batadv_info(net_dev, "%s: Changing from: %u.%u MBit to: %u.%u MBit\n",
1528     - "throughput_override",
1529     + batadv_info(hard_iface->soft_iface,
1530     + "%s: %s: Changing from: %u.%u MBit to: %u.%u MBit\n",
1531     + "throughput_override", net_dev->name,
1532     old_tp_override / 10, old_tp_override % 10,
1533     tp_override / 10, tp_override % 10);
1534    
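
The sysfs hunk threads the hard interface through to the logging helpers so attribute changes on a slave device are logged against the soft interface with a "<slave>: " prefix, built in the small ifname[IFNAMSIZ + 3] stack buffer. A minimal sketch of that prefix construction follows; the attribute names and values in main() are illustrative only.

        #include <stdio.h>

        #define IFNAMSIZ 16     /* as in <linux/if.h> */

        /* Optional "<slave>: " prefix, sized name + ": " + NUL, hence IFNAMSIZ + 3. */
        static void log_change(const char *attr, const char *slave,
                               int old_val, long new_val)
        {
                char ifname[IFNAMSIZ + 3] = "";

                if (slave)
                        snprintf(ifname, sizeof(ifname), "%s: ", slave);

                printf("%s: %sChanging from: %i to: %li\n",
                       attr, ifname, old_val, new_val);
        }

        int main(void)
        {
                log_change("elp_interval", "eth0", 500, 1000);  /* per-hardif attribute */
                log_change("orig_interval", NULL, 1000, 2000);  /* soft-interface attribute */
                return 0;
        }
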
1535     diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
1536     index 0dc85eb1cb7a..b9f9a310eb78 100644
1537     --- a/net/batman-adv/translation-table.c
1538     +++ b/net/batman-adv/translation-table.c
1539     @@ -1550,6 +1550,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
1540     {
1541     struct batadv_tt_orig_list_entry *orig_entry;
1542    
1543     + spin_lock_bh(&tt_global->list_lock);
1544     +
1545     orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
1546     if (orig_entry) {
1547     /* refresh the ttvn: the current value could be a bogus one that
1548     @@ -1570,16 +1572,16 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
1549     orig_entry->ttvn = ttvn;
1550     kref_init(&orig_entry->refcount);
1551    
1552     - spin_lock_bh(&tt_global->list_lock);
1553     kref_get(&orig_entry->refcount);
1554     hlist_add_head_rcu(&orig_entry->list,
1555     &tt_global->orig_list);
1556     - spin_unlock_bh(&tt_global->list_lock);
1557     atomic_inc(&tt_global->orig_list_count);
1558    
1559     out:
1560     if (orig_entry)
1561     batadv_tt_orig_list_entry_put(orig_entry);
1562     +
1563     + spin_unlock_bh(&tt_global->list_lock);
1564     }
1565    
1566     /**
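
The translation-table hunk widens tt_global->list_lock so the lookup, the ttvn refresh of an existing entry and the insertion of a new one all happen in one critical section, with a single unlock at the shared exit. Below is a sketch of that find-or-add shape with a goto-based common exit; orig_entry, entry_find and the reference handling are simplified stand-ins, not the batman-adv structures. Build with -pthread.

        #include <pthread.h>
        #include <stdio.h>
        #include <stdlib.h>

        struct orig_entry {
                int orig_id;
                unsigned char ttvn;
                int refcount;
                struct orig_entry *next;
        };

        static struct orig_entry *orig_list;
        static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

        static void entry_put(struct orig_entry *e)
        {
                if (--e->refcount == 0)
                        free(e);
        }

        static struct orig_entry *entry_find(int orig_id)
        {
                for (struct orig_entry *e = orig_list; e; e = e->next)
                        if (e->orig_id == orig_id) {
                                e->refcount++;
                                return e;
                        }
                return NULL;
        }

        /* Find-or-add with one critical section and a single unlock on exit. */
        static void orig_entry_add(int orig_id, unsigned char ttvn)
        {
                struct orig_entry *e;

                pthread_mutex_lock(&list_lock);

                e = entry_find(orig_id);
                if (e) {
                        e->ttvn = ttvn;         /* refresh the existing entry */
                        goto out;
                }

                e = calloc(1, sizeof(*e));
                if (!e)
                        goto out;

                e->orig_id = orig_id;
                e->ttvn = ttvn;
                e->refcount = 1;                /* temporary reference, dropped at out */

                e->refcount++;                  /* list reference */
                e->next = orig_list;
                orig_list = e;
        out:
                if (e)
                        entry_put(e);           /* drop the temporary reference */

                pthread_mutex_unlock(&list_lock);
        }

        int main(void)
        {
                orig_entry_add(42, 1);
                orig_entry_add(42, 7);          /* refreshes ttvn instead of duplicating */
                printf("ttvn of 42: %u\n", orig_list->ttvn);
                return 0;
        }
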
1567     diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
1568     index 77654f055f24..8e91a26e9b00 100644
1569     --- a/net/batman-adv/tvlv.c
1570     +++ b/net/batman-adv/tvlv.c
1571     @@ -528,15 +528,20 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
1572     {
1573     struct batadv_tvlv_handler *tvlv_handler;
1574    
1575     + spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
1576     +
1577     tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
1578     if (tvlv_handler) {
1579     + spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
1580     batadv_tvlv_handler_put(tvlv_handler);
1581     return;
1582     }
1583    
1584     tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
1585     - if (!tvlv_handler)
1586     + if (!tvlv_handler) {
1587     + spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
1588     return;
1589     + }
1590    
1591     tvlv_handler->ogm_handler = optr;
1592     tvlv_handler->unicast_handler = uptr;
1593     @@ -546,7 +551,6 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
1594     kref_init(&tvlv_handler->refcount);
1595     INIT_HLIST_NODE(&tvlv_handler->list);
1596    
1597     - spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
1598     kref_get(&tvlv_handler->refcount);
1599     hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
1600     spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
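
The tvlv hunk takes handler_list_lock before the duplicate lookup, so every early return (duplicate handler, allocation failure) now has to drop the lock explicitly before leaving. The sketch below shows that per-path unlock discipline for a (type, version) keyed registration; handler and handler_register are invented names standing in for the batman-adv symbols. Build with -pthread.

        #include <pthread.h>
        #include <stdio.h>
        #include <stdlib.h>

        struct handler {
                unsigned char type;
                unsigned char version;
                struct handler *next;
        };

        static struct handler *handler_list;
        static pthread_mutex_t handler_list_lock = PTHREAD_MUTEX_INITIALIZER;

        static struct handler *handler_get(unsigned char type, unsigned char version)
        {
                for (struct handler *h = handler_list; h; h = h->next)
                        if (h->type == type && h->version == version)
                                return h;
                return NULL;
        }

        /* Every early return leaves the critical section explicitly. */
        static void handler_register(unsigned char type, unsigned char version)
        {
                struct handler *h;

                pthread_mutex_lock(&handler_list_lock);

                if (handler_get(type, version)) {
                        pthread_mutex_unlock(&handler_list_lock);   /* duplicate */
                        return;
                }

                h = calloc(1, sizeof(*h));
                if (!h) {
                        pthread_mutex_unlock(&handler_list_lock);   /* alloc failed */
                        return;
                }

                h->type = type;
                h->version = version;
                h->next = handler_list;
                handler_list = h;

                pthread_mutex_unlock(&handler_list_lock);
        }

        int main(void)
        {
                int n = 0;

                handler_register(1, 1);
                handler_register(1, 1);         /* duplicate, silently ignored */
                handler_register(2, 1);

                for (struct handler *h = handler_list; h; h = h->next)
                        n++;
                printf("%d handlers registered (expect 2)\n", n);
                return 0;
        }
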
1601     diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
1602     index 624d6e4dcd5c..51b0d832bd07 100644
1603     --- a/net/netfilter/nf_nat_core.c
1604     +++ b/net/netfilter/nf_nat_core.c
1605     @@ -421,7 +421,7 @@ nf_nat_setup_info(struct nf_conn *ct,
1606     else
1607     ct->status |= IPS_DST_NAT;
1608    
1609     - if (nfct_help(ct))
1610     + if (nfct_help(ct) && !nfct_seqadj(ct))
1611     if (!nfct_seqadj_ext_add(ct))
1612     return NF_DROP;
1613     }
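
The nf_nat_core change adds the sequence-adjustment extension only when a helper is attached and the extension is not already present, instead of unconditionally trying to add it again. A small sketch of that add-only-if-missing guard follows, assuming simplified stand-ins (conn, ext, seqadj_ext_add) for the conntrack structures and the nfct_* helpers.

        #include <stdbool.h>
        #include <stdio.h>
        #include <stdlib.h>

        struct ext {
                int dummy;
        };

        struct conn {
                struct ext *helper;     /* set when a conntrack helper is attached */
                struct ext *seqadj;     /* sequence-adjustment state, added at most once */
        };

        /* Stand-in for nfct_seqadj_ext_add(): re-adding an extension fails. */
        static struct ext *seqadj_ext_add(struct conn *ct)
        {
                if (ct->seqadj)
                        return NULL;
                ct->seqadj = calloc(1, sizeof(*ct->seqadj));
                return ct->seqadj;
        }

        /* The guarded form the hunk introduces: add only if not already present. */
        static bool setup_nat(struct conn *ct)
        {
                if (ct->helper && !ct->seqadj)
                        if (!seqadj_ext_add(ct))
                                return false;   /* NF_DROP in the kernel */
                return true;
        }

        int main(void)
        {
                struct ext helper = { 0 };
                struct conn ct = { .helper = &helper, .seqadj = NULL };

                setup_nat(&ct);                 /* first setup adds the extension */
                printf("second setup %s\n",
                       setup_nat(&ct) ? "kept the packet" : "dropped the packet");
                return 0;
        }

Without the !ct->seqadj guard the second call would attempt the re-add, fail, and drop the packet, which is the behavior the one-line kernel change avoids.
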