Contents of /trunk/kernel-alx-legacy/patches-4.9/0354-4.9.255-all-fixes.patch
Revision 3656 - Mon Oct 24 14:07:34 2022 UTC by niro
File size: 49113 byte(s)
-linux-4.9.255
1 | diff --git a/Makefile b/Makefile |
2 | index ea9ea119460d4..4780b5f80b2a8 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 9 |
8 | -SUBLEVEL = 254 |
9 | +SUBLEVEL = 255 |
10 | EXTRAVERSION = |
11 | NAME = Roaring Lionus |
12 | |
13 | diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S |
14 | index 7d84b617af481..99d2e296082c7 100644 |
15 | --- a/arch/arm/mach-imx/suspend-imx6.S |
16 | +++ b/arch/arm/mach-imx/suspend-imx6.S |
17 | @@ -73,6 +73,7 @@ |
18 | #define MX6Q_CCM_CCR 0x0 |
19 | |
20 | .align 3 |
21 | + .arm |
22 | |
23 | .macro sync_l2_cache |
24 | |
25 | diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c |
26 | index 84ae4dd261caf..cafdaabf062fc 100644 |
27 | --- a/arch/x86/kvm/pmu_intel.c |
28 | +++ b/arch/x86/kvm/pmu_intel.c |
29 | @@ -29,7 +29,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = { |
30 | [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES }, |
31 | [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, |
32 | [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES }, |
33 | - [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES }, |
34 | + [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES }, |
35 | }; |
36 | |
37 | /* mapping between fixed pmc index and intel_arch_events array */ |
38 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
39 | index 3c0f9be107e42..98fb3a7240371 100644 |
40 | --- a/arch/x86/kvm/x86.c |
41 | +++ b/arch/x86/kvm/x86.c |
42 | @@ -97,6 +97,7 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE); |
43 | |
44 | static void update_cr8_intercept(struct kvm_vcpu *vcpu); |
45 | static void process_nmi(struct kvm_vcpu *vcpu); |
46 | +static void process_smi(struct kvm_vcpu *vcpu); |
47 | static void enter_smm(struct kvm_vcpu *vcpu); |
48 | static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); |
49 | |
50 | @@ -3199,6 +3200,10 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, |
51 | struct kvm_vcpu_events *events) |
52 | { |
53 | process_nmi(vcpu); |
54 | + |
55 | + if (kvm_check_request(KVM_REQ_SMI, vcpu)) |
56 | + process_smi(vcpu); |
57 | + |
58 | events->exception.injected = |
59 | vcpu->arch.exception.pending && |
60 | !kvm_exception_is_soft(vcpu->arch.exception.nr); |
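The x86.c hunk above makes KVM_GET_VCPU_EVENTS consume a still-pending SMI request so it is reflected in the reported vcpu state instead of being lost. kvm_check_request() is an atomic check-and-consume on the vcpu request bitmap; a minimal sketch of that idiom follows (demo names, not the KVM API):

    #include <linux/bitops.h>
    #include <linux/types.h>

    static unsigned long pending_requests;    /* stand-in for vcpu->requests */

    /* Atomically test and clear one request bit: a posted request is
     * observed exactly once, even with concurrent checkers. */
    static bool demo_check_request(unsigned int req)
    {
        return test_and_clear_bit(req, &pending_requests);
    }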
61 | diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c |
62 | index 98b513d049f6a..fb610ad495f10 100644 |
63 | --- a/drivers/acpi/device_sysfs.c |
64 | +++ b/drivers/acpi/device_sysfs.c |
65 | @@ -259,20 +259,12 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev, |
66 | if (add_uevent_var(env, "MODALIAS=")) |
67 | return -ENOMEM; |
68 | |
69 | - len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], |
70 | - sizeof(env->buf) - env->buflen); |
71 | - if (len < 0) |
72 | - return len; |
73 | - |
74 | - env->buflen += len; |
75 | - if (!adev->data.of_compatible) |
76 | - return 0; |
77 | - |
78 | - if (len > 0 && add_uevent_var(env, "MODALIAS=")) |
79 | - return -ENOMEM; |
80 | - |
81 | - len = create_of_modalias(adev, &env->buf[env->buflen - 1], |
82 | - sizeof(env->buf) - env->buflen); |
83 | + if (adev->data.of_compatible) |
84 | + len = create_of_modalias(adev, &env->buf[env->buflen - 1], |
85 | + sizeof(env->buf) - env->buflen); |
86 | + else |
87 | + len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], |
88 | + sizeof(env->buf) - env->buflen); |
89 | if (len < 0) |
90 | return len; |
91 | |
92 | diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c |
93 | index bb45eb22ba1f5..36bdb04f8f018 100644 |
94 | --- a/drivers/infiniband/hw/cxgb4/qp.c |
95 | +++ b/drivers/infiniband/hw/cxgb4/qp.c |
96 | @@ -1976,7 +1976,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
97 | init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; |
98 | init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; |
99 | init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; |
100 | - init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges; |
101 | + init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges; |
102 | init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE; |
103 | init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; |
104 | return 0; |
105 | diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c |
106 | index 977070ce4fe97..9ad5a7019abfd 100644 |
107 | --- a/drivers/iommu/dmar.c |
108 | +++ b/drivers/iommu/dmar.c |
109 | @@ -1024,8 +1024,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) |
110 | { |
111 | struct intel_iommu *iommu; |
112 | u32 ver, sts; |
113 | - int agaw = 0; |
114 | - int msagaw = 0; |
115 | + int agaw = -1; |
116 | + int msagaw = -1; |
117 | int err; |
118 | |
119 | if (!drhd->reg_base_addr) { |
120 | @@ -1050,17 +1050,28 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) |
121 | } |
122 | |
123 | err = -EINVAL; |
124 | - agaw = iommu_calculate_agaw(iommu); |
125 | - if (agaw < 0) { |
126 | - pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", |
127 | - iommu->seq_id); |
128 | - goto err_unmap; |
129 | + if (cap_sagaw(iommu->cap) == 0) { |
130 | + pr_info("%s: No supported address widths. Not attempting DMA translation.\n", |
131 | + iommu->name); |
132 | + drhd->ignored = 1; |
133 | } |
134 | - msagaw = iommu_calculate_max_sagaw(iommu); |
135 | - if (msagaw < 0) { |
136 | - pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", |
137 | - iommu->seq_id); |
138 | - goto err_unmap; |
139 | + |
140 | + if (!drhd->ignored) { |
141 | + agaw = iommu_calculate_agaw(iommu); |
142 | + if (agaw < 0) { |
143 | + pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", |
144 | + iommu->seq_id); |
145 | + drhd->ignored = 1; |
146 | + } |
147 | + } |
148 | + if (!drhd->ignored) { |
149 | + msagaw = iommu_calculate_max_sagaw(iommu); |
150 | + if (msagaw < 0) { |
151 | + pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", |
152 | + iommu->seq_id); |
153 | + drhd->ignored = 1; |
154 | + agaw = -1; |
155 | + } |
156 | } |
157 | iommu->agaw = agaw; |
158 | iommu->msagaw = msagaw; |
159 | @@ -1087,7 +1098,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) |
160 | |
161 | raw_spin_lock_init(&iommu->register_lock); |
162 | |
163 | - if (intel_iommu_enabled) { |
164 | + if (intel_iommu_enabled && !drhd->ignored) { |
165 | iommu->iommu_dev = iommu_device_create(NULL, iommu, |
166 | intel_iommu_groups, |
167 | "%s", iommu->name); |
168 | @@ -1099,6 +1110,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) |
169 | } |
170 | |
171 | drhd->iommu = iommu; |
172 | + iommu->drhd = drhd; |
173 | |
174 | return 0; |
175 | |
176 | @@ -1113,7 +1125,8 @@ error: |
177 | |
178 | static void free_iommu(struct intel_iommu *iommu) |
179 | { |
180 | - iommu_device_destroy(iommu->iommu_dev); |
181 | + if (intel_iommu_enabled && !iommu->drhd->ignored) |
182 | + iommu_device_destroy(iommu->iommu_dev); |
183 | |
184 | if (iommu->irq) { |
185 | if (iommu->pr_irq) { |
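The dmar.c hunks above convert hard probe failures into a soft drhd->ignored mark: an IOMMU that advertises no supported address widths (cap_sagaw() == 0) still comes up for everything except DMA translation, and free_iommu() only destroys the sysfs device when one was actually created. A generic sketch of that degrade-instead-of-fail shape, with hypothetical names:

    #include <linux/printk.h>
    #include <linux/types.h>

    struct demo_unit {
        const char *name;
        bool ignored;    /* DMA translation must skip this unit */
    };

    /* Record the limitation and keep probing instead of unwinding, so
     * features that do not need address widths stay available. */
    static int demo_probe(struct demo_unit *u, bool has_widths)
    {
        if (!has_widths) {
            pr_info("%s: no supported address widths, not attempting DMA translation\n",
                    u->name);
            u->ignored = true;
        }
        return 0;    /* deliberately not a hard probe failure */
    }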
186 | diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c |
187 | index 431123b048a27..573a5a80b23c6 100644 |
188 | --- a/drivers/leds/led-triggers.c |
189 | +++ b/drivers/leds/led-triggers.c |
190 | @@ -283,14 +283,15 @@ void led_trigger_event(struct led_trigger *trig, |
191 | enum led_brightness brightness) |
192 | { |
193 | struct led_classdev *led_cdev; |
194 | + unsigned long flags; |
195 | |
196 | if (!trig) |
197 | return; |
198 | |
199 | - read_lock(&trig->leddev_list_lock); |
200 | + read_lock_irqsave(&trig->leddev_list_lock, flags); |
201 | list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) |
202 | led_set_brightness(led_cdev, brightness); |
203 | - read_unlock(&trig->leddev_list_lock); |
204 | + read_unlock_irqrestore(&trig->leddev_list_lock, flags); |
205 | } |
206 | EXPORT_SYMBOL_GPL(led_trigger_event); |
207 | |
208 | @@ -301,11 +302,12 @@ static void led_trigger_blink_setup(struct led_trigger *trig, |
209 | int invert) |
210 | { |
211 | struct led_classdev *led_cdev; |
212 | + unsigned long flags; |
213 | |
214 | if (!trig) |
215 | return; |
216 | |
217 | - read_lock(&trig->leddev_list_lock); |
218 | + read_lock_irqsave(&trig->leddev_list_lock, flags); |
219 | list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) { |
220 | if (oneshot) |
221 | led_blink_set_oneshot(led_cdev, delay_on, delay_off, |
222 | @@ -313,7 +315,7 @@ static void led_trigger_blink_setup(struct led_trigger *trig, |
223 | else |
224 | led_blink_set(led_cdev, delay_on, delay_off); |
225 | } |
226 | - read_unlock(&trig->leddev_list_lock); |
227 | + read_unlock_irqrestore(&trig->leddev_list_lock, flags); |
228 | } |
229 | |
230 | void led_trigger_blink(struct led_trigger *trig, |
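Both led-trigger hunks replace plain read_lock() with read_lock_irqsave(). led_trigger_event() can run in hard-irq context while leddev_list_lock is also taken for writing in process context; once a lock is ever acquired from interrupt context, every acquisition must run with local interrupts disabled, or an interrupt can fire on a CPU that already holds the lock in a conflicting mode and spin forever. The fixed shape as a stand-alone sketch:

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(demo_list_lock);

    /* Safe from any context: local interrupts stay off on this CPU while
     * the lock is held, so no handler here can recurse into the lock. */
    static void demo_walk_list(void)
    {
        unsigned long flags;

        read_lock_irqsave(&demo_list_lock, flags);
        /* ... iterate the shared list of devices ... */
        read_unlock_irqrestore(&demo_list_lock, flags);
    }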
231 | diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c |
232 | index 164078609f98e..ea38b67d0b737 100644 |
233 | --- a/drivers/net/can/dev.c |
234 | +++ b/drivers/net/can/dev.c |
235 | @@ -1017,7 +1017,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) |
236 | { |
237 | struct can_priv *priv = netdev_priv(dev); |
238 | struct can_ctrlmode cm = {.flags = priv->ctrlmode}; |
239 | - struct can_berr_counter bec; |
240 | + struct can_berr_counter bec = { }; |
241 | enum can_state state = priv->state; |
242 | |
243 | if (priv->do_get_state) |
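The one-line can_fill_info() change is an infoleak fix: bec is copied out to a netlink attribute, but it is only filled in when the driver implements do_get_berr_counter, so without the "= { }" initializer the attribute could carry uninitialized kernel stack bytes. The same bug class in a runnable userspace miniature:

    #include <stdio.h>
    #include <string.h>

    struct berr_counter { unsigned short txerr, rxerr; };

    /* Without "= { }", a missing callback would leave bec holding whatever
     * was on the stack, and memcpy() would ship those bytes to the reader. */
    static void fill_info(struct berr_counter *out, int have_counters)
    {
        struct berr_counter bec = { };    /* defined contents on every path */

        if (have_counters) {
            bec.txerr = 1;
            bec.rxerr = 2;
        }
        memcpy(out, &bec, sizeof(bec));
    }

    int main(void)
    {
        struct berr_counter c;

        fill_info(&c, 0);
        printf("txerr=%u rxerr=%u\n", c.txerr, c.rxerr);    /* always 0 0 */
        return 0;
    }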
244 | diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c |
245 | index a571024882d7c..1c0aec70ee5d2 100644 |
246 | --- a/drivers/net/usb/qmi_wwan.c |
247 | +++ b/drivers/net/usb/qmi_wwan.c |
248 | @@ -942,6 +942,7 @@ static const struct usb_device_id products[] = { |
249 | {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */ |
250 | {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ |
251 | {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ |
252 | + {QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */ |
253 | {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ |
254 | {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */ |
255 | {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */ |
256 | diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c |
257 | index e1287c3421165..71edbf7a42ed4 100644 |
258 | --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c |
259 | +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c |
260 | @@ -1909,7 +1909,8 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, |
261 | |
262 | while (offs < dwords) { |
263 | /* limit the time we spin here under lock to 1/2s */ |
264 | - ktime_t timeout = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC); |
265 | + unsigned long end = jiffies + HZ / 2; |
266 | + bool resched = false; |
267 | |
268 | if (iwl_trans_grab_nic_access(trans, &flags)) { |
269 | iwl_write32(trans, HBUS_TARG_MEM_RADDR, |
270 | @@ -1920,14 +1921,15 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, |
271 | HBUS_TARG_MEM_RDAT); |
272 | offs++; |
273 | |
274 | - /* calling ktime_get is expensive so |
275 | - * do it once in 128 reads |
276 | - */ |
277 | - if (offs % 128 == 0 && ktime_after(ktime_get(), |
278 | - timeout)) |
279 | + if (time_after(jiffies, end)) { |
280 | + resched = true; |
281 | break; |
282 | + } |
283 | } |
284 | iwl_trans_release_nic_access(trans, &flags); |
285 | + |
286 | + if (resched) |
287 | + cond_resched(); |
288 | } else { |
289 | return -EBUSY; |
290 | } |
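The iwlwifi hunk trades the per-iteration ktime_get() for a cheap jiffies deadline, and yields with cond_resched() once the half-second budget is used up instead of just breaking out and immediately re-grabbing NIC access. A sketch of that bounded-spin pattern, assuming a hypothetical read_hw_word() register helper:

    #include <linux/jiffies.h>
    #include <linux/sched.h>
    #include <linux/types.h>

    static u32 read_hw_word(size_t offs);    /* hypothetical MMIO helper */

    static void demo_copy_dwords(u32 *dst, size_t dwords)
    {
        size_t offs = 0;

        while (offs < dwords) {
            unsigned long end = jiffies + HZ / 2;    /* 500 ms budget */
            bool resched = false;

            while (offs < dwords) {
                dst[offs] = read_hw_word(offs);
                offs++;
                /* jiffies is cheap enough to test on every pass */
                if (time_after(jiffies, end)) {
                    resched = true;
                    break;
                }
            }
            if (resched)
                cond_resched();    /* yield before the next burst */
        }
    }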
291 | diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c |
292 | index 56cad16e70ca6..1b68aef03fe2e 100644 |
293 | --- a/drivers/net/wireless/mediatek/mt7601u/dma.c |
294 | +++ b/drivers/net/wireless/mediatek/mt7601u/dma.c |
295 | @@ -160,8 +160,7 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e) |
296 | |
297 | if (new_p) { |
298 | /* we have one extra ref from the allocator */ |
299 | - __free_pages(e->p, MT_RX_ORDER); |
300 | - |
301 | + put_page(e->p); |
302 | e->p = new_p; |
303 | } |
304 | } |
305 | @@ -318,7 +317,6 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, |
306 | } |
307 | |
308 | e = &q->e[q->end]; |
309 | - e->skb = skb; |
310 | usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len, |
311 | mt7601u_complete_tx, q); |
312 | ret = usb_submit_urb(e->urb, GFP_ATOMIC); |
313 | @@ -336,6 +334,7 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, |
314 | |
315 | q->end = (q->end + 1) % q->entries; |
316 | q->used++; |
317 | + e->skb = skb; |
318 | |
319 | if (q->used >= q->entries) |
320 | ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); |
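Two separate fixes sit in the mt7601u hunks: the RX path now drops its reference with put_page() instead of __free_pages(), since an skb built from that page may still hold a reference of its own, and the TX path stores e->skb only after usb_submit_urb() succeeded and the slot is accounted, so a failed submit cannot leave a stale pointer in the ring for a later cleanup to free again. A sketch of the refcount half, under those assumptions:

    #include <linux/mm.h>
    #include <linux/skbuff.h>

    #define DEMO_RX_ORDER 2

    /* Swap in a fresh buffer and drop only our reference to the old one;
     * __free_pages(old, DEMO_RX_ORDER) would free it immediately even if
     * an skb still maps part of it. */
    static struct page *demo_swap_rx_page(struct page *old)
    {
        struct page *new_p = dev_alloc_pages(DEMO_RX_ORDER);    /* compound page */

        if (!new_p)
            return old;    /* no memory: keep recycling the old buffer */

        put_page(old);     /* drop our ref; the last holder frees it */
        return new_p;
    }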
321 | diff --git a/fs/exec.c b/fs/exec.c |
322 | index cd5da140f94cb..319a1f5732fa9 100644 |
323 | --- a/fs/exec.c |
324 | +++ b/fs/exec.c |
325 | @@ -1021,7 +1021,7 @@ static int exec_mmap(struct mm_struct *mm) |
326 | /* Notify parent that we're no longer interested in the old VM */ |
327 | tsk = current; |
328 | old_mm = current->mm; |
329 | - mm_release(tsk, old_mm); |
330 | + exec_mm_release(tsk, old_mm); |
331 | |
332 | if (old_mm) { |
333 | sync_mm_rss(old_mm); |
334 | diff --git a/include/linux/compat.h b/include/linux/compat.h |
335 | index fab35daf87596..6b9d38a7adcaf 100644 |
336 | --- a/include/linux/compat.h |
337 | +++ b/include/linux/compat.h |
338 | @@ -311,8 +311,6 @@ struct compat_kexec_segment; |
339 | struct compat_mq_attr; |
340 | struct compat_msgbuf; |
341 | |
342 | -extern void compat_exit_robust_list(struct task_struct *curr); |
343 | - |
344 | asmlinkage long |
345 | compat_sys_set_robust_list(struct compat_robust_list_head __user *head, |
346 | compat_size_t len); |
347 | diff --git a/include/linux/futex.h b/include/linux/futex.h |
348 | index c015fa91e7cce..0f294ae63c78c 100644 |
349 | --- a/include/linux/futex.h |
350 | +++ b/include/linux/futex.h |
351 | @@ -1,6 +1,8 @@ |
352 | #ifndef _LINUX_FUTEX_H |
353 | #define _LINUX_FUTEX_H |
354 | |
355 | +#include <linux/sched.h> |
356 | + |
357 | #include <uapi/linux/futex.h> |
358 | |
359 | struct inode; |
360 | @@ -11,9 +13,6 @@ union ktime; |
361 | long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout, |
362 | u32 __user *uaddr2, u32 val2, u32 val3); |
363 | |
364 | -extern int |
365 | -handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi); |
366 | - |
367 | /* |
368 | * Futexes are matched on equal values of this key. |
369 | * The key type depends on whether it's a shared or private mapping. |
370 | @@ -56,19 +55,34 @@ union futex_key { |
371 | #define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } } |
372 | |
373 | #ifdef CONFIG_FUTEX |
374 | -extern void exit_robust_list(struct task_struct *curr); |
375 | -extern void exit_pi_state_list(struct task_struct *curr); |
376 | -#ifdef CONFIG_HAVE_FUTEX_CMPXCHG |
377 | -#define futex_cmpxchg_enabled 1 |
378 | -#else |
379 | -extern int futex_cmpxchg_enabled; |
380 | -#endif |
381 | -#else |
382 | -static inline void exit_robust_list(struct task_struct *curr) |
383 | -{ |
384 | -} |
385 | -static inline void exit_pi_state_list(struct task_struct *curr) |
386 | +enum { |
387 | + FUTEX_STATE_OK, |
388 | + FUTEX_STATE_EXITING, |
389 | + FUTEX_STATE_DEAD, |
390 | +}; |
391 | + |
392 | +static inline void futex_init_task(struct task_struct *tsk) |
393 | { |
394 | + tsk->robust_list = NULL; |
395 | +#ifdef CONFIG_COMPAT |
396 | + tsk->compat_robust_list = NULL; |
397 | +#endif |
398 | + INIT_LIST_HEAD(&tsk->pi_state_list); |
399 | + tsk->pi_state_cache = NULL; |
400 | + tsk->futex_state = FUTEX_STATE_OK; |
401 | + mutex_init(&tsk->futex_exit_mutex); |
402 | } |
403 | + |
404 | +void futex_exit_recursive(struct task_struct *tsk); |
405 | +void futex_exit_release(struct task_struct *tsk); |
406 | +void futex_exec_release(struct task_struct *tsk); |
407 | + |
408 | +long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, |
409 | + u32 __user *uaddr2, u32 val2, u32 val3); |
410 | +#else |
411 | +static inline void futex_init_task(struct task_struct *tsk) { } |
412 | +static inline void futex_exit_recursive(struct task_struct *tsk) { } |
413 | +static inline void futex_exit_release(struct task_struct *tsk) { } |
414 | +static inline void futex_exec_release(struct task_struct *tsk) { } |
415 | #endif |
416 | #endif |
417 | diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h |
418 | index d86ac620f0aac..188bd17689711 100644 |
419 | --- a/include/linux/intel-iommu.h |
420 | +++ b/include/linux/intel-iommu.h |
421 | @@ -447,6 +447,8 @@ struct intel_iommu { |
422 | struct device *iommu_dev; /* IOMMU-sysfs device */ |
423 | int node; |
424 | u32 flags; /* Software defined flags */ |
425 | + |
426 | + struct dmar_drhd_unit *drhd; |
427 | }; |
428 | |
429 | static inline void __iommu_flush_cache( |
430 | diff --git a/include/linux/sched.h b/include/linux/sched.h |
431 | index 1872d4e9acbe1..f094882822a63 100644 |
432 | --- a/include/linux/sched.h |
433 | +++ b/include/linux/sched.h |
434 | @@ -1815,6 +1815,8 @@ struct task_struct { |
435 | #endif |
436 | struct list_head pi_state_list; |
437 | struct futex_pi_state *pi_state_cache; |
438 | + struct mutex futex_exit_mutex; |
439 | + unsigned int futex_state; |
440 | #endif |
441 | #ifdef CONFIG_PERF_EVENTS |
442 | struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; |
443 | @@ -2276,7 +2278,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, |
444 | * Per process flags |
445 | */ |
446 | #define PF_EXITING 0x00000004 /* getting shut down */ |
447 | -#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ |
448 | #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ |
449 | #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ |
450 | #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ |
451 | @@ -2955,8 +2956,10 @@ extern struct mm_struct *get_task_mm(struct task_struct *task); |
452 | * succeeds. |
453 | */ |
454 | extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); |
455 | -/* Remove the current tasks stale references to the old mm_struct */ |
456 | -extern void mm_release(struct task_struct *, struct mm_struct *); |
457 | +/* Remove the current tasks stale references to the old mm_struct on exit() */ |
458 | +extern void exit_mm_release(struct task_struct *, struct mm_struct *); |
459 | +/* Remove the current tasks stale references to the old mm_struct on exec() */ |
460 | +extern void exec_mm_release(struct task_struct *, struct mm_struct *); |
461 | |
462 | #ifdef CONFIG_HAVE_COPY_THREAD_TLS |
463 | extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, |
464 | diff --git a/kernel/Makefile b/kernel/Makefile |
465 | index 184fa9aa58027..92488cf6ad913 100644 |
466 | --- a/kernel/Makefile |
467 | +++ b/kernel/Makefile |
468 | @@ -47,9 +47,6 @@ obj-$(CONFIG_PROFILING) += profile.o |
469 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
470 | obj-y += time/ |
471 | obj-$(CONFIG_FUTEX) += futex.o |
472 | -ifeq ($(CONFIG_COMPAT),y) |
473 | -obj-$(CONFIG_FUTEX) += futex_compat.o |
474 | -endif |
475 | obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o |
476 | obj-$(CONFIG_SMP) += smp.o |
477 | ifneq ($(CONFIG_SMP),y) |
478 | diff --git a/kernel/exit.c b/kernel/exit.c |
479 | index f9943ef23fa82..8716f0780fe3d 100644 |
480 | --- a/kernel/exit.c |
481 | +++ b/kernel/exit.c |
482 | @@ -464,7 +464,7 @@ static void exit_mm(struct task_struct *tsk) |
483 | struct mm_struct *mm = tsk->mm; |
484 | struct core_state *core_state; |
485 | |
486 | - mm_release(tsk, mm); |
487 | + exit_mm_release(tsk, mm); |
488 | if (!mm) |
489 | return; |
490 | sync_mm_rss(mm); |
491 | @@ -785,31 +785,12 @@ void __noreturn do_exit(long code) |
492 | */ |
493 | if (unlikely(tsk->flags & PF_EXITING)) { |
494 | pr_alert("Fixing recursive fault but reboot is needed!\n"); |
495 | - /* |
496 | - * We can do this unlocked here. The futex code uses |
497 | - * this flag just to verify whether the pi state |
498 | - * cleanup has been done or not. In the worst case it |
499 | - * loops once more. We pretend that the cleanup was |
500 | - * done as there is no way to return. Either the |
501 | - * OWNER_DIED bit is set by now or we push the blocked |
502 | - * task into the wait for ever nirwana as well. |
503 | - */ |
504 | - tsk->flags |= PF_EXITPIDONE; |
505 | + futex_exit_recursive(tsk); |
506 | set_current_state(TASK_UNINTERRUPTIBLE); |
507 | schedule(); |
508 | } |
509 | |
510 | exit_signals(tsk); /* sets PF_EXITING */ |
511 | - /* |
512 | - * Ensure that all new tsk->pi_lock acquisitions must observe |
513 | - * PF_EXITING. Serializes against futex.c:attach_to_pi_owner(). |
514 | - */ |
515 | - smp_mb(); |
516 | - /* |
517 | - * Ensure that we must observe the pi_state in exit_mm() -> |
518 | - * mm_release() -> exit_pi_state_list(). |
519 | - */ |
520 | - raw_spin_unlock_wait(&tsk->pi_lock); |
521 | |
522 | /* sync mm's RSS info before statistics gathering */ |
523 | if (tsk->mm) |
524 | @@ -876,12 +857,6 @@ void __noreturn do_exit(long code) |
525 | * Make sure we are holding no locks: |
526 | */ |
527 | debug_check_no_locks_held(); |
528 | - /* |
529 | - * We can do this unlocked here. The futex code uses this flag |
530 | - * just to verify whether the pi state cleanup has been done |
531 | - * or not. In the worst case it loops once more. |
532 | - */ |
533 | - tsk->flags |= PF_EXITPIDONE; |
534 | |
535 | if (tsk->io_context) |
536 | exit_io_context(tsk); |
537 | diff --git a/kernel/fork.c b/kernel/fork.c |
538 | index b64efec4a6e6e..91349fd3e162d 100644 |
539 | --- a/kernel/fork.c |
540 | +++ b/kernel/fork.c |
541 | @@ -1082,24 +1082,8 @@ static int wait_for_vfork_done(struct task_struct *child, |
542 | * restoring the old one. . . |
543 | * Eric Biederman 10 January 1998 |
544 | */ |
545 | -void mm_release(struct task_struct *tsk, struct mm_struct *mm) |
546 | +static void mm_release(struct task_struct *tsk, struct mm_struct *mm) |
547 | { |
548 | - /* Get rid of any futexes when releasing the mm */ |
549 | -#ifdef CONFIG_FUTEX |
550 | - if (unlikely(tsk->robust_list)) { |
551 | - exit_robust_list(tsk); |
552 | - tsk->robust_list = NULL; |
553 | - } |
554 | -#ifdef CONFIG_COMPAT |
555 | - if (unlikely(tsk->compat_robust_list)) { |
556 | - compat_exit_robust_list(tsk); |
557 | - tsk->compat_robust_list = NULL; |
558 | - } |
559 | -#endif |
560 | - if (unlikely(!list_empty(&tsk->pi_state_list))) |
561 | - exit_pi_state_list(tsk); |
562 | -#endif |
563 | - |
564 | uprobe_free_utask(tsk); |
565 | |
566 | /* Get rid of any cached register state */ |
567 | @@ -1132,6 +1116,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm) |
568 | complete_vfork_done(tsk); |
569 | } |
570 | |
571 | +void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm) |
572 | +{ |
573 | + futex_exit_release(tsk); |
574 | + mm_release(tsk, mm); |
575 | +} |
576 | + |
577 | +void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm) |
578 | +{ |
579 | + futex_exec_release(tsk); |
580 | + mm_release(tsk, mm); |
581 | +} |
582 | + |
583 | /* |
584 | * Allocate a new mm structure and copy contents from the |
585 | * mm structure of the passed in task structure. |
586 | @@ -1706,14 +1702,8 @@ static __latent_entropy struct task_struct *copy_process( |
587 | #ifdef CONFIG_BLOCK |
588 | p->plug = NULL; |
589 | #endif |
590 | -#ifdef CONFIG_FUTEX |
591 | - p->robust_list = NULL; |
592 | -#ifdef CONFIG_COMPAT |
593 | - p->compat_robust_list = NULL; |
594 | -#endif |
595 | - INIT_LIST_HEAD(&p->pi_state_list); |
596 | - p->pi_state_cache = NULL; |
597 | -#endif |
598 | + futex_init_task(p); |
599 | + |
600 | /* |
601 | * sigaltstack should be cleared when sharing the same VM |
602 | */ |
603 | diff --git a/kernel/futex.c b/kernel/futex.c |
604 | index 7123d9cab4568..2ef8c5aef35d0 100644 |
605 | --- a/kernel/futex.c |
606 | +++ b/kernel/futex.c |
607 | @@ -44,6 +44,7 @@ |
608 | * along with this program; if not, write to the Free Software |
609 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
610 | */ |
611 | +#include <linux/compat.h> |
612 | #include <linux/slab.h> |
613 | #include <linux/poll.h> |
614 | #include <linux/fs.h> |
615 | @@ -171,8 +172,10 @@ |
616 | * double_lock_hb() and double_unlock_hb(), respectively. |
617 | */ |
618 | |
619 | -#ifndef CONFIG_HAVE_FUTEX_CMPXCHG |
620 | -int __read_mostly futex_cmpxchg_enabled; |
621 | +#ifdef CONFIG_HAVE_FUTEX_CMPXCHG |
622 | +#define futex_cmpxchg_enabled 1 |
623 | +#else |
624 | +static int __read_mostly futex_cmpxchg_enabled; |
625 | #endif |
626 | |
627 | /* |
628 | @@ -336,6 +339,12 @@ static inline bool should_fail_futex(bool fshared) |
629 | } |
630 | #endif /* CONFIG_FAIL_FUTEX */ |
631 | |
632 | +#ifdef CONFIG_COMPAT |
633 | +static void compat_exit_robust_list(struct task_struct *curr); |
634 | +#else |
635 | +static inline void compat_exit_robust_list(struct task_struct *curr) { } |
636 | +#endif |
637 | + |
638 | static inline void futex_get_mm(union futex_key *key) |
639 | { |
640 | atomic_inc(&key->private.mm->mm_count); |
641 | @@ -891,7 +900,7 @@ static struct task_struct * futex_find_get_task(pid_t pid) |
642 | * Kernel cleans up PI-state, but userspace is likely hosed. |
643 | * (Robust-futex cleanup is separate and might save the day for userspace.) |
644 | */ |
645 | -void exit_pi_state_list(struct task_struct *curr) |
646 | +static void exit_pi_state_list(struct task_struct *curr) |
647 | { |
648 | struct list_head *next, *head = &curr->pi_state_list; |
649 | struct futex_pi_state *pi_state; |
650 | @@ -1063,12 +1072,43 @@ out_state: |
651 | return 0; |
652 | } |
653 | |
654 | +/** |
655 | + * wait_for_owner_exiting - Block until the owner has exited |
656 | + * @exiting: Pointer to the exiting task |
657 | + * |
658 | + * Caller must hold a refcount on @exiting. |
659 | + */ |
660 | +static void wait_for_owner_exiting(int ret, struct task_struct *exiting) |
661 | +{ |
662 | + if (ret != -EBUSY) { |
663 | + WARN_ON_ONCE(exiting); |
664 | + return; |
665 | + } |
666 | + |
667 | + if (WARN_ON_ONCE(ret == -EBUSY && !exiting)) |
668 | + return; |
669 | + |
670 | + mutex_lock(&exiting->futex_exit_mutex); |
671 | + /* |
672 | + * No point in doing state checking here. If the waiter got here |
673 | + * while the task was in exec()->exec_futex_release() then it can |
674 | + * have any FUTEX_STATE_* value when the waiter has acquired the |
675 | + * mutex. OK, if running, EXITING or DEAD if it reached exit() |
676 | + * already. Highly unlikely and not a problem. Just one more round |
677 | + * through the futex maze. |
678 | + */ |
679 | + mutex_unlock(&exiting->futex_exit_mutex); |
680 | + |
681 | + put_task_struct(exiting); |
682 | +} |
683 | + |
684 | /* |
685 | * Lookup the task for the TID provided from user space and attach to |
686 | * it after doing proper sanity checks. |
687 | */ |
688 | static int attach_to_pi_owner(u32 uval, union futex_key *key, |
689 | - struct futex_pi_state **ps) |
690 | + struct futex_pi_state **ps, |
691 | + struct task_struct **exiting) |
692 | { |
693 | pid_t pid = uval & FUTEX_TID_MASK; |
694 | struct futex_pi_state *pi_state; |
695 | @@ -1090,22 +1130,33 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key, |
696 | } |
697 | |
698 | /* |
699 | - * We need to look at the task state flags to figure out, |
700 | - * whether the task is exiting. To protect against the do_exit |
701 | - * change of the task flags, we do this protected by |
702 | - * p->pi_lock: |
703 | + * We need to look at the task state to figure out, whether the |
704 | + * task is exiting. To protect against the change of the task state |
705 | + * in futex_exit_release(), we do this protected by p->pi_lock: |
706 | */ |
707 | raw_spin_lock_irq(&p->pi_lock); |
708 | - if (unlikely(p->flags & PF_EXITING)) { |
709 | + if (unlikely(p->futex_state != FUTEX_STATE_OK)) { |
710 | /* |
711 | - * The task is on the way out. When PF_EXITPIDONE is |
712 | - * set, we know that the task has finished the |
713 | - * cleanup: |
714 | + * The task is on the way out. When the futex state is |
715 | + * FUTEX_STATE_DEAD, we know that the task has finished |
716 | + * the cleanup: |
717 | */ |
718 | - int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN; |
719 | + int ret = (p->futex_state = FUTEX_STATE_DEAD) ? -ESRCH : -EAGAIN; |
720 | |
721 | raw_spin_unlock_irq(&p->pi_lock); |
722 | - put_task_struct(p); |
723 | + /* |
724 | + * If the owner task is between FUTEX_STATE_EXITING and |
725 | + * FUTEX_STATE_DEAD then store the task pointer and keep |
726 | + * the reference on the task struct. The calling code will |
727 | + * drop all locks, wait for the task to reach |
728 | + * FUTEX_STATE_DEAD and then drop the refcount. This is |
729 | + * required to prevent a live lock when the current task |
730 | + * preempted the exiting task between the two states. |
731 | + */ |
732 | + if (ret == -EBUSY) |
733 | + *exiting = p; |
734 | + else |
735 | + put_task_struct(p); |
736 | return ret; |
737 | } |
738 | |
739 | @@ -1136,7 +1187,8 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key, |
740 | } |
741 | |
742 | static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, |
743 | - union futex_key *key, struct futex_pi_state **ps) |
744 | + union futex_key *key, struct futex_pi_state **ps, |
745 | + struct task_struct **exiting) |
746 | { |
747 | struct futex_q *match = futex_top_waiter(hb, key); |
748 | |
749 | @@ -1151,7 +1203,7 @@ static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, |
750 | * We are the first waiter - try to look up the owner based on |
751 | * @uval and attach to it. |
752 | */ |
753 | - return attach_to_pi_owner(uval, key, ps); |
754 | + return attach_to_pi_owner(uval, key, ps, exiting); |
755 | } |
756 | |
757 | static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) |
758 | @@ -1177,6 +1229,8 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) |
759 | * lookup |
760 | * @task: the task to perform the atomic lock work for. This will |
761 | * be "current" except in the case of requeue pi. |
762 | + * @exiting: Pointer to store the task pointer of the owner task |
763 | + * which is in the middle of exiting |
764 | * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) |
765 | * |
766 | * Return: |
767 | @@ -1185,11 +1239,17 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) |
768 | * <0 - error |
769 | * |
770 | * The hb->lock and futex_key refs shall be held by the caller. |
771 | + * |
772 | + * @exiting is only set when the return value is -EBUSY. If so, this holds |
773 | + * a refcount on the exiting task on return and the caller needs to drop it |
774 | + * after waiting for the exit to complete. |
775 | */ |
776 | static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, |
777 | union futex_key *key, |
778 | struct futex_pi_state **ps, |
779 | - struct task_struct *task, int set_waiters) |
780 | + struct task_struct *task, |
781 | + struct task_struct **exiting, |
782 | + int set_waiters) |
783 | { |
784 | u32 uval, newval, vpid = task_pid_vnr(task); |
785 | struct futex_q *match; |
786 | @@ -1259,7 +1319,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, |
787 | * attach to the owner. If that fails, no harm done, we only |
788 | * set the FUTEX_WAITERS bit in the user space variable. |
789 | */ |
790 | - return attach_to_pi_owner(uval, key, ps); |
791 | + return attach_to_pi_owner(uval, key, ps, exiting); |
792 | } |
793 | |
794 | /** |
795 | @@ -1685,6 +1745,8 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, |
796 | * @key1: the from futex key |
797 | * @key2: the to futex key |
798 | * @ps: address to store the pi_state pointer |
799 | + * @exiting: Pointer to store the task pointer of the owner task |
800 | + * which is in the middle of exiting |
801 | * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) |
802 | * |
803 | * Try and get the lock on behalf of the top waiter if we can do it atomically. |
804 | @@ -1692,16 +1754,20 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, |
805 | * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit. |
806 | * hb1 and hb2 must be held by the caller. |
807 | * |
808 | + * @exiting is only set when the return value is -EBUSY. If so, this holds |
809 | + * a refcount on the exiting task on return and the caller needs to drop it |
810 | + * after waiting for the exit to complete. |
811 | + * |
812 | * Return: |
813 | * 0 - failed to acquire the lock atomically; |
814 | * >0 - acquired the lock, return value is vpid of the top_waiter |
815 | * <0 - error |
816 | */ |
817 | -static int futex_proxy_trylock_atomic(u32 __user *pifutex, |
818 | - struct futex_hash_bucket *hb1, |
819 | - struct futex_hash_bucket *hb2, |
820 | - union futex_key *key1, union futex_key *key2, |
821 | - struct futex_pi_state **ps, int set_waiters) |
822 | +static int |
823 | +futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, |
824 | + struct futex_hash_bucket *hb2, union futex_key *key1, |
825 | + union futex_key *key2, struct futex_pi_state **ps, |
826 | + struct task_struct **exiting, int set_waiters) |
827 | { |
828 | struct futex_q *top_waiter = NULL; |
829 | u32 curval; |
830 | @@ -1738,7 +1804,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex, |
831 | */ |
832 | vpid = task_pid_vnr(top_waiter->task); |
833 | ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, |
834 | - set_waiters); |
835 | + exiting, set_waiters); |
836 | if (ret == 1) { |
837 | requeue_pi_wake_futex(top_waiter, key2, hb2); |
838 | return vpid; |
839 | @@ -1858,6 +1924,8 @@ retry_private: |
840 | } |
841 | |
842 | if (requeue_pi && (task_count - nr_wake < nr_requeue)) { |
843 | + struct task_struct *exiting = NULL; |
844 | + |
845 | /* |
846 | * Attempt to acquire uaddr2 and wake the top waiter. If we |
847 | * intend to requeue waiters, force setting the FUTEX_WAITERS |
848 | @@ -1865,7 +1933,8 @@ retry_private: |
849 | * faults rather in the requeue loop below. |
850 | */ |
851 | ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1, |
852 | - &key2, &pi_state, nr_requeue); |
853 | + &key2, &pi_state, |
854 | + &exiting, nr_requeue); |
855 | |
856 | /* |
857 | * At this point the top_waiter has either taken uaddr2 or is |
858 | @@ -1892,7 +1961,8 @@ retry_private: |
859 | * If that call succeeds then we have pi_state and an |
860 | * initial refcount on it. |
861 | */ |
862 | - ret = lookup_pi_state(ret, hb2, &key2, &pi_state); |
863 | + ret = lookup_pi_state(ret, hb2, &key2, |
864 | + &pi_state, &exiting); |
865 | } |
866 | |
867 | switch (ret) { |
868 | @@ -1910,17 +1980,24 @@ retry_private: |
869 | if (!ret) |
870 | goto retry; |
871 | goto out; |
872 | + case -EBUSY: |
873 | case -EAGAIN: |
874 | /* |
875 | * Two reasons for this: |
876 | - * - Owner is exiting and we just wait for the |
877 | + * - EBUSY: Owner is exiting and we just wait for the |
878 | * exit to complete. |
879 | - * - The user space value changed. |
880 | + * - EAGAIN: The user space value changed. |
881 | */ |
882 | double_unlock_hb(hb1, hb2); |
883 | hb_waiters_dec(hb2); |
884 | put_futex_key(&key2); |
885 | put_futex_key(&key1); |
886 | + /* |
887 | + * Handle the case where the owner is in the middle of |
888 | + * exiting. Wait for the exit to complete otherwise |
889 | + * this task might loop forever, aka. live lock. |
890 | + */ |
891 | + wait_for_owner_exiting(ret, exiting); |
892 | cond_resched(); |
893 | goto retry; |
894 | default: |
895 | @@ -2571,6 +2648,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, |
896 | ktime_t *time, int trylock) |
897 | { |
898 | struct hrtimer_sleeper timeout, *to = NULL; |
899 | + struct task_struct *exiting = NULL; |
900 | struct futex_hash_bucket *hb; |
901 | struct futex_q q = futex_q_init; |
902 | int res, ret; |
903 | @@ -2594,7 +2672,8 @@ retry: |
904 | retry_private: |
905 | hb = queue_lock(&q); |
906 | |
907 | - ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0); |
908 | + ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, |
909 | + &exiting, 0); |
910 | if (unlikely(ret)) { |
911 | /* |
912 | * Atomic work succeeded and we got the lock, |
913 | @@ -2607,15 +2686,22 @@ retry_private: |
914 | goto out_unlock_put_key; |
915 | case -EFAULT: |
916 | goto uaddr_faulted; |
917 | + case -EBUSY: |
918 | case -EAGAIN: |
919 | /* |
920 | * Two reasons for this: |
921 | - * - Task is exiting and we just wait for the |
922 | + * - EBUSY: Task is exiting and we just wait for the |
923 | * exit to complete. |
924 | - * - The user space value changed. |
925 | + * - EAGAIN: The user space value changed. |
926 | */ |
927 | queue_unlock(hb); |
928 | put_futex_key(&q.key); |
929 | + /* |
930 | + * Handle the case where the owner is in the middle of |
931 | + * exiting. Wait for the exit to complete otherwise |
932 | + * this task might loop forever, aka. live lock. |
933 | + */ |
934 | + wait_for_owner_exiting(ret, exiting); |
935 | cond_resched(); |
936 | goto retry; |
937 | default: |
938 | @@ -3123,7 +3209,7 @@ err_unlock: |
939 | * Process a futex-list entry, check whether it's owned by the |
940 | * dying task, and do notification if so: |
941 | */ |
942 | -int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) |
943 | +static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) |
944 | { |
945 | u32 uval, uninitialized_var(nval), mval; |
946 | |
947 | @@ -3198,7 +3284,7 @@ static inline int fetch_robust_entry(struct robust_list __user **entry, |
948 | * |
949 | * We silently return on any sign of list-walking problem. |
950 | */ |
951 | -void exit_robust_list(struct task_struct *curr) |
952 | +static void exit_robust_list(struct task_struct *curr) |
953 | { |
954 | struct robust_list_head __user *head = curr->robust_list; |
955 | struct robust_list __user *entry, *next_entry, *pending; |
956 | @@ -3261,6 +3347,114 @@ void exit_robust_list(struct task_struct *curr) |
957 | curr, pip); |
958 | } |
959 | |
960 | +static void futex_cleanup(struct task_struct *tsk) |
961 | +{ |
962 | + if (unlikely(tsk->robust_list)) { |
963 | + exit_robust_list(tsk); |
964 | + tsk->robust_list = NULL; |
965 | + } |
966 | + |
967 | +#ifdef CONFIG_COMPAT |
968 | + if (unlikely(tsk->compat_robust_list)) { |
969 | + compat_exit_robust_list(tsk); |
970 | + tsk->compat_robust_list = NULL; |
971 | + } |
972 | +#endif |
973 | + |
974 | + if (unlikely(!list_empty(&tsk->pi_state_list))) |
975 | + exit_pi_state_list(tsk); |
976 | +} |
977 | + |
978 | +/** |
979 | + * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD |
980 | + * @tsk: task to set the state on |
981 | + * |
982 | + * Set the futex exit state of the task lockless. The futex waiter code |
983 | + * observes that state when a task is exiting and loops until the task has |
984 | + * actually finished the futex cleanup. The worst case for this is that the |
985 | + * waiter runs through the wait loop until the state becomes visible. |
986 | + * |
987 | + * This is called from the recursive fault handling path in do_exit(). |
988 | + * |
989 | + * This is best effort. Either the futex exit code has run already or |
990 | + * not. If the OWNER_DIED bit has been set on the futex then the waiter can |
991 | + * take it over. If not, the problem is pushed back to user space. If the |
992 | + * futex exit code did not run yet, then an already queued waiter might |
993 | + * block forever, but there is nothing which can be done about that. |
994 | + */ |
995 | +void futex_exit_recursive(struct task_struct *tsk) |
996 | +{ |
997 | + /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */ |
998 | + if (tsk->futex_state == FUTEX_STATE_EXITING) |
999 | + mutex_unlock(&tsk->futex_exit_mutex); |
1000 | + tsk->futex_state = FUTEX_STATE_DEAD; |
1001 | +} |
1002 | + |
1003 | +static void futex_cleanup_begin(struct task_struct *tsk) |
1004 | +{ |
1005 | + /* |
1006 | + * Prevent various race issues against a concurrent incoming waiter |
1007 | + * including live locks by forcing the waiter to block on |
1008 | + * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in |
1009 | + * attach_to_pi_owner(). |
1010 | + */ |
1011 | + mutex_lock(&tsk->futex_exit_mutex); |
1012 | + |
1013 | + /* |
1014 | + * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock. |
1015 | + * |
1016 | + * This ensures that all subsequent checks of tsk->futex_state in |
1017 | + * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with |
1018 | + * tsk->pi_lock held. |
1019 | + * |
1020 | + * It guarantees also that a pi_state which was queued right before |
1021 | + * the state change under tsk->pi_lock by a concurrent waiter must |
1022 | + * be observed in exit_pi_state_list(). |
1023 | + */ |
1024 | + raw_spin_lock_irq(&tsk->pi_lock); |
1025 | + tsk->futex_state = FUTEX_STATE_EXITING; |
1026 | + raw_spin_unlock_irq(&tsk->pi_lock); |
1027 | +} |
1028 | + |
1029 | +static void futex_cleanup_end(struct task_struct *tsk, int state) |
1030 | +{ |
1031 | + /* |
1032 | + * Lockless store. The only side effect is that an observer might |
1033 | + * take another loop until it becomes visible. |
1034 | + */ |
1035 | + tsk->futex_state = state; |
1036 | + /* |
1037 | + * Drop the exit protection. This unblocks waiters which observed |
1038 | + * FUTEX_STATE_EXITING to reevaluate the state. |
1039 | + */ |
1040 | + mutex_unlock(&tsk->futex_exit_mutex); |
1041 | +} |
1042 | + |
1043 | +void futex_exec_release(struct task_struct *tsk) |
1044 | +{ |
1045 | + /* |
1046 | + * The state handling is done for consistency, but in the case of |
1047 | + * exec() there is no way to prevent futher damage as the PID stays |
1048 | + * the same. But for the unlikely and arguably buggy case that a |
1049 | + * futex is held on exec(), this provides at least as much state |
1050 | + * consistency protection which is possible. |
1051 | + */ |
1052 | + futex_cleanup_begin(tsk); |
1053 | + futex_cleanup(tsk); |
1054 | + /* |
1055 | + * Reset the state to FUTEX_STATE_OK. The task is alive and about |
1056 | + * exec a new binary. |
1057 | + */ |
1058 | + futex_cleanup_end(tsk, FUTEX_STATE_OK); |
1059 | +} |
1060 | + |
1061 | +void futex_exit_release(struct task_struct *tsk) |
1062 | +{ |
1063 | + futex_cleanup_begin(tsk); |
1064 | + futex_cleanup(tsk); |
1065 | + futex_cleanup_end(tsk, FUTEX_STATE_DEAD); |
1066 | +} |
1067 | + |
1068 | long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, |
1069 | u32 __user *uaddr2, u32 val2, u32 val3) |
1070 | { |
1071 | @@ -3354,6 +3548,192 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, |
1072 | return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); |
1073 | } |
1074 | |
1075 | +#ifdef CONFIG_COMPAT |
1076 | +/* |
1077 | + * Fetch a robust-list pointer. Bit 0 signals PI futexes: |
1078 | + */ |
1079 | +static inline int |
1080 | +compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, |
1081 | + compat_uptr_t __user *head, unsigned int *pi) |
1082 | +{ |
1083 | + if (get_user(*uentry, head)) |
1084 | + return -EFAULT; |
1085 | + |
1086 | + *entry = compat_ptr((*uentry) & ~1); |
1087 | + *pi = (unsigned int)(*uentry) & 1; |
1088 | + |
1089 | + return 0; |
1090 | +} |
1091 | + |
1092 | +static void __user *futex_uaddr(struct robust_list __user *entry, |
1093 | + compat_long_t futex_offset) |
1094 | +{ |
1095 | + compat_uptr_t base = ptr_to_compat(entry); |
1096 | + void __user *uaddr = compat_ptr(base + futex_offset); |
1097 | + |
1098 | + return uaddr; |
1099 | +} |
1100 | + |
1101 | +/* |
1102 | + * Walk curr->robust_list (very carefully, it's a userspace list!) |
1103 | + * and mark any locks found there dead, and notify any waiters. |
1104 | + * |
1105 | + * We silently return on any sign of list-walking problem. |
1106 | + */ |
1107 | +void compat_exit_robust_list(struct task_struct *curr) |
1108 | +{ |
1109 | + struct compat_robust_list_head __user *head = curr->compat_robust_list; |
1110 | + struct robust_list __user *entry, *next_entry, *pending; |
1111 | + unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; |
1112 | + unsigned int uninitialized_var(next_pi); |
1113 | + compat_uptr_t uentry, next_uentry, upending; |
1114 | + compat_long_t futex_offset; |
1115 | + int rc; |
1116 | + |
1117 | + if (!futex_cmpxchg_enabled) |
1118 | + return; |
1119 | + |
1120 | + /* |
1121 | + * Fetch the list head (which was registered earlier, via |
1122 | + * sys_set_robust_list()): |
1123 | + */ |
1124 | + if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) |
1125 | + return; |
1126 | + /* |
1127 | + * Fetch the relative futex offset: |
1128 | + */ |
1129 | + if (get_user(futex_offset, &head->futex_offset)) |
1130 | + return; |
1131 | + /* |
1132 | + * Fetch any possibly pending lock-add first, and handle it |
1133 | + * if it exists: |
1134 | + */ |
1135 | + if (compat_fetch_robust_entry(&upending, &pending, |
1136 | + &head->list_op_pending, &pip)) |
1137 | + return; |
1138 | + |
1139 | + next_entry = NULL; /* avoid warning with gcc */ |
1140 | + while (entry != (struct robust_list __user *) &head->list) { |
1141 | + /* |
1142 | + * Fetch the next entry in the list before calling |
1143 | + * handle_futex_death: |
1144 | + */ |
1145 | + rc = compat_fetch_robust_entry(&next_uentry, &next_entry, |
1146 | + (compat_uptr_t __user *)&entry->next, &next_pi); |
1147 | + /* |
1148 | + * A pending lock might already be on the list, so |
1149 | + * dont process it twice: |
1150 | + */ |
1151 | + if (entry != pending) { |
1152 | + void __user *uaddr = futex_uaddr(entry, futex_offset); |
1153 | + |
1154 | + if (handle_futex_death(uaddr, curr, pi)) |
1155 | + return; |
1156 | + } |
1157 | + if (rc) |
1158 | + return; |
1159 | + uentry = next_uentry; |
1160 | + entry = next_entry; |
1161 | + pi = next_pi; |
1162 | + /* |
1163 | + * Avoid excessively long or circular lists: |
1164 | + */ |
1165 | + if (!--limit) |
1166 | + break; |
1167 | + |
1168 | + cond_resched(); |
1169 | + } |
1170 | + if (pending) { |
1171 | + void __user *uaddr = futex_uaddr(pending, futex_offset); |
1172 | + |
1173 | + handle_futex_death(uaddr, curr, pip); |
1174 | + } |
1175 | +} |
1176 | + |
1177 | +COMPAT_SYSCALL_DEFINE2(set_robust_list, |
1178 | + struct compat_robust_list_head __user *, head, |
1179 | + compat_size_t, len) |
1180 | +{ |
1181 | + if (!futex_cmpxchg_enabled) |
1182 | + return -ENOSYS; |
1183 | + |
1184 | + if (unlikely(len != sizeof(*head))) |
1185 | + return -EINVAL; |
1186 | + |
1187 | + current->compat_robust_list = head; |
1188 | + |
1189 | + return 0; |
1190 | +} |
1191 | + |
1192 | +COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, |
1193 | + compat_uptr_t __user *, head_ptr, |
1194 | + compat_size_t __user *, len_ptr) |
1195 | +{ |
1196 | + struct compat_robust_list_head __user *head; |
1197 | + unsigned long ret; |
1198 | + struct task_struct *p; |
1199 | + |
1200 | + if (!futex_cmpxchg_enabled) |
1201 | + return -ENOSYS; |
1202 | + |
1203 | + rcu_read_lock(); |
1204 | + |
1205 | + ret = -ESRCH; |
1206 | + if (!pid) |
1207 | + p = current; |
1208 | + else { |
1209 | + p = find_task_by_vpid(pid); |
1210 | + if (!p) |
1211 | + goto err_unlock; |
1212 | + } |
1213 | + |
1214 | + ret = -EPERM; |
1215 | + if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) |
1216 | + goto err_unlock; |
1217 | + |
1218 | + head = p->compat_robust_list; |
1219 | + rcu_read_unlock(); |
1220 | + |
1221 | + if (put_user(sizeof(*head), len_ptr)) |
1222 | + return -EFAULT; |
1223 | + return put_user(ptr_to_compat(head), head_ptr); |
1224 | + |
1225 | +err_unlock: |
1226 | + rcu_read_unlock(); |
1227 | + |
1228 | + return ret; |
1229 | +} |
1230 | + |
1231 | +COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, |
1232 | + struct compat_timespec __user *, utime, u32 __user *, uaddr2, |
1233 | + u32, val3) |
1234 | +{ |
1235 | + struct timespec ts; |
1236 | + ktime_t t, *tp = NULL; |
1237 | + int val2 = 0; |
1238 | + int cmd = op & FUTEX_CMD_MASK; |
1239 | + |
1240 | + if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || |
1241 | + cmd == FUTEX_WAIT_BITSET || |
1242 | + cmd == FUTEX_WAIT_REQUEUE_PI)) { |
1243 | + if (compat_get_timespec(&ts, utime)) |
1244 | + return -EFAULT; |
1245 | + if (!timespec_valid(&ts)) |
1246 | + return -EINVAL; |
1247 | + |
1248 | + t = timespec_to_ktime(ts); |
1249 | + if (cmd == FUTEX_WAIT) |
1250 | + t = ktime_add_safe(ktime_get(), t); |
1251 | + tp = &t; |
1252 | + } |
1253 | + if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || |
1254 | + cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) |
1255 | + val2 = (int) (unsigned long) utime; |
1256 | + |
1257 | + return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); |
1258 | +} |
1259 | +#endif /* CONFIG_COMPAT */ |
1260 | + |
1261 | static void __init futex_detect_cmpxchg(void) |
1262 | { |
1263 | #ifndef CONFIG_HAVE_FUTEX_CMPXCHG |
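The futex.c diff above is the heart of the series: a task entering exit takes its own futex_exit_mutex and moves futex_state from FUTEX_STATE_OK through FUTEX_STATE_EXITING to FUTEX_STATE_DEAD, and a waiter that finds an owner mid-exit gets -EBUSY plus a referenced task pointer. The waiter must then drop every lock, block on that mutex via wait_for_owner_exiting() until the cleanup is finished, and retry. Condensed into a sketch where the assumed helpers stand in for futex_lock_pi_atomic() and the queue/key teardown around it:

    #include <linux/errno.h>
    #include <linux/sched.h>

    static int try_take_pi_futex(struct task_struct **exiting);    /* assumed */
    static void drop_all_locks(void);                              /* assumed */

    static int demo_lock_pi(void)
    {
        for (;;) {
            struct task_struct *exiting = NULL;
            int ret = try_take_pi_futex(&exiting);

            if (ret != -EBUSY && ret != -EAGAIN)
                return ret;

            drop_all_locks();
            /*
             * For -EBUSY this blocks on the owner's futex_exit_mutex
             * until its cleanup is done; that is what breaks the
             * waiter/exiter live lock. For -EAGAIN it is a no-op.
             */
            wait_for_owner_exiting(ret, exiting);
            cond_resched();
        }
    }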
1264 | diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c |
1265 | deleted file mode 100644 |
1266 | index 4ae3232e7a28a..0000000000000 |
1267 | --- a/kernel/futex_compat.c |
1268 | +++ /dev/null |
1269 | @@ -1,201 +0,0 @@ |
1270 | -/* |
1271 | - * linux/kernel/futex_compat.c |
1272 | - * |
1273 | - * Futex compatibililty routines. |
1274 | - * |
1275 | - * Copyright 2006, Red Hat, Inc., Ingo Molnar |
1276 | - */ |
1277 | - |
1278 | -#include <linux/linkage.h> |
1279 | -#include <linux/compat.h> |
1280 | -#include <linux/nsproxy.h> |
1281 | -#include <linux/futex.h> |
1282 | -#include <linux/ptrace.h> |
1283 | -#include <linux/syscalls.h> |
1284 | - |
1285 | -#include <asm/uaccess.h> |
1286 | - |
1287 | - |
1288 | -/* |
1289 | - * Fetch a robust-list pointer. Bit 0 signals PI futexes: |
1290 | - */ |
1291 | -static inline int |
1292 | -fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, |
1293 | - compat_uptr_t __user *head, unsigned int *pi) |
1294 | -{ |
1295 | - if (get_user(*uentry, head)) |
1296 | - return -EFAULT; |
1297 | - |
1298 | - *entry = compat_ptr((*uentry) & ~1); |
1299 | - *pi = (unsigned int)(*uentry) & 1; |
1300 | - |
1301 | - return 0; |
1302 | -} |
1303 | - |
1304 | -static void __user *futex_uaddr(struct robust_list __user *entry, |
1305 | - compat_long_t futex_offset) |
1306 | -{ |
1307 | - compat_uptr_t base = ptr_to_compat(entry); |
1308 | - void __user *uaddr = compat_ptr(base + futex_offset); |
1309 | - |
1310 | - return uaddr; |
1311 | -} |
1312 | - |
1313 | -/* |
1314 | - * Walk curr->robust_list (very carefully, it's a userspace list!) |
1315 | - * and mark any locks found there dead, and notify any waiters. |
1316 | - * |
1317 | - * We silently return on any sign of list-walking problem. |
1318 | - */ |
1319 | -void compat_exit_robust_list(struct task_struct *curr) |
1320 | -{ |
1321 | - struct compat_robust_list_head __user *head = curr->compat_robust_list; |
1322 | - struct robust_list __user *entry, *next_entry, *pending; |
1323 | - unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; |
1324 | - unsigned int uninitialized_var(next_pi); |
1325 | - compat_uptr_t uentry, next_uentry, upending; |
1326 | - compat_long_t futex_offset; |
1327 | - int rc; |
1328 | - |
1329 | - if (!futex_cmpxchg_enabled) |
1330 | - return; |
1331 | - |
1332 | - /* |
1333 | - * Fetch the list head (which was registered earlier, via |
1334 | - * sys_set_robust_list()): |
1335 | - */ |
1336 | - if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) |
1337 | - return; |
1338 | - /* |
1339 | - * Fetch the relative futex offset: |
1340 | - */ |
1341 | - if (get_user(futex_offset, &head->futex_offset)) |
1342 | - return; |
1343 | - /* |
1344 | - * Fetch any possibly pending lock-add first, and handle it |
1345 | - * if it exists: |
1346 | - */ |
1347 | - if (fetch_robust_entry(&upending, &pending, |
1348 | - &head->list_op_pending, &pip)) |
1349 | - return; |
1350 | - |
1351 | - next_entry = NULL; /* avoid warning with gcc */ |
1352 | - while (entry != (struct robust_list __user *) &head->list) { |
1353 | - /* |
1354 | - * Fetch the next entry in the list before calling |
1355 | - * handle_futex_death: |
1356 | - */ |
1357 | - rc = fetch_robust_entry(&next_uentry, &next_entry, |
1358 | - (compat_uptr_t __user *)&entry->next, &next_pi); |
1359 | - /* |
1360 | - * A pending lock might already be on the list, so |
1361 | - * dont process it twice: |
1362 | - */ |
1363 | - if (entry != pending) { |
1364 | - void __user *uaddr = futex_uaddr(entry, futex_offset); |
1365 | - |
1366 | - if (handle_futex_death(uaddr, curr, pi)) |
1367 | - return; |
1368 | - } |
1369 | - if (rc) |
1370 | - return; |
1371 | - uentry = next_uentry; |
1372 | - entry = next_entry; |
1373 | - pi = next_pi; |
1374 | - /* |
1375 | - * Avoid excessively long or circular lists: |
1376 | - */ |
1377 | - if (!--limit) |
1378 | - break; |
1379 | - |
1380 | - cond_resched(); |
1381 | - } |
1382 | - if (pending) { |
1383 | - void __user *uaddr = futex_uaddr(pending, futex_offset); |
1384 | - |
1385 | - handle_futex_death(uaddr, curr, pip); |
1386 | - } |
1387 | -} |
1388 | - |
1389 | -COMPAT_SYSCALL_DEFINE2(set_robust_list, |
1390 | - struct compat_robust_list_head __user *, head, |
1391 | - compat_size_t, len) |
1392 | -{ |
1393 | - if (!futex_cmpxchg_enabled) |
1394 | - return -ENOSYS; |
1395 | - |
1396 | - if (unlikely(len != sizeof(*head))) |
1397 | - return -EINVAL; |
1398 | - |
1399 | - current->compat_robust_list = head; |
1400 | - |
1401 | - return 0; |
1402 | -} |
1403 | - |
1404 | -COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, |
1405 | - compat_uptr_t __user *, head_ptr, |
1406 | - compat_size_t __user *, len_ptr) |
1407 | -{ |
1408 | - struct compat_robust_list_head __user *head; |
1409 | - unsigned long ret; |
1410 | - struct task_struct *p; |
1411 | - |
1412 | - if (!futex_cmpxchg_enabled) |
1413 | - return -ENOSYS; |
1414 | - |
1415 | - rcu_read_lock(); |
1416 | - |
1417 | - ret = -ESRCH; |
1418 | - if (!pid) |
1419 | - p = current; |
1420 | - else { |
1421 | - p = find_task_by_vpid(pid); |
1422 | - if (!p) |
1423 | - goto err_unlock; |
1424 | - } |
1425 | - |
1426 | - ret = -EPERM; |
1427 | - if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) |
1428 | - goto err_unlock; |
1429 | - |
1430 | - head = p->compat_robust_list; |
1431 | - rcu_read_unlock(); |
1432 | - |
1433 | - if (put_user(sizeof(*head), len_ptr)) |
1434 | - return -EFAULT; |
1435 | - return put_user(ptr_to_compat(head), head_ptr); |
1436 | - |
1437 | -err_unlock: |
1438 | - rcu_read_unlock(); |
1439 | - |
1440 | - return ret; |
1441 | -} |
1442 | - |
1443 | -COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, |
1444 | - struct compat_timespec __user *, utime, u32 __user *, uaddr2, |
1445 | - u32, val3) |
1446 | -{ |
1447 | - struct timespec ts; |
1448 | - ktime_t t, *tp = NULL; |
1449 | - int val2 = 0; |
1450 | - int cmd = op & FUTEX_CMD_MASK; |
1451 | - |
1452 | - if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || |
1453 | - cmd == FUTEX_WAIT_BITSET || |
1454 | - cmd == FUTEX_WAIT_REQUEUE_PI)) { |
1455 | - if (compat_get_timespec(&ts, utime)) |
1456 | - return -EFAULT; |
1457 | - if (!timespec_valid(&ts)) |
1458 | - return -EINVAL; |
1459 | - |
1460 | - t = timespec_to_ktime(ts); |
1461 | - if (cmd == FUTEX_WAIT) |
1462 | - t = ktime_add_safe(ktime_get(), t); |
1463 | - tp = &t; |
1464 | - } |
1465 | - if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || |
1466 | - cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) |
1467 | - val2 = (int) (unsigned long) utime; |
1468 | - |
1469 | - return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); |
1470 | -} |
1471 | diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h |
1472 | index 0b0de3030e0dc..9c20c53f6729e 100644 |
1473 | --- a/net/mac80211/ieee80211_i.h |
1474 | +++ b/net/mac80211/ieee80211_i.h |
1475 | @@ -1046,6 +1046,7 @@ enum queue_stop_reason { |
1476 | IEEE80211_QUEUE_STOP_REASON_FLUSH, |
1477 | IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN, |
1478 | IEEE80211_QUEUE_STOP_REASON_RESERVE_TID, |
1479 | + IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE, |
1480 | |
1481 | IEEE80211_QUEUE_STOP_REASONS, |
1482 | }; |
1483 | diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c |
1484 | index ad03331ee7855..7d43e0085cfc7 100644 |
1485 | --- a/net/mac80211/iface.c |
1486 | +++ b/net/mac80211/iface.c |
1487 | @@ -1577,6 +1577,10 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, |
1488 | if (ret) |
1489 | return ret; |
1490 | |
1491 | + ieee80211_stop_vif_queues(local, sdata, |
1492 | + IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE); |
1493 | + synchronize_net(); |
1494 | + |
1495 | ieee80211_do_stop(sdata, false); |
1496 | |
1497 | ieee80211_teardown_sdata(sdata); |
1498 | @@ -1597,6 +1601,8 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, |
1499 | err = ieee80211_do_open(&sdata->wdev, false); |
1500 | WARN(err, "type change: do_open returned %d", err); |
1501 | |
1502 | + ieee80211_wake_vif_queues(local, sdata, |
1503 | + IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE); |
1504 | return ret; |
1505 | } |
1506 | |
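The iface.c hunks bracket the teardown and re-open of an interface type change with a queue stop and synchronize_net(): stopping the vif queues keeps new frames out, and the RCU grace period guarantees every transmit path that saw the queues running has finished before ieee80211_do_stop() tears the interface down. The quiesce/reconfigure/resume shape, with assumed helpers:

    #include <linux/netdevice.h>

    static void demo_stop_queues(struct net_device *dev);    /* assumed */
    static void demo_wake_queues(struct net_device *dev);    /* assumed */
    static int demo_reconfigure(struct net_device *dev);     /* assumed */

    static int demo_change_type(struct net_device *dev)
    {
        int err;

        demo_stop_queues(dev);    /* no new packets enter the TX path */
        synchronize_net();        /* RCU grace period: in-flight TX done */

        err = demo_reconfigure(dev);

        demo_wake_queues(dev);    /* resume even if reconfigure failed */
        return err;
    }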
1507 | diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c |
1508 | index b9dd4e9604261..81adbfaffe38f 100644 |
1509 | --- a/net/netfilter/nft_dynset.c |
1510 | +++ b/net/netfilter/nft_dynset.c |
1511 | @@ -210,8 +210,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx, |
1512 | nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_EXPR, |
1513 | priv->expr->ops->size); |
1514 | if (set->flags & NFT_SET_TIMEOUT) { |
1515 | - if (timeout || set->timeout) |
1516 | + if (timeout || set->timeout) { |
1517 | + nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT); |
1518 | nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION); |
1519 | + } |
1520 | } |
1521 | |
1522 | priv->timeout = timeout; |
1523 | diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c |
1524 | index 0afae9f73ebb4..f326a6ea35fc7 100644 |
1525 | --- a/net/nfc/netlink.c |
1526 | +++ b/net/nfc/netlink.c |
1527 | @@ -887,6 +887,7 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info) |
1528 | |
1529 | if (!dev->polling) { |
1530 | device_unlock(&dev->dev); |
1531 | + nfc_put_device(dev); |
1532 | return -EINVAL; |
1533 | } |
1534 | |
1535 | diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c |
1536 | index 574af981806fa..92a3cfae4de87 100644 |
1537 | --- a/net/nfc/rawsock.c |
1538 | +++ b/net/nfc/rawsock.c |
1539 | @@ -117,7 +117,7 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, |
1540 | if (addr->target_idx > dev->target_next_idx - 1 || |
1541 | addr->target_idx < dev->target_next_idx - dev->n_targets) { |
1542 | rc = -EINVAL; |
1543 | - goto error; |
1544 | + goto put_dev; |
1545 | } |
1546 | |
1547 | rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol); |
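Both NFC fixes above restore reference-count balance: nfc_genl_stop_poll() returned -EINVAL without dropping the device reference it had taken, and rawsock_connect() jumped to a label that skipped nfc_put_device(). A sketch of the goto-unwind idiom they restore, with demo names standing in for the nfc_get_device()/nfc_put_device() pair:

    #include <linux/errno.h>
    #include <linux/types.h>

    struct demo_dev { bool ready; };

    static struct demo_dev *demo_get(unsigned int idx);    /* takes a reference */
    static void demo_put(struct demo_dev *dev);            /* drops it */
    static int demo_activate(struct demo_dev *dev);        /* assumed */

    static int demo_op(unsigned int idx)
    {
        struct demo_dev *dev = demo_get(idx);
        int rc;

        if (!dev)
            return -ENODEV;

        if (!dev->ready) {
            rc = -EINVAL;
            goto put_dev;    /* the buggy paths returned here directly */
        }

        rc = demo_activate(dev);
    put_dev:
        demo_put(dev);       /* balanced on success and every error */
        return rc;
    }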
1548 | diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c |
1549 | index 4bf0296a7c433..0e809ae17f381 100644 |
1550 | --- a/net/wireless/wext-core.c |
1551 | +++ b/net/wireless/wext-core.c |
1552 | @@ -898,8 +898,9 @@ out: |
1553 | int call_commit_handler(struct net_device *dev) |
1554 | { |
1555 | #ifdef CONFIG_WIRELESS_EXT |
1556 | - if ((netif_running(dev)) && |
1557 | - (dev->wireless_handlers->standard[0] != NULL)) |
1558 | + if (netif_running(dev) && |
1559 | + dev->wireless_handlers && |
1560 | + dev->wireless_handlers->standard[0]) |
1561 | /* Call the commit handler on the driver */ |
1562 | return dev->wireless_handlers->standard[0](dev, NULL, |
1563 | NULL, NULL); |
1564 | diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c |
1565 | index 1e87639f2c270..d613bf77cc0f9 100644 |
1566 | --- a/net/xfrm/xfrm_input.c |
1567 | +++ b/net/xfrm/xfrm_input.c |
1568 | @@ -315,7 +315,7 @@ resume: |
1569 | /* only the first xfrm gets the encap type */ |
1570 | encap_type = 0; |
1571 | |
1572 | - if (async && x->repl->recheck(x, skb, seq)) { |
1573 | + if (x->repl->recheck(x, skb, seq)) { |
1574 | XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); |
1575 | goto drop_unlock; |
1576 | } |