Contents of /trunk/kernel-alx/patches-4.4/0151-4.4.52-all-fixes.patch
Parent Directory | Revision Log
Revision 2887 -
(show annotations)
(download)
Mon Mar 27 13:49:24 2017 UTC (7 years, 6 months ago) by niro
File size: 24573 byte(s)
linux-4.4.52
1 | diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt |
2 | index c360f80c3473..ca64ca566099 100644 |
3 | --- a/Documentation/kernel-parameters.txt |
4 | +++ b/Documentation/kernel-parameters.txt |
5 | @@ -1255,6 +1255,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. |
6 | When zero, profiling data is discarded and associated |
7 | debugfs files are removed at module unload time. |
8 | |
9 | + goldfish [X86] Enable the goldfish android emulator platform. |
10 | + Don't use this when you are not running on the |
11 | + android emulator |
12 | + |
13 | gpt [EFI] Forces disk with valid GPT signature but |
14 | invalid Protective MBR to be treated as GPT. If the |
15 | primary GPT is corrupted, it enables the backup/alternate |
16 | diff --git a/Makefile b/Makefile |
17 | index 117357188f01..671e183bd507 100644 |
18 | --- a/Makefile |
19 | +++ b/Makefile |
20 | @@ -1,6 +1,6 @@ |
21 | VERSION = 4 |
22 | PATCHLEVEL = 4 |
23 | -SUBLEVEL = 51 |
24 | +SUBLEVEL = 52 |
25 | EXTRAVERSION = |
26 | NAME = Blurry Fish Butt |
27 | |
28 | diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
29 | index bb620df05d0d..64f60a48def1 100644 |
30 | --- a/arch/x86/kvm/vmx.c |
31 | +++ b/arch/x86/kvm/vmx.c |
32 | @@ -4867,6 +4867,12 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) |
33 | if (vmx_xsaves_supported()) |
34 | vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP); |
35 | |
36 | + if (enable_pml) { |
37 | + ASSERT(vmx->pml_pg); |
38 | + vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); |
39 | + vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); |
40 | + } |
41 | + |
42 | return 0; |
43 | } |
44 | |
45 | @@ -7839,22 +7845,6 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) |
46 | *info2 = vmcs_read32(VM_EXIT_INTR_INFO); |
47 | } |
48 | |
49 | -static int vmx_create_pml_buffer(struct vcpu_vmx *vmx) |
50 | -{ |
51 | - struct page *pml_pg; |
52 | - |
53 | - pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO); |
54 | - if (!pml_pg) |
55 | - return -ENOMEM; |
56 | - |
57 | - vmx->pml_pg = pml_pg; |
58 | - |
59 | - vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); |
60 | - vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); |
61 | - |
62 | - return 0; |
63 | -} |
64 | - |
65 | static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx) |
66 | { |
67 | if (vmx->pml_pg) { |
68 | @@ -8789,14 +8779,26 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) |
69 | if (err) |
70 | goto free_vcpu; |
71 | |
72 | + err = -ENOMEM; |
73 | + |
74 | + /* |
75 | + * If PML is turned on, failure on enabling PML just results in failure |
76 | + * of creating the vcpu, therefore we can simplify PML logic (by |
77 | + * avoiding dealing with cases, such as enabling PML partially on vcpus |
78 | + * for the guest, etc. |
79 | + */ |
80 | + if (enable_pml) { |
81 | + vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO); |
82 | + if (!vmx->pml_pg) |
83 | + goto uninit_vcpu; |
84 | + } |
85 | + |
86 | vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); |
87 | BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0]) |
88 | > PAGE_SIZE); |
89 | |
90 | - err = -ENOMEM; |
91 | - if (!vmx->guest_msrs) { |
92 | - goto uninit_vcpu; |
93 | - } |
94 | + if (!vmx->guest_msrs) |
95 | + goto free_pml; |
96 | |
97 | vmx->loaded_vmcs = &vmx->vmcs01; |
98 | vmx->loaded_vmcs->vmcs = alloc_vmcs(); |
99 | @@ -8840,18 +8842,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) |
100 | vmx->nested.current_vmptr = -1ull; |
101 | vmx->nested.current_vmcs12 = NULL; |
102 | |
103 | - /* |
104 | - * If PML is turned on, failure on enabling PML just results in failure |
105 | - * of creating the vcpu, therefore we can simplify PML logic (by |
106 | - * avoiding dealing with cases, such as enabling PML partially on vcpus |
107 | - * for the guest, etc. |
108 | - */ |
109 | - if (enable_pml) { |
110 | - err = vmx_create_pml_buffer(vmx); |
111 | - if (err) |
112 | - goto free_vmcs; |
113 | - } |
114 | - |
115 | return &vmx->vcpu; |
116 | |
117 | free_vmcs: |
118 | @@ -8859,6 +8849,8 @@ free_vmcs: |
119 | free_loaded_vmcs(vmx->loaded_vmcs); |
120 | free_msrs: |
121 | kfree(vmx->guest_msrs); |
122 | +free_pml: |
123 | + vmx_destroy_pml_buffer(vmx); |
124 | uninit_vcpu: |
125 | kvm_vcpu_uninit(&vmx->vcpu); |
126 | free_vcpu: |
127 | diff --git a/arch/x86/platform/goldfish/goldfish.c b/arch/x86/platform/goldfish/goldfish.c |
128 | index 1693107a518e..0d17c0aafeb1 100644 |
129 | --- a/arch/x86/platform/goldfish/goldfish.c |
130 | +++ b/arch/x86/platform/goldfish/goldfish.c |
131 | @@ -42,10 +42,22 @@ static struct resource goldfish_pdev_bus_resources[] = { |
132 | } |
133 | }; |
134 | |
135 | +static bool goldfish_enable __initdata; |
136 | + |
137 | +static int __init goldfish_setup(char *str) |
138 | +{ |
139 | + goldfish_enable = true; |
140 | + return 0; |
141 | +} |
142 | +__setup("goldfish", goldfish_setup); |
143 | + |
144 | static int __init goldfish_init(void) |
145 | { |
146 | + if (!goldfish_enable) |
147 | + return -ENODEV; |
148 | + |
149 | platform_device_register_simple("goldfish_pdev_bus", -1, |
150 | - goldfish_pdev_bus_resources, 2); |
151 | + goldfish_pdev_bus_resources, 2); |
152 | return 0; |
153 | } |
154 | device_initcall(goldfish_init); |
155 | diff --git a/block/blk-mq.c b/block/blk-mq.c |
156 | index 6cfc6b200366..d8d63c38bf29 100644 |
157 | --- a/block/blk-mq.c |
158 | +++ b/block/blk-mq.c |
159 | @@ -1259,12 +1259,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) |
160 | |
161 | blk_queue_split(q, &bio, q->bio_split); |
162 | |
163 | - if (!is_flush_fua && !blk_queue_nomerges(q)) { |
164 | - if (blk_attempt_plug_merge(q, bio, &request_count, |
165 | - &same_queue_rq)) |
166 | - return BLK_QC_T_NONE; |
167 | - } else |
168 | - request_count = blk_plug_queued_count(q); |
169 | + if (!is_flush_fua && !blk_queue_nomerges(q) && |
170 | + blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq)) |
171 | + return BLK_QC_T_NONE; |
172 | |
173 | rq = blk_mq_map_request(q, bio, &data); |
174 | if (unlikely(!rq)) |
175 | @@ -1355,9 +1352,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) |
176 | |
177 | blk_queue_split(q, &bio, q->bio_split); |
178 | |
179 | - if (!is_flush_fua && !blk_queue_nomerges(q) && |
180 | - blk_attempt_plug_merge(q, bio, &request_count, NULL)) |
181 | - return BLK_QC_T_NONE; |
182 | + if (!is_flush_fua && !blk_queue_nomerges(q)) { |
183 | + if (blk_attempt_plug_merge(q, bio, &request_count, NULL)) |
184 | + return BLK_QC_T_NONE; |
185 | + } else |
186 | + request_count = blk_plug_queued_count(q); |
187 | |
188 | rq = blk_mq_map_request(q, bio, &data); |
189 | if (unlikely(!rq)) |
190 | diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c |
191 | index aac1ed3f7bb4..ad8390d2997b 100644 |
192 | --- a/drivers/net/wireless/realtek/rtlwifi/usb.c |
193 | +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c |
194 | @@ -834,12 +834,30 @@ static void rtl_usb_stop(struct ieee80211_hw *hw) |
195 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
196 | struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); |
197 | struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); |
198 | + struct urb *urb; |
199 | |
200 | /* should after adapter start and interrupt enable. */ |
201 | set_hal_stop(rtlhal); |
202 | cancel_work_sync(&rtlpriv->works.fill_h2c_cmd); |
203 | /* Enable software */ |
204 | SET_USB_STOP(rtlusb); |
205 | + |
206 | + /* free pre-allocated URBs from rtl_usb_start() */ |
207 | + usb_kill_anchored_urbs(&rtlusb->rx_submitted); |
208 | + |
209 | + tasklet_kill(&rtlusb->rx_work_tasklet); |
210 | + cancel_work_sync(&rtlpriv->works.lps_change_work); |
211 | + |
212 | + flush_workqueue(rtlpriv->works.rtl_wq); |
213 | + |
214 | + skb_queue_purge(&rtlusb->rx_queue); |
215 | + |
216 | + while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) { |
217 | + usb_free_coherent(urb->dev, urb->transfer_buffer_length, |
218 | + urb->transfer_buffer, urb->transfer_dma); |
219 | + usb_free_urb(urb); |
220 | + } |
221 | + |
222 | rtlpriv->cfg->ops->hw_disable(hw); |
223 | } |
224 | |
225 | @@ -1073,6 +1091,7 @@ int rtl_usb_probe(struct usb_interface *intf, |
226 | return -ENOMEM; |
227 | } |
228 | rtlpriv = hw->priv; |
229 | + rtlpriv->hw = hw; |
230 | rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32), |
231 | GFP_KERNEL); |
232 | if (!rtlpriv->usb_data) |
233 | diff --git a/drivers/platform/goldfish/pdev_bus.c b/drivers/platform/goldfish/pdev_bus.c |
234 | index 1f52462f4cdd..dd9ea463c2a4 100644 |
235 | --- a/drivers/platform/goldfish/pdev_bus.c |
236 | +++ b/drivers/platform/goldfish/pdev_bus.c |
237 | @@ -157,23 +157,26 @@ static int goldfish_new_pdev(void) |
238 | static irqreturn_t goldfish_pdev_bus_interrupt(int irq, void *dev_id) |
239 | { |
240 | irqreturn_t ret = IRQ_NONE; |
241 | + |
242 | while (1) { |
243 | u32 op = readl(pdev_bus_base + PDEV_BUS_OP); |
244 | - switch (op) { |
245 | - case PDEV_BUS_OP_DONE: |
246 | - return IRQ_NONE; |
247 | |
248 | + switch (op) { |
249 | case PDEV_BUS_OP_REMOVE_DEV: |
250 | goldfish_pdev_remove(); |
251 | + ret = IRQ_HANDLED; |
252 | break; |
253 | |
254 | case PDEV_BUS_OP_ADD_DEV: |
255 | goldfish_new_pdev(); |
256 | + ret = IRQ_HANDLED; |
257 | break; |
258 | + |
259 | + case PDEV_BUS_OP_DONE: |
260 | + default: |
261 | + return ret; |
262 | } |
263 | - ret = IRQ_HANDLED; |
264 | } |
265 | - return ret; |
266 | } |
267 | |
268 | static int goldfish_pdev_bus_probe(struct platform_device *pdev) |
269 | diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c |
270 | index 5836751b8203..9bb934ed2a7a 100644 |
271 | --- a/drivers/rtc/interface.c |
272 | +++ b/drivers/rtc/interface.c |
273 | @@ -748,9 +748,23 @@ EXPORT_SYMBOL_GPL(rtc_irq_set_freq); |
274 | */ |
275 | static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) |
276 | { |
277 | + struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue); |
278 | + struct rtc_time tm; |
279 | + ktime_t now; |
280 | + |
281 | timer->enabled = 1; |
282 | + __rtc_read_time(rtc, &tm); |
283 | + now = rtc_tm_to_ktime(tm); |
284 | + |
285 | + /* Skip over expired timers */ |
286 | + while (next) { |
287 | + if (next->expires.tv64 >= now.tv64) |
288 | + break; |
289 | + next = timerqueue_iterate_next(next); |
290 | + } |
291 | + |
292 | timerqueue_add(&rtc->timerqueue, &timer->node); |
293 | - if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) { |
294 | + if (!next) { |
295 | struct rtc_wkalrm alarm; |
296 | int err; |
297 | alarm.time = rtc_ktime_to_tm(timer->node.expires); |
298 | diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c |
299 | index e1de4944e0ce..8c4707d5778e 100644 |
300 | --- a/drivers/tty/serial/msm_serial.c |
301 | +++ b/drivers/tty/serial/msm_serial.c |
302 | @@ -1615,6 +1615,7 @@ static const struct of_device_id msm_match_table[] = { |
303 | { .compatible = "qcom,msm-uartdm" }, |
304 | {} |
305 | }; |
306 | +MODULE_DEVICE_TABLE(of, msm_match_table); |
307 | |
308 | static struct platform_driver msm_platform_driver = { |
309 | .remove = msm_serial_remove, |
310 | diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c |
311 | index 5a048b7b92e8..2949289bb3c5 100644 |
312 | --- a/drivers/usb/chipidea/ci_hdrc_imx.c |
313 | +++ b/drivers/usb/chipidea/ci_hdrc_imx.c |
314 | @@ -244,7 +244,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) |
315 | struct ci_hdrc_platform_data pdata = { |
316 | .name = dev_name(&pdev->dev), |
317 | .capoffset = DEF_CAPOFFSET, |
318 | - .flags = CI_HDRC_SET_NON_ZERO_TTHA, |
319 | }; |
320 | int ret; |
321 | const struct of_device_id *of_id; |
322 | diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c |
323 | index 1532cde8a437..7812052dc700 100644 |
324 | --- a/drivers/usb/serial/ark3116.c |
325 | +++ b/drivers/usb/serial/ark3116.c |
326 | @@ -99,10 +99,17 @@ static int ark3116_read_reg(struct usb_serial *serial, |
327 | usb_rcvctrlpipe(serial->dev, 0), |
328 | 0xfe, 0xc0, 0, reg, |
329 | buf, 1, ARK_TIMEOUT); |
330 | - if (result < 0) |
331 | + if (result < 1) { |
332 | + dev_err(&serial->interface->dev, |
333 | + "failed to read register %u: %d\n", |
334 | + reg, result); |
335 | + if (result >= 0) |
336 | + result = -EIO; |
337 | + |
338 | return result; |
339 | - else |
340 | - return buf[0]; |
341 | + } |
342 | + |
343 | + return buf[0]; |
344 | } |
345 | |
346 | static inline int calc_divisor(int bps) |
347 | diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c |
348 | index fe7452f0f38a..33cec50978b8 100644 |
349 | --- a/drivers/usb/serial/cp210x.c |
350 | +++ b/drivers/usb/serial/cp210x.c |
351 | @@ -171,6 +171,8 @@ static const struct usb_device_id id_table[] = { |
352 | { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ |
353 | { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ |
354 | { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ |
355 | + { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ |
356 | + { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ |
357 | { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ |
358 | { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ |
359 | { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ |
360 | diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c |
361 | index d3d6ec455151..19a98116c2ab 100644 |
362 | --- a/drivers/usb/serial/ftdi_sio.c |
363 | +++ b/drivers/usb/serial/ftdi_sio.c |
364 | @@ -1807,8 +1807,6 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port) |
365 | |
366 | mutex_init(&priv->cfg_lock); |
367 | |
368 | - priv->flags = ASYNC_LOW_LATENCY; |
369 | - |
370 | if (quirk && quirk->port_probe) |
371 | quirk->port_probe(priv); |
372 | |
373 | @@ -2072,6 +2070,20 @@ static int ftdi_process_packet(struct usb_serial_port *port, |
374 | priv->prev_status = status; |
375 | } |
376 | |
377 | + /* save if the transmitter is empty or not */ |
378 | + if (packet[1] & FTDI_RS_TEMT) |
379 | + priv->transmit_empty = 1; |
380 | + else |
381 | + priv->transmit_empty = 0; |
382 | + |
383 | + len -= 2; |
384 | + if (!len) |
385 | + return 0; /* status only */ |
386 | + |
387 | + /* |
388 | + * Break and error status must only be processed for packets with |
389 | + * data payload to avoid over-reporting. |
390 | + */ |
391 | flag = TTY_NORMAL; |
392 | if (packet[1] & FTDI_RS_ERR_MASK) { |
393 | /* Break takes precedence over parity, which takes precedence |
394 | @@ -2094,15 +2106,6 @@ static int ftdi_process_packet(struct usb_serial_port *port, |
395 | } |
396 | } |
397 | |
398 | - /* save if the transmitter is empty or not */ |
399 | - if (packet[1] & FTDI_RS_TEMT) |
400 | - priv->transmit_empty = 1; |
401 | - else |
402 | - priv->transmit_empty = 0; |
403 | - |
404 | - len -= 2; |
405 | - if (!len) |
406 | - return 0; /* status only */ |
407 | port->icount.rx += len; |
408 | ch = packet + 2; |
409 | |
410 | @@ -2433,8 +2436,12 @@ static int ftdi_get_modem_status(struct usb_serial_port *port, |
411 | FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE, |
412 | 0, priv->interface, |
413 | buf, len, WDR_TIMEOUT); |
414 | - if (ret < 0) { |
415 | + |
416 | + /* NOTE: We allow short responses and handle that below. */ |
417 | + if (ret < 1) { |
418 | dev_err(&port->dev, "failed to get modem status: %d\n", ret); |
419 | + if (ret >= 0) |
420 | + ret = -EIO; |
421 | ret = usb_translate_errors(ret); |
422 | goto out; |
423 | } |
424 | diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c |
425 | index 97ea52b5cfd4..d17685cc00c9 100644 |
426 | --- a/drivers/usb/serial/mos7840.c |
427 | +++ b/drivers/usb/serial/mos7840.c |
428 | @@ -1024,6 +1024,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port) |
429 | * (can't set it up in mos7840_startup as the structures * |
430 | * were not set up at that time.) */ |
431 | if (port0->open_ports == 1) { |
432 | + /* FIXME: Buffer never NULL, so URB is not submitted. */ |
433 | if (serial->port[0]->interrupt_in_buffer == NULL) { |
434 | /* set up interrupt urb */ |
435 | usb_fill_int_urb(serial->port[0]->interrupt_in_urb, |
436 | @@ -2119,7 +2120,8 @@ static int mos7840_calc_num_ports(struct usb_serial *serial) |
437 | static int mos7840_attach(struct usb_serial *serial) |
438 | { |
439 | if (serial->num_bulk_in < serial->num_ports || |
440 | - serial->num_bulk_out < serial->num_ports) { |
441 | + serial->num_bulk_out < serial->num_ports || |
442 | + serial->num_interrupt_in < 1) { |
443 | dev_err(&serial->interface->dev, "missing endpoints\n"); |
444 | return -ENODEV; |
445 | } |
446 | diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c |
447 | index 4b7bfb394a32..64bf258e7e00 100644 |
448 | --- a/drivers/usb/serial/opticon.c |
449 | +++ b/drivers/usb/serial/opticon.c |
450 | @@ -142,7 +142,7 @@ static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port) |
451 | usb_clear_halt(port->serial->dev, port->read_urb->pipe); |
452 | |
453 | res = usb_serial_generic_open(tty, port); |
454 | - if (!res) |
455 | + if (res) |
456 | return res; |
457 | |
458 | /* Request CTS line state, sometimes during opening the current |
459 | diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c |
460 | index 475e6c31b266..ddfd787c461c 100644 |
461 | --- a/drivers/usb/serial/spcp8x5.c |
462 | +++ b/drivers/usb/serial/spcp8x5.c |
463 | @@ -232,11 +232,17 @@ static int spcp8x5_get_msr(struct usb_serial_port *port, u8 *status) |
464 | ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), |
465 | GET_UART_STATUS, GET_UART_STATUS_TYPE, |
466 | 0, GET_UART_STATUS_MSR, buf, 1, 100); |
467 | - if (ret < 0) |
468 | + if (ret < 1) { |
469 | dev_err(&port->dev, "failed to get modem status: %d\n", ret); |
470 | + if (ret >= 0) |
471 | + ret = -EIO; |
472 | + goto out; |
473 | + } |
474 | |
475 | dev_dbg(&port->dev, "0xc0:0x22:0:6 %d - 0x02%x\n", ret, *buf); |
476 | *status = *buf; |
477 | + ret = 0; |
478 | +out: |
479 | kfree(buf); |
480 | |
481 | return ret; |
482 | diff --git a/mm/backing-dev.c b/mm/backing-dev.c |
483 | index 9ef80bf441b3..a988d4ef39da 100644 |
484 | --- a/mm/backing-dev.c |
485 | +++ b/mm/backing-dev.c |
486 | @@ -757,15 +757,20 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi) |
487 | if (!bdi->wb_congested) |
488 | return -ENOMEM; |
489 | |
490 | + atomic_set(&bdi->wb_congested->refcnt, 1); |
491 | + |
492 | err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL); |
493 | if (err) { |
494 | - kfree(bdi->wb_congested); |
495 | + wb_congested_put(bdi->wb_congested); |
496 | return err; |
497 | } |
498 | return 0; |
499 | } |
500 | |
501 | -static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { } |
502 | +static void cgwb_bdi_destroy(struct backing_dev_info *bdi) |
503 | +{ |
504 | + wb_congested_put(bdi->wb_congested); |
505 | +} |
506 | |
507 | #endif /* CONFIG_CGROUP_WRITEBACK */ |
508 | |
509 | diff --git a/net/dccp/input.c b/net/dccp/input.c |
510 | index 3bd14e885396..dbe2573f6ba1 100644 |
511 | --- a/net/dccp/input.c |
512 | +++ b/net/dccp/input.c |
513 | @@ -606,7 +606,8 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, |
514 | if (inet_csk(sk)->icsk_af_ops->conn_request(sk, |
515 | skb) < 0) |
516 | return 1; |
517 | - goto discard; |
518 | + consume_skb(skb); |
519 | + return 0; |
520 | } |
521 | if (dh->dccph_type == DCCP_PKT_RESET) |
522 | goto discard; |
523 | diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c |
524 | index bc14c5bb124b..f300d1cbfa91 100644 |
525 | --- a/net/ipv4/ip_sockglue.c |
526 | +++ b/net/ipv4/ip_sockglue.c |
527 | @@ -105,10 +105,10 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb, |
528 | if (skb->ip_summed != CHECKSUM_COMPLETE) |
529 | return; |
530 | |
531 | - if (offset != 0) |
532 | - csum = csum_sub(csum, |
533 | - csum_partial(skb->data + tlen, |
534 | - offset, 0)); |
535 | + if (offset != 0) { |
536 | + int tend_off = skb_transport_offset(skb) + tlen; |
537 | + csum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0)); |
538 | + } |
539 | |
540 | put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum); |
541 | } |
542 | diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c |
543 | index acbe61c7e683..160dc89335e2 100644 |
544 | --- a/net/irda/irqueue.c |
545 | +++ b/net/irda/irqueue.c |
546 | @@ -383,9 +383,6 @@ EXPORT_SYMBOL(hashbin_new); |
547 | * for deallocating this structure if it's complex. If not the user can |
548 | * just supply kfree, which should take care of the job. |
549 | */ |
550 | -#ifdef CONFIG_LOCKDEP |
551 | -static int hashbin_lock_depth = 0; |
552 | -#endif |
553 | int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func) |
554 | { |
555 | irda_queue_t* queue; |
556 | @@ -396,22 +393,27 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func) |
557 | IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;); |
558 | |
559 | /* Synchronize */ |
560 | - if ( hashbin->hb_type & HB_LOCK ) { |
561 | - spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags, |
562 | - hashbin_lock_depth++); |
563 | - } |
564 | + if (hashbin->hb_type & HB_LOCK) |
565 | + spin_lock_irqsave(&hashbin->hb_spinlock, flags); |
566 | |
567 | /* |
568 | * Free the entries in the hashbin, TODO: use hashbin_clear when |
569 | * it has been shown to work |
570 | */ |
571 | for (i = 0; i < HASHBIN_SIZE; i ++ ) { |
572 | - queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]); |
573 | - while (queue ) { |
574 | - if (free_func) |
575 | - (*free_func)(queue); |
576 | - queue = dequeue_first( |
577 | - (irda_queue_t**) &hashbin->hb_queue[i]); |
578 | + while (1) { |
579 | + queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]); |
580 | + |
581 | + if (!queue) |
582 | + break; |
583 | + |
584 | + if (free_func) { |
585 | + if (hashbin->hb_type & HB_LOCK) |
586 | + spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); |
587 | + free_func(queue); |
588 | + if (hashbin->hb_type & HB_LOCK) |
589 | + spin_lock_irqsave(&hashbin->hb_spinlock, flags); |
590 | + } |
591 | } |
592 | } |
593 | |
594 | @@ -420,12 +422,8 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func) |
595 | hashbin->magic = ~HB_MAGIC; |
596 | |
597 | /* Release lock */ |
598 | - if ( hashbin->hb_type & HB_LOCK) { |
599 | + if (hashbin->hb_type & HB_LOCK) |
600 | spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); |
601 | -#ifdef CONFIG_LOCKDEP |
602 | - hashbin_lock_depth--; |
603 | -#endif |
604 | - } |
605 | |
606 | /* |
607 | * Free the hashbin structure |
608 | diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c |
609 | index 3e821daf9dd4..8bc5a1bd2d45 100644 |
610 | --- a/net/llc/llc_conn.c |
611 | +++ b/net/llc/llc_conn.c |
612 | @@ -821,7 +821,10 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb) |
613 | * another trick required to cope with how the PROCOM state |
614 | * machine works. -acme |
615 | */ |
616 | + skb_orphan(skb); |
617 | + sock_hold(sk); |
618 | skb->sk = sk; |
619 | + skb->destructor = sock_efree; |
620 | } |
621 | if (!sock_owned_by_user(sk)) |
622 | llc_conn_rcv(sk, skb); |
623 | diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c |
624 | index d0e1e804ebd7..5404d0d195cc 100644 |
625 | --- a/net/llc/llc_sap.c |
626 | +++ b/net/llc/llc_sap.c |
627 | @@ -290,7 +290,10 @@ static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb, |
628 | |
629 | ev->type = LLC_SAP_EV_TYPE_PDU; |
630 | ev->reason = 0; |
631 | + skb_orphan(skb); |
632 | + sock_hold(sk); |
633 | skb->sk = sk; |
634 | + skb->destructor = sock_efree; |
635 | llc_sap_state_process(sap, skb); |
636 | } |
637 | |
638 | diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c |
639 | index f2d28ed74a0a..d805cd577a60 100644 |
640 | --- a/net/packet/af_packet.c |
641 | +++ b/net/packet/af_packet.c |
642 | @@ -1497,6 +1497,8 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po) |
643 | f->arr[f->num_members] = sk; |
644 | smp_wmb(); |
645 | f->num_members++; |
646 | + if (f->num_members == 1) |
647 | + dev_add_pack(&f->prot_hook); |
648 | spin_unlock(&f->lock); |
649 | } |
650 | |
651 | @@ -1513,6 +1515,8 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po) |
652 | BUG_ON(i >= f->num_members); |
653 | f->arr[i] = f->arr[f->num_members - 1]; |
654 | f->num_members--; |
655 | + if (f->num_members == 0) |
656 | + __dev_remove_pack(&f->prot_hook); |
657 | spin_unlock(&f->lock); |
658 | } |
659 | |
660 | @@ -1623,6 +1627,7 @@ static void fanout_release_data(struct packet_fanout *f) |
661 | |
662 | static int fanout_add(struct sock *sk, u16 id, u16 type_flags) |
663 | { |
664 | + struct packet_rollover *rollover = NULL; |
665 | struct packet_sock *po = pkt_sk(sk); |
666 | struct packet_fanout *f, *match; |
667 | u8 type = type_flags & 0xff; |
668 | @@ -1645,23 +1650,28 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) |
669 | return -EINVAL; |
670 | } |
671 | |
672 | + mutex_lock(&fanout_mutex); |
673 | + |
674 | + err = -EINVAL; |
675 | if (!po->running) |
676 | - return -EINVAL; |
677 | + goto out; |
678 | |
679 | + err = -EALREADY; |
680 | if (po->fanout) |
681 | - return -EALREADY; |
682 | + goto out; |
683 | |
684 | if (type == PACKET_FANOUT_ROLLOVER || |
685 | (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { |
686 | - po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL); |
687 | - if (!po->rollover) |
688 | - return -ENOMEM; |
689 | - atomic_long_set(&po->rollover->num, 0); |
690 | - atomic_long_set(&po->rollover->num_huge, 0); |
691 | - atomic_long_set(&po->rollover->num_failed, 0); |
692 | + err = -ENOMEM; |
693 | + rollover = kzalloc(sizeof(*rollover), GFP_KERNEL); |
694 | + if (!rollover) |
695 | + goto out; |
696 | + atomic_long_set(&rollover->num, 0); |
697 | + atomic_long_set(&rollover->num_huge, 0); |
698 | + atomic_long_set(&rollover->num_failed, 0); |
699 | + po->rollover = rollover; |
700 | } |
701 | |
702 | - mutex_lock(&fanout_mutex); |
703 | match = NULL; |
704 | list_for_each_entry(f, &fanout_list, list) { |
705 | if (f->id == id && |
706 | @@ -1691,7 +1701,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) |
707 | match->prot_hook.func = packet_rcv_fanout; |
708 | match->prot_hook.af_packet_priv = match; |
709 | match->prot_hook.id_match = match_fanout_group; |
710 | - dev_add_pack(&match->prot_hook); |
711 | list_add(&match->list, &fanout_list); |
712 | } |
713 | err = -EINVAL; |
714 | @@ -1708,36 +1717,40 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) |
715 | } |
716 | } |
717 | out: |
718 | - mutex_unlock(&fanout_mutex); |
719 | - if (err) { |
720 | - kfree(po->rollover); |
721 | + if (err && rollover) { |
722 | + kfree(rollover); |
723 | po->rollover = NULL; |
724 | } |
725 | + mutex_unlock(&fanout_mutex); |
726 | return err; |
727 | } |
728 | |
729 | -static void fanout_release(struct sock *sk) |
730 | +/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes |
731 | + * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout. |
732 | + * It is the responsibility of the caller to call fanout_release_data() and |
733 | + * free the returned packet_fanout (after synchronize_net()) |
734 | + */ |
735 | +static struct packet_fanout *fanout_release(struct sock *sk) |
736 | { |
737 | struct packet_sock *po = pkt_sk(sk); |
738 | struct packet_fanout *f; |
739 | |
740 | + mutex_lock(&fanout_mutex); |
741 | f = po->fanout; |
742 | - if (!f) |
743 | - return; |
744 | + if (f) { |
745 | + po->fanout = NULL; |
746 | |
747 | - mutex_lock(&fanout_mutex); |
748 | - po->fanout = NULL; |
749 | + if (atomic_dec_and_test(&f->sk_ref)) |
750 | + list_del(&f->list); |
751 | + else |
752 | + f = NULL; |
753 | |
754 | - if (atomic_dec_and_test(&f->sk_ref)) { |
755 | - list_del(&f->list); |
756 | - dev_remove_pack(&f->prot_hook); |
757 | - fanout_release_data(f); |
758 | - kfree(f); |
759 | + if (po->rollover) |
760 | + kfree_rcu(po->rollover, rcu); |
761 | } |
762 | mutex_unlock(&fanout_mutex); |
763 | |
764 | - if (po->rollover) |
765 | - kfree_rcu(po->rollover, rcu); |
766 | + return f; |
767 | } |
768 | |
769 | static bool packet_extra_vlan_len_allowed(const struct net_device *dev, |
770 | @@ -2846,6 +2859,7 @@ static int packet_release(struct socket *sock) |
771 | { |
772 | struct sock *sk = sock->sk; |
773 | struct packet_sock *po; |
774 | + struct packet_fanout *f; |
775 | struct net *net; |
776 | union tpacket_req_u req_u; |
777 | |
778 | @@ -2885,9 +2899,14 @@ static int packet_release(struct socket *sock) |
779 | packet_set_ring(sk, &req_u, 1, 1); |
780 | } |
781 | |
782 | - fanout_release(sk); |
783 | + f = fanout_release(sk); |
784 | |
785 | synchronize_net(); |
786 | + |
787 | + if (f) { |
788 | + fanout_release_data(f); |
789 | + kfree(f); |
790 | + } |
791 | /* |
792 | * Now the socket is dead. No more input will appear. |
793 | */ |
794 | @@ -3861,7 +3880,6 @@ static int packet_notifier(struct notifier_block *this, |
795 | } |
796 | if (msg == NETDEV_UNREGISTER) { |
797 | packet_cached_dev_reset(po); |
798 | - fanout_release(sk); |
799 | po->ifindex = -1; |
800 | if (po->prot_hook.dev) |
801 | dev_put(po->prot_hook.dev); |
802 | diff --git a/net/socket.c b/net/socket.c |
803 | index 0090225eeb1e..fbfa9d2492cf 100644 |
804 | --- a/net/socket.c |
805 | +++ b/net/socket.c |
806 | @@ -2185,8 +2185,10 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, |
807 | return err; |
808 | |
809 | err = sock_error(sock->sk); |
810 | - if (err) |
811 | + if (err) { |
812 | + datagrams = err; |
813 | goto out_put; |
814 | + } |
815 | |
816 | entry = mmsg; |
817 | compat_entry = (struct compat_mmsghdr __user *)mmsg; |