Contents of /trunk/kernel26-alx/patches-3.10/0149-3.10.50-all-fixes.patch
Parent Directory | Revision Log
Revision 2672 -
(show annotations)
(download)
Tue Jul 21 16:46:35 2015 UTC (9 years, 2 months ago) by niro
File size: 54055 byte(s)
-3.10.84-alx-r1
1 | diff --git a/Makefile b/Makefile |
2 | index b8b8d33eab55..8d891c66803c 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 3 |
7 | PATCHLEVEL = 10 |
8 | -SUBLEVEL = 49 |
9 | +SUBLEVEL = 50 |
10 | EXTRAVERSION = |
11 | NAME = TOSSUG Baby Fish |
12 | |
13 | diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h |
14 | index 30333cec0fef..ef9d79a3db25 100644 |
15 | --- a/arch/arc/include/uapi/asm/ptrace.h |
16 | +++ b/arch/arc/include/uapi/asm/ptrace.h |
17 | @@ -11,6 +11,7 @@ |
18 | #ifndef _UAPI__ASM_ARC_PTRACE_H |
19 | #define _UAPI__ASM_ARC_PTRACE_H |
20 | |
21 | +#define PTRACE_GET_THREAD_AREA 25 |
22 | |
23 | #ifndef __ASSEMBLY__ |
24 | /* |
25 | diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c |
26 | index 0851604bb9cd..f8a36ed9e0d5 100644 |
27 | --- a/arch/arc/kernel/ptrace.c |
28 | +++ b/arch/arc/kernel/ptrace.c |
29 | @@ -136,6 +136,10 @@ long arch_ptrace(struct task_struct *child, long request, |
30 | pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data); |
31 | |
32 | switch (request) { |
33 | + case PTRACE_GET_THREAD_AREA: |
34 | + ret = put_user(task_thread_info(child)->thr_ptr, |
35 | + (unsigned long __user *)data); |
36 | + break; |
37 | default: |
38 | ret = ptrace_request(child, request, addr, data); |
39 | break; |
40 | diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig |
41 | index 18a9f5ef643a..d41951246cd6 100644 |
42 | --- a/arch/arm/Kconfig |
43 | +++ b/arch/arm/Kconfig |
44 | @@ -4,6 +4,7 @@ config ARM |
45 | select ARCH_BINFMT_ELF_RANDOMIZE_PIE |
46 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE |
47 | select ARCH_HAVE_CUSTOM_GPIO_H |
48 | + select ARCH_SUPPORTS_ATOMIC_RMW |
49 | select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST |
50 | select ARCH_WANT_IPC_PARSE_VERSION |
51 | select BUILDTIME_EXTABLE_SORT if MMU |
52 | diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig |
53 | index 56b3f6d447ae..0677ff4814fa 100644 |
54 | --- a/arch/arm64/Kconfig |
55 | +++ b/arch/arm64/Kconfig |
56 | @@ -1,6 +1,7 @@ |
57 | config ARM64 |
58 | def_bool y |
59 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE |
60 | + select ARCH_SUPPORTS_ATOMIC_RMW |
61 | select ARCH_WANT_OPTIONAL_GPIOLIB |
62 | select ARCH_WANT_COMPAT_IPC_PARSE_VERSION |
63 | select ARCH_WANT_FRAME_POINTERS |
64 | diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig |
65 | index fe404e77246e..7f656f119ea6 100644 |
66 | --- a/arch/powerpc/Kconfig |
67 | +++ b/arch/powerpc/Kconfig |
68 | @@ -138,6 +138,7 @@ config PPC |
69 | select ARCH_USE_BUILTIN_BSWAP |
70 | select OLD_SIGSUSPEND |
71 | select OLD_SIGACTION if PPC32 |
72 | + select ARCH_SUPPORTS_ATOMIC_RMW |
73 | |
74 | config EARLY_PRINTK |
75 | bool |
76 | diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig |
77 | index 2668b3142fa2..03a1bc3c3dde 100644 |
78 | --- a/arch/sparc/Kconfig |
79 | +++ b/arch/sparc/Kconfig |
80 | @@ -77,6 +77,7 @@ config SPARC64 |
81 | select ARCH_HAVE_NMI_SAFE_CMPXCHG |
82 | select HAVE_C_RECORDMCOUNT |
83 | select NO_BOOTMEM |
84 | + select ARCH_SUPPORTS_ATOMIC_RMW |
85 | |
86 | config ARCH_DEFCONFIG |
87 | string |
88 | diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig |
89 | index fe120da25625..af88b27ce313 100644 |
90 | --- a/arch/x86/Kconfig |
91 | +++ b/arch/x86/Kconfig |
92 | @@ -121,6 +121,7 @@ config X86 |
93 | select OLD_SIGACTION if X86_32 |
94 | select COMPAT_OLD_SIGACTION if IA32_EMULATION |
95 | select RTC_LIB |
96 | + select ARCH_SUPPORTS_ATOMIC_RMW |
97 | |
98 | config INSTRUCTION_DECODER |
99 | def_bool y |
100 | diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c |
101 | index a9e22073bd56..b45ac6affa9c 100644 |
102 | --- a/arch/x86/kernel/cpu/perf_event_intel.c |
103 | +++ b/arch/x86/kernel/cpu/perf_event_intel.c |
104 | @@ -1199,6 +1199,15 @@ again: |
105 | intel_pmu_lbr_read(); |
106 | |
107 | /* |
108 | + * CondChgd bit 63 doesn't mean any overflow status. Ignore |
109 | + * and clear the bit. |
110 | + */ |
111 | + if (__test_and_clear_bit(63, (unsigned long *)&status)) { |
112 | + if (!status) |
113 | + goto done; |
114 | + } |
115 | + |
116 | + /* |
117 | * PEBS overflow sets bit 62 in the global status register |
118 | */ |
119 | if (__test_and_clear_bit(62, (unsigned long *)&status)) { |
120 | diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c |
121 | index b6154d5a07a5..db0be2fb05fe 100644 |
122 | --- a/drivers/bluetooth/hci_h5.c |
123 | +++ b/drivers/bluetooth/hci_h5.c |
124 | @@ -406,6 +406,7 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c) |
125 | H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) { |
126 | BT_ERR("Non-link packet received in non-active state"); |
127 | h5_reset_rx(h5); |
128 | + return 0; |
129 | } |
130 | |
131 | h5->rx_func = h5_rx_payload; |
132 | diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c |
133 | index 21393dc4700a..f4b6b89b98f3 100644 |
134 | --- a/drivers/gpu/drm/qxl/qxl_irq.c |
135 | +++ b/drivers/gpu/drm/qxl/qxl_irq.c |
136 | @@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS) |
137 | |
138 | pending = xchg(&qdev->ram_header->int_pending, 0); |
139 | |
140 | + if (!pending) |
141 | + return IRQ_NONE; |
142 | + |
143 | atomic_inc(&qdev->irq_received); |
144 | |
145 | if (pending & QXL_INTERRUPT_DISPLAY) { |
146 | diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c |
147 | index 5802d7486354..1b564d7e4191 100644 |
148 | --- a/drivers/gpu/drm/radeon/atombios_encoders.c |
149 | +++ b/drivers/gpu/drm/radeon/atombios_encoders.c |
150 | @@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, |
151 | struct backlight_properties props; |
152 | struct radeon_backlight_privdata *pdata; |
153 | struct radeon_encoder_atom_dig *dig; |
154 | - u8 backlight_level; |
155 | char bl_name[16]; |
156 | |
157 | /* Mac laptops with multiple GPUs use the gmux driver for backlight |
158 | @@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, |
159 | |
160 | pdata->encoder = radeon_encoder; |
161 | |
162 | - backlight_level = radeon_atom_get_backlight_level_from_reg(rdev); |
163 | - |
164 | dig = radeon_encoder->enc_priv; |
165 | dig->bl_dev = bd; |
166 | |
167 | bd->props.brightness = radeon_atom_backlight_get_brightness(bd); |
168 | + /* Set a reasonable default here if the level is 0 otherwise |
169 | + * fbdev will attempt to turn the backlight on after console |
170 | + * unblanking and it will try and restore 0 which turns the backlight |
171 | + * off again. |
172 | + */ |
173 | + if (bd->props.brightness == 0) |
174 | + bd->props.brightness = RADEON_MAX_BL_LEVEL; |
175 | bd->props.power = FB_BLANK_UNBLANK; |
176 | backlight_update_status(bd); |
177 | |
178 | diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c |
179 | index 06ccfe477650..a84de32a91f5 100644 |
180 | --- a/drivers/gpu/drm/radeon/radeon_display.c |
181 | +++ b/drivers/gpu/drm/radeon/radeon_display.c |
182 | @@ -688,6 +688,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) |
183 | struct radeon_device *rdev = dev->dev_private; |
184 | int ret = 0; |
185 | |
186 | + /* don't leak the edid if we already fetched it in detect() */ |
187 | + if (radeon_connector->edid) |
188 | + goto got_edid; |
189 | + |
190 | /* on hw with routers, select right port */ |
191 | if (radeon_connector->router.ddc_valid) |
192 | radeon_router_select_ddc_port(radeon_connector); |
193 | @@ -727,6 +731,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) |
194 | radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); |
195 | } |
196 | if (radeon_connector->edid) { |
197 | +got_edid: |
198 | drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); |
199 | ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); |
200 | drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid); |
201 | diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c |
202 | index ed50e9e83c61..0e8c1ea4dd53 100644 |
203 | --- a/drivers/hv/hv_kvp.c |
204 | +++ b/drivers/hv/hv_kvp.c |
205 | @@ -111,6 +111,15 @@ kvp_work_func(struct work_struct *dummy) |
206 | kvp_respond_to_host(NULL, HV_E_FAIL); |
207 | } |
208 | |
209 | +static void poll_channel(struct vmbus_channel *channel) |
210 | +{ |
211 | + unsigned long flags; |
212 | + |
213 | + spin_lock_irqsave(&channel->inbound_lock, flags); |
214 | + hv_kvp_onchannelcallback(channel); |
215 | + spin_unlock_irqrestore(&channel->inbound_lock, flags); |
216 | +} |
217 | + |
218 | static int kvp_handle_handshake(struct hv_kvp_msg *msg) |
219 | { |
220 | int ret = 1; |
221 | @@ -139,7 +148,7 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg) |
222 | kvp_register(dm_reg_value); |
223 | kvp_transaction.active = false; |
224 | if (kvp_transaction.kvp_context) |
225 | - hv_kvp_onchannelcallback(kvp_transaction.kvp_context); |
226 | + poll_channel(kvp_transaction.kvp_context); |
227 | } |
228 | return ret; |
229 | } |
230 | @@ -552,6 +561,7 @@ response_done: |
231 | |
232 | vmbus_sendpacket(channel, recv_buffer, buf_len, req_id, |
233 | VM_PKT_DATA_INBAND, 0); |
234 | + poll_channel(channel); |
235 | |
236 | } |
237 | |
238 | @@ -585,7 +595,7 @@ void hv_kvp_onchannelcallback(void *context) |
239 | return; |
240 | } |
241 | |
242 | - vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen, |
243 | + vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen, |
244 | &requestid); |
245 | |
246 | if (recvlen > 0) { |
247 | diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c |
248 | index 2f561c5dfe24..64c778f7756f 100644 |
249 | --- a/drivers/hv/hv_util.c |
250 | +++ b/drivers/hv/hv_util.c |
251 | @@ -279,7 +279,7 @@ static int util_probe(struct hv_device *dev, |
252 | (struct hv_util_service *)dev_id->driver_data; |
253 | int ret; |
254 | |
255 | - srv->recv_buffer = kmalloc(PAGE_SIZE * 2, GFP_KERNEL); |
256 | + srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL); |
257 | if (!srv->recv_buffer) |
258 | return -ENOMEM; |
259 | if (srv->util_init) { |
260 | diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c |
261 | index 58637355c1f6..79610bdf1d35 100644 |
262 | --- a/drivers/hwmon/adt7470.c |
263 | +++ b/drivers/hwmon/adt7470.c |
264 | @@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev, |
265 | return -EINVAL; |
266 | |
267 | temp = DIV_ROUND_CLOSEST(temp, 1000); |
268 | - temp = clamp_val(temp, 0, 255); |
269 | + temp = clamp_val(temp, -128, 127); |
270 | |
271 | mutex_lock(&data->lock); |
272 | data->temp_min[attr->index] = temp; |
273 | @@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev, |
274 | return -EINVAL; |
275 | |
276 | temp = DIV_ROUND_CLOSEST(temp, 1000); |
277 | - temp = clamp_val(temp, 0, 255); |
278 | + temp = clamp_val(temp, -128, 127); |
279 | |
280 | mutex_lock(&data->lock); |
281 | data->temp_max[attr->index] = temp; |
282 | @@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev, |
283 | return -EINVAL; |
284 | |
285 | temp = DIV_ROUND_CLOSEST(temp, 1000); |
286 | - temp = clamp_val(temp, 0, 255); |
287 | + temp = clamp_val(temp, -128, 127); |
288 | |
289 | mutex_lock(&data->lock); |
290 | data->pwm_tmin[attr->index] = temp; |
291 | diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c |
292 | index 960fac3fb166..48044b044b7a 100644 |
293 | --- a/drivers/hwmon/da9052-hwmon.c |
294 | +++ b/drivers/hwmon/da9052-hwmon.c |
295 | @@ -194,7 +194,7 @@ static ssize_t da9052_hwmon_show_name(struct device *dev, |
296 | struct device_attribute *devattr, |
297 | char *buf) |
298 | { |
299 | - return sprintf(buf, "da9052-hwmon\n"); |
300 | + return sprintf(buf, "da9052\n"); |
301 | } |
302 | |
303 | static ssize_t show_label(struct device *dev, |
304 | diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c |
305 | index 029ecabc4380..1b275a2881d6 100644 |
306 | --- a/drivers/hwmon/da9055-hwmon.c |
307 | +++ b/drivers/hwmon/da9055-hwmon.c |
308 | @@ -204,7 +204,7 @@ static ssize_t da9055_hwmon_show_name(struct device *dev, |
309 | struct device_attribute *devattr, |
310 | char *buf) |
311 | { |
312 | - return sprintf(buf, "da9055-hwmon\n"); |
313 | + return sprintf(buf, "da9055\n"); |
314 | } |
315 | |
316 | static ssize_t show_label(struct device *dev, |
317 | diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c |
318 | index 4e11218d644e..c8ee1cb023b8 100644 |
319 | --- a/drivers/irqchip/irq-gic.c |
320 | +++ b/drivers/irqchip/irq-gic.c |
321 | @@ -42,6 +42,7 @@ |
322 | #include <linux/irqchip/chained_irq.h> |
323 | #include <linux/irqchip/arm-gic.h> |
324 | |
325 | +#include <asm/cputype.h> |
326 | #include <asm/irq.h> |
327 | #include <asm/exception.h> |
328 | #include <asm/smp_plat.h> |
329 | @@ -754,7 +755,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, |
330 | } |
331 | |
332 | for_each_possible_cpu(cpu) { |
333 | - unsigned long offset = percpu_offset * cpu_logical_map(cpu); |
334 | + u32 mpidr = cpu_logical_map(cpu); |
335 | + u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); |
336 | + unsigned long offset = percpu_offset * core_id; |
337 | *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; |
338 | *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; |
339 | } |
340 | @@ -858,6 +861,7 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent) |
341 | } |
342 | IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init); |
343 | IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); |
344 | +IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init); |
345 | IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); |
346 | IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); |
347 | |
348 | diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c |
349 | index a33e07f4222e..de737ba1d351 100644 |
350 | --- a/drivers/md/dm-cache-metadata.c |
351 | +++ b/drivers/md/dm-cache-metadata.c |
352 | @@ -384,6 +384,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd) |
353 | |
354 | disk_super = dm_block_data(sblock); |
355 | |
356 | + /* Verify the data block size hasn't changed */ |
357 | + if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) { |
358 | + DMERR("changing the data block size (from %u to %llu) is not supported", |
359 | + le32_to_cpu(disk_super->data_block_size), |
360 | + (unsigned long long)cmd->data_block_size); |
361 | + r = -EINVAL; |
362 | + goto bad; |
363 | + } |
364 | + |
365 | r = __check_incompat_features(disk_super, cmd); |
366 | if (r < 0) |
367 | goto bad; |
368 | diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c |
369 | index 5f49d704f275..3b1503dc1f13 100644 |
370 | --- a/drivers/md/dm-thin-metadata.c |
371 | +++ b/drivers/md/dm-thin-metadata.c |
372 | @@ -591,6 +591,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd) |
373 | |
374 | disk_super = dm_block_data(sblock); |
375 | |
376 | + /* Verify the data block size hasn't changed */ |
377 | + if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) { |
378 | + DMERR("changing the data block size (from %u to %llu) is not supported", |
379 | + le32_to_cpu(disk_super->data_block_size), |
380 | + (unsigned long long)pmd->data_block_size); |
381 | + r = -EINVAL; |
382 | + goto bad_unlock_sblock; |
383 | + } |
384 | + |
385 | r = __check_incompat_features(disk_super, pmd); |
386 | if (r < 0) |
387 | goto bad_unlock_sblock; |
388 | diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c |
389 | index 6008c8d546a3..20d9c15a305d 100644 |
390 | --- a/drivers/media/usb/gspca/pac7302.c |
391 | +++ b/drivers/media/usb/gspca/pac7302.c |
392 | @@ -945,6 +945,7 @@ static const struct usb_device_id device_table[] = { |
393 | {USB_DEVICE(0x093a, 0x2620)}, |
394 | {USB_DEVICE(0x093a, 0x2621)}, |
395 | {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP}, |
396 | + {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP}, |
397 | {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP}, |
398 | {USB_DEVICE(0x093a, 0x2625)}, |
399 | {USB_DEVICE(0x093a, 0x2626)}, |
400 | diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c |
401 | index 70be100feeb4..b04f7f128f49 100644 |
402 | --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c |
403 | +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c |
404 | @@ -745,7 +745,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, |
405 | |
406 | return; |
407 | } |
408 | - bnx2x_frag_free(fp, new_data); |
409 | + if (new_data) |
410 | + bnx2x_frag_free(fp, new_data); |
411 | drop: |
412 | /* drop the packet and keep the buffer in the bin */ |
413 | DP(NETIF_MSG_RX_STATUS, |
414 | diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c |
415 | index 7371626c56a1..d81a7dbfeef6 100644 |
416 | --- a/drivers/net/ethernet/emulex/benet/be_main.c |
417 | +++ b/drivers/net/ethernet/emulex/benet/be_main.c |
418 | @@ -2663,7 +2663,7 @@ static int be_open(struct net_device *netdev) |
419 | |
420 | for_all_evt_queues(adapter, eqo, i) { |
421 | napi_enable(&eqo->napi); |
422 | - be_eq_notify(adapter, eqo->q.id, true, false, 0); |
423 | + be_eq_notify(adapter, eqo->q.id, true, true, 0); |
424 | } |
425 | adapter->flags |= BE_FLAGS_NAPI_ENABLED; |
426 | |
427 | diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c |
428 | index 64cbe0dfe043..4d3c8122e2aa 100644 |
429 | --- a/drivers/net/ethernet/intel/igb/igb_main.c |
430 | +++ b/drivers/net/ethernet/intel/igb/igb_main.c |
431 | @@ -7229,6 +7229,8 @@ static int igb_sriov_reinit(struct pci_dev *dev) |
432 | |
433 | if (netif_running(netdev)) |
434 | igb_close(netdev); |
435 | + else |
436 | + igb_reset(adapter); |
437 | |
438 | igb_clear_interrupt_scheme(adapter); |
439 | |
440 | diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c |
441 | index a602aeeb3acb..658613021919 100644 |
442 | --- a/drivers/net/ethernet/marvell/mvneta.c |
443 | +++ b/drivers/net/ethernet/marvell/mvneta.c |
444 | @@ -1145,7 +1145,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto, |
445 | command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; |
446 | command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; |
447 | |
448 | - if (l3_proto == swab16(ETH_P_IP)) |
449 | + if (l3_proto == htons(ETH_P_IP)) |
450 | command |= MVNETA_TXD_IP_CSUM; |
451 | else |
452 | command |= MVNETA_TX_L3_IP6; |
453 | @@ -2306,7 +2306,7 @@ static void mvneta_adjust_link(struct net_device *ndev) |
454 | |
455 | if (phydev->speed == SPEED_1000) |
456 | val |= MVNETA_GMAC_CONFIG_GMII_SPEED; |
457 | - else |
458 | + else if (phydev->speed == SPEED_100) |
459 | val |= MVNETA_GMAC_CONFIG_MII_SPEED; |
460 | |
461 | mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); |
462 | diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c |
463 | index 3df56840a3b9..398faff8be7a 100644 |
464 | --- a/drivers/net/ethernet/sun/sunvnet.c |
465 | +++ b/drivers/net/ethernet/sun/sunvnet.c |
466 | @@ -1083,6 +1083,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac) |
467 | return vp; |
468 | } |
469 | |
470 | +static void vnet_cleanup(void) |
471 | +{ |
472 | + struct vnet *vp; |
473 | + struct net_device *dev; |
474 | + |
475 | + mutex_lock(&vnet_list_mutex); |
476 | + while (!list_empty(&vnet_list)) { |
477 | + vp = list_first_entry(&vnet_list, struct vnet, list); |
478 | + list_del(&vp->list); |
479 | + dev = vp->dev; |
480 | + /* vio_unregister_driver() should have cleaned up port_list */ |
481 | + BUG_ON(!list_empty(&vp->port_list)); |
482 | + unregister_netdev(dev); |
483 | + free_netdev(dev); |
484 | + } |
485 | + mutex_unlock(&vnet_list_mutex); |
486 | +} |
487 | + |
488 | static const char *local_mac_prop = "local-mac-address"; |
489 | |
490 | static struct vnet *vnet_find_parent(struct mdesc_handle *hp, |
491 | @@ -1240,7 +1258,6 @@ static int vnet_port_remove(struct vio_dev *vdev) |
492 | |
493 | kfree(port); |
494 | |
495 | - unregister_netdev(vp->dev); |
496 | } |
497 | return 0; |
498 | } |
499 | @@ -1268,6 +1285,7 @@ static int __init vnet_init(void) |
500 | static void __exit vnet_exit(void) |
501 | { |
502 | vio_unregister_driver(&vnet_port_driver); |
503 | + vnet_cleanup(); |
504 | } |
505 | |
506 | module_init(vnet_init); |
507 | diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c |
508 | index 6839fb07a4c9..becfa3ef7fdc 100644 |
509 | --- a/drivers/net/ppp/pppoe.c |
510 | +++ b/drivers/net/ppp/pppoe.c |
511 | @@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, |
512 | po->chan.hdrlen = (sizeof(struct pppoe_hdr) + |
513 | dev->hard_header_len); |
514 | |
515 | - po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr); |
516 | + po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2; |
517 | po->chan.private = sk; |
518 | po->chan.ops = &pppoe_chan_ops; |
519 | |
520 | diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c |
521 | index 6fb0082b3308..6c584f8a2268 100644 |
522 | --- a/drivers/net/usb/qmi_wwan.c |
523 | +++ b/drivers/net/usb/qmi_wwan.c |
524 | @@ -647,6 +647,7 @@ static const struct usb_device_id products[] = { |
525 | {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, |
526 | {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, |
527 | {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, |
528 | + {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, |
529 | {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ |
530 | {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ |
531 | {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ |
532 | @@ -721,6 +722,7 @@ static const struct usb_device_id products[] = { |
533 | {QMI_FIXED_INTF(0x19d2, 0x1424, 2)}, |
534 | {QMI_FIXED_INTF(0x19d2, 0x1425, 2)}, |
535 | {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ |
536 | + {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ |
537 | {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ |
538 | {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ |
539 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ |
540 | @@ -733,6 +735,7 @@ static const struct usb_device_id products[] = { |
541 | {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ |
542 | {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ |
543 | {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ |
544 | + {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, |
545 | {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ |
546 | {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ |
547 | {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ |
548 | diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c |
549 | index cd1ad0019185..ca17e4c9eca2 100644 |
550 | --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c |
551 | +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c |
552 | @@ -1072,13 +1072,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) |
553 | /* recalculate basic rates */ |
554 | iwl_calc_basic_rates(priv, ctx); |
555 | |
556 | - /* |
557 | - * force CTS-to-self frames protection if RTS-CTS is not preferred |
558 | - * one aggregation protection method |
559 | - */ |
560 | - if (!priv->hw_params.use_rts_for_aggregation) |
561 | - ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; |
562 | - |
563 | if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || |
564 | !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) |
565 | ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; |
566 | @@ -1484,11 +1477,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, |
567 | else |
568 | ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; |
569 | |
570 | - if (bss_conf->use_cts_prot) |
571 | - ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; |
572 | - else |
573 | - ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; |
574 | - |
575 | memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); |
576 | |
577 | if (vif->type == NL80211_IFTYPE_AP || |
578 | diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c |
579 | index fc3fe8ddcf62..83c61964d082 100644 |
580 | --- a/drivers/net/wireless/mwifiex/main.c |
581 | +++ b/drivers/net/wireless/mwifiex/main.c |
582 | @@ -501,6 +501,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) |
583 | } |
584 | |
585 | tx_info = MWIFIEX_SKB_TXCB(skb); |
586 | + memset(tx_info, 0, sizeof(*tx_info)); |
587 | tx_info->bss_num = priv->bss_num; |
588 | tx_info->bss_type = priv->bss_type; |
589 | |
590 | diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c |
591 | index 46efdca96952..63c217053668 100644 |
592 | --- a/drivers/usb/core/hub.c |
593 | +++ b/drivers/usb/core/hub.c |
594 | @@ -887,6 +887,25 @@ static int hub_usb3_port_disable(struct usb_hub *hub, int port1) |
595 | if (!hub_is_superspeed(hub->hdev)) |
596 | return -EINVAL; |
597 | |
598 | + ret = hub_port_status(hub, port1, &portstatus, &portchange); |
599 | + if (ret < 0) |
600 | + return ret; |
601 | + |
602 | + /* |
603 | + * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI |
604 | + * Controller [1022:7814] will have spurious result making the following |
605 | + * usb 3.0 device hotplugging route to the 2.0 root hub and recognized |
606 | + * as high-speed device if we set the usb 3.0 port link state to |
607 | + * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we |
608 | + * check the state here to avoid the bug. |
609 | + */ |
610 | + if ((portstatus & USB_PORT_STAT_LINK_STATE) == |
611 | + USB_SS_PORT_LS_RX_DETECT) { |
612 | + dev_dbg(&hub->ports[port1 - 1]->dev, |
613 | + "Not disabling port; link state is RxDetect\n"); |
614 | + return ret; |
615 | + } |
616 | + |
617 | ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED); |
618 | if (ret) |
619 | return ret; |
620 | diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c |
621 | index b5718516825b..39a986e1da9e 100644 |
622 | --- a/fs/fuse/inode.c |
623 | +++ b/fs/fuse/inode.c |
624 | @@ -461,6 +461,17 @@ static const match_table_t tokens = { |
625 | {OPT_ERR, NULL} |
626 | }; |
627 | |
628 | +static int fuse_match_uint(substring_t *s, unsigned int *res) |
629 | +{ |
630 | + int err = -ENOMEM; |
631 | + char *buf = match_strdup(s); |
632 | + if (buf) { |
633 | + err = kstrtouint(buf, 10, res); |
634 | + kfree(buf); |
635 | + } |
636 | + return err; |
637 | +} |
638 | + |
639 | static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev) |
640 | { |
641 | char *p; |
642 | @@ -471,6 +482,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev) |
643 | while ((p = strsep(&opt, ",")) != NULL) { |
644 | int token; |
645 | int value; |
646 | + unsigned uv; |
647 | substring_t args[MAX_OPT_ARGS]; |
648 | if (!*p) |
649 | continue; |
650 | @@ -494,18 +506,18 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev) |
651 | break; |
652 | |
653 | case OPT_USER_ID: |
654 | - if (match_int(&args[0], &value)) |
655 | + if (fuse_match_uint(&args[0], &uv)) |
656 | return 0; |
657 | - d->user_id = make_kuid(current_user_ns(), value); |
658 | + d->user_id = make_kuid(current_user_ns(), uv); |
659 | if (!uid_valid(d->user_id)) |
660 | return 0; |
661 | d->user_id_present = 1; |
662 | break; |
663 | |
664 | case OPT_GROUP_ID: |
665 | - if (match_int(&args[0], &value)) |
666 | + if (fuse_match_uint(&args[0], &uv)) |
667 | return 0; |
668 | - d->group_id = make_kgid(current_user_ns(), value); |
669 | + d->group_id = make_kgid(current_user_ns(), uv); |
670 | if (!gid_valid(d->group_id)) |
671 | return 0; |
672 | d->group_id_present = 1; |
673 | diff --git a/include/net/sock.h b/include/net/sock.h |
674 | index 72f710d2f75a..26b15c0780be 100644 |
675 | --- a/include/net/sock.h |
676 | +++ b/include/net/sock.h |
677 | @@ -1727,8 +1727,8 @@ sk_dst_get(struct sock *sk) |
678 | |
679 | rcu_read_lock(); |
680 | dst = rcu_dereference(sk->sk_dst_cache); |
681 | - if (dst) |
682 | - dst_hold(dst); |
683 | + if (dst && !atomic_inc_not_zero(&dst->__refcnt)) |
684 | + dst = NULL; |
685 | rcu_read_unlock(); |
686 | return dst; |
687 | } |
688 | @@ -1767,9 +1767,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst) |
689 | static inline void |
690 | sk_dst_set(struct sock *sk, struct dst_entry *dst) |
691 | { |
692 | - spin_lock(&sk->sk_dst_lock); |
693 | - __sk_dst_set(sk, dst); |
694 | - spin_unlock(&sk->sk_dst_lock); |
695 | + struct dst_entry *old_dst; |
696 | + |
697 | + sk_tx_queue_clear(sk); |
698 | + old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst); |
699 | + dst_release(old_dst); |
700 | } |
701 | |
702 | static inline void |
703 | @@ -1781,9 +1783,7 @@ __sk_dst_reset(struct sock *sk) |
704 | static inline void |
705 | sk_dst_reset(struct sock *sk) |
706 | { |
707 | - spin_lock(&sk->sk_dst_lock); |
708 | - __sk_dst_reset(sk); |
709 | - spin_unlock(&sk->sk_dst_lock); |
710 | + sk_dst_set(sk, NULL); |
711 | } |
712 | |
713 | extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); |
714 | diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks |
715 | index 44511d100eaa..e4d30533c562 100644 |
716 | --- a/kernel/Kconfig.locks |
717 | +++ b/kernel/Kconfig.locks |
718 | @@ -220,6 +220,9 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE |
719 | |
720 | endif |
721 | |
722 | +config ARCH_SUPPORTS_ATOMIC_RMW |
723 | + bool |
724 | + |
725 | config MUTEX_SPIN_ON_OWNER |
726 | def_bool y |
727 | - depends on SMP && !DEBUG_MUTEXES |
728 | + depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW |
729 | diff --git a/kernel/power/process.c b/kernel/power/process.c |
730 | index 98088e0e71e8..1b212bee1510 100644 |
731 | --- a/kernel/power/process.c |
732 | +++ b/kernel/power/process.c |
733 | @@ -174,6 +174,7 @@ void thaw_processes(void) |
734 | |
735 | printk("Restarting tasks ... "); |
736 | |
737 | + __usermodehelper_set_disable_depth(UMH_FREEZING); |
738 | thaw_workqueues(); |
739 | |
740 | read_lock(&tasklist_lock); |
741 | diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c |
742 | index e745a1548367..701b6c8a4b12 100644 |
743 | --- a/kernel/sched/debug.c |
744 | +++ b/kernel/sched/debug.c |
745 | @@ -551,7 +551,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) |
746 | |
747 | avg_atom = p->se.sum_exec_runtime; |
748 | if (nr_switches) |
749 | - do_div(avg_atom, nr_switches); |
750 | + avg_atom = div64_ul(avg_atom, nr_switches); |
751 | else |
752 | avg_atom = -1LL; |
753 | |
754 | diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c |
755 | index a8f5084dcde7..294bf4ef1f47 100644 |
756 | --- a/kernel/time/alarmtimer.c |
757 | +++ b/kernel/time/alarmtimer.c |
758 | @@ -540,9 +540,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, |
759 | struct itimerspec *new_setting, |
760 | struct itimerspec *old_setting) |
761 | { |
762 | + ktime_t exp; |
763 | + |
764 | if (!rtcdev) |
765 | return -ENOTSUPP; |
766 | |
767 | + if (flags & ~TIMER_ABSTIME) |
768 | + return -EINVAL; |
769 | + |
770 | if (old_setting) |
771 | alarm_timer_get(timr, old_setting); |
772 | |
773 | @@ -552,8 +557,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, |
774 | |
775 | /* start the timer */ |
776 | timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval); |
777 | - alarm_start(&timr->it.alarm.alarmtimer, |
778 | - timespec_to_ktime(new_setting->it_value)); |
779 | + exp = timespec_to_ktime(new_setting->it_value); |
780 | + /* Convert (if necessary) to absolute time */ |
781 | + if (flags != TIMER_ABSTIME) { |
782 | + ktime_t now; |
783 | + |
784 | + now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime(); |
785 | + exp = ktime_add(now, exp); |
786 | + } |
787 | + |
788 | + alarm_start(&timr->it.alarm.alarmtimer, exp); |
789 | return 0; |
790 | } |
791 | |
792 | @@ -685,6 +698,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags, |
793 | if (!alarmtimer_get_rtcdev()) |
794 | return -ENOTSUPP; |
795 | |
796 | + if (flags & ~TIMER_ABSTIME) |
797 | + return -EINVAL; |
798 | + |
799 | if (!capable(CAP_WAKE_ALARM)) |
800 | return -EPERM; |
801 | |
802 | diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c |
803 | index 797d3b91a30b..401d9bd1fe42 100644 |
804 | --- a/kernel/trace/ftrace.c |
805 | +++ b/kernel/trace/ftrace.c |
806 | @@ -331,12 +331,12 @@ static void update_ftrace_function(void) |
807 | func = ftrace_ops_list_func; |
808 | } |
809 | |
810 | + update_function_graph_func(); |
811 | + |
812 | /* If there's no change, then do nothing more here */ |
813 | if (ftrace_trace_function == func) |
814 | return; |
815 | |
816 | - update_function_graph_func(); |
817 | - |
818 | /* |
819 | * If we are using the list function, it doesn't care |
820 | * about the function_trace_ops. |
821 | diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c |
822 | index 8e94c1102636..4063d5fe5e44 100644 |
823 | --- a/kernel/trace/ring_buffer.c |
824 | +++ b/kernel/trace/ring_buffer.c |
825 | @@ -616,10 +616,6 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, |
826 | struct ring_buffer_per_cpu *cpu_buffer; |
827 | struct rb_irq_work *work; |
828 | |
829 | - if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || |
830 | - (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) |
831 | - return POLLIN | POLLRDNORM; |
832 | - |
833 | if (cpu == RING_BUFFER_ALL_CPUS) |
834 | work = &buffer->irq_work; |
835 | else { |
836 | diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c |
837 | index 8fe92ce43f39..98a830d079b9 100644 |
838 | --- a/kernel/trace/trace.c |
839 | +++ b/kernel/trace/trace.c |
840 | @@ -423,6 +423,9 @@ int __trace_puts(unsigned long ip, const char *str, int size) |
841 | struct print_entry *entry; |
842 | unsigned long irq_flags; |
843 | int alloc; |
844 | + int pc; |
845 | + |
846 | + pc = preempt_count(); |
847 | |
848 | if (unlikely(tracing_selftest_running || tracing_disabled)) |
849 | return 0; |
850 | @@ -432,7 +435,7 @@ int __trace_puts(unsigned long ip, const char *str, int size) |
851 | local_save_flags(irq_flags); |
852 | buffer = global_trace.trace_buffer.buffer; |
853 | event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, |
854 | - irq_flags, preempt_count()); |
855 | + irq_flags, pc); |
856 | if (!event) |
857 | return 0; |
858 | |
859 | @@ -449,6 +452,7 @@ int __trace_puts(unsigned long ip, const char *str, int size) |
860 | entry->buf[size] = '\0'; |
861 | |
862 | __buffer_unlock_commit(buffer, event); |
863 | + ftrace_trace_stack(buffer, irq_flags, 4, pc); |
864 | |
865 | return size; |
866 | } |
867 | @@ -466,6 +470,9 @@ int __trace_bputs(unsigned long ip, const char *str) |
868 | struct bputs_entry *entry; |
869 | unsigned long irq_flags; |
870 | int size = sizeof(struct bputs_entry); |
871 | + int pc; |
872 | + |
873 | + pc = preempt_count(); |
874 | |
875 | if (unlikely(tracing_selftest_running || tracing_disabled)) |
876 | return 0; |
877 | @@ -473,7 +480,7 @@ int __trace_bputs(unsigned long ip, const char *str) |
878 | local_save_flags(irq_flags); |
879 | buffer = global_trace.trace_buffer.buffer; |
880 | event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, |
881 | - irq_flags, preempt_count()); |
882 | + irq_flags, pc); |
883 | if (!event) |
884 | return 0; |
885 | |
886 | @@ -482,6 +489,7 @@ int __trace_bputs(unsigned long ip, const char *str) |
887 | entry->str = str; |
888 | |
889 | __buffer_unlock_commit(buffer, event); |
890 | + ftrace_trace_stack(buffer, irq_flags, 4, pc); |
891 | |
892 | return 1; |
893 | } |
894 | diff --git a/mm/shmem.c b/mm/shmem.c |
895 | index 509b393eceeb..16cc1d77f70a 100644 |
896 | --- a/mm/shmem.c |
897 | +++ b/mm/shmem.c |
898 | @@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt; |
899 | #define SHORT_SYMLINK_LEN 128 |
900 | |
901 | /* |
902 | - * shmem_fallocate and shmem_writepage communicate via inode->i_private |
903 | - * (with i_mutex making sure that it has only one user at a time): |
904 | - * we would prefer not to enlarge the shmem inode just for that. |
905 | + * shmem_fallocate communicates with shmem_fault or shmem_writepage via |
906 | + * inode->i_private (with i_mutex making sure that it has only one user at |
907 | + * a time): we would prefer not to enlarge the shmem inode just for that. |
908 | */ |
909 | struct shmem_falloc { |
910 | + wait_queue_head_t *waitq; /* faults into hole wait for punch to end */ |
911 | pgoff_t start; /* start of range currently being fallocated */ |
912 | pgoff_t next; /* the next page offset to be fallocated */ |
913 | pgoff_t nr_falloced; /* how many new pages have been fallocated */ |
914 | @@ -533,22 +534,19 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, |
915 | return; |
916 | |
917 | index = start; |
918 | - for ( ; ; ) { |
919 | + while (index < end) { |
920 | cond_resched(); |
921 | pvec.nr = shmem_find_get_pages_and_swap(mapping, index, |
922 | min(end - index, (pgoff_t)PAGEVEC_SIZE), |
923 | pvec.pages, indices); |
924 | if (!pvec.nr) { |
925 | - if (index == start || unfalloc) |
926 | + /* If all gone or hole-punch or unfalloc, we're done */ |
927 | + if (index == start || end != -1) |
928 | break; |
929 | + /* But if truncating, restart to make sure all gone */ |
930 | index = start; |
931 | continue; |
932 | } |
933 | - if ((index == start || unfalloc) && indices[0] >= end) { |
934 | - shmem_deswap_pagevec(&pvec); |
935 | - pagevec_release(&pvec); |
936 | - break; |
937 | - } |
938 | mem_cgroup_uncharge_start(); |
939 | for (i = 0; i < pagevec_count(&pvec); i++) { |
940 | struct page *page = pvec.pages[i]; |
941 | @@ -560,8 +558,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, |
942 | if (radix_tree_exceptional_entry(page)) { |
943 | if (unfalloc) |
944 | continue; |
945 | - nr_swaps_freed += !shmem_free_swap(mapping, |
946 | - index, page); |
947 | + if (shmem_free_swap(mapping, index, page)) { |
948 | + /* Swap was replaced by page: retry */ |
949 | + index--; |
950 | + break; |
951 | + } |
952 | + nr_swaps_freed++; |
953 | continue; |
954 | } |
955 | |
956 | @@ -570,6 +572,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, |
957 | if (page->mapping == mapping) { |
958 | VM_BUG_ON(PageWriteback(page)); |
959 | truncate_inode_page(mapping, page); |
960 | + } else { |
961 | + /* Page was replaced by swap: retry */ |
962 | + unlock_page(page); |
963 | + index--; |
964 | + break; |
965 | } |
966 | } |
967 | unlock_page(page); |
968 | @@ -826,6 +833,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) |
969 | spin_lock(&inode->i_lock); |
970 | shmem_falloc = inode->i_private; |
971 | if (shmem_falloc && |
972 | + !shmem_falloc->waitq && |
973 | index >= shmem_falloc->start && |
974 | index < shmem_falloc->next) |
975 | shmem_falloc->nr_unswapped++; |
976 | @@ -1300,6 +1308,64 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
977 | int error; |
978 | int ret = VM_FAULT_LOCKED; |
979 | |
980 | + /* |
981 | + * Trinity finds that probing a hole which tmpfs is punching can |
982 | + * prevent the hole-punch from ever completing: which in turn |
983 | + * locks writers out with its hold on i_mutex. So refrain from |
984 | + * faulting pages into the hole while it's being punched. Although |
985 | + * shmem_undo_range() does remove the additions, it may be unable to |
986 | + * keep up, as each new page needs its own unmap_mapping_range() call, |
987 | + * and the i_mmap tree grows ever slower to scan if new vmas are added. |
988 | + * |
989 | + * It does not matter if we sometimes reach this check just before the |
990 | + * hole-punch begins, so that one fault then races with the punch: |
991 | + * we just need to make racing faults a rare case. |
992 | + * |
993 | + * The implementation below would be much simpler if we just used a |
994 | + * standard mutex or completion: but we cannot take i_mutex in fault, |
995 | + * and bloating every shmem inode for this unlikely case would be sad. |
996 | + */ |
997 | + if (unlikely(inode->i_private)) { |
998 | + struct shmem_falloc *shmem_falloc; |
999 | + |
1000 | + spin_lock(&inode->i_lock); |
1001 | + shmem_falloc = inode->i_private; |
1002 | + if (shmem_falloc && |
1003 | + shmem_falloc->waitq && |
1004 | + vmf->pgoff >= shmem_falloc->start && |
1005 | + vmf->pgoff < shmem_falloc->next) { |
1006 | + wait_queue_head_t *shmem_falloc_waitq; |
1007 | + DEFINE_WAIT(shmem_fault_wait); |
1008 | + |
1009 | + ret = VM_FAULT_NOPAGE; |
1010 | + if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) && |
1011 | + !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { |
1012 | + /* It's polite to up mmap_sem if we can */ |
1013 | + up_read(&vma->vm_mm->mmap_sem); |
1014 | + ret = VM_FAULT_RETRY; |
1015 | + } |
1016 | + |
1017 | + shmem_falloc_waitq = shmem_falloc->waitq; |
1018 | + prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, |
1019 | + TASK_UNINTERRUPTIBLE); |
1020 | + spin_unlock(&inode->i_lock); |
1021 | + schedule(); |
1022 | + |
1023 | + /* |
1024 | + * shmem_falloc_waitq points into the shmem_fallocate() |
1025 | + * stack of the hole-punching task: shmem_falloc_waitq |
1026 | + * is usually invalid by the time we reach here, but |
1027 | + * finish_wait() does not dereference it in that case; |
1028 | + * though i_lock needed lest racing with wake_up_all(). |
1029 | + */ |
1030 | + spin_lock(&inode->i_lock); |
1031 | + finish_wait(shmem_falloc_waitq, &shmem_fault_wait); |
1032 | + spin_unlock(&inode->i_lock); |
1033 | + return ret; |
1034 | + } |
1035 | + spin_unlock(&inode->i_lock); |
1036 | + } |
1037 | + |
1038 | error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); |
1039 | if (error) |
1040 | return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS); |
1041 | @@ -1821,12 +1887,25 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset, |
1042 | struct address_space *mapping = file->f_mapping; |
1043 | loff_t unmap_start = round_up(offset, PAGE_SIZE); |
1044 | loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; |
1045 | + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); |
1046 | + |
1047 | + shmem_falloc.waitq = &shmem_falloc_waitq; |
1048 | + shmem_falloc.start = unmap_start >> PAGE_SHIFT; |
1049 | + shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; |
1050 | + spin_lock(&inode->i_lock); |
1051 | + inode->i_private = &shmem_falloc; |
1052 | + spin_unlock(&inode->i_lock); |
1053 | |
1054 | if ((u64)unmap_end > (u64)unmap_start) |
1055 | unmap_mapping_range(mapping, unmap_start, |
1056 | 1 + unmap_end - unmap_start, 0); |
1057 | shmem_truncate_range(inode, offset, offset + len - 1); |
1058 | /* No need to unmap again: hole-punching leaves COWed pages */ |
1059 | + |
1060 | + spin_lock(&inode->i_lock); |
1061 | + inode->i_private = NULL; |
1062 | + wake_up_all(&shmem_falloc_waitq); |
1063 | + spin_unlock(&inode->i_lock); |
1064 | error = 0; |
1065 | goto out; |
1066 | } |
1067 | @@ -1844,6 +1923,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset, |
1068 | goto out; |
1069 | } |
1070 | |
1071 | + shmem_falloc.waitq = NULL; |
1072 | shmem_falloc.start = start; |
1073 | shmem_falloc.next = start; |
1074 | shmem_falloc.nr_falloced = 0; |
1075 | diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c |
1076 | index 4a78c4de9f20..42ef36a85e69 100644 |
1077 | --- a/net/8021q/vlan_core.c |
1078 | +++ b/net/8021q/vlan_core.c |
1079 | @@ -103,8 +103,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_id); |
1080 | |
1081 | static struct sk_buff *vlan_reorder_header(struct sk_buff *skb) |
1082 | { |
1083 | - if (skb_cow(skb, skb_headroom(skb)) < 0) |
1084 | + if (skb_cow(skb, skb_headroom(skb)) < 0) { |
1085 | + kfree_skb(skb); |
1086 | return NULL; |
1087 | + } |
1088 | + |
1089 | memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN); |
1090 | skb->mac_header += VLAN_HLEN; |
1091 | return skb; |
1092 | diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c |
1093 | index 0018daccdea9..8799e171addf 100644 |
1094 | --- a/net/appletalk/ddp.c |
1095 | +++ b/net/appletalk/ddp.c |
1096 | @@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, |
1097 | goto drop; |
1098 | |
1099 | /* Queue packet (standard) */ |
1100 | - skb->sk = sock; |
1101 | - |
1102 | if (sock_queue_rcv_skb(sock, skb) < 0) |
1103 | goto drop; |
1104 | |
1105 | @@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr |
1106 | if (!skb) |
1107 | goto out; |
1108 | |
1109 | - skb->sk = sk; |
1110 | skb_reserve(skb, ddp_dl->header_length); |
1111 | skb_reserve(skb, dev->hard_header_len); |
1112 | skb->dev = dev; |
1113 | diff --git a/net/core/dst.c b/net/core/dst.c |
1114 | index df9cc810ec8e..c0e021871df8 100644 |
1115 | --- a/net/core/dst.c |
1116 | +++ b/net/core/dst.c |
1117 | @@ -267,6 +267,15 @@ again: |
1118 | } |
1119 | EXPORT_SYMBOL(dst_destroy); |
1120 | |
1121 | +static void dst_destroy_rcu(struct rcu_head *head) |
1122 | +{ |
1123 | + struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head); |
1124 | + |
1125 | + dst = dst_destroy(dst); |
1126 | + if (dst) |
1127 | + __dst_free(dst); |
1128 | +} |
1129 | + |
1130 | void dst_release(struct dst_entry *dst) |
1131 | { |
1132 | if (dst) { |
1133 | @@ -274,11 +283,8 @@ void dst_release(struct dst_entry *dst) |
1134 | |
1135 | newrefcnt = atomic_dec_return(&dst->__refcnt); |
1136 | WARN_ON(newrefcnt < 0); |
1137 | - if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) { |
1138 | - dst = dst_destroy(dst); |
1139 | - if (dst) |
1140 | - __dst_free(dst); |
1141 | - } |
1142 | + if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) |
1143 | + call_rcu(&dst->rcu_head, dst_destroy_rcu); |
1144 | } |
1145 | } |
1146 | EXPORT_SYMBOL(dst_release); |
1147 | diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c |
1148 | index c32be292c7e3..2022b46ab38f 100644 |
1149 | --- a/net/dns_resolver/dns_query.c |
1150 | +++ b/net/dns_resolver/dns_query.c |
1151 | @@ -150,7 +150,9 @@ int dns_query(const char *type, const char *name, size_t namelen, |
1152 | if (!*_result) |
1153 | goto put; |
1154 | |
1155 | - memcpy(*_result, upayload->data, len + 1); |
1156 | + memcpy(*_result, upayload->data, len); |
1157 | + (*_result)[len] = '\0'; |
1158 | + |
1159 | if (_expiry) |
1160 | *_expiry = rkey->expiry; |
1161 | |
1162 | diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c |
1163 | index 76e10b47e053..ea78ef5ac352 100644 |
1164 | --- a/net/ipv4/icmp.c |
1165 | +++ b/net/ipv4/icmp.c |
1166 | @@ -697,8 +697,6 @@ static void icmp_unreach(struct sk_buff *skb) |
1167 | &iph->daddr); |
1168 | } else { |
1169 | info = ntohs(icmph->un.frag.mtu); |
1170 | - if (!info) |
1171 | - goto out; |
1172 | } |
1173 | break; |
1174 | case ICMP_SR_FAILED: |
1175 | diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c |
1176 | index 089b4af4fecc..38d63ca8a6b5 100644 |
1177 | --- a/net/ipv4/igmp.c |
1178 | +++ b/net/ipv4/igmp.c |
1179 | @@ -1874,6 +1874,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) |
1180 | |
1181 | rtnl_lock(); |
1182 | in_dev = ip_mc_find_dev(net, imr); |
1183 | + if (!in_dev) { |
1184 | + ret = -ENODEV; |
1185 | + goto out; |
1186 | + } |
1187 | ifindex = imr->imr_ifindex; |
1188 | for (imlp = &inet->mc_list; |
1189 | (iml = rtnl_dereference(*imlp)) != NULL; |
1190 | @@ -1891,16 +1895,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) |
1191 | |
1192 | *imlp = iml->next_rcu; |
1193 | |
1194 | - if (in_dev) |
1195 | - ip_mc_dec_group(in_dev, group); |
1196 | + ip_mc_dec_group(in_dev, group); |
1197 | rtnl_unlock(); |
1198 | /* decrease mem now to avoid the memleak warning */ |
1199 | atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); |
1200 | kfree_rcu(iml, rcu); |
1201 | return 0; |
1202 | } |
1203 | - if (!in_dev) |
1204 | - ret = -ENODEV; |
1205 | +out: |
1206 | rtnl_unlock(); |
1207 | return ret; |
1208 | } |
1209 | diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c |
1210 | index ec7264514a82..089ed81d1878 100644 |
1211 | --- a/net/ipv4/ip_options.c |
1212 | +++ b/net/ipv4/ip_options.c |
1213 | @@ -288,6 +288,10 @@ int ip_options_compile(struct net *net, |
1214 | optptr++; |
1215 | continue; |
1216 | } |
1217 | + if (unlikely(l < 2)) { |
1218 | + pp_ptr = optptr; |
1219 | + goto error; |
1220 | + } |
1221 | optlen = optptr[1]; |
1222 | if (optlen<2 || optlen>l) { |
1223 | pp_ptr = optptr; |
1224 | diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c |
1225 | index fa6573264c8a..5642374cb751 100644 |
1226 | --- a/net/ipv4/ip_tunnel.c |
1227 | +++ b/net/ipv4/ip_tunnel.c |
1228 | @@ -166,6 +166,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, |
1229 | |
1230 | hlist_for_each_entry_rcu(t, head, hash_node) { |
1231 | if (remote != t->parms.iph.daddr || |
1232 | + t->parms.iph.saddr != 0 || |
1233 | !(t->dev->flags & IFF_UP)) |
1234 | continue; |
1235 | |
1236 | @@ -182,10 +183,11 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, |
1237 | head = &itn->tunnels[hash]; |
1238 | |
1239 | hlist_for_each_entry_rcu(t, head, hash_node) { |
1240 | - if ((local != t->parms.iph.saddr && |
1241 | - (local != t->parms.iph.daddr || |
1242 | - !ipv4_is_multicast(local))) || |
1243 | - !(t->dev->flags & IFF_UP)) |
1244 | + if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) && |
1245 | + (local != t->parms.iph.daddr || !ipv4_is_multicast(local))) |
1246 | + continue; |
1247 | + |
1248 | + if (!(t->dev->flags & IFF_UP)) |
1249 | continue; |
1250 | |
1251 | if (!ip_tunnel_key_match(&t->parms, flags, key)) |
1252 | @@ -202,6 +204,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, |
1253 | |
1254 | hlist_for_each_entry_rcu(t, head, hash_node) { |
1255 | if (t->parms.i_key != key || |
1256 | + t->parms.iph.saddr != 0 || |
1257 | + t->parms.iph.daddr != 0 || |
1258 | !(t->dev->flags & IFF_UP)) |
1259 | continue; |
1260 | |
1261 | diff --git a/net/ipv4/route.c b/net/ipv4/route.c |
1262 | index 7256eef088b2..2b9887becb5c 100644 |
1263 | --- a/net/ipv4/route.c |
1264 | +++ b/net/ipv4/route.c |
1265 | @@ -985,20 +985,21 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) |
1266 | const struct iphdr *iph = (const struct iphdr *) skb->data; |
1267 | struct flowi4 fl4; |
1268 | struct rtable *rt; |
1269 | - struct dst_entry *dst; |
1270 | + struct dst_entry *odst = NULL; |
1271 | bool new = false; |
1272 | |
1273 | bh_lock_sock(sk); |
1274 | - rt = (struct rtable *) __sk_dst_get(sk); |
1275 | + odst = sk_dst_get(sk); |
1276 | |
1277 | - if (sock_owned_by_user(sk) || !rt) { |
1278 | + if (sock_owned_by_user(sk) || !odst) { |
1279 | __ipv4_sk_update_pmtu(skb, sk, mtu); |
1280 | goto out; |
1281 | } |
1282 | |
1283 | __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0); |
1284 | |
1285 | - if (!__sk_dst_check(sk, 0)) { |
1286 | + rt = (struct rtable *)odst; |
1287 | + if (odst->obsolete && odst->ops->check(odst, 0) == NULL) { |
1288 | rt = ip_route_output_flow(sock_net(sk), &fl4, sk); |
1289 | if (IS_ERR(rt)) |
1290 | goto out; |
1291 | @@ -1008,8 +1009,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) |
1292 | |
1293 | __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu); |
1294 | |
1295 | - dst = dst_check(&rt->dst, 0); |
1296 | - if (!dst) { |
1297 | + if (!dst_check(&rt->dst, 0)) { |
1298 | if (new) |
1299 | dst_release(&rt->dst); |
1300 | |
1301 | @@ -1021,10 +1021,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) |
1302 | } |
1303 | |
1304 | if (new) |
1305 | - __sk_dst_set(sk, &rt->dst); |
1306 | + sk_dst_set(sk, &rt->dst); |
1307 | |
1308 | out: |
1309 | bh_unlock_sock(sk); |
1310 | + dst_release(odst); |
1311 | } |
1312 | EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu); |
1313 | |
1314 | diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c |
1315 | index 39bdb14b3214..5d4bd6ca3ab1 100644 |
1316 | --- a/net/ipv4/tcp.c |
1317 | +++ b/net/ipv4/tcp.c |
1318 | @@ -1065,7 +1065,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, |
1319 | if (unlikely(tp->repair)) { |
1320 | if (tp->repair_queue == TCP_RECV_QUEUE) { |
1321 | copied = tcp_send_rcvq(sk, msg, size); |
1322 | - goto out; |
1323 | + goto out_nopush; |
1324 | } |
1325 | |
1326 | err = -EINVAL; |
1327 | @@ -1238,6 +1238,7 @@ wait_for_memory: |
1328 | out: |
1329 | if (copied) |
1330 | tcp_push(sk, flags, mss_now, tp->nonagle); |
1331 | +out_nopush: |
1332 | release_sock(sk); |
1333 | return copied + copied_syn; |
1334 | |
1335 | diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c |
1336 | index ba7d2b7ad9f9..ea7f52f3062d 100644 |
1337 | --- a/net/ipv4/tcp_input.c |
1338 | +++ b/net/ipv4/tcp_input.c |
1339 | @@ -1075,7 +1075,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, |
1340 | } |
1341 | |
1342 | /* D-SACK for already forgotten data... Do dumb counting. */ |
1343 | - if (dup_sack && tp->undo_marker && tp->undo_retrans && |
1344 | + if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 && |
1345 | !after(end_seq_0, prior_snd_una) && |
1346 | after(end_seq_0, tp->undo_marker)) |
1347 | tp->undo_retrans--; |
1348 | @@ -1130,7 +1130,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, |
1349 | unsigned int new_len = (pkt_len / mss) * mss; |
1350 | if (!in_sack && new_len < pkt_len) { |
1351 | new_len += mss; |
1352 | - if (new_len > skb->len) |
1353 | + if (new_len >= skb->len) |
1354 | return 0; |
1355 | } |
1356 | pkt_len = new_len; |
1357 | @@ -1154,7 +1154,7 @@ static u8 tcp_sacktag_one(struct sock *sk, |
1358 | |
1359 | /* Account D-SACK for retransmitted packet. */ |
1360 | if (dup_sack && (sacked & TCPCB_RETRANS)) { |
1361 | - if (tp->undo_marker && tp->undo_retrans && |
1362 | + if (tp->undo_marker && tp->undo_retrans > 0 && |
1363 | after(end_seq, tp->undo_marker)) |
1364 | tp->undo_retrans--; |
1365 | if (sacked & TCPCB_SACKED_ACKED) |
1366 | @@ -1850,7 +1850,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp) |
1367 | tp->lost_out = 0; |
1368 | |
1369 | tp->undo_marker = 0; |
1370 | - tp->undo_retrans = 0; |
1371 | + tp->undo_retrans = -1; |
1372 | } |
1373 | |
1374 | void tcp_clear_retrans(struct tcp_sock *tp) |
1375 | @@ -2700,7 +2700,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack) |
1376 | |
1377 | tp->prior_ssthresh = 0; |
1378 | tp->undo_marker = tp->snd_una; |
1379 | - tp->undo_retrans = tp->retrans_out; |
1380 | + tp->undo_retrans = tp->retrans_out ? : -1; |
1381 | |
1382 | if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { |
1383 | if (!ece_ack) |
1384 | diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c |
1385 | index 4a4e8746d1b2..56e29f0e230e 100644 |
1386 | --- a/net/ipv4/tcp_output.c |
1387 | +++ b/net/ipv4/tcp_output.c |
1388 | @@ -2428,13 +2428,15 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) |
1389 | if (!tp->retrans_stamp) |
1390 | tp->retrans_stamp = TCP_SKB_CB(skb)->when; |
1391 | |
1392 | - tp->undo_retrans += tcp_skb_pcount(skb); |
1393 | - |
1394 | /* snd_nxt is stored to detect loss of retransmitted segment, |
1395 | * see tcp_input.c tcp_sacktag_write_queue(). |
1396 | */ |
1397 | TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; |
1398 | } |
1399 | + |
1400 | + if (tp->undo_retrans < 0) |
1401 | + tp->undo_retrans = 0; |
1402 | + tp->undo_retrans += tcp_skb_pcount(skb); |
1403 | return err; |
1404 | } |
1405 | |
1406 | diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c |
1407 | index be34adde692f..5ed562dfe743 100644 |
1408 | --- a/net/netlink/af_netlink.c |
1409 | +++ b/net/netlink/af_netlink.c |
1410 | @@ -500,7 +500,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock, |
1411 | while (nlk->cb != NULL && netlink_dump_space(nlk)) { |
1412 | err = netlink_dump(sk); |
1413 | if (err < 0) { |
1414 | - sk->sk_err = err; |
1415 | + sk->sk_err = -err; |
1416 | sk->sk_error_report(sk); |
1417 | break; |
1418 | } |
1419 | @@ -2272,7 +2272,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, |
1420 | if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { |
1421 | ret = netlink_dump(sk); |
1422 | if (ret) { |
1423 | - sk->sk_err = ret; |
1424 | + sk->sk_err = -ret; |
1425 | sk->sk_error_report(sk); |
1426 | } |
1427 | } |
1428 | diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c |
1429 | index fe0ba7488bdf..29299dcabfbb 100644 |
1430 | --- a/net/sctp/sysctl.c |
1431 | +++ b/net/sctp/sysctl.c |
1432 | @@ -368,8 +368,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write, |
1433 | tbl.data = &net->sctp.auth_enable; |
1434 | |
1435 | ret = proc_dointvec(&tbl, write, buffer, lenp, ppos); |
1436 | - |
1437 | - if (write) { |
1438 | + if (write && ret == 0) { |
1439 | struct sock *sk = net->sctp.ctl_sock; |
1440 | |
1441 | net->sctp.auth_enable = new_value; |
1442 | diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c |
1443 | index 10c018a5b9fe..ca907f2f5e5a 100644 |
1444 | --- a/net/sctp/ulpevent.c |
1445 | +++ b/net/sctp/ulpevent.c |
1446 | @@ -373,9 +373,10 @@ fail: |
1447 | * specification [SCTP] and any extensions for a list of possible |
1448 | * error formats. |
1449 | */ |
1450 | -struct sctp_ulpevent *sctp_ulpevent_make_remote_error( |
1451 | - const struct sctp_association *asoc, struct sctp_chunk *chunk, |
1452 | - __u16 flags, gfp_t gfp) |
1453 | +struct sctp_ulpevent * |
1454 | +sctp_ulpevent_make_remote_error(const struct sctp_association *asoc, |
1455 | + struct sctp_chunk *chunk, __u16 flags, |
1456 | + gfp_t gfp) |
1457 | { |
1458 | struct sctp_ulpevent *event; |
1459 | struct sctp_remote_error *sre; |
1460 | @@ -394,8 +395,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error( |
1461 | /* Copy the skb to a new skb with room for us to prepend |
1462 | * notification with. |
1463 | */ |
1464 | - skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error), |
1465 | - 0, gfp); |
1466 | + skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp); |
1467 | |
1468 | /* Pull off the rest of the cause TLV from the chunk. */ |
1469 | skb_pull(chunk->skb, elen); |
1470 | @@ -406,62 +406,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error( |
1471 | event = sctp_skb2event(skb); |
1472 | sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); |
1473 | |
1474 | - sre = (struct sctp_remote_error *) |
1475 | - skb_push(skb, sizeof(struct sctp_remote_error)); |
1476 | + sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre)); |
1477 | |
1478 | /* Trim the buffer to the right length. */ |
1479 | - skb_trim(skb, sizeof(struct sctp_remote_error) + elen); |
1480 | + skb_trim(skb, sizeof(*sre) + elen); |
1481 | |
1482 | - /* Socket Extensions for SCTP |
1483 | - * 5.3.1.3 SCTP_REMOTE_ERROR |
1484 | - * |
1485 | - * sre_type: |
1486 | - * It should be SCTP_REMOTE_ERROR. |
1487 | - */ |
1488 | + /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */ |
1489 | + memset(sre, 0, sizeof(*sre)); |
1490 | sre->sre_type = SCTP_REMOTE_ERROR; |
1491 | - |
1492 | - /* |
1493 | - * Socket Extensions for SCTP |
1494 | - * 5.3.1.3 SCTP_REMOTE_ERROR |
1495 | - * |
1496 | - * sre_flags: 16 bits (unsigned integer) |
1497 | - * Currently unused. |
1498 | - */ |
1499 | sre->sre_flags = 0; |
1500 | - |
1501 | - /* Socket Extensions for SCTP |
1502 | - * 5.3.1.3 SCTP_REMOTE_ERROR |
1503 | - * |
1504 | - * sre_length: sizeof (__u32) |
1505 | - * |
1506 | - * This field is the total length of the notification data, |
1507 | - * including the notification header. |
1508 | - */ |
1509 | sre->sre_length = skb->len; |
1510 | - |
1511 | - /* Socket Extensions for SCTP |
1512 | - * 5.3.1.3 SCTP_REMOTE_ERROR |
1513 | - * |
1514 | - * sre_error: 16 bits (unsigned integer) |
1515 | - * This value represents one of the Operational Error causes defined in |
1516 | - * the SCTP specification, in network byte order. |
1517 | - */ |
1518 | sre->sre_error = cause; |
1519 | - |
1520 | - /* Socket Extensions for SCTP |
1521 | - * 5.3.1.3 SCTP_REMOTE_ERROR |
1522 | - * |
1523 | - * sre_assoc_id: sizeof (sctp_assoc_t) |
1524 | - * |
1525 | - * The association id field, holds the identifier for the association. |
1526 | - * All notifications for a given association have the same association |
1527 | - * identifier. For TCP style socket, this field is ignored. |
1528 | - */ |
1529 | sctp_ulpevent_set_owner(event, asoc); |
1530 | sre->sre_assoc_id = sctp_assoc2id(asoc); |
1531 | |
1532 | return event; |
1533 | - |
1534 | fail: |
1535 | return NULL; |
1536 | } |
1537 | @@ -906,7 +865,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event) |
1538 | return notification->sn_header.sn_type; |
1539 | } |
1540 | |
1541 | -/* Copy out the sndrcvinfo into a msghdr. */ |
1542 | +/* RFC6458, Section 5.3.2. SCTP Header Information Structure |
1543 | + * (SCTP_SNDRCV, DEPRECATED) |
1544 | + */ |
1545 | void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, |
1546 | struct msghdr *msghdr) |
1547 | { |
1548 | @@ -915,74 +876,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, |
1549 | if (sctp_ulpevent_is_notification(event)) |
1550 | return; |
1551 | |
1552 | - /* Sockets API Extensions for SCTP |
1553 | - * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV) |
1554 | - * |
1555 | - * sinfo_stream: 16 bits (unsigned integer) |
1556 | - * |
1557 | - * For recvmsg() the SCTP stack places the message's stream number in |
1558 | - * this value. |
1559 | - */ |
1560 | + memset(&sinfo, 0, sizeof(sinfo)); |
1561 | sinfo.sinfo_stream = event->stream; |
1562 | - /* sinfo_ssn: 16 bits (unsigned integer) |
1563 | - * |
1564 | - * For recvmsg() this value contains the stream sequence number that |
1565 | - * the remote endpoint placed in the DATA chunk. For fragmented |
1566 | - * messages this is the same number for all deliveries of the message |
1567 | - * (if more than one recvmsg() is needed to read the message). |
1568 | - */ |
1569 | sinfo.sinfo_ssn = event->ssn; |
1570 | - /* sinfo_ppid: 32 bits (unsigned integer) |
1571 | - * |
1572 | - * In recvmsg() this value is |
1573 | - * the same information that was passed by the upper layer in the peer |
1574 | - * application. Please note that byte order issues are NOT accounted |
1575 | - * for and this information is passed opaquely by the SCTP stack from |
1576 | - * one end to the other. |
1577 | - */ |
1578 | sinfo.sinfo_ppid = event->ppid; |
1579 | - /* sinfo_flags: 16 bits (unsigned integer) |
1580 | - * |
1581 | - * This field may contain any of the following flags and is composed of |
1582 | - * a bitwise OR of these values. |
1583 | - * |
1584 | - * recvmsg() flags: |
1585 | - * |
1586 | - * SCTP_UNORDERED - This flag is present when the message was sent |
1587 | - * non-ordered. |
1588 | - */ |
1589 | sinfo.sinfo_flags = event->flags; |
1590 | - /* sinfo_tsn: 32 bit (unsigned integer) |
1591 | - * |
1592 | - * For the receiving side, this field holds a TSN that was |
1593 | - * assigned to one of the SCTP Data Chunks. |
1594 | - */ |
1595 | sinfo.sinfo_tsn = event->tsn; |
1596 | - /* sinfo_cumtsn: 32 bit (unsigned integer) |
1597 | - * |
1598 | - * This field will hold the current cumulative TSN as |
1599 | - * known by the underlying SCTP layer. Note this field is |
1600 | - * ignored when sending and only valid for a receive |
1601 | - * operation when sinfo_flags are set to SCTP_UNORDERED. |
1602 | - */ |
1603 | sinfo.sinfo_cumtsn = event->cumtsn; |
1604 | - /* sinfo_assoc_id: sizeof (sctp_assoc_t) |
1605 | - * |
1606 | - * The association handle field, sinfo_assoc_id, holds the identifier |
1607 | - * for the association announced in the COMMUNICATION_UP notification. |
1608 | - * All notifications for a given association have the same identifier. |
1609 | - * Ignored for one-to-one style sockets. |
1610 | - */ |
1611 | sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc); |
1612 | - |
1613 | - /* context value that is set via SCTP_CONTEXT socket option. */ |
1614 | + /* Context value that is set via SCTP_CONTEXT socket option. */ |
1615 | sinfo.sinfo_context = event->asoc->default_rcv_context; |
1616 | - |
1617 | /* These fields are not used while receiving. */ |
1618 | sinfo.sinfo_timetolive = 0; |
1619 | |
1620 | put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV, |
1621 | - sizeof(struct sctp_sndrcvinfo), (void *)&sinfo); |
1622 | + sizeof(sinfo), &sinfo); |
1623 | } |
1624 | |
1625 | /* Do accounting for bytes received and hold a reference to the association |
1626 | diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c |
1627 | index e5f3da507823..bf2755419ec6 100644 |
1628 | --- a/net/tipc/bcast.c |
1629 | +++ b/net/tipc/bcast.c |
1630 | @@ -531,6 +531,7 @@ receive: |
1631 | |
1632 | buf = node->bclink.deferred_head; |
1633 | node->bclink.deferred_head = buf->next; |
1634 | + buf->next = NULL; |
1635 | node->bclink.deferred_size--; |
1636 | goto receive; |
1637 | } |