Magellan Linux

Contents of /trunk/kernel-lts/patches-3.4/0156-3.4.57-all-fixes.patch

Revision 2264
Mon Aug 19 09:33:43 2013 UTC by niro
File size: 24452 byte(s)
-fixed patches
1 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
2 index 2d6e649..6610e81 100644
3 --- a/arch/x86/kernel/i387.c
4 +++ b/arch/x86/kernel/i387.c
5 @@ -132,7 +132,7 @@ static void __cpuinit mxcsr_feature_mask_init(void)
6 clts();
7 if (cpu_has_fxsr) {
8 memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
9 - asm volatile("fxsave %0" : : "m" (fx_scratch));
10 + asm volatile("fxsave %0" : "+m" (fx_scratch));
11 mask = fx_scratch.mxcsr_mask;
12 if (mask == 0)
13 mask = 0x0000ffbf;
14 diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
15 index 6ea287e2..9bdfcf5 100644
16 --- a/drivers/acpi/battery.c
17 +++ b/drivers/acpi/battery.c
18 @@ -117,6 +117,7 @@ struct acpi_battery {
19 struct acpi_device *device;
20 struct notifier_block pm_nb;
21 unsigned long update_time;
22 + int revision;
23 int rate_now;
24 int capacity_now;
25 int voltage_now;
26 @@ -350,6 +351,7 @@ static struct acpi_offsets info_offsets[] = {
27 };
28
29 static struct acpi_offsets extended_info_offsets[] = {
30 + {offsetof(struct acpi_battery, revision), 0},
31 {offsetof(struct acpi_battery, power_unit), 0},
32 {offsetof(struct acpi_battery, design_capacity), 0},
33 {offsetof(struct acpi_battery, full_charge_capacity), 0},
34 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
35 index 45c5cf8..232119a 100644
36 --- a/drivers/gpu/drm/i915/i915_drv.h
37 +++ b/drivers/gpu/drm/i915/i915_drv.h
38 @@ -296,6 +296,7 @@ enum intel_pch {
39
40 #define QUIRK_PIPEA_FORCE (1<<0)
41 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
42 +#define QUIRK_NO_PCH_PWM_ENABLE (1<<2)
43
44 struct intel_fbdev;
45 struct intel_fbc_work;
46 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
47 index 84867a8..0e35922 100644
48 --- a/drivers/gpu/drm/i915/intel_display.c
49 +++ b/drivers/gpu/drm/i915/intel_display.c
50 @@ -9160,6 +9160,17 @@ static void quirk_ssc_force_disable(struct drm_device *dev)
51 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
52 }
53
54 +/*
55 + * Some machines (Dell XPS13) suffer broken backlight controls if
56 + * BLM_PCH_PWM_ENABLE is set.
57 + */
58 +static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
59 +{
60 + struct drm_i915_private *dev_priv = dev->dev_private;
61 + dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
62 + DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
63 +}
64 +
65 struct intel_quirk {
66 int device;
67 int subsystem_vendor;
68 @@ -9192,6 +9203,11 @@ struct intel_quirk intel_quirks[] = {
69
70 /* Sony Vaio Y cannot use SSC on LVDS */
71 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
72 +
73 + /* Dell XPS13 HD Sandy Bridge */
74 + { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
75 + /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
76 + { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
77 };
78
79 static void intel_init_quirks(struct drm_device *dev)
80 diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
81 index 207180d..ab4d990 100644
82 --- a/drivers/gpu/drm/i915/intel_lvds.c
83 +++ b/drivers/gpu/drm/i915/intel_lvds.c
84 @@ -1097,7 +1097,8 @@ bool intel_lvds_init(struct drm_device *dev)
85 goto failed;
86
87 out:
88 - if (HAS_PCH_SPLIT(dev)) {
89 + if (HAS_PCH_SPLIT(dev) &&
90 + !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
91 u32 pwm;
92
93 pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
94 diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
95 index a746ba2..a956053 100644
96 --- a/drivers/net/arcnet/arcnet.c
97 +++ b/drivers/net/arcnet/arcnet.c
98 @@ -1007,7 +1007,7 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
99
100 soft = &pkt.soft.rfc1201;
101
102 - lp->hw.copy_from_card(dev, bufnum, 0, &pkt, sizeof(ARC_HDR_SIZE));
103 + lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
104 if (pkt.hard.offset[0]) {
105 ofs = pkt.hard.offset[0];
106 length = 256 - ofs;
107 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
108 index d3695ed..a061e37 100644
109 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
110 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
111 @@ -108,9 +108,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
112
113 /* Enable arbiter */
114 reg &= ~IXGBE_DPMCS_ARBDIS;
115 - /* Enable DFP and Recycle mode */
116 - reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
117 reg |= IXGBE_DPMCS_TSOEF;
118 +
119 /* Configure Max TSO packet size 34KB including payload and headers */
120 reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
121
122 diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
123 index 4ce981c..2205db7 100644
124 --- a/drivers/net/ethernet/realtek/8139cp.c
125 +++ b/drivers/net/ethernet/realtek/8139cp.c
126 @@ -478,7 +478,7 @@ rx_status_loop:
127
128 while (1) {
129 u32 status, len;
130 - dma_addr_t mapping;
131 + dma_addr_t mapping, new_mapping;
132 struct sk_buff *skb, *new_skb;
133 struct cp_desc *desc;
134 const unsigned buflen = cp->rx_buf_sz;
135 @@ -520,6 +520,13 @@ rx_status_loop:
136 goto rx_next;
137 }
138
139 + new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
140 + PCI_DMA_FROMDEVICE);
141 + if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
142 + dev->stats.rx_dropped++;
143 + goto rx_next;
144 + }
145 +
146 dma_unmap_single(&cp->pdev->dev, mapping,
147 buflen, PCI_DMA_FROMDEVICE);
148
149 @@ -531,12 +538,11 @@ rx_status_loop:
150
151 skb_put(skb, len);
152
153 - mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
154 - PCI_DMA_FROMDEVICE);
155 cp->rx_skb[rx_tail] = new_skb;
156
157 cp_rx_skb(cp, skb, desc);
158 rx++;
159 + mapping = new_mapping;
160
161 rx_next:
162 cp->rx_ring[rx_tail].opts2 = 0;
163 @@ -704,6 +710,22 @@ static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
164 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
165 }
166
167 +static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
168 + int first, int entry_last)
169 +{
170 + int frag, index;
171 + struct cp_desc *txd;
172 + skb_frag_t *this_frag;
173 + for (frag = 0; frag+first < entry_last; frag++) {
174 + index = first+frag;
175 + cp->tx_skb[index] = NULL;
176 + txd = &cp->tx_ring[index];
177 + this_frag = &skb_shinfo(skb)->frags[frag];
178 + dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
179 + skb_frag_size(this_frag), PCI_DMA_TODEVICE);
180 + }
181 +}
182 +
183 static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
184 struct net_device *dev)
185 {
186 @@ -737,6 +759,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
187
188 len = skb->len;
189 mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
190 + if (dma_mapping_error(&cp->pdev->dev, mapping))
191 + goto out_dma_error;
192 +
193 txd->opts2 = opts2;
194 txd->addr = cpu_to_le64(mapping);
195 wmb();
196 @@ -774,6 +799,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
197 first_len = skb_headlen(skb);
198 first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
199 first_len, PCI_DMA_TODEVICE);
200 + if (dma_mapping_error(&cp->pdev->dev, first_mapping))
201 + goto out_dma_error;
202 +
203 cp->tx_skb[entry] = skb;
204 entry = NEXT_TX(entry);
205
206 @@ -787,6 +815,11 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
207 mapping = dma_map_single(&cp->pdev->dev,
208 skb_frag_address(this_frag),
209 len, PCI_DMA_TODEVICE);
210 + if (dma_mapping_error(&cp->pdev->dev, mapping)) {
211 + unwind_tx_frag_mapping(cp, skb, first_entry, entry);
212 + goto out_dma_error;
213 + }
214 +
215 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
216
217 ctrl = eor | len | DescOwn;
218 @@ -845,11 +878,16 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
219 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
220 netif_stop_queue(dev);
221
222 +out_unlock:
223 spin_unlock_irqrestore(&cp->lock, intr_flags);
224
225 cpw8(TxPoll, NormalTxPoll);
226
227 return NETDEV_TX_OK;
228 +out_dma_error:
229 + kfree_skb(skb);
230 + cp->dev->stats.tx_dropped++;
231 + goto out_unlock;
232 }
233
234 /* Set or clear the multicast filter for this adaptor.
235 @@ -1020,6 +1058,10 @@ static int cp_refill_rx(struct cp_private *cp)
236
237 mapping = dma_map_single(&cp->pdev->dev, skb->data,
238 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
239 + if (dma_mapping_error(&cp->pdev->dev, mapping)) {
240 + kfree_skb(skb);
241 + goto err_out;
242 + }
243 cp->rx_skb[i] = skb;
244
245 cp->rx_ring[i].opts2 = 0;
246 diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
247 index 5caba55..d89747a 100644
248 --- a/drivers/net/usb/smsc75xx.c
249 +++ b/drivers/net/usb/smsc75xx.c
250 @@ -43,7 +43,6 @@
251 #define EEPROM_MAC_OFFSET (0x01)
252 #define DEFAULT_TX_CSUM_ENABLE (true)
253 #define DEFAULT_RX_CSUM_ENABLE (true)
254 -#define DEFAULT_TSO_ENABLE (true)
255 #define SMSC75XX_INTERNAL_PHY_ID (1)
256 #define SMSC75XX_TX_OVERHEAD (8)
257 #define MAX_RX_FIFO_SIZE (20 * 1024)
258 @@ -1049,17 +1048,14 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
259
260 INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write);
261
262 - if (DEFAULT_TX_CSUM_ENABLE) {
263 + if (DEFAULT_TX_CSUM_ENABLE)
264 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
265 - if (DEFAULT_TSO_ENABLE)
266 - dev->net->features |= NETIF_F_SG |
267 - NETIF_F_TSO | NETIF_F_TSO6;
268 - }
269 +
270 if (DEFAULT_RX_CSUM_ENABLE)
271 dev->net->features |= NETIF_F_RXCSUM;
272
273 dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
274 - NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM;
275 + NETIF_F_RXCSUM;
276
277 /* Init all registers */
278 ret = smsc75xx_reset(dev);
279 @@ -1184,8 +1180,6 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
280 {
281 u32 tx_cmd_a, tx_cmd_b;
282
283 - skb_linearize(skb);
284 -
285 if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
286 struct sk_buff *skb2 =
287 skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
288 diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
289 index 41c5237..2b8406a 100644
290 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
291 +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
292 @@ -821,6 +821,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
293 if (error != 0)
294 goto err_rx;
295
296 + ath9k_hw_disable(priv->ah);
297 #ifdef CONFIG_MAC80211_LEDS
298 /* must be initialized before ieee80211_register_hw */
299 priv->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw,
300 diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
301 index 7d00a87..4be8ccc 100644
302 --- a/drivers/net/wireless/mwifiex/sdio.c
303 +++ b/drivers/net/wireless/mwifiex/sdio.c
304 @@ -1449,8 +1449,8 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
305 /* Allocate buffer and copy payload */
306 blk_size = MWIFIEX_SDIO_BLOCK_SIZE;
307 buf_block_len = (pkt_len + blk_size - 1) / blk_size;
308 - *(u16 *) &payload[0] = (u16) pkt_len;
309 - *(u16 *) &payload[2] = type;
310 + *(__le16 *)&payload[0] = cpu_to_le16((u16)pkt_len);
311 + *(__le16 *)&payload[2] = cpu_to_le16(type);
312
313 /*
314 * This is SDIO specific header
315 diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
316 index 50f92d5..4d792a2 100644
317 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c
318 +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
319 @@ -856,13 +856,8 @@ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
320 spin_unlock_irqrestore(&queue->index_lock, irqflags);
321 }
322
323 -void rt2x00queue_pause_queue(struct data_queue *queue)
324 +void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
325 {
326 - if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
327 - !test_bit(QUEUE_STARTED, &queue->flags) ||
328 - test_and_set_bit(QUEUE_PAUSED, &queue->flags))
329 - return;
330 -
331 switch (queue->qid) {
332 case QID_AC_VO:
333 case QID_AC_VI:
334 @@ -878,6 +873,15 @@ void rt2x00queue_pause_queue(struct data_queue *queue)
335 break;
336 }
337 }
338 +void rt2x00queue_pause_queue(struct data_queue *queue)
339 +{
340 + if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
341 + !test_bit(QUEUE_STARTED, &queue->flags) ||
342 + test_and_set_bit(QUEUE_PAUSED, &queue->flags))
343 + return;
344 +
345 + rt2x00queue_pause_queue_nocheck(queue);
346 +}
347 EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
348
349 void rt2x00queue_unpause_queue(struct data_queue *queue)
350 @@ -939,7 +943,7 @@ void rt2x00queue_stop_queue(struct data_queue *queue)
351 return;
352 }
353
354 - rt2x00queue_pause_queue(queue);
355 + rt2x00queue_pause_queue_nocheck(queue);
356
357 queue->rt2x00dev->ops->lib->stop_queue(queue);
358
359 diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
360 index 039c054..c75b27b 100644
361 --- a/drivers/tty/serial/mxs-auart.c
362 +++ b/drivers/tty/serial/mxs-auart.c
363 @@ -375,11 +375,18 @@ static void mxs_auart_settermios(struct uart_port *u,
364
365 static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
366 {
367 - u32 istatus, istat;
368 + u32 istat;
369 struct mxs_auart_port *s = context;
370 u32 stat = readl(s->port.membase + AUART_STAT);
371
372 - istatus = istat = readl(s->port.membase + AUART_INTR);
373 + istat = readl(s->port.membase + AUART_INTR);
374 +
375 + /* ack irq */
376 + writel(istat & (AUART_INTR_RTIS
377 + | AUART_INTR_TXIS
378 + | AUART_INTR_RXIS
379 + | AUART_INTR_CTSMIS),
380 + s->port.membase + AUART_INTR_CLR);
381
382 if (istat & AUART_INTR_CTSMIS) {
383 uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS);
384 @@ -398,12 +405,6 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
385 istat &= ~AUART_INTR_TXIS;
386 }
387
388 - writel(istatus & (AUART_INTR_RTIS
389 - | AUART_INTR_TXIS
390 - | AUART_INTR_RXIS
391 - | AUART_INTR_CTSMIS),
392 - s->port.membase + AUART_INTR_CLR);
393 -
394 return IRQ_HANDLED;
395 }
396
397 @@ -543,7 +544,7 @@ auart_console_write(struct console *co, const char *str, unsigned int count)
398 struct mxs_auart_port *s;
399 struct uart_port *port;
400 unsigned int old_ctrl0, old_ctrl2;
401 - unsigned int to = 1000;
402 + unsigned int to = 20000;
403
404 if (co->index > MXS_AUART_PORTS || co->index < 0)
405 return;
406 @@ -564,18 +565,23 @@ auart_console_write(struct console *co, const char *str, unsigned int count)
407
408 uart_console_write(port, str, count, mxs_auart_console_putchar);
409
410 - /*
411 - * Finally, wait for transmitter to become empty
412 - * and restore the TCR
413 - */
414 + /* Finally, wait for transmitter to become empty ... */
415 while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) {
416 + udelay(1);
417 if (!to--)
418 break;
419 - udelay(1);
420 }
421
422 - writel(old_ctrl0, port->membase + AUART_CTRL0);
423 - writel(old_ctrl2, port->membase + AUART_CTRL2);
424 + /*
425 + * ... and restore the TCR if we waited long enough for the transmitter
426 + * to be idle. This might keep the transmitter enabled although it is
427 + * unused, but that is better than to disable it while it is still
428 + * transmitting.
429 + */
430 + if (!(readl(port->membase + AUART_STAT) & AUART_STAT_BUSY)) {
431 + writel(old_ctrl0, port->membase + AUART_CTRL0);
432 + writel(old_ctrl2, port->membase + AUART_CTRL2);
433 + }
434
435 clk_disable(s->clk);
436 }
437 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
438 index 3568c8a..48bc91d 100644
439 --- a/fs/notify/fanotify/fanotify_user.c
440 +++ b/fs/notify/fanotify/fanotify_user.c
441 @@ -120,6 +120,7 @@ static int fill_event_metadata(struct fsnotify_group *group,
442 metadata->event_len = FAN_EVENT_METADATA_LEN;
443 metadata->metadata_len = FAN_EVENT_METADATA_LEN;
444 metadata->vers = FANOTIFY_METADATA_VERSION;
445 + metadata->reserved = 0;
446 metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
447 metadata->pid = pid_vnr(event->tgid);
448 if (unlikely(event->mask & FAN_Q_OVERFLOW))
449 diff --git a/kernel/events/core.c b/kernel/events/core.c
450 index d074cf0..8e810ba 100644
451 --- a/kernel/events/core.c
452 +++ b/kernel/events/core.c
453 @@ -250,9 +250,9 @@ perf_cgroup_match(struct perf_event *event)
454 return !event->cgrp || event->cgrp == cpuctx->cgrp;
455 }
456
457 -static inline void perf_get_cgroup(struct perf_event *event)
458 +static inline bool perf_tryget_cgroup(struct perf_event *event)
459 {
460 - css_get(&event->cgrp->css);
461 + return css_tryget(&event->cgrp->css);
462 }
463
464 static inline void perf_put_cgroup(struct perf_event *event)
465 @@ -481,7 +481,11 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
466 event->cgrp = cgrp;
467
468 /* must be done before we fput() the file */
469 - perf_get_cgroup(event);
470 + if (!perf_tryget_cgroup(event)) {
471 + event->cgrp = NULL;
472 + ret = -ENOENT;
473 + goto out;
474 + }
475
476 /*
477 * all events in a group must monitor
478 @@ -911,6 +915,15 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
479 }
480
481 /*
482 + * Initialize event state based on the perf_event_attr::disabled.
483 + */
484 +static inline void perf_event__state_init(struct perf_event *event)
485 +{
486 + event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
487 + PERF_EVENT_STATE_INACTIVE;
488 +}
489 +
490 +/*
491 * Called at perf_event creation and when events are attached/detached from a
492 * group.
493 */
494 @@ -6058,8 +6071,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
495 event->overflow_handler = overflow_handler;
496 event->overflow_handler_context = context;
497
498 - if (attr->disabled)
499 - event->state = PERF_EVENT_STATE_OFF;
500 + perf_event__state_init(event);
501
502 pmu = NULL;
503
504 @@ -6481,9 +6493,17 @@ SYSCALL_DEFINE5(perf_event_open,
505
506 mutex_lock(&gctx->mutex);
507 perf_remove_from_context(group_leader);
508 +
509 + /*
510 + * Removing from the context ends up with disabled
511 + * event. What we want here is event in the initial
512 + * startup state, ready to be add into new context.
513 + */
514 + perf_event__state_init(group_leader);
515 list_for_each_entry(sibling, &group_leader->sibling_list,
516 group_entry) {
517 perf_remove_from_context(sibling);
518 + perf_event__state_init(sibling);
519 put_ctx(gctx);
520 }
521 mutex_unlock(&gctx->mutex);
522 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
523 index e955364..da4512f 100644
524 --- a/kernel/sched/fair.c
525 +++ b/kernel/sched/fair.c
526 @@ -5511,7 +5511,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
527 * idle runqueue:
528 */
529 if (rq->cfs.load.weight)
530 - rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
531 + rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
532
533 return rr_interval;
534 }
535 diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
536 index bf7a604..086c973 100644
537 --- a/net/ipv4/sysctl_net_ipv4.c
538 +++ b/net/ipv4/sysctl_net_ipv4.c
539 @@ -34,6 +34,8 @@ static int tcp_adv_win_scale_min = -31;
540 static int tcp_adv_win_scale_max = 31;
541 static int ip_ttl_min = 1;
542 static int ip_ttl_max = 255;
543 +static int tcp_syn_retries_min = 1;
544 +static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
545 static int ip_ping_group_range_min[] = { 0, 0 };
546 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
547
548 @@ -276,7 +278,9 @@ static struct ctl_table ipv4_table[] = {
549 .data = &sysctl_tcp_syn_retries,
550 .maxlen = sizeof(int),
551 .mode = 0644,
552 - .proc_handler = proc_dointvec
553 + .proc_handler = proc_dointvec_minmax,
554 + .extra1 = &tcp_syn_retries_min,
555 + .extra2 = &tcp_syn_retries_max
556 },
557 {
558 .procname = "tcp_synack_retries",
559 diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
560 index 8110362..d5e4615 100644
561 --- a/net/ipv6/ip6mr.c
562 +++ b/net/ipv6/ip6mr.c
563 @@ -256,10 +256,12 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
564 {
565 struct mr6_table *mrt, *next;
566
567 + rtnl_lock();
568 list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
569 list_del(&mrt->list);
570 ip6mr_free_table(mrt);
571 }
572 + rtnl_unlock();
573 fib_rules_unregister(net->ipv6.mr6_rules_ops);
574 }
575 #else
576 @@ -286,7 +288,10 @@ static int __net_init ip6mr_rules_init(struct net *net)
577
578 static void __net_exit ip6mr_rules_exit(struct net *net)
579 {
580 + rtnl_lock();
581 ip6mr_free_table(net->ipv6.mrt6);
582 + net->ipv6.mrt6 = NULL;
583 + rtnl_unlock();
584 }
585 #endif
586
587 diff --git a/net/key/af_key.c b/net/key/af_key.c
588 index 5bbab6a..60109f4 100644
589 --- a/net/key/af_key.c
590 +++ b/net/key/af_key.c
591 @@ -2073,6 +2073,7 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, const struct xfrm_policy *
592 pol->sadb_x_policy_type = IPSEC_POLICY_NONE;
593 }
594 pol->sadb_x_policy_dir = dir+1;
595 + pol->sadb_x_policy_reserved = 0;
596 pol->sadb_x_policy_id = xp->index;
597 pol->sadb_x_policy_priority = xp->priority;
598
599 @@ -3108,7 +3109,9 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
600 pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
601 pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
602 pol->sadb_x_policy_dir = dir+1;
603 + pol->sadb_x_policy_reserved = 0;
604 pol->sadb_x_policy_id = xp->index;
605 + pol->sadb_x_policy_priority = xp->priority;
606
607 /* Set sadb_comb's. */
608 if (x->id.proto == IPPROTO_AH)
609 @@ -3496,6 +3499,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
610 pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
611 pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
612 pol->sadb_x_policy_dir = dir + 1;
613 + pol->sadb_x_policy_reserved = 0;
614 pol->sadb_x_policy_id = 0;
615 pol->sadb_x_policy_priority = 0;
616
617 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
618 index 8ce9feb..067aa2a 100644
619 --- a/net/mac80211/rx.c
620 +++ b/net/mac80211/rx.c
621 @@ -831,8 +831,14 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
622 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
623 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
624
625 - /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
626 - if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
627 + /*
628 + * Drop duplicate 802.11 retransmissions
629 + * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
630 + */
631 + if (rx->skb->len >= 24 && rx->sta &&
632 + !ieee80211_is_ctl(hdr->frame_control) &&
633 + !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
634 + !is_multicast_ether_addr(hdr->addr1)) {
635 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
636 rx->sta->last_seq_ctrl[rx->seqno_idx] ==
637 hdr->seq_ctrl)) {
638 diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
639 index e25e490..6e38ef0 100644
640 --- a/net/sched/sch_atm.c
641 +++ b/net/sched/sch_atm.c
642 @@ -606,6 +606,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
643 struct sockaddr_atmpvc pvc;
644 int state;
645
646 + memset(&pvc, 0, sizeof(pvc));
647 pvc.sap_family = AF_ATMPVC;
648 pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
649 pvc.sap_addr.vpi = flow->vcc->vpi;
650 diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
651 index b7cddb9..7f59944 100644
652 --- a/net/sched/sch_cbq.c
653 +++ b/net/sched/sch_cbq.c
654 @@ -1467,6 +1467,7 @@ static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
655 unsigned char *b = skb_tail_pointer(skb);
656 struct tc_cbq_wrropt opt;
657
658 + memset(&opt, 0, sizeof(opt));
659 opt.flags = 0;
660 opt.allot = cl->allot;
661 opt.priority = cl->priority + 1;
662 diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
663 index 96eb168..3dd7207 100644
664 --- a/net/sctp/outqueue.c
665 +++ b/net/sctp/outqueue.c
666 @@ -205,6 +205,8 @@ static inline int sctp_cacc_skip(struct sctp_transport *primary,
667 */
668 void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
669 {
670 + memset(q, 0, sizeof(struct sctp_outq));
671 +
672 q->asoc = asoc;
673 INIT_LIST_HEAD(&q->out_chunk_list);
674 INIT_LIST_HEAD(&q->control_chunk_list);
675 @@ -212,13 +214,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
676 INIT_LIST_HEAD(&q->sacked);
677 INIT_LIST_HEAD(&q->abandoned);
678
679 - q->fast_rtx = 0;
680 - q->outstanding_bytes = 0;
681 q->empty = 1;
682 - q->cork = 0;
683 -
684 - q->malloced = 0;
685 - q->out_qlen = 0;
686 }
687
688 /* Free the outqueue structure and any related pending chunks.
689 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
690 index f432c57..add9f94 100644
691 --- a/net/wireless/nl80211.c
692 +++ b/net/wireless/nl80211.c
693 @@ -5081,12 +5081,14 @@ EXPORT_SYMBOL(cfg80211_testmode_alloc_event_skb);
694
695 void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
696 {
697 + struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
698 void *hdr = ((void **)skb->cb)[1];
699 struct nlattr *data = ((void **)skb->cb)[2];
700
701 nla_nest_end(skb, data);
702 genlmsg_end(skb, hdr);
703 - genlmsg_multicast(skb, 0, nl80211_testmode_mcgrp.id, gfp);
704 + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0,
705 + nl80211_testmode_mcgrp.id, gfp);
706 }
707 EXPORT_SYMBOL(cfg80211_testmode_event);
708 #endif
709 @@ -7768,7 +7770,8 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
710
711 genlmsg_end(msg, hdr);
712
713 - genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
714 + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
715 + nl80211_mlme_mcgrp.id, gfp);
716 return;
717
718 nla_put_failure:
719 diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
720 index a58cf35..84717ce 100644
721 --- a/sound/core/compress_offload.c
722 +++ b/sound/core/compress_offload.c
723 @@ -582,7 +582,7 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
724 mutex_lock(&stream->device->lock);
725 switch (_IOC_NR(cmd)) {
726 case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
727 - put_user(SNDRV_COMPRESS_VERSION,
728 + retval = put_user(SNDRV_COMPRESS_VERSION,
729 (int __user *)arg) ? -EFAULT : 0;
730 break;
731 case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):