Magellan Linux

Contents of /trunk/kernel-lts/patches-3.4/0105-3.4.6-all-fixes.patch



Revision 1907
Wed Oct 10 11:20:27 2012 UTC by niro
File size: 49447 bytes
-3.4.13-lts-r1
diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
index 33ecd0c..b1e05cc 100644
--- a/arch/arm/plat-samsung/adc.c
+++ b/arch/arm/plat-samsung/adc.c
@@ -157,11 +157,13 @@ int s3c_adc_start(struct s3c_adc_client *client,
return -EINVAL;
}

- if (client->is_ts && adc->ts_pend)
- return -EAGAIN;
-
spin_lock_irqsave(&adc->lock, flags);

+ if (client->is_ts && adc->ts_pend) {
+ spin_unlock_irqrestore(&adc->lock, flags);
+ return -EAGAIN;
+ }
+
client->channel = channel;
client->nr_samples = nr_samples;

diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 23ce096..fe66260 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -638,7 +638,7 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
/* Create the new outer package and populate it */

status =
- acpi_ns_wrap_with_package(data, *elements,
+ acpi_ns_wrap_with_package(data, return_object,
return_object_ptr);
if (ACPI_FAILURE(status)) {
return (status);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index c850de4..eff7222 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -189,10 +189,12 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
* Processor (CPU3, 0x03, 0x00000410, 0x06) {}
* }
*
- * Ignores apic_id and always return 0 for CPU0's handle.
+ * Ignores apic_id and always returns 0 for the processor
+ * handle with acpi id 0 if nr_cpu_ids is 1.
+ * This should be the case if SMP tables are not found.
* Return -1 for other CPU's handle.
*/
- if (acpi_id == 0)
+ if (nr_cpu_ids <= 1 && acpi_id == 0)
return acpi_id;
else
return apic_id;
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 2da025e..7f1ea56 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -834,18 +834,21 @@ static void clk_change_rate(struct clk *clk)
{
struct clk *child;
unsigned long old_rate;
+ unsigned long best_parent_rate = 0;
struct hlist_node *tmp;

old_rate = clk->rate;

+ if (clk->parent)
+ best_parent_rate = clk->parent->rate;
+
if (clk->ops->set_rate)
clk->ops->set_rate(clk->hw, clk->new_rate);

if (clk->ops->recalc_rate)
- clk->rate = clk->ops->recalc_rate(clk->hw,
- clk->parent->rate);
+ clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
else
- clk->rate = clk->parent->rate;
+ clk->rate = best_parent_rate;

if (clk->notifier_count && old_rate != clk->rate)
__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 299d238..899c712 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -514,6 +514,12 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 4da66b4..054677b 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1379,6 +1379,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -1914,6 +1917,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
@@ -2008,6 +2012,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ }
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index e39aecb..dfd4098 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -125,6 +125,9 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c
#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d
#define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
@@ -509,6 +512,9 @@
#define USB_DEVICE_ID_CRYSTALTOUCH 0x0006
#define USB_DEVICE_ID_CRYSTALTOUCH_DUAL 0x0007

+#define USB_VENDOR_ID_MADCATZ 0x0738
+#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
+
#define USB_VENDOR_ID_MCC 0x09db
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
#define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 0b204e4..f524882 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -2157,7 +2157,7 @@ static void __devinit it87_init_device(struct platform_device *pdev)

/* Start monitoring */
it87_write_value(data, IT87_REG_CONFIG,
- (it87_read_value(data, IT87_REG_CONFIG) & 0x36)
+ (it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
| (update_vbat ? 0x41 : 0x01));
}

diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index fd7a0d5..42f7b25 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -142,6 +142,7 @@ static const struct xpad_device {
{ 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
{ 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
{ 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
+ { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
{ 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
@@ -164,6 +165,7 @@ static const struct xpad_device {
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
};
@@ -238,12 +240,14 @@ static struct usb_device_id xpad_table [] = {
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
+ { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
- XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
+ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
+ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
{ }
};

diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index f9e2758..e410b98 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -79,6 +79,10 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252
#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253
#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254
+/* MacbookPro10,1 (unibody, June 2012) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264

#define BCM5974_DEVICE(prod) { \
.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
@@ -128,6 +132,10 @@ static const struct usb_device_id bcm5974_table[] = {
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
+ /* MacbookPro10,1 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
/* Terminating entry */
{}
};
@@ -354,6 +362,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {
{ DIM_X, DIM_X / SN_COORD, -4620, 5140 },
{ DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
},
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING7_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
+ HAS_INTEGRATED_BUTTON,
+ 0x84, sizeof(struct bt_data),
+ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+ { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
+ { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
+ },
{}
};

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d7e9577..d1f74ab 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2486,9 +2486,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
*/
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
atomic_set(&r1_bio->remaining, read_targets);
- for (i = 0; i < conf->raid_disks * 2; i++) {
+ for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
bio = r1_bio->bios[i];
if (bio->bi_end_io == end_sync_read) {
+ read_targets--;
md_sync_acct(bio->bi_bdev, nr_sectors);
generic_make_request(bio);
}
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index 00a6732..39eab73 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -243,6 +243,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
if (minor == MAX_DVB_MINORS) {
kfree(dvbdevfops);
kfree(dvbdev);
+ up_write(&minor_rwsem);
mutex_unlock(&dvbdev_register_lock);
return -EINVAL;
}
diff --git a/drivers/media/video/cx231xx/cx231xx-audio.c b/drivers/media/video/cx231xx/cx231xx-audio.c
index a2c2b7d..e5742a0 100644
--- a/drivers/media/video/cx231xx/cx231xx-audio.c
+++ b/drivers/media/video/cx231xx/cx231xx-audio.c
@@ -307,7 +307,7 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
urb->context = dev;
urb->pipe = usb_rcvisocpipe(dev->udev,
dev->adev.end_point_addr);
- urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags = URB_ISO_ASAP;
urb->transfer_buffer = dev->adev.transfer_buffer[i];
urb->interval = 1;
urb->complete = cx231xx_audio_isocirq;
@@ -368,7 +368,7 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
urb->context = dev;
urb->pipe = usb_rcvbulkpipe(dev->udev,
dev->adev.end_point_addr);
- urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags = 0;
urb->transfer_buffer = dev->adev.transfer_buffer[i];
urb->complete = cx231xx_audio_bulkirq;
urb->transfer_buffer_length = sb_size;
diff --git a/drivers/media/video/cx231xx/cx231xx-vbi.c b/drivers/media/video/cx231xx/cx231xx-vbi.c
index 8cdee5f..9c5967e 100644
--- a/drivers/media/video/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/video/cx231xx/cx231xx-vbi.c
@@ -452,7 +452,7 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
return -ENOMEM;
}
dev->vbi_mode.bulk_ctl.urb[i] = urb;
- urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags = 0;

dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
kzalloc(sb_size, GFP_KERNEL);
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 261f478..c606b6a 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -28,7 +28,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/string.h>
@@ -547,12 +547,6 @@ static char *get_partition_name(int i)
return kstrdup(buf, GFP_KERNEL);
}

-static uint64_t divide(uint64_t n, uint32_t d)
-{
- do_div(n, d);
- return n;
-}
-
/*
* Initialize the nandsim structure.
*
@@ -581,7 +575,7 @@ static int init_nandsim(struct mtd_info *mtd)
ns->geom.oobsz = mtd->oobsize;
ns->geom.secsz = mtd->erasesize;
ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
- ns->geom.pgnum = divide(ns->geom.totsz, ns->geom.pgsz);
+ ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz);
ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
ns->geom.secshift = ffs(ns->geom.secsz) - 1;
ns->geom.pgshift = chip->page_shift;
@@ -924,7 +918,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)

if (!rptwear)
return 0;
- wear_eb_count = divide(mtd->size, mtd->erasesize);
+ wear_eb_count = div_u64(mtd->size, mtd->erasesize);
mem = wear_eb_count * sizeof(unsigned long);
if (mem / sizeof(unsigned long) != wear_eb_count) {
NS_ERR("Too many erase blocks for wear reporting\n");
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 3680aa2..2cf084e 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -6,7 +6,7 @@
#include "bonding.h"
#include "bond_alb.h"

-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)

#include <linux/debugfs.h>
#include <linux/seq_file.h>
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a579a2f..318a62a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3218,6 +3218,12 @@ static int bond_master_netdev_event(unsigned long event,
switch (event) {
case NETDEV_CHANGENAME:
return bond_event_changename(event_bond);
+ case NETDEV_UNREGISTER:
+ bond_remove_proc_entry(event_bond);
+ break;
+ case NETDEV_REGISTER:
+ bond_create_proc_entry(event_bond);
+ break;
default:
break;
}
@@ -4402,8 +4408,6 @@ static void bond_uninit(struct net_device *bond_dev)

bond_work_cancel_all(bond);

- bond_remove_proc_entry(bond);
-
bond_debug_unregister(bond);

__hw_addr_flush(&bond->mc_list);
@@ -4805,7 +4809,6 @@ static int bond_init(struct net_device *bond_dev)

bond_set_lockdep_class(bond_dev);

- bond_create_proc_entry(bond);
list_add_tail(&bond->bond_list, &bn->dev_list);

bond_prepare_sysfs_group(bond);
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index c6d95f2..a9dd6a9 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1553,6 +1553,9 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
ctrl = er32(CTRL);
status = er32(STATUS);
rxcw = er32(RXCW);
+ /* SYNCH bit and IV bit are sticky */
+ udelay(10);
+ rxcw = er32(RXCW);

if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 48d56da..9bdfaba 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1158,6 +1158,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
wmb();
priv->hw->desc->set_tx_owner(desc);
+ wmb();
}

/* Interrupt on completition only for the latest segment */
@@ -1173,6 +1174,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)

/* To avoid raise condition */
priv->hw->desc->set_tx_owner(first);
+ wmb();

priv->cur_tx++;

@@ -1236,6 +1238,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
}
wmb();
priv->hw->desc->set_rx_owner(p + entry);
+ wmb();
}
}

diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index c46275a..9aa4807 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -3405,7 +3405,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
return 0;
}

- if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+ if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
key_flags);
spin_unlock_irqrestore(&il->sta_lock, flags);
@@ -3420,7 +3420,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
il->stations[sta_id].sta.key.key_flags =
STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
- il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+ il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index eaf24945..4bc2711 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4767,14 +4767,12 @@ il_bg_watchdog(unsigned long data)
return;

/* monitor and check for other stuck queues */
- if (il_is_any_associated(il)) {
- for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
- /* skip as we already checked the command queue */
- if (cnt == il->cmd_queue)
- continue;
- if (il_check_stuck_queue(il, cnt))
- return;
- }
+ for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+ /* skip as we already checked the command queue */
+ if (cnt == il->cmd_queue)
+ continue;
+ if (il_check_stuck_queue(il, cnt))
+ return;
}

mod_timer(&il->watchdog,
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 66094eb..507085f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -436,8 +436,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
case QID_RX:
if (!rt2x00queue_full(queue))
rt2x00queue_for_each_entry(queue,
- Q_INDEX_DONE,
Q_INDEX,
+ Q_INDEX_DONE,
NULL,
rt2x00usb_kick_rx_entry);
break;
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 0ffdb3c..9af4257 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -72,6 +72,7 @@
#include <linux/string.h>
#include <linux/tick.h>
#include <linux/timer.h>
+#include <linux/dmi.h>
#include <drm/i915_drm.h>
#include <asm/msr.h>
#include <asm/processor.h>
@@ -1485,6 +1486,24 @@ static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {

MODULE_DEVICE_TABLE(pci, ips_id_table);

+static int ips_blacklist_callback(const struct dmi_system_id *id)
+{
+ pr_info("Blacklisted intel_ips for %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id ips_blacklist[] = {
+ {
+ .callback = ips_blacklist_callback,
+ .ident = "HP ProBook",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"),
+ },
+ },
+ { } /* terminating entry */
+};
+
static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
u64 platform_info;
@@ -1494,6 +1513,9 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
u16 htshi, trc, trc_required_mask;
u8 tse;

+ if (dmi_check_system(ips_blacklist))
+ return -ENODEV;
+
ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
if (!ips)
return -ENOMEM;
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 39d3aa4..f56c8ba 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -1085,7 +1085,7 @@ static int __init rpmsg_init(void)

return ret;
}
-module_init(rpmsg_init);
+subsys_initcall(rpmsg_init);

static void __exit rpmsg_fini(void)
{
diff --git a/fs/buffer.c b/fs/buffer.c
index ad5938c..0bc1bed 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1036,6 +1036,9 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
+ int ret;
+ struct buffer_head *bh;
+
/* Size must be multiple of hard sectorsize */
if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
(size < 512 || size > PAGE_SIZE))) {
@@ -1048,20 +1051,21 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
return NULL;
}

- for (;;) {
- struct buffer_head * bh;
- int ret;
+retry:
+ bh = __find_get_block(bdev, block, size);
+ if (bh)
+ return bh;

+ ret = grow_buffers(bdev, block, size);
+ if (ret == 0) {
+ free_more_memory();
+ goto retry;
+ } else if (ret > 0) {
bh = __find_get_block(bdev, block, size);
if (bh)
return bh;
-
- ret = grow_buffers(bdev, block, size);
- if (ret < 0)
- return NULL;
- if (ret == 0)
- free_more_memory();
}
+ return NULL;
}

/*
diff --git a/fs/fifo.c b/fs/fifo.c
index b1a524d..cf6f434 100644
--- a/fs/fifo.c
+++ b/fs/fifo.c
@@ -14,7 +14,7 @@
#include <linux/sched.h>
#include <linux/pipe_fs_i.h>

-static void wait_for_partner(struct inode* inode, unsigned int *cnt)
+static int wait_for_partner(struct inode* inode, unsigned int *cnt)
{
int cur = *cnt;

@@ -23,6 +23,7 @@ static void wait_for_partner(struct inode* inode, unsigned int *cnt)
if (signal_pending(current))
break;
}
+ return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct inode* inode)
@@ -67,8 +68,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
* seen a writer */
filp->f_version = pipe->w_counter;
} else {
- wait_for_partner(inode, &pipe->w_counter);
- if(signal_pending(current))
+ if (wait_for_partner(inode, &pipe->w_counter))
goto err_rd;
}
}
@@ -90,8 +90,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
wake_up_partner(inode);

if (!pipe->readers) {
- wait_for_partner(inode, &pipe->r_counter);
- if (signal_pending(current))
+ if (wait_for_partner(inode, &pipe->r_counter))
goto err_wr;
}
break;
diff --git a/fs/locks.c b/fs/locks.c
index 0d68f1f..6a64f15 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1465,7 +1465,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
case F_WRLCK:
return generic_add_lease(filp, arg, flp);
default:
- BUG();
+ return -EINVAL;
}
}
EXPORT_SYMBOL(generic_setlease);
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 50f55c7..f2f73f9 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -272,6 +272,7 @@ header-y += netfilter_ipv4.h
header-y += netfilter_ipv6.h
header-y += netlink.h
header-y += netrom.h
+header-y += nfc.h
header-y += nfs.h
header-y += nfs2.h
header-y += nfs3.h
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index fd0dc30..cc07d27 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -165,6 +165,7 @@ enum hrtimer_base_type {
* @lock: lock protecting the base and associated clock bases
* and timers
* @active_bases: Bitfield to mark bases with active timers
+ * @clock_was_set: Indicates that clock was set from irq context.
* @expires_next: absolute time of the next event which was scheduled
* via clock_set_next_event()
* @hres_active: State of high resolution mode
@@ -177,7 +178,8 @@ enum hrtimer_base_type {
*/
struct hrtimer_cpu_base {
raw_spinlock_t lock;
- unsigned long active_bases;
+ unsigned int active_bases;
+ unsigned int clock_was_set;
#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t expires_next;
int hres_active;
@@ -286,6 +288,8 @@ extern void hrtimer_peek_ahead_timers(void);
# define MONOTONIC_RES_NSEC HIGH_RES_NSEC
# define KTIME_MONOTONIC_RES KTIME_HIGH_RES

+extern void clock_was_set_delayed(void);
+
#else

# define MONOTONIC_RES_NSEC LOW_RES_NSEC
@@ -306,6 +310,9 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
return 0;
}
+
+static inline void clock_was_set_delayed(void) { }
+
#endif

extern void clock_was_set(void);
@@ -320,6 +327,7 @@ extern ktime_t ktime_get(void);
extern ktime_t ktime_get_real(void);
extern ktime_t ktime_get_boottime(void);
extern ktime_t ktime_get_monotonic_offset(void);
+extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot);

DECLARE_PER_CPU(struct tick_device, tick_cpu_device);

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 81a173c..7b06169 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1933,6 +1933,14 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
}
#endif

+#ifdef CONFIG_NO_HZ
+void calc_load_enter_idle(void);
+void calc_load_exit_idle(void);
+#else
+static inline void calc_load_enter_idle(void) { }
+static inline void calc_load_exit_idle(void) { }
+#endif /* CONFIG_NO_HZ */
+
#ifndef CONFIG_CPUMASK_OFFSTACK
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ae34bf5..6db7a5e 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -657,6 +657,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
return 0;
}

+static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+{
+ ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
+ ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
+
+ return ktime_get_update_offsets(offs_real, offs_boot);
+}
+
/*
* Retrigger next event is called after clock was set
*
@@ -665,22 +673,12 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
static void retrigger_next_event(void *arg)
{
struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
- struct timespec realtime_offset, xtim, wtm, sleep;

if (!hrtimer_hres_active())
return;

- /* Optimized out for !HIGH_RES */
- get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
- set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
-
- /* Adjust CLOCK_REALTIME offset */
raw_spin_lock(&base->lock);
- base->clock_base[HRTIMER_BASE_REALTIME].offset =
- timespec_to_ktime(realtime_offset);
- base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
- timespec_to_ktime(sleep);
-
+ hrtimer_update_base(base);
hrtimer_force_reprogram(base, 0);
raw_spin_unlock(&base->lock);
}
@@ -710,13 +708,25 @@ static int hrtimer_switch_to_hres(void)
base->clock_base[i].resolution = KTIME_HIGH_RES;

tick_setup_sched_timer();
-
/* "Retrigger" the interrupt to get things going */
retrigger_next_event(NULL);
local_irq_restore(flags);
return 1;
}

+/*
+ * Called from timekeeping code to reprogramm the hrtimer interrupt
+ * device. If called from the timer interrupt context we defer it to
+ * softirq context.
+ */
+void clock_was_set_delayed(void)
+{
+ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+
+ cpu_base->clock_was_set = 1;
+ __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+}
+
#else

static inline int hrtimer_hres_active(void) { return 0; }
@@ -1250,11 +1260,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
cpu_base->nr_events++;
dev->next_event.tv64 = KTIME_MAX;

- entry_time = now = ktime_get();
+ raw_spin_lock(&cpu_base->lock);
+ entry_time = now = hrtimer_update_base(cpu_base);
retry:
expires_next.tv64 = KTIME_MAX;
-
- raw_spin_lock(&cpu_base->lock);
/*
* We set expires_next to KTIME_MAX here with cpu_base->lock
* held to prevent that a timer is enqueued in our queue via
@@ -1330,8 +1339,12 @@ retry:
* We need to prevent that we loop forever in the hrtimer
* interrupt routine. We give it 3 attempts to avoid
* overreacting on some spurious event.
+ *
+ * Acquire base lock for updating the offsets and retrieving
+ * the current time.
*/
- now = ktime_get();
+ raw_spin_lock(&cpu_base->lock);
+ now = hrtimer_update_base(cpu_base);
cpu_base->nr_retries++;
if (++retries < 3)
goto retry;
@@ -1343,6 +1356,7 @@ retry:
*/
cpu_base->nr_hangs++;
cpu_base->hang_detected = 1;
+ raw_spin_unlock(&cpu_base->lock);
delta = ktime_sub(now, entry_time);
if (delta.tv64 > cpu_base->max_hang_time.tv64)
cpu_base->max_hang_time = delta;
@@ -1395,6 +1409,13 @@ void hrtimer_peek_ahead_timers(void)

static void run_hrtimer_softirq(struct softirq_action *h)
{
+ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+
+ if (cpu_base->clock_was_set) {
+ cpu_base->clock_was_set = 0;
+ clock_was_set();
+ }
+
hrtimer_peek_ahead_timers();
}

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2000e06..817bf70 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2162,11 +2162,73 @@ unsigned long this_cpu_load(void)
}


+/*
+ * Global load-average calculations
+ *
+ * We take a distributed and async approach to calculating the global load-avg
+ * in order to minimize overhead.
+ *
+ * The global load average is an exponentially decaying average of nr_running +
+ * nr_uninterruptible.
+ *
+ * Once every LOAD_FREQ:
+ *
+ * nr_active = 0;
+ * for_each_possible_cpu(cpu)
+ * nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
+ *
+ * avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
+ *
+ * Due to a number of reasons the above turns in the mess below:
+ *
+ * - for_each_possible_cpu() is prohibitively expensive on machines with
+ * serious number of cpus, therefore we need to take a distributed approach
+ * to calculating nr_active.
+ *
+ * \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
+ * = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
+ *
+ * So assuming nr_active := 0 when we start out -- true per definition, we
+ * can simply take per-cpu deltas and fold those into a global accumulate
+ * to obtain the same result. See calc_load_fold_active().
+ *
+ * Furthermore, in order to avoid synchronizing all per-cpu delta folding
+ * across the machine, we assume 10 ticks is sufficient time for every
+ * cpu to have completed this task.
+ *
+ * This places an upper-bound on the IRQ-off latency of the machine. Then
+ * again, being late doesn't loose the delta, just wrecks the sample.
+ *
+ * - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
+ * this would add another cross-cpu cacheline miss and atomic operation
+ * to the wakeup path. Instead we increment on whatever cpu the task ran
+ * when it went into uninterruptible state and decrement on whatever cpu
+ * did the wakeup. This means that only the sum of nr_uninterruptible over
+ * all cpus yields the correct result.
+ *
+ * This covers the NO_HZ=n code, for extra head-aches, see the comment below.
+ */
+
/* Variables and functions for calc_load */
static atomic_long_t calc_load_tasks;
static unsigned long calc_load_update;
unsigned long avenrun[3];
-EXPORT_SYMBOL(avenrun);
+EXPORT_SYMBOL(avenrun); /* should be removed */
+
+/**
+ * get_avenrun - get the load average array
+ * @loads: pointer to dest load array
+ * @offset: offset to add
+ * @shift: shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+{
+ loads[0] = (avenrun[0] + offset) << shift;
+ loads[1] = (avenrun[1] + offset) << shift;
+ loads[2] = (avenrun[2] + offset) << shift;
+}

static long calc_load_fold_active(struct rq *this_rq)
{
@@ -2183,6 +2245,9 @@ static long calc_load_fold_active(struct rq *this_rq)
return delta;
}

+/*
+ * a1 = a0 * e + a * (1 - e)
+ */
static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
@@ -2194,30 +2259,118 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)

#ifdef CONFIG_NO_HZ
/*
- * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
+ * Handle NO_HZ for the global load-average.
+ *
+ * Since the above described distributed algorithm to compute the global
+ * load-average relies on per-cpu sampling from the tick, it is affected by
+ * NO_HZ.
+ *
+ * The basic idea is to fold the nr_active delta into a global idle-delta upon
+ * entering NO_HZ state such that we can include this as an 'extra' cpu delta
+ * when we read the global state.
+ *
+ * Obviously reality has to ruin such a delightfully simple scheme:
+ *
+ * - When we go NO_HZ idle during the window, we can negate our sample
+ * contribution, causing under-accounting.
+ *
+ * We avoid this by keeping two idle-delta counters and flipping them
+ * when the window starts, thus separating old and new NO_HZ load.
+ *
+ * The only trick is the slight shift in index flip for read vs write.
+ *
+ * 0s 5s 10s 15s
+ * +10 +10 +10 +10
+ * |-|-----------|-|-----------|-|-----------|-|
+ * r:0 0 1 1 0 0 1 1 0
+ * w:0 1 1 0 0 1 1 0 0
+ *
+ * This ensures we'll fold the old idle contribution in this window while
+ * accumlating the new one.
+ *
+ * - When we wake up from NO_HZ idle during the window, we push up our
+ * contribution, since we effectively move our sample point to a known
+ * busy state.
+ *
+ * This is solved by pushing the window forward, and thus skipping the
+ * sample, for this cpu (effectively using the idle-delta for this cpu which
+ * was in effect at the time the window opened). This also solves the issue
+ * of having to deal with a cpu having been in NOHZ idle for multiple
+ * LOAD_FREQ intervals.
*
* When making the ILB scale, we should try to pull this in as well.
*/
-static atomic_long_t calc_load_tasks_idle;
+static atomic_long_t calc_load_idle[2];
+static int calc_load_idx;

-void calc_load_account_idle(struct rq *this_rq)
+static inline int calc_load_write_idx(void)
{
+ int idx = calc_load_idx;
+
+ /*
+ * See calc_global_nohz(), if we observe the new index, we also
+ * need to observe the new update time.
+ */
+ smp_rmb();
+
+ /*
+ * If the folding window started, make sure we start writing in the
+ * next idle-delta.
+ */
+ if (!time_before(jiffies, calc_load_update))
+ idx++;
+
+ return idx & 1;
+}
+
+static inline int calc_load_read_idx(void)
+{
+ return calc_load_idx & 1;
+}
+
+void calc_load_enter_idle(void)
+{
+ struct rq *this_rq = this_rq();
long delta;

+ /*
+ * We're going into NOHZ mode, if there's any pending delta, fold it
+ * into the pending idle delta.
+ */
delta = calc_load_fold_active(this_rq);
- if (delta)
- atomic_long_add(delta, &calc_load_tasks_idle);
+ if (delta) {
+ int idx = calc_load_write_idx();
+ atomic_long_add(delta, &calc_load_idle[idx]);
+ }
}

-static long calc_load_fold_idle(void)
+void calc_load_exit_idle(void)
{
- long delta = 0;
+ struct rq *this_rq = this_rq();
+
+ /*
+ * If we're still before the sample window, we're done.
+ */
+ if (time_before(jiffies, this_rq->calc_load_update))
+ return;

/*
- * Its got a race, we don't care...
+ * We woke inside or after the sample window, this means we're already
+ * accounted through the nohz accounting, so skip the entire deal and
+ * sync up for the next window.
*/
- if (atomic_long_read(&calc_load_tasks_idle))
- delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
+ this_rq->calc_load_update = calc_load_update;
+ if (time_before(jiffies, this_rq->calc_load_update + 10))
+ this_rq->calc_load_update += LOAD_FREQ;
+}
+
+static long calc_load_fold_idle(void)
+{
+ int idx = calc_load_read_idx();
+ long delta = 0;
+
+ if (atomic_long_read(&calc_load_idle[idx]))
+ delta = atomic_long_xchg(&calc_load_idle[idx], 0);

return delta;
}
@@ -2303,66 +2456,39 @@ static void calc_global_nohz(void)
{
long delta, active, n;

- /*
- * If we crossed a calc_load_update boundary, make sure to fold
- * any pending idle changes, the respective CPUs might have
- * missed the tick driven calc_load_account_active() update
- * due to NO_HZ.
- */
- delta = calc_load_fold_idle();
- if (delta)
- atomic_long_add(delta, &calc_load_tasks);
-
- /*
- * It could be the one fold was all it took, we done!
- */
- if (time_before(jiffies, calc_load_update + 10))
- return;
-
- /*
- * Catch-up, fold however many we are behind still
- */
- delta = jiffies - calc_load_update - 10;
- n = 1 + (delta / LOAD_FREQ);
+ if (!time_before(jiffies, calc_load_update + 10)) {
+ /*
+ * Catch-up, fold however many we are behind still
+ */
+ delta = jiffies - calc_load_update - 10;
+ n = 1 + (delta / LOAD_FREQ);

- active = atomic_long_read(&calc_load_tasks);
- active = active > 0 ? active * FIXED_1 : 0;
+ active = atomic_long_read(&calc_load_tasks);
+ active = active > 0 ? active * FIXED_1 : 0;

- avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
- avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
- avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
+ avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
+ avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
+ avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);

- calc_load_update += n * LOAD_FREQ;
-}
-#else
-void calc_load_account_idle(struct rq *this_rq)
-{
-}
+ calc_load_update += n * LOAD_FREQ;
+ }

-static inline long calc_load_fold_idle(void)
-{
- return 0;
+ /*
+ * Flip the idle index...
+ *
+ * Make sure we first write the new time then flip the index, so that
+ * calc_load_write_idx() will see the new time when it reads the new
+ * index, this avoids a double flip messing things up.
+ */
+ smp_wmb();
+ calc_load_idx++;
}
+#else /* !CONFIG_NO_HZ */

-static void calc_global_nohz(void)
-{
-}
-#endif
+static inline long calc_load_fold_idle(void) { return 0; }
+static inline void calc_global_nohz(void) { }

-/**
- * get_avenrun - get the load average array
- * @loads: pointer to dest load array
- * @offset: offset to add
- * @shift: shift count to shift the result left
- *
- * These values are estimates at best, so no need for locking.
- */
-void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
-{
- loads[0] = (avenrun[0] + offset) << shift;
- loads[1] = (avenrun[1] + offset) << shift;
- loads[2] = (avenrun[2] + offset) << shift;
-}
+#endif /* CONFIG_NO_HZ */

/*
* calc_load - update the avenrun load estimates 10 ticks after the
@@ -2370,11 +2496,18 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
*/
void calc_global_load(unsigned long ticks)
{
- long active;
+ long active, delta;

if (time_before(jiffies, calc_load_update + 10))
return;

+ /*
+ * Fold the 'old' idle-delta to include all NO_HZ cpus.
+ */
+ delta = calc_load_fold_idle();
+ if (delta)
+ atomic_long_add(delta, &calc_load_tasks);
+
active = atomic_long_read(&calc_load_tasks);
active = active > 0 ? active * FIXED_1 : 0;

@@ -2385,12 +2518,7 @@ void calc_global_load(unsigned long ticks)
calc_load_update += LOAD_FREQ;

/*
- * Account one period with whatever state we found before
- * folding in the nohz state and ageing the entire idle period.
- *
- * This avoids loosing a sample when we go idle between
- * calc_load_account_active() (10 ticks ago) and now and thus
- * under-accounting.
+ * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
*/
calc_global_nohz();
}
@@ -2407,7 +2535,6 @@ static void calc_load_account_active(struct rq *this_rq)
return;

delta = calc_load_fold_active(this_rq);
- delta += calc_load_fold_idle();
if (delta)
atomic_long_add(delta, &calc_load_tasks);

@@ -2415,6 +2542,10 @@ static void calc_load_account_active(struct rq *this_rq)
}

/*
+ * End of global load-average stuff
+ */
+
+/*
* The exact cpuload at various idx values, calculated at every tick would be
* load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
*
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 91b4c95..fdf7522 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -25,7 +25,6 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
schedstat_inc(rq, sched_goidle);
- calc_load_account_idle(rq);
return rq->idle;
}

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index fb3acba..116ced0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -940,8 +940,6 @@ static inline u64 sched_avg_period(void)
return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

-void calc_load_account_idle(struct rq *this_rq);
-
#ifdef CONFIG_SCHED_HRTICK

/*
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 6a3a5b9..fd4e160 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -401,6 +401,7 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
*/
if (!ts->tick_stopped) {
select_nohz_load_balancer(1);
+ calc_load_enter_idle();

ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
ts->tick_stopped = 1;
@@ -591,6 +592,7 @@ void tick_nohz_idle_exit(void)
account_idle_ticks(ticks);
#endif

+ calc_load_exit_idle();
touch_softlockup_watchdog();
/*
* Cancel the scheduled timer and restore the tick
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d42574df..7c50de8 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -70,6 +70,12 @@ struct timekeeper {
/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
struct timespec raw_time;

+ /* Offset clock monotonic -> clock realtime */
+ ktime_t offs_real;
+
+ /* Offset clock monotonic -> clock boottime */
+ ktime_t offs_boot;
+
/* Seqlock for all timekeeper values */
seqlock_t lock;
};
@@ -172,6 +178,14 @@ static inline s64 timekeeping_get_ns_raw(void)
return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
}

+static void update_rt_offset(void)
+{
+ struct timespec tmp, *wtm = &timekeeper.wall_to_monotonic;
+
+ set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
+ timekeeper.offs_real = timespec_to_ktime(tmp);
+}
+
/* must hold write on timekeeper.lock */
static void timekeeping_update(bool clearntp)
{
@@ -179,6 +193,7 @@ static void timekeeping_update(bool clearntp)
timekeeper.ntp_error = 0;
ntp_clear();
}
+ update_rt_offset();
update_vsyscall(&timekeeper.xtime, &timekeeper.wall_to_monotonic,
timekeeper.clock, timekeeper.mult);
}
@@ -606,6 +621,7 @@ void __init timekeeping_init(void)
}
set_normalized_timespec(&timekeeper.wall_to_monotonic,
-boot.tv_sec, -boot.tv_nsec);
+ update_rt_offset();
timekeeper.total_sleep_time.tv_sec = 0;
timekeeper.total_sleep_time.tv_nsec = 0;
write_sequnlock_irqrestore(&timekeeper.lock, flags);
@@ -614,6 +630,12 @@ void __init timekeeping_init(void)
/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

+static void update_sleep_time(struct timespec t)
+{
+ timekeeper.total_sleep_time = t;
+ timekeeper.offs_boot = timespec_to_ktime(t);
+}
+
/**
* __timekeeping_inject_sleeptime - Internal function to add sleep interval
* @delta: pointer to a timespec delta value
@@ -632,8 +654,7 @@ static void __timekeeping_inject_sleeptime(struct timespec *delta)
timekeeper.xtime = timespec_add(timekeeper.xtime, *delta);
timekeeper.wall_to_monotonic =
timespec_sub(timekeeper.wall_to_monotonic, *delta);
- timekeeper.total_sleep_time = timespec_add(
- timekeeper.total_sleep_time, *delta);
+ update_sleep_time(timespec_add(timekeeper.total_sleep_time, *delta));
}

@@ -698,6 +719,7 @@ static void timekeeping_resume(void)
timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
timekeeper.ntp_error = 0;
timekeeping_suspended = 0;
+ timekeeping_update(false);
write_sequnlock_irqrestore(&timekeeper.lock, flags);

touch_softlockup_watchdog();
@@ -965,6 +987,8 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
leap = second_overflow(timekeeper.xtime.tv_sec);
timekeeper.xtime.tv_sec += leap;
timekeeper.wall_to_monotonic.tv_sec -= leap;
+ if (leap)
+ clock_was_set_delayed();
}

/* Accumulate raw time */
@@ -1081,6 +1105,8 @@ static void update_wall_time(void)
leap = second_overflow(timekeeper.xtime.tv_sec);
timekeeper.xtime.tv_sec += leap;
timekeeper.wall_to_monotonic.tv_sec -= leap;
+ if (leap)
+ clock_was_set_delayed();
}

timekeeping_update(false);
@@ -1248,6 +1274,40 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
} while (read_seqretry(&timekeeper.lock, seq));
}

+#ifdef CONFIG_HIGH_RES_TIMERS
+/**
+ * ktime_get_update_offsets - hrtimer helper
+ * @offs_real: pointer to storage for monotonic -> realtime offset
+ * @offs_boot: pointer to storage for monotonic -> boottime offset
+ *
+ * Returns current monotonic time and updates the offsets
+ * Called from hrtimer_interupt() or retrigger_next_event()
+ */
+ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
+{
+ ktime_t now;
+ unsigned int seq;
+ u64 secs, nsecs;
+
+ do {
+ seq = read_seqbegin(&timekeeper.lock);
+
+ secs = timekeeper.xtime.tv_sec;
+ nsecs = timekeeper.xtime.tv_nsec;
+ nsecs += timekeeping_get_ns();
+ /* If arch requires, add in gettimeoffset() */
+ nsecs += arch_gettimeoffset();
+
+ *offs_real = timekeeper.offs_real;
+ *offs_boot = timekeeper.offs_boot;
+ } while (read_seqretry(&timekeeper.lock, seq));
+
+ now = ktime_add_ns(ktime_set(secs, 0), nsecs);
+ now = ktime_sub(now, *offs_real);
+ return now;
+}
+#endif
+
/**
* ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
*/
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 1197e8d..d132b98 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2183,15 +2183,13 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
sdata->name, mgmt->sa, status_code);
ieee80211_destroy_assoc_data(sdata, false);
} else {
- printk(KERN_DEBUG "%s: associated\n", sdata->name);
-
if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
/* oops -- internal error -- send timeout for now */
- ieee80211_destroy_assoc_data(sdata, true);
- sta_info_destroy_addr(sdata, mgmt->bssid);
+ ieee80211_destroy_assoc_data(sdata, false);
cfg80211_put_bss(*bss);
return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
}
+ printk(KERN_DEBUG "%s: associated\n", sdata->name);

/*
* destroy assoc_data afterwards, as otherwise an idle
diff --git a/net/wireless/util.c b/net/wireless/util.c
index b5b6890..0eb6cc0 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -805,7 +805,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
ntype == NL80211_IFTYPE_P2P_CLIENT))
return -EBUSY;

- if (ntype != otype) {
+ if (ntype != otype && netif_running(dev)) {
err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
ntype);
if (err)