Magellan Linux

Contents of /trunk/kernel26-alx/patches-2.6.27-r3/0112-2.6.27.13-all-fixes.patch

Parent Directory | Revision Log


Revision 1176 - (show annotations) (download)
Thu Oct 14 15:11:06 2010 UTC (13 years, 7 months ago) by niro
File size: 56839 byte(s)
-2.6.27-alx-r3: new magellan 0.5.2 kernel
1 diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
2 index b117e42..90f718c 100644
3 --- a/Documentation/sound/alsa/ALSA-Configuration.txt
4 +++ b/Documentation/sound/alsa/ALSA-Configuration.txt
5 @@ -960,9 +960,10 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
6 6stack 6-jack, separate surrounds (default)
7 3stack 3-stack, shared surrounds
8 laptop 2-channel only (FSC V2060, Samsung M50)
9 - laptop-eapd 2-channel with EAPD (Samsung R65, ASUS A6J)
10 + laptop-eapd 2-channel with EAPD (ASUS A6J)
11 laptop-automute 2-channel with EAPD and HP-automute (Lenovo N100)
12 ultra 2-channel with EAPD (Samsung Ultra tablet PC)
13 + samsung 2-channel with EAPD (Samsung R65)
14
15 AD1988/AD1988B/AD1989A/AD1989B
16 6stack 6-jack
17 diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
18 index 48e496f..fb7e69c 100644
19 --- a/arch/ia64/Kconfig
20 +++ b/arch/ia64/Kconfig
21 @@ -15,6 +15,7 @@ config IA64
22 select ACPI if (!IA64_HP_SIM)
23 select PM if (!IA64_HP_SIM)
24 select ARCH_SUPPORTS_MSI
25 + select HAVE_UNSTABLE_SCHED_CLOCK
26 select HAVE_IDE
27 select HAVE_OPROFILE
28 select HAVE_KPROBES
29 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
30 index db44e02..ba51948 100644
31 --- a/arch/powerpc/mm/slice.c
32 +++ b/arch/powerpc/mm/slice.c
33 @@ -710,9 +710,18 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
34 unsigned long len)
35 {
36 struct slice_mask mask, available;
37 + unsigned int psize = mm->context.user_psize;
38
39 mask = slice_range_to_mask(addr, len);
40 - available = slice_mask_for_size(mm, mm->context.user_psize);
41 + available = slice_mask_for_size(mm, psize);
42 +#ifdef CONFIG_PPC_64K_PAGES
43 + /* We need to account for 4k slices too */
44 + if (psize == MMU_PAGE_64K) {
45 + struct slice_mask compat_mask;
46 + compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
47 + or_mask(available, compat_mask);
48 + }
49 +#endif
50
51 #if 0 /* too verbose */
52 slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
53 diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
54 index 13946eb..b4704e1 100644
55 --- a/drivers/firmware/dell_rbu.c
56 +++ b/drivers/firmware/dell_rbu.c
57 @@ -576,7 +576,7 @@ static ssize_t read_rbu_image_type(struct kobject *kobj,
58 {
59 int size = 0;
60 if (!pos)
61 - size = sprintf(buffer, "%s\n", image_type);
62 + size = scnprintf(buffer, count, "%s\n", image_type);
63 return size;
64 }
65
66 @@ -648,7 +648,7 @@ static ssize_t read_rbu_packet_size(struct kobject *kobj,
67 int size = 0;
68 if (!pos) {
69 spin_lock(&rbu_data.lock);
70 - size = sprintf(buffer, "%lu\n", rbu_data.packetsize);
71 + size = scnprintf(buffer, count, "%lu\n", rbu_data.packetsize);
72 spin_unlock(&rbu_data.lock);
73 }
74 return size;
75 diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
76 index d9e7a49..58a5efb 100644
77 --- a/drivers/hwmon/abituguru3.c
78 +++ b/drivers/hwmon/abituguru3.c
79 @@ -1153,7 +1153,7 @@ static int __init abituguru3_dmi_detect(void)
80
81 static inline int abituguru3_dmi_detect(void)
82 {
83 - return -ENODEV;
84 + return 1;
85 }
86
87 #endif /* CONFIG_DMI */
88 diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
89 index c54eff9..bfc2961 100644
90 --- a/drivers/hwmon/hwmon-vid.c
91 +++ b/drivers/hwmon/hwmon-vid.c
92 @@ -180,6 +180,7 @@ static struct vrm_model vrm_models[] = {
93 {X86_VENDOR_AMD, 0x6, ANY, ANY, 90}, /* Athlon Duron etc */
94 {X86_VENDOR_AMD, 0xF, 0x3F, ANY, 24}, /* Athlon 64, Opteron */
95 {X86_VENDOR_AMD, 0xF, ANY, ANY, 25}, /* NPT family 0Fh */
96 + {X86_VENDOR_AMD, 0x10, ANY, ANY, 25}, /* NPT family 10h */
97 {X86_VENDOR_INTEL, 0x6, 0x9, ANY, 13}, /* Pentium M (130 nm) */
98 {X86_VENDOR_INTEL, 0x6, 0xB, ANY, 85}, /* Tualatin */
99 {X86_VENDOR_INTEL, 0x6, 0xD, ANY, 13}, /* Pentium M (90 nm) */
100 diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
101 index b4882cc..d32c1ee 100644
102 --- a/drivers/misc/sgi-xp/xpc_sn2.c
103 +++ b/drivers/misc/sgi-xp/xpc_sn2.c
104 @@ -904,7 +904,7 @@ xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
105 dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
106 part_sn2->remote_vars_pa);
107
108 - part->last_heartbeat = remote_vars->heartbeat;
109 + part->last_heartbeat = remote_vars->heartbeat - 1;
110 dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n",
111 part->last_heartbeat);
112
113 diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
114 index b5d6b9a..b427978 100644
115 --- a/drivers/net/irda/irda-usb.c
116 +++ b/drivers/net/irda/irda-usb.c
117 @@ -1075,7 +1075,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
118 {
119 unsigned int i;
120 int ret;
121 - char stir421x_fw_name[11];
122 + char stir421x_fw_name[12];
123 const struct firmware *fw;
124 const unsigned char *fw_version_ptr; /* pointer to version string */
125 unsigned long fw_version = 0;
126 diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
127 index 5d86281..c71982d 100644
128 --- a/drivers/net/r6040.c
129 +++ b/drivers/net/r6040.c
130 @@ -49,8 +49,8 @@
131 #include <asm/processor.h>
132
133 #define DRV_NAME "r6040"
134 -#define DRV_VERSION "0.18"
135 -#define DRV_RELDATE "13Jul2008"
136 +#define DRV_VERSION "0.19"
137 +#define DRV_RELDATE "18Dec2008"
138
139 /* PHY CHIP Address */
140 #define PHY1_ADDR 1 /* For MAC1 */
141 @@ -214,7 +214,7 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
142 /* Wait for the read bit to be cleared */
143 while (limit--) {
144 cmd = ioread16(ioaddr + MMDIO);
145 - if (cmd & MDIO_READ)
146 + if (!(cmd & MDIO_READ))
147 break;
148 }
149
150 @@ -233,7 +233,7 @@ static void r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val
151 /* Wait for the write bit to be cleared */
152 while (limit--) {
153 cmd = ioread16(ioaddr + MMDIO);
154 - if (cmd & MDIO_WRITE)
155 + if (!(cmd & MDIO_WRITE))
156 break;
157 }
158 }
159 @@ -681,8 +681,10 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
160 struct net_device *dev = dev_id;
161 struct r6040_private *lp = netdev_priv(dev);
162 void __iomem *ioaddr = lp->base;
163 - u16 status;
164 + u16 misr, status;
165
166 + /* Save MIER */
167 + misr = ioread16(ioaddr + MIER);
168 /* Mask off RDC MAC interrupt */
169 iowrite16(MSK_INT, ioaddr + MIER);
170 /* Read MISR status and clear */
171 @@ -702,7 +704,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
172 dev->stats.rx_fifo_errors++;
173
174 /* Mask off RX interrupt */
175 - iowrite16(ioread16(ioaddr + MIER) & ~RX_INTS, ioaddr + MIER);
176 + misr &= ~RX_INTS;
177 netif_rx_schedule(dev, &lp->napi);
178 }
179
180 @@ -710,6 +712,9 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
181 if (status & TX_INTS)
182 r6040_tx(dev);
183
184 + /* Restore RDC MAC interrupt */
185 + iowrite16(misr, ioaddr + MIER);
186 +
187 return IRQ_HANDLED;
188 }
189
190 diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
191 index 6dbfed0..69120b5 100644
192 --- a/drivers/net/wireless/ath9k/hw.c
193 +++ b/drivers/net/wireless/ath9k/hw.c
194 @@ -729,7 +729,7 @@ ath9k_hw_eeprom_set_board_values(struct ath_hal *ah,
195 AR_AN_TOP2_LOCALBIAS,
196 AR_AN_TOP2_LOCALBIAS_S,
197 pModal->local_bias);
198 - DPRINTF(ah->ah_sc, ATH_DBG_ANY, "ForceXPAon: %d\n",
199 + DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "ForceXPAon: %d\n",
200 pModal->force_xpaon);
201 REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
202 pModal->force_xpaon);
203 diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
204 index cbaca23..bfff6b5 100644
205 --- a/drivers/net/wireless/p54/p54usb.c
206 +++ b/drivers/net/wireless/p54/p54usb.c
207 @@ -53,6 +53,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
208 {USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */
209 {USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */
210 {USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */
211 + {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
212 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
213 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
214 {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */
215 diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
216 index 9761eaa..45bf8f7 100644
217 --- a/drivers/net/wireless/rt2x00/rt73usb.c
218 +++ b/drivers/net/wireless/rt2x00/rt73usb.c
219 @@ -2113,6 +2113,7 @@ static struct usb_device_id rt73usb_device_table[] = {
220 /* Linksys */
221 { USB_DEVICE(0x13b1, 0x0020), USB_DEVICE_DATA(&rt73usb_ops) },
222 { USB_DEVICE(0x13b1, 0x0023), USB_DEVICE_DATA(&rt73usb_ops) },
223 + { USB_DEVICE(0x13b1, 0x0028), USB_DEVICE_DATA(&rt73usb_ops) },
224 /* MSI */
225 { USB_DEVICE(0x0db0, 0x6877), USB_DEVICE_DATA(&rt73usb_ops) },
226 { USB_DEVICE(0x0db0, 0x6874), USB_DEVICE_DATA(&rt73usb_ops) },
227 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
228 index aa6fda1..8a82a62 100644
229 --- a/drivers/pci/pcie/aspm.c
230 +++ b/drivers/pci/pcie/aspm.c
231 @@ -33,6 +33,11 @@ struct endpoint_state {
232 struct pcie_link_state {
233 struct list_head sibiling;
234 struct pci_dev *pdev;
235 + bool downstream_has_switch;
236 +
237 + struct pcie_link_state *parent;
238 + struct list_head children;
239 + struct list_head link;
240
241 /* ASPM state */
242 unsigned int support_state;
243 @@ -125,7 +130,7 @@ static void pcie_set_clock_pm(struct pci_dev *pdev, int enable)
244 link_state->clk_pm_enabled = !!enable;
245 }
246
247 -static void pcie_check_clock_pm(struct pci_dev *pdev)
248 +static void pcie_check_clock_pm(struct pci_dev *pdev, int blacklist)
249 {
250 int pos;
251 u32 reg32;
252 @@ -149,10 +154,26 @@ static void pcie_check_clock_pm(struct pci_dev *pdev)
253 if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
254 enabled = 0;
255 }
256 - link_state->clk_pm_capable = capable;
257 link_state->clk_pm_enabled = enabled;
258 link_state->bios_clk_state = enabled;
259 - pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
260 + if (!blacklist) {
261 + link_state->clk_pm_capable = capable;
262 + pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
263 + } else {
264 + link_state->clk_pm_capable = 0;
265 + pcie_set_clock_pm(pdev, 0);
266 + }
267 +}
268 +
269 +static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev)
270 +{
271 + struct pci_dev *child_dev;
272 +
273 + list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
274 + if (child_dev->pcie_type == PCI_EXP_TYPE_UPSTREAM)
275 + return true;
276 + }
277 + return false;
278 }
279
280 /*
281 @@ -419,9 +440,9 @@ static unsigned int pcie_aspm_check_state(struct pci_dev *pdev,
282 {
283 struct pci_dev *child_dev;
284
285 - /* If no child, disable the link */
286 + /* If no child, ignore the link */
287 if (list_empty(&pdev->subordinate->devices))
288 - return 0;
289 + return state;
290 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
291 if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
292 /*
293 @@ -462,6 +483,9 @@ static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state)
294 int valid = 1;
295 struct pcie_link_state *link_state = pdev->link_state;
296
297 + /* If no child, disable the link */
298 + if (list_empty(&pdev->subordinate->devices))
299 + state = 0;
300 /*
301 * if the downstream component has pci bridge function, don't do ASPM
302 * now
303 @@ -493,20 +517,52 @@ static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state)
304 link_state->enabled_state = state;
305 }
306
307 +static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link)
308 +{
309 + struct pcie_link_state *root_port_link = link;
310 + while (root_port_link->parent)
311 + root_port_link = root_port_link->parent;
312 + return root_port_link;
313 +}
314 +
315 +/* check the whole hierarchy, and configure each link in the hierarchy */
316 static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
317 unsigned int state)
318 {
319 struct pcie_link_state *link_state = pdev->link_state;
320 + struct pcie_link_state *root_port_link = get_root_port_link(link_state);
321 + struct pcie_link_state *leaf;
322
323 - if (link_state->support_state == 0)
324 - return;
325 state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
326
327 - /* state 0 means disabling aspm */
328 - state = pcie_aspm_check_state(pdev, state);
329 + /* check all links who have specific root port link */
330 + list_for_each_entry(leaf, &link_list, sibiling) {
331 + if (!list_empty(&leaf->children) ||
332 + get_root_port_link(leaf) != root_port_link)
333 + continue;
334 + state = pcie_aspm_check_state(leaf->pdev, state);
335 + }
336 + /* check root port link too in case it hasn't children */
337 + state = pcie_aspm_check_state(root_port_link->pdev, state);
338 +
339 if (link_state->enabled_state == state)
340 return;
341 - __pcie_aspm_config_link(pdev, state);
342 +
343 + /*
344 + * we must change the hierarchy. See comments in
345 + * __pcie_aspm_config_link for the order
346 + **/
347 + if (state & PCIE_LINK_STATE_L1) {
348 + list_for_each_entry(leaf, &link_list, sibiling) {
349 + if (get_root_port_link(leaf) == root_port_link)
350 + __pcie_aspm_config_link(leaf->pdev, state);
351 + }
352 + } else {
353 + list_for_each_entry_reverse(leaf, &link_list, sibiling) {
354 + if (get_root_port_link(leaf) == root_port_link)
355 + __pcie_aspm_config_link(leaf->pdev, state);
356 + }
357 + }
358 }
359
360 /*
361 @@ -570,6 +626,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
362 unsigned int state;
363 struct pcie_link_state *link_state;
364 int error = 0;
365 + int blacklist;
366
367 if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
368 return;
369 @@ -580,29 +637,58 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
370 if (list_empty(&pdev->subordinate->devices))
371 goto out;
372
373 - if (pcie_aspm_sanity_check(pdev))
374 - goto out;
375 + blacklist = !!pcie_aspm_sanity_check(pdev);
376
377 mutex_lock(&aspm_lock);
378
379 link_state = kzalloc(sizeof(*link_state), GFP_KERNEL);
380 if (!link_state)
381 goto unlock_out;
382 - pdev->link_state = link_state;
383
384 - pcie_aspm_configure_common_clock(pdev);
385 + link_state->downstream_has_switch = pcie_aspm_downstream_has_switch(pdev);
386 + INIT_LIST_HEAD(&link_state->children);
387 + INIT_LIST_HEAD(&link_state->link);
388 + if (pdev->bus->self) {/* this is a switch */
389 + struct pcie_link_state *parent_link_state;
390
391 - pcie_aspm_cap_init(pdev);
392 + parent_link_state = pdev->bus->parent->self->link_state;
393 + if (!parent_link_state) {
394 + kfree(link_state);
395 + goto unlock_out;
396 + }
397 + list_add(&link_state->link, &parent_link_state->children);
398 + link_state->parent = parent_link_state;
399 + }
400
401 - /* config link state to avoid BIOS error */
402 - state = pcie_aspm_check_state(pdev, policy_to_aspm_state(pdev));
403 - __pcie_aspm_config_link(pdev, state);
404 + pdev->link_state = link_state;
405
406 - pcie_check_clock_pm(pdev);
407 + if (!blacklist) {
408 + pcie_aspm_configure_common_clock(pdev);
409 + pcie_aspm_cap_init(pdev);
410 + } else {
411 + link_state->enabled_state = PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
412 + link_state->bios_aspm_state = 0;
413 + /* Set support state to 0, so we will disable ASPM later */
414 + link_state->support_state = 0;
415 + }
416
417 link_state->pdev = pdev;
418 list_add(&link_state->sibiling, &link_list);
419
420 + if (link_state->downstream_has_switch) {
421 + /*
422 + * If link has switch, delay the link config. The leaf link
423 + * initialization will config the whole hierarchy. but we must
424 + * make sure BIOS doesn't set unsupported link state
425 + **/
426 + state = pcie_aspm_check_state(pdev, link_state->bios_aspm_state);
427 + __pcie_aspm_config_link(pdev, state);
428 + } else
429 + __pcie_aspm_configure_link_state(pdev,
430 + policy_to_aspm_state(pdev));
431 +
432 + pcie_check_clock_pm(pdev, blacklist);
433 +
434 unlock_out:
435 if (error)
436 free_link_state(pdev);
437 @@ -635,6 +721,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
438 /* All functions are removed, so just disable ASPM for the link */
439 __pcie_aspm_config_one_dev(parent, 0);
440 list_del(&link_state->sibiling);
441 + list_del(&link_state->link);
442 /* Clock PM is for endpoint device */
443
444 free_link_state(parent);
445 diff --git a/drivers/usb/storage/libusual.c b/drivers/usb/storage/libusual.c
446 index d617e8a..f970b27 100644
447 --- a/drivers/usb/storage/libusual.c
448 +++ b/drivers/usb/storage/libusual.c
449 @@ -46,6 +46,12 @@ static int usu_probe_thread(void *arg);
450 { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin,bcdDeviceMax), \
451 .driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
452
453 +#define COMPLIANT_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
454 + vendorName, productName, useProtocol, useTransport, \
455 + initFunction, flags) \
456 +{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
457 + .driver_info = (flags) }
458 +
459 #define USUAL_DEV(useProto, useTrans, useType) \
460 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \
461 .driver_info = ((useType)<<24) }
462 @@ -57,6 +63,7 @@ struct usb_device_id storage_usb_ids [] = {
463
464 #undef USUAL_DEV
465 #undef UNUSUAL_DEV
466 +#undef COMPLIANT_DEV
467
468 MODULE_DEVICE_TABLE(usb, storage_usb_ids);
469 EXPORT_SYMBOL_GPL(storage_usb_ids);
470 diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
471 index 09779f6..620c2b5 100644
472 --- a/drivers/usb/storage/scsiglue.c
473 +++ b/drivers/usb/storage/scsiglue.c
474 @@ -59,6 +59,13 @@
475 #include "transport.h"
476 #include "protocol.h"
477
478 +/* Vendor IDs for companies that seem to include the READ CAPACITY bug
479 + * in all their devices
480 + */
481 +#define VENDOR_ID_NOKIA 0x0421
482 +#define VENDOR_ID_NIKON 0x04b0
483 +#define VENDOR_ID_MOTOROLA 0x22b8
484 +
485 /***********************************************************************
486 * Host functions
487 ***********************************************************************/
488 @@ -134,6 +141,22 @@ static int slave_configure(struct scsi_device *sdev)
489 * settings can't be overridden via the scsi devinfo mechanism. */
490 if (sdev->type == TYPE_DISK) {
491
492 + /* Some vendors seem to put the READ CAPACITY bug into
493 + * all their devices -- primarily makers of cell phones
494 + * and digital cameras. Since these devices always use
495 + * flash media and can be expected to have an even number
496 + * of sectors, we will always enable the CAPACITY_HEURISTICS
497 + * flag unless told otherwise. */
498 + switch (le16_to_cpu(us->pusb_dev->descriptor.idVendor)) {
499 + case VENDOR_ID_NOKIA:
500 + case VENDOR_ID_NIKON:
501 + case VENDOR_ID_MOTOROLA:
502 + if (!(us->fflags & (US_FL_FIX_CAPACITY |
503 + US_FL_CAPACITY_OK)))
504 + us->fflags |= US_FL_CAPACITY_HEURISTICS;
505 + break;
506 + }
507 +
508 /* Disk-type devices use MODE SENSE(6) if the protocol
509 * (SubClass) is Transparent SCSI, otherwise they use
510 * MODE SENSE(10). */
511 @@ -196,6 +219,14 @@ static int slave_configure(struct scsi_device *sdev)
512 * sector in a larger then 1 sector read, since the performance
513 * impact is negible we set this flag for all USB disks */
514 sdev->last_sector_bug = 1;
515 +
516 + /* Enable last-sector hacks for single-target devices using
517 + * the Bulk-only transport, unless we already know the
518 + * capacity will be decremented or is correct. */
519 + if (!(us->fflags & (US_FL_FIX_CAPACITY | US_FL_CAPACITY_OK |
520 + US_FL_SCM_MULT_TARG)) &&
521 + us->protocol == US_PR_BULK)
522 + us->use_last_sector_hacks = 1;
523 } else {
524
525 /* Non-disk-type devices don't need to blacklist any pages
526 diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
527 index 3523a0b..861e308 100644
528 --- a/drivers/usb/storage/transport.c
529 +++ b/drivers/usb/storage/transport.c
530 @@ -57,6 +57,9 @@
531 #include "scsiglue.h"
532 #include "debug.h"
533
534 +#include <linux/blkdev.h>
535 +#include "../../scsi/sd.h"
536 +
537
538 /***********************************************************************
539 * Data transfer routines
540 @@ -511,6 +514,80 @@ int usb_stor_bulk_transfer_sg(struct us_data* us, unsigned int pipe,
541 * Transport routines
542 ***********************************************************************/
543
544 +/* There are so many devices that report the capacity incorrectly,
545 + * this routine was written to counteract some of the resulting
546 + * problems.
547 + */
548 +static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
549 +{
550 + struct gendisk *disk;
551 + struct scsi_disk *sdkp;
552 + u32 sector;
553 +
554 + /* To Report "Medium Error: Record Not Found */
555 + static unsigned char record_not_found[18] = {
556 + [0] = 0x70, /* current error */
557 + [2] = MEDIUM_ERROR, /* = 0x03 */
558 + [7] = 0x0a, /* additional length */
559 + [12] = 0x14 /* Record Not Found */
560 + };
561 +
562 + /* If last-sector problems can't occur, whether because the
563 + * capacity was already decremented or because the device is
564 + * known to report the correct capacity, then we don't need
565 + * to do anything.
566 + */
567 + if (!us->use_last_sector_hacks)
568 + return;
569 +
570 + /* Was this command a READ(10) or a WRITE(10)? */
571 + if (srb->cmnd[0] != READ_10 && srb->cmnd[0] != WRITE_10)
572 + goto done;
573 +
574 + /* Did this command access the last sector? */
575 + sector = (srb->cmnd[2] << 24) | (srb->cmnd[3] << 16) |
576 + (srb->cmnd[4] << 8) | (srb->cmnd[5]);
577 + disk = srb->request->rq_disk;
578 + if (!disk)
579 + goto done;
580 + sdkp = scsi_disk(disk);
581 + if (!sdkp)
582 + goto done;
583 + if (sector + 1 != sdkp->capacity)
584 + goto done;
585 +
586 + if (srb->result == SAM_STAT_GOOD && scsi_get_resid(srb) == 0) {
587 +
588 + /* The command succeeded. We know this device doesn't
589 + * have the last-sector bug, so stop checking it.
590 + */
591 + us->use_last_sector_hacks = 0;
592 +
593 + } else {
594 + /* The command failed. Allow up to 3 retries in case this
595 + * is some normal sort of failure. After that, assume the
596 + * capacity is wrong and we're trying to access the sector
597 + * beyond the end. Replace the result code and sense data
598 + * with values that will cause the SCSI core to fail the
599 + * command immediately, instead of going into an infinite
600 + * (or even just a very long) retry loop.
601 + */
602 + if (++us->last_sector_retries < 3)
603 + return;
604 + srb->result = SAM_STAT_CHECK_CONDITION;
605 + memcpy(srb->sense_buffer, record_not_found,
606 + sizeof(record_not_found));
607 + }
608 +
609 + done:
610 + /* Don't reset the retry counter for TEST UNIT READY commands,
611 + * because they get issued after device resets which might be
612 + * caused by a failed last-sector access.
613 + */
614 + if (srb->cmnd[0] != TEST_UNIT_READY)
615 + us->last_sector_retries = 0;
616 +}
617 +
618 /* Invoke the transport and basic error-handling/recovery methods
619 *
620 * This is used by the protocol layers to actually send the message to
621 @@ -544,6 +621,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
622 /* if the transport provided its own sense data, don't auto-sense */
623 if (result == USB_STOR_TRANSPORT_NO_SENSE) {
624 srb->result = SAM_STAT_CHECK_CONDITION;
625 + last_sector_hacks(us, srb);
626 return;
627 }
628
629 @@ -667,6 +745,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
630 scsi_bufflen(srb) - scsi_get_resid(srb) < srb->underflow)
631 srb->result = (DID_ERROR << 16) | (SUGGEST_RETRY << 24);
632
633 + last_sector_hacks(us, srb);
634 return;
635
636 /* Error and abort processing: try to resynchronize with the device
637 @@ -694,6 +773,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
638 us->transport_reset(us);
639 }
640 clear_bit(US_FLIDX_RESETTING, &us->dflags);
641 + last_sector_hacks(us, srb);
642 }
643
644 /* Stop the current URB transfer */
645 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
646 index 476da5d..6fcb6d1 100644
647 --- a/drivers/usb/storage/unusual_devs.h
648 +++ b/drivers/usb/storage/unusual_devs.h
649 @@ -27,7 +27,8 @@
650
651 /* IMPORTANT NOTE: This file must be included in another file which does
652 * the following thing for it to work:
653 - * The macro UNUSUAL_DEV() must be defined before this file is included
654 + * The UNUSUAL_DEV, COMPLIANT_DEV, and USUAL_DEV macros must be defined
655 + * before this file is included.
656 */
657
658 /* If you edit this file, please try to keep it sorted first by VendorID,
659 @@ -46,6 +47,12 @@
660 * <usb-storage@lists.one-eyed-alien.net>
661 */
662
663 +/* Note: If you add an entry only in order to set the CAPACITY_OK flag,
664 + * use the COMPLIANT_DEV macro instead of UNUSUAL_DEV. This is
665 + * because such entries mark devices which actually work correctly,
666 + * as opposed to devices that do something strangely or wrongly.
667 + */
668 +
669 /* patch submitted by Vivian Bregier <Vivian.Bregier@imag.fr>
670 */
671 UNUSUAL_DEV( 0x03eb, 0x2002, 0x0100, 0x0100,
672 @@ -160,20 +167,6 @@ UNUSUAL_DEV( 0x0421, 0x0019, 0x0592, 0x0592,
673 US_SC_DEVICE, US_PR_DEVICE, NULL,
674 US_FL_MAX_SECTORS_64 ),
675
676 -/* Reported by Filip Joelsson <filip@blueturtle.nu> */
677 -UNUSUAL_DEV( 0x0421, 0x005d, 0x0001, 0x0600,
678 - "Nokia",
679 - "Nokia 3110c",
680 - US_SC_DEVICE, US_PR_DEVICE, NULL,
681 - US_FL_FIX_CAPACITY ),
682 -
683 -/* Patch for Nokia 5310 capacity */
684 -UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0701,
685 - "Nokia",
686 - "5310",
687 - US_SC_DEVICE, US_PR_DEVICE, NULL,
688 - US_FL_FIX_CAPACITY ),
689 -
690 /* Reported by Mario Rettig <mariorettig@web.de> */
691 UNUSUAL_DEV( 0x0421, 0x042e, 0x0100, 0x0100,
692 "Nokia",
693 @@ -239,56 +232,6 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
694 US_SC_DEVICE, US_PR_DEVICE, NULL,
695 US_FL_MAX_SECTORS_64 ),
696
697 -/* Reported by Cedric Godin <cedric@belbone.be> */
698 -UNUSUAL_DEV( 0x0421, 0x04b9, 0x0500, 0x0551,
699 - "Nokia",
700 - "5300",
701 - US_SC_DEVICE, US_PR_DEVICE, NULL,
702 - US_FL_FIX_CAPACITY ),
703 -
704 -/* Reported by Paulo Fessel <pfessel@gmail.com> */
705 -UNUSUAL_DEV( 0x0421, 0x04bd, 0x0000, 0x9999,
706 - "Nokia",
707 - "5200",
708 - US_SC_DEVICE, US_PR_DEVICE, NULL,
709 - US_FL_FIX_CAPACITY ),
710 -
711 -/* Reported by Richard Nauber <RichardNauber@web.de> */
712 -UNUSUAL_DEV( 0x0421, 0x04fa, 0x0550, 0x0660,
713 - "Nokia",
714 - "6300",
715 - US_SC_DEVICE, US_PR_DEVICE, NULL,
716 - US_FL_FIX_CAPACITY ),
717 -
718 -/* Reported by Ozan Sener <themgzzy@gmail.com> */
719 -UNUSUAL_DEV( 0x0421, 0x0060, 0x0551, 0x0551,
720 - "Nokia",
721 - "3500c",
722 - US_SC_DEVICE, US_PR_DEVICE, NULL,
723 - US_FL_FIX_CAPACITY ),
724 -
725 -/* Reported by CSECSY Laszlo <boobaa@frugalware.org> */
726 -UNUSUAL_DEV( 0x0421, 0x0063, 0x0001, 0x0601,
727 - "Nokia",
728 - "Nokia 3109c",
729 - US_SC_DEVICE, US_PR_DEVICE, NULL,
730 - US_FL_FIX_CAPACITY ),
731 -
732 -/* Patch for Nokia 5310 capacity */
733 -UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0591,
734 - "Nokia",
735 - "5310",
736 - US_SC_DEVICE, US_PR_DEVICE, NULL,
737 - US_FL_FIX_CAPACITY ),
738 -
739 -/* Submitted by Ricky Wong Yung Fei <evilbladewarrior@gmail.com> */
740 -/* Nokia 7610 Supernova - Too many sectors reported in usb storage mode */
741 -UNUSUAL_DEV( 0x0421, 0x00f5, 0x0000, 0x0470,
742 - "Nokia",
743 - "7610 Supernova",
744 - US_SC_DEVICE, US_PR_DEVICE, NULL,
745 - US_FL_FIX_CAPACITY ),
746 -
747 /* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */
748 UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210,
749 "SMSC",
750 @@ -692,6 +635,13 @@ UNUSUAL_DEV( 0x0525, 0xa140, 0x0100, 0x0100,
751 US_SC_8070, US_PR_DEVICE, NULL,
752 US_FL_FIX_INQUIRY ),
753
754 +/* Added by Alan Stern <stern@rowland.harvard.edu> */
755 +COMPLIANT_DEV(0x0525, 0xa4a5, 0x0000, 0x9999,
756 + "Linux",
757 + "File-backed Storage Gadget",
758 + US_SC_DEVICE, US_PR_DEVICE, NULL,
759 + US_FL_CAPACITY_OK ),
760 +
761 /* Yakumo Mega Image 37
762 * Submitted by Stephan Fuhrmann <atomenergie@t-online.de> */
763 UNUSUAL_DEV( 0x052b, 0x1801, 0x0100, 0x0100,
764 diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
765 index 27016fd..ceb8ac3 100644
766 --- a/drivers/usb/storage/usb.c
767 +++ b/drivers/usb/storage/usb.c
768 @@ -126,6 +126,8 @@ MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device");
769 { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin,bcdDeviceMax), \
770 .driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
771
772 +#define COMPLIANT_DEV UNUSUAL_DEV
773 +
774 #define USUAL_DEV(useProto, useTrans, useType) \
775 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \
776 .driver_info = (USB_US_TYPE_STOR<<24) }
777 @@ -134,6 +136,7 @@ static struct usb_device_id storage_usb_ids [] = {
778
779 # include "unusual_devs.h"
780 #undef UNUSUAL_DEV
781 +#undef COMPLIANT_DEV
782 #undef USUAL_DEV
783 /* Terminating entry */
784 { }
785 @@ -164,6 +167,8 @@ MODULE_DEVICE_TABLE (usb, storage_usb_ids);
786 .initFunction = init_function, \
787 }
788
789 +#define COMPLIANT_DEV UNUSUAL_DEV
790 +
791 #define USUAL_DEV(use_protocol, use_transport, use_type) \
792 { \
793 .useProtocol = use_protocol, \
794 @@ -173,6 +178,7 @@ MODULE_DEVICE_TABLE (usb, storage_usb_ids);
795 static struct us_unusual_dev us_unusual_dev_list[] = {
796 # include "unusual_devs.h"
797 # undef UNUSUAL_DEV
798 +# undef COMPLIANT_DEV
799 # undef USUAL_DEV
800
801 /* Terminating entry */
802 diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
803 index a4ad73b..2e995c9 100644
804 --- a/drivers/usb/storage/usb.h
805 +++ b/drivers/usb/storage/usb.h
806 @@ -155,6 +155,10 @@ struct us_data {
807 #ifdef CONFIG_PM
808 pm_hook suspend_resume_hook;
809 #endif
810 +
811 + /* hacks for READ CAPACITY bug handling */
812 + int use_last_sector_hacks;
813 + int last_sector_retries;
814 };
815
816 /* Convert between us_data and the corresponding Scsi_Host */
817 diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
818 index 25adfc3..c8616a0 100644
819 --- a/fs/fs-writeback.c
820 +++ b/fs/fs-writeback.c
821 @@ -421,9 +421,6 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
822 * If we're a pdlfush thread, then implement pdflush collision avoidance
823 * against the entire list.
824 *
825 - * WB_SYNC_HOLD is a hack for sys_sync(): reattach the inode to sb->s_dirty so
826 - * that it can be located for waiting on in __writeback_single_inode().
827 - *
828 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
829 * This function assumes that the blockdev superblock's inodes are backed by
830 * a variety of queues, so all inodes are searched. For other superblocks,
831 @@ -443,6 +440,7 @@ void generic_sync_sb_inodes(struct super_block *sb,
832 struct writeback_control *wbc)
833 {
834 const unsigned long start = jiffies; /* livelock avoidance */
835 + int sync = wbc->sync_mode == WB_SYNC_ALL;
836
837 spin_lock(&inode_lock);
838 if (!wbc->for_kupdate || list_empty(&sb->s_io))
839 @@ -499,10 +497,6 @@ void generic_sync_sb_inodes(struct super_block *sb,
840 __iget(inode);
841 pages_skipped = wbc->pages_skipped;
842 __writeback_single_inode(inode, wbc);
843 - if (wbc->sync_mode == WB_SYNC_HOLD) {
844 - inode->dirtied_when = jiffies;
845 - list_move(&inode->i_list, &sb->s_dirty);
846 - }
847 if (current_is_pdflush())
848 writeback_release(bdi);
849 if (wbc->pages_skipped != pages_skipped) {
850 @@ -523,7 +517,49 @@ void generic_sync_sb_inodes(struct super_block *sb,
851 if (!list_empty(&sb->s_more_io))
852 wbc->more_io = 1;
853 }
854 - spin_unlock(&inode_lock);
855 +
856 + if (sync) {
857 + struct inode *inode, *old_inode = NULL;
858 +
859 + /*
860 + * Data integrity sync. Must wait for all pages under writeback,
861 + * because there may have been pages dirtied before our sync
862 + * call, but which had writeout started before we write it out.
863 + * In which case, the inode may not be on the dirty list, but
864 + * we still have to wait for that writeout.
865 + */
866 + list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
867 + struct address_space *mapping;
868 +
869 + if (inode->i_state & (I_FREEING|I_WILL_FREE))
870 + continue;
871 + mapping = inode->i_mapping;
872 + if (mapping->nrpages == 0)
873 + continue;
874 + __iget(inode);
875 + spin_unlock(&inode_lock);
876 + /*
877 + * We hold a reference to 'inode' so it couldn't have
878 + * been removed from s_inodes list while we dropped the
879 + * inode_lock. We cannot iput the inode now as we can
880 + * be holding the last reference and we cannot iput it
881 + * under inode_lock. So we keep the reference and iput
882 + * it later.
883 + */
884 + iput(old_inode);
885 + old_inode = inode;
886 +
887 + filemap_fdatawait(mapping);
888 +
889 + cond_resched();
890 +
891 + spin_lock(&inode_lock);
892 + }
893 + spin_unlock(&inode_lock);
894 + iput(old_inode);
895 + } else
896 + spin_unlock(&inode_lock);
897 +
898 return; /* Leave any unwritten inodes on s_io */
899 }
900 EXPORT_SYMBOL_GPL(generic_sync_sb_inodes);
901 @@ -588,8 +624,7 @@ restart:
902
903 /*
904 * writeback and wait upon the filesystem's dirty inodes. The caller will
905 - * do this in two passes - one to write, and one to wait. WB_SYNC_HOLD is
906 - * used to park the written inodes on sb->s_dirty for the wait pass.
907 + * do this in two passes - one to write, and one to wait.
908 *
909 * A finite limit is set on the number of pages which will be written.
910 * To prevent infinite livelock of sys_sync().
911 @@ -600,30 +635,21 @@ restart:
912 void sync_inodes_sb(struct super_block *sb, int wait)
913 {
914 struct writeback_control wbc = {
915 - .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
916 + .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
917 .range_start = 0,
918 .range_end = LLONG_MAX,
919 };
920 - unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
921 - unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
922
923 - wbc.nr_to_write = nr_dirty + nr_unstable +
924 - (inodes_stat.nr_inodes - inodes_stat.nr_unused) +
925 - nr_dirty + nr_unstable;
926 - wbc.nr_to_write += wbc.nr_to_write / 2; /* Bit more for luck */
927 - sync_sb_inodes(sb, &wbc);
928 -}
929 + if (!wait) {
930 + unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
931 + unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
932
933 -/*
934 - * Rather lame livelock avoidance.
935 - */
936 -static void set_sb_syncing(int val)
937 -{
938 - struct super_block *sb;
939 - spin_lock(&sb_lock);
940 - list_for_each_entry_reverse(sb, &super_blocks, s_list)
941 - sb->s_syncing = val;
942 - spin_unlock(&sb_lock);
943 + wbc.nr_to_write = nr_dirty + nr_unstable +
944 + (inodes_stat.nr_inodes - inodes_stat.nr_unused);
945 + } else
946 + wbc.nr_to_write = LONG_MAX; /* doesn't actually matter */
947 +
948 + sync_sb_inodes(sb, &wbc);
949 }
950
951 /**
952 @@ -652,9 +678,6 @@ static void __sync_inodes(int wait)
953 spin_lock(&sb_lock);
954 restart:
955 list_for_each_entry(sb, &super_blocks, s_list) {
956 - if (sb->s_syncing)
957 - continue;
958 - sb->s_syncing = 1;
959 sb->s_count++;
960 spin_unlock(&sb_lock);
961 down_read(&sb->s_umount);
962 @@ -672,13 +695,10 @@ restart:
963
964 void sync_inodes(int wait)
965 {
966 - set_sb_syncing(0);
967 __sync_inodes(0);
968
969 - if (wait) {
970 - set_sb_syncing(0);
971 + if (wait)
972 __sync_inodes(1);
973 - }
974 }
975
976 /**
977 diff --git a/fs/sync.c b/fs/sync.c
978 index 6cc8cb4..9e5f60d 100644
979 --- a/fs/sync.c
980 +++ b/fs/sync.c
981 @@ -287,7 +287,7 @@ int do_sync_mapping_range(struct address_space *mapping, loff_t offset,
982
983 if (flags & SYNC_FILE_RANGE_WRITE) {
984 ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
985 - WB_SYNC_NONE);
986 + WB_SYNC_ALL);
987 if (ret < 0)
988 goto out;
989 }
990 diff --git a/include/linux/fs.h b/include/linux/fs.h
991 index d621217..d1b3e22 100644
992 --- a/include/linux/fs.h
993 +++ b/include/linux/fs.h
994 @@ -1080,7 +1080,6 @@ struct super_block {
995 struct rw_semaphore s_umount;
996 struct mutex s_lock;
997 int s_count;
998 - int s_syncing;
999 int s_need_sync_fs;
1000 atomic_t s_active;
1001 #ifdef CONFIG_SECURITY
1002 diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
1003 index d9a3bbe..bd414ec 100644
1004 --- a/include/linux/usb_usual.h
1005 +++ b/include/linux/usb_usual.h
1006 @@ -52,8 +52,9 @@
1007 US_FLAG(MAX_SECTORS_MIN,0x00002000) \
1008 /* Sets max_sectors to arch min */ \
1009 US_FLAG(BULK_IGNORE_TAG,0x00004000) \
1010 - /* Ignore tag mismatch in bulk operations */
1011 -
1012 + /* Ignore tag mismatch in bulk operations */ \
1013 + US_FLAG(CAPACITY_OK, 0x00010000) \
1014 + /* READ CAPACITY response is correct */
1015
1016 #define US_FLAG(name, value) US_FL_##name = value ,
1017 enum { US_DO_ALL_FLAGS };
1018 diff --git a/include/linux/writeback.h b/include/linux/writeback.h
1019 index 12b15c5..c2835bb 100644
1020 --- a/include/linux/writeback.h
1021 +++ b/include/linux/writeback.h
1022 @@ -30,7 +30,6 @@ static inline int task_is_pdflush(struct task_struct *task)
1023 enum writeback_sync_modes {
1024 WB_SYNC_NONE, /* Don't wait on anything */
1025 WB_SYNC_ALL, /* Wait on every mapping */
1026 - WB_SYNC_HOLD, /* Hold the inode on sb_dirty for sys_sync() */
1027 };
1028
1029 /*
1030 diff --git a/kernel/signal.c b/kernel/signal.c
1031 index 6f06f43..3d161f0 100644
1032 --- a/kernel/signal.c
1033 +++ b/kernel/signal.c
1034 @@ -1141,7 +1141,8 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1035 struct task_struct * p;
1036
1037 for_each_process(p) {
1038 - if (p->pid > 1 && !same_thread_group(p, current)) {
1039 + if (task_pid_vnr(p) > 1 &&
1040 + !same_thread_group(p, current)) {
1041 int err = group_send_sig_info(sig, info, p);
1042 ++count;
1043 if (err != -EPERM)
1044 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
1045 index 4220a2e..521960b 100644
1046 --- a/kernel/time/timekeeping.c
1047 +++ b/kernel/time/timekeeping.c
1048 @@ -61,27 +61,23 @@ struct clocksource *clock;
1049
1050 #ifdef CONFIG_GENERIC_TIME
1051 /**
1052 - * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
1053 + * clocksource_forward_now - update clock to the current time
1054 *
1055 - * private function, must hold xtime_lock lock when being
1056 - * called. Returns the number of nanoseconds since the
1057 - * last call to update_wall_time() (adjusted by NTP scaling)
1058 + * Forward the current clock to update its state since the last call to
1059 + * update_wall_time(). This is useful before significant clock changes,
1060 + * as it avoids having to deal with this time offset explicitly.
1061 */
1062 -static inline s64 __get_nsec_offset(void)
1063 +static void clocksource_forward_now(void)
1064 {
1065 cycle_t cycle_now, cycle_delta;
1066 - s64 ns_offset;
1067 + s64 nsec;
1068
1069 - /* read clocksource: */
1070 cycle_now = clocksource_read(clock);
1071 -
1072 - /* calculate the delta since the last update_wall_time: */
1073 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
1074 + clock->cycle_last = cycle_now;
1075
1076 - /* convert to nanoseconds: */
1077 - ns_offset = cyc2ns(clock, cycle_delta);
1078 -
1079 - return ns_offset;
1080 + nsec = cyc2ns(clock, cycle_delta);
1081 + timespec_add_ns(&xtime, nsec);
1082 }
1083
1084 /**
1085 @@ -92,6 +88,7 @@ static inline s64 __get_nsec_offset(void)
1086 */
1087 void getnstimeofday(struct timespec *ts)
1088 {
1089 + cycle_t cycle_now, cycle_delta;
1090 unsigned long seq;
1091 s64 nsecs;
1092
1093 @@ -101,7 +98,15 @@ void getnstimeofday(struct timespec *ts)
1094 seq = read_seqbegin(&xtime_lock);
1095
1096 *ts = xtime;
1097 - nsecs = __get_nsec_offset();
1098 +
1099 + /* read clocksource: */
1100 + cycle_now = clocksource_read(clock);
1101 +
1102 + /* calculate the delta since the last update_wall_time: */
1103 + cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
1104 +
1105 + /* convert to nanoseconds: */
1106 + nsecs = cyc2ns(clock, cycle_delta);
1107
1108 } while (read_seqretry(&xtime_lock, seq));
1109
1110 @@ -134,22 +139,22 @@ EXPORT_SYMBOL(do_gettimeofday);
1111 */
1112 int do_settimeofday(struct timespec *tv)
1113 {
1114 + struct timespec ts_delta;
1115 unsigned long flags;
1116 - time_t wtm_sec, sec = tv->tv_sec;
1117 - long wtm_nsec, nsec = tv->tv_nsec;
1118
1119 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
1120 return -EINVAL;
1121
1122 write_seqlock_irqsave(&xtime_lock, flags);
1123
1124 - nsec -= __get_nsec_offset();
1125 + clocksource_forward_now();
1126 +
1127 + ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
1128 + ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
1129 + wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);
1130
1131 - wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
1132 - wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
1133 + xtime = *tv;
1134
1135 - set_normalized_timespec(&xtime, sec, nsec);
1136 - set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
1137 update_xtime_cache(0);
1138
1139 clock->error = 0;
1140 @@ -175,22 +180,17 @@ EXPORT_SYMBOL(do_settimeofday);
1141 static void change_clocksource(void)
1142 {
1143 struct clocksource *new;
1144 - cycle_t now;
1145 - u64 nsec;
1146
1147 new = clocksource_get_next();
1148
1149 if (clock == new)
1150 return;
1151
1152 - new->cycle_last = 0;
1153 - now = clocksource_read(new);
1154 - nsec = __get_nsec_offset();
1155 - timespec_add_ns(&xtime, nsec);
1156 + clocksource_forward_now();
1157
1158 clock = new;
1159 - clock->cycle_last = now;
1160 -
1161 + clock->cycle_last = 0;
1162 + clock->cycle_last = clocksource_read(new);
1163 clock->error = 0;
1164 clock->xtime_nsec = 0;
1165 clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
1166 @@ -205,8 +205,8 @@ static void change_clocksource(void)
1167 */
1168 }
1169 #else
1170 +static inline void clocksource_forward_now(void) { }
1171 static inline void change_clocksource(void) { }
1172 -static inline s64 __get_nsec_offset(void) { return 0; }
1173 #endif
1174
1175 /**
1176 @@ -268,8 +268,6 @@ void __init timekeeping_init(void)
1177
1178 /* time in seconds when suspend began */
1179 static unsigned long timekeeping_suspend_time;
1180 -/* xtime offset when we went into suspend */
1181 -static s64 timekeeping_suspend_nsecs;
1182
1183 /**
1184 * timekeeping_resume - Resumes the generic timekeeping subsystem.
1185 @@ -295,8 +293,6 @@ static int timekeeping_resume(struct sys_device *dev)
1186 wall_to_monotonic.tv_sec -= sleep_length;
1187 total_sleep_time += sleep_length;
1188 }
1189 - /* Make sure that we have the correct xtime reference */
1190 - timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
1191 update_xtime_cache(0);
1192 /* re-base the last cycle value */
1193 clock->cycle_last = 0;
1194 @@ -322,8 +318,7 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
1195 timekeeping_suspend_time = read_persistent_clock();
1196
1197 write_seqlock_irqsave(&xtime_lock, flags);
1198 - /* Get the current xtime offset */
1199 - timekeeping_suspend_nsecs = __get_nsec_offset();
1200 + clocksource_forward_now();
1201 timekeeping_suspended = 1;
1202 write_sequnlock_irqrestore(&xtime_lock, flags);
1203
1204 @@ -464,10 +459,10 @@ void update_wall_time(void)
1205 */
1206 while (offset >= clock->cycle_interval) {
1207 /* accumulate one interval */
1208 - clock->xtime_nsec += clock->xtime_interval;
1209 - clock->cycle_last += clock->cycle_interval;
1210 offset -= clock->cycle_interval;
1211 + clock->cycle_last += clock->cycle_interval;
1212
1213 + clock->xtime_nsec += clock->xtime_interval;
1214 if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
1215 clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
1216 xtime.tv_sec++;
1217 diff --git a/lib/idr.c b/lib/idr.c
1218 index 1c4f928..21154ae 100644
1219 --- a/lib/idr.c
1220 +++ b/lib/idr.c
1221 @@ -121,7 +121,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
1222 {
1223 while (idp->id_free_cnt < IDR_FREE_MAX) {
1224 struct idr_layer *new;
1225 - new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
1226 + new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
1227 if (new == NULL)
1228 return (0);
1229 move_to_free_list(idp, new);
1230 @@ -623,16 +623,10 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
1231 }
1232 EXPORT_SYMBOL(idr_replace);
1233
1234 -static void idr_cache_ctor(void *idr_layer)
1235 -{
1236 - memset(idr_layer, 0, sizeof(struct idr_layer));
1237 -}
1238 -
1239 void __init idr_init_cache(void)
1240 {
1241 idr_layer_cache = kmem_cache_create("idr_layer_cache",
1242 - sizeof(struct idr_layer), 0, SLAB_PANIC,
1243 - idr_cache_ctor);
1244 + sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
1245 }
1246
1247 /**
1248 diff --git a/mm/filemap.c b/mm/filemap.c
1249 index f3033d0..8a477d3 100644
1250 --- a/mm/filemap.c
1251 +++ b/mm/filemap.c
1252 @@ -209,7 +209,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
1253 int ret;
1254 struct writeback_control wbc = {
1255 .sync_mode = sync_mode,
1256 - .nr_to_write = mapping->nrpages * 2,
1257 + .nr_to_write = LONG_MAX,
1258 .range_start = start,
1259 .range_end = end,
1260 };
1261 @@ -1304,7 +1304,8 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1262 goto out; /* skip atime */
1263 size = i_size_read(inode);
1264 if (pos < size) {
1265 - retval = filemap_write_and_wait(mapping);
1266 + retval = filemap_write_and_wait_range(mapping, pos,
1267 + pos + iov_length(iov, nr_segs) - 1);
1268 if (!retval) {
1269 retval = mapping->a_ops->direct_IO(READ, iocb,
1270 iov, pos, nr_segs);
1271 @@ -2117,18 +2118,10 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
1272 if (count != ocount)
1273 *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
1274
1275 - /*
1276 - * Unmap all mmappings of the file up-front.
1277 - *
1278 - * This will cause any pte dirty bits to be propagated into the
1279 - * pageframes for the subsequent filemap_write_and_wait().
1280 - */
1281 write_len = iov_length(iov, *nr_segs);
1282 end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
1283 - if (mapping_mapped(mapping))
1284 - unmap_mapping_range(mapping, pos, write_len, 0);
1285
1286 - written = filemap_write_and_wait(mapping);
1287 + written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
1288 if (written)
1289 goto out;
1290
1291 @@ -2519,7 +2512,8 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
1292 * the file data here, to try to honour O_DIRECT expectations.
1293 */
1294 if (unlikely(file->f_flags & O_DIRECT) && written)
1295 - status = filemap_write_and_wait(mapping);
1296 + status = filemap_write_and_wait_range(mapping,
1297 + pos, pos + written - 1);
1298
1299 return written ? written : status;
1300 }
1301 diff --git a/mm/page-writeback.c b/mm/page-writeback.c
1302 index 24de8b6..8875822 100644
1303 --- a/mm/page-writeback.c
1304 +++ b/mm/page-writeback.c
1305 @@ -872,9 +872,11 @@ int write_cache_pages(struct address_space *mapping,
1306 int done = 0;
1307 struct pagevec pvec;
1308 int nr_pages;
1309 + pgoff_t uninitialized_var(writeback_index);
1310 pgoff_t index;
1311 pgoff_t end; /* Inclusive */
1312 - int scanned = 0;
1313 + pgoff_t done_index;
1314 + int cycled;
1315 int range_whole = 0;
1316
1317 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1318 @@ -884,82 +886,134 @@ int write_cache_pages(struct address_space *mapping,
1319
1320 pagevec_init(&pvec, 0);
1321 if (wbc->range_cyclic) {
1322 - index = mapping->writeback_index; /* Start from prev offset */
1323 + writeback_index = mapping->writeback_index; /* prev offset */
1324 + index = writeback_index;
1325 + if (index == 0)
1326 + cycled = 1;
1327 + else
1328 + cycled = 0;
1329 end = -1;
1330 } else {
1331 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1332 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1333 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1334 range_whole = 1;
1335 - scanned = 1;
1336 + cycled = 1; /* ignore range_cyclic tests */
1337 }
1338 retry:
1339 - while (!done && (index <= end) &&
1340 - (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1341 - PAGECACHE_TAG_DIRTY,
1342 - min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
1343 - unsigned i;
1344 + done_index = index;
1345 + while (!done && (index <= end)) {
1346 + int i;
1347 +
1348 + nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1349 + PAGECACHE_TAG_DIRTY,
1350 + min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1351 + if (nr_pages == 0)
1352 + break;
1353
1354 - scanned = 1;
1355 for (i = 0; i < nr_pages; i++) {
1356 struct page *page = pvec.pages[i];
1357
1358 /*
1359 - * At this point we hold neither mapping->tree_lock nor
1360 - * lock on the page itself: the page may be truncated or
1361 - * invalidated (changing page->mapping to NULL), or even
1362 - * swizzled back from swapper_space to tmpfs file
1363 - * mapping
1364 + * At this point, the page may be truncated or
1365 + * invalidated (changing page->mapping to NULL), or
1366 + * even swizzled back from swapper_space to tmpfs file
1367 + * mapping. However, page->index will not change
1368 + * because we have a reference on the page.
1369 */
1370 + if (page->index > end) {
1371 + /*
1372 + * can't be range_cyclic (1st pass) because
1373 + * end == -1 in that case.
1374 + */
1375 + done = 1;
1376 + break;
1377 + }
1378 +
1379 + done_index = page->index + 1;
1380 +
1381 lock_page(page);
1382
1383 + /*
1384 + * Page truncated or invalidated. We can freely skip it
1385 + * then, even for data integrity operations: the page
1386 + * has disappeared concurrently, so there could be no
1387 + * real expectation of this data integrity operation
1388 + * even if there is now a new, dirty page at the same
1389 + * pagecache address.
1390 + */
1391 if (unlikely(page->mapping != mapping)) {
1392 +continue_unlock:
1393 unlock_page(page);
1394 continue;
1395 }
1396
1397 - if (!wbc->range_cyclic && page->index > end) {
1398 - done = 1;
1399 - unlock_page(page);
1400 - continue;
1401 + if (!PageDirty(page)) {
1402 + /* someone wrote it for us */
1403 + goto continue_unlock;
1404 }
1405
1406 - if (wbc->sync_mode != WB_SYNC_NONE)
1407 - wait_on_page_writeback(page);
1408 -
1409 - if (PageWriteback(page) ||
1410 - !clear_page_dirty_for_io(page)) {
1411 - unlock_page(page);
1412 - continue;
1413 + if (PageWriteback(page)) {
1414 + if (wbc->sync_mode != WB_SYNC_NONE)
1415 + wait_on_page_writeback(page);
1416 + else
1417 + goto continue_unlock;
1418 }
1419
1420 + BUG_ON(PageWriteback(page));
1421 + if (!clear_page_dirty_for_io(page))
1422 + goto continue_unlock;
1423 +
1424 ret = (*writepage)(page, wbc, data);
1425
1426 - if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
1427 - unlock_page(page);
1428 - ret = 0;
1429 + if (unlikely(ret)) {
1430 + if (ret == AOP_WRITEPAGE_ACTIVATE) {
1431 + unlock_page(page);
1432 + ret = 0;
1433 + } else {
1434 + /*
1435 + * done_index is set past this page,
1436 + * so media errors will not choke
1437 + * background writeout for the entire
1438 + * file. This has consequences for
1439 + * range_cyclic semantics (ie. it may
1440 + * not be suitable for data integrity
1441 + * writeout).
1442 + */
1443 + done = 1;
1444 + break;
1445 + }
1446 + }
1447 +
1448 + if (wbc->sync_mode == WB_SYNC_NONE) {
1449 + wbc->nr_to_write--;
1450 + if (wbc->nr_to_write <= 0) {
1451 + done = 1;
1452 + break;
1453 + }
1454 }
1455 - if (ret || (--(wbc->nr_to_write) <= 0))
1456 - done = 1;
1457 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1458 wbc->encountered_congestion = 1;
1459 done = 1;
1460 + break;
1461 }
1462 }
1463 pagevec_release(&pvec);
1464 cond_resched();
1465 }
1466 - if (!scanned && !done) {
1467 + if (!cycled) {
1468 /*
1469 + * range_cyclic:
1470 * We hit the last page and there is more work to be done: wrap
1471 * back to the start of the file
1472 */
1473 - scanned = 1;
1474 + cycled = 1;
1475 index = 0;
1476 + end = writeback_index - 1;
1477 goto retry;
1478 }
1479 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1480 - mapping->writeback_index = index;
1481 + mapping->writeback_index = done_index;
1482
1483 if (wbc->range_cont)
1484 wbc->range_start = index << PAGE_CACHE_SHIFT;
1485 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
1486 index 1ab341e..f57d576 100644
1487 --- a/net/ipv4/tcp.c
1488 +++ b/net/ipv4/tcp.c
1489 @@ -576,10 +576,6 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
1490 else if (!ret) {
1491 if (spliced)
1492 break;
1493 - if (flags & SPLICE_F_NONBLOCK) {
1494 - ret = -EAGAIN;
1495 - break;
1496 - }
1497 if (sock_flag(sk, SOCK_DONE))
1498 break;
1499 if (sk->sk_err) {
1500 @@ -597,6 +593,10 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
1501 ret = -ENOTCONN;
1502 break;
1503 }
1504 + if (flags & SPLICE_F_NONBLOCK) {
1505 + ret = -EAGAIN;
1506 + break;
1507 + }
1508 if (!timeo) {
1509 ret = -EAGAIN;
1510 break;
1511 diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
1512 index 29c7c99..52ee1dc 100644
1513 --- a/net/ipv6/ip6_fib.c
1514 +++ b/net/ipv6/ip6_fib.c
1515 @@ -298,6 +298,10 @@ static void fib6_dump_end(struct netlink_callback *cb)
1516 struct fib6_walker_t *w = (void*)cb->args[2];
1517
1518 if (w) {
1519 + if (cb->args[4]) {
1520 + cb->args[4] = 0;
1521 + fib6_walker_unlink(w);
1522 + }
1523 cb->args[2] = 0;
1524 kfree(w);
1525 }
1526 @@ -330,15 +334,12 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
1527 read_lock_bh(&table->tb6_lock);
1528 res = fib6_walk_continue(w);
1529 read_unlock_bh(&table->tb6_lock);
1530 - if (res != 0) {
1531 - if (res < 0)
1532 - fib6_walker_unlink(w);
1533 - goto end;
1534 + if (res <= 0) {
1535 + fib6_walker_unlink(w);
1536 + cb->args[4] = 0;
1537 }
1538 - fib6_walker_unlink(w);
1539 - cb->args[4] = 0;
1540 }
1541 -end:
1542 +
1543 return res;
1544 }
1545
1546 diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
1547 index 246f906..ea51fcd 100644
1548 --- a/net/sched/cls_u32.c
1549 +++ b/net/sched/cls_u32.c
1550 @@ -637,8 +637,9 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
1551 break;
1552
1553 n->next = *ins;
1554 - wmb();
1555 + tcf_tree_lock(tp);
1556 *ins = n;
1557 + tcf_tree_unlock(tp);
1558
1559 *arg = (unsigned long)n;
1560 return 0;
1561 diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
1562 index d14f020..d2943a4 100644
1563 --- a/net/sched/sch_htb.c
1564 +++ b/net/sched/sch_htb.c
1565 @@ -924,6 +924,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
1566 }
1567 }
1568 sch->qstats.overlimits++;
1569 + qdisc_watchdog_cancel(&q->watchdog);
1570 qdisc_watchdog_schedule(&q->watchdog, next_event);
1571 fin:
1572 return skb;
1573 diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
1574 index 7c622af..649d174 100644
1575 --- a/net/sctp/sm_statefuns.c
1576 +++ b/net/sctp/sm_statefuns.c
1577 @@ -3635,6 +3635,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
1578 {
1579 struct sctp_chunk *chunk = arg;
1580 struct sctp_fwdtsn_hdr *fwdtsn_hdr;
1581 + struct sctp_fwdtsn_skip *skip;
1582 __u16 len;
1583 __u32 tsn;
1584
1585 @@ -3664,6 +3665,12 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
1586 if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
1587 goto discard_noforce;
1588
1589 + /* Silently discard the chunk if stream-id is not valid */
1590 + sctp_walk_fwdtsn(skip, chunk) {
1591 + if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
1592 + goto discard_noforce;
1593 + }
1594 +
1595 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
1596 if (len > sizeof(struct sctp_fwdtsn_hdr))
1597 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
1598 @@ -3695,6 +3702,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
1599 {
1600 struct sctp_chunk *chunk = arg;
1601 struct sctp_fwdtsn_hdr *fwdtsn_hdr;
1602 + struct sctp_fwdtsn_skip *skip;
1603 __u16 len;
1604 __u32 tsn;
1605
1606 @@ -3724,6 +3732,12 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
1607 if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
1608 goto gen_shutdown;
1609
1610 + /* Silently discard the chunk if stream-id is not valid */
1611 + sctp_walk_fwdtsn(skip, chunk) {
1612 + if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
1613 + goto gen_shutdown;
1614 + }
1615 +
1616 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
1617 if (len > sizeof(struct sctp_fwdtsn_hdr))
1618 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
1619 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
1620 index 9b4e0e9..3c0f421 100644
1621 --- a/security/keys/keyctl.c
1622 +++ b/security/keys/keyctl.c
1623 @@ -270,6 +270,7 @@ long keyctl_join_session_keyring(const char __user *_name)
1624
1625 /* join the session */
1626 ret = join_session_keyring(name);
1627 + kfree(name);
1628
1629 error:
1630 return ret;
1631 diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
1632 index 591f62f..8c857d5 100644
1633 --- a/sound/pci/hda/patch_analog.c
1634 +++ b/sound/pci/hda/patch_analog.c
1635 @@ -629,6 +629,36 @@ static struct snd_kcontrol_new ad1986a_laptop_eapd_mixers[] = {
1636 HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw),
1637 HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT),
1638 HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
1639 + HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0, HDA_OUTPUT),
1640 + HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0, HDA_OUTPUT),
1641 + HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
1642 + HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
1643 + HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
1644 + HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
1645 + HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
1646 + {
1647 + .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
1648 + .name = "Capture Source",
1649 + .info = ad198x_mux_enum_info,
1650 + .get = ad198x_mux_enum_get,
1651 + .put = ad198x_mux_enum_put,
1652 + },
1653 + {
1654 + .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
1655 + .name = "External Amplifier",
1656 + .info = ad198x_eapd_info,
1657 + .get = ad198x_eapd_get,
1658 + .put = ad198x_eapd_put,
1659 + .private_value = 0x1b | (1 << 8), /* port-D, inversed */
1660 + },
1661 + { } /* end */
1662 +};
1663 +
1664 +static struct snd_kcontrol_new ad1986a_samsung_mixers[] = {
1665 + HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol),
1666 + HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw),
1667 + HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT),
1668 + HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
1669 HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
1670 HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
1671 HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
1672 @@ -917,6 +947,7 @@ enum {
1673 AD1986A_LAPTOP_EAPD,
1674 AD1986A_LAPTOP_AUTOMUTE,
1675 AD1986A_ULTRA,
1676 + AD1986A_SAMSUNG,
1677 AD1986A_MODELS
1678 };
1679
1680 @@ -927,6 +958,7 @@ static const char *ad1986a_models[AD1986A_MODELS] = {
1681 [AD1986A_LAPTOP_EAPD] = "laptop-eapd",
1682 [AD1986A_LAPTOP_AUTOMUTE] = "laptop-automute",
1683 [AD1986A_ULTRA] = "ultra",
1684 + [AD1986A_SAMSUNG] = "samsung",
1685 };
1686
1687 static struct snd_pci_quirk ad1986a_cfg_tbl[] = {
1688 @@ -949,9 +981,9 @@ static struct snd_pci_quirk ad1986a_cfg_tbl[] = {
1689 SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba", AD1986A_LAPTOP_EAPD),
1690 SND_PCI_QUIRK(0x144d, 0xb03c, "Samsung R55", AD1986A_3STACK),
1691 SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_LAPTOP),
1692 - SND_PCI_QUIRK(0x144d, 0xc023, "Samsung X60", AD1986A_LAPTOP_EAPD),
1693 - SND_PCI_QUIRK(0x144d, 0xc024, "Samsung R65", AD1986A_LAPTOP_EAPD),
1694 - SND_PCI_QUIRK(0x144d, 0xc026, "Samsung X11", AD1986A_LAPTOP_EAPD),
1695 + SND_PCI_QUIRK(0x144d, 0xc023, "Samsung X60", AD1986A_SAMSUNG),
1696 + SND_PCI_QUIRK(0x144d, 0xc024, "Samsung R65", AD1986A_SAMSUNG),
1697 + SND_PCI_QUIRK(0x144d, 0xc026, "Samsung X11", AD1986A_SAMSUNG),
1698 SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_ULTRA),
1699 SND_PCI_QUIRK(0x144d, 0xc504, "Samsung Q35", AD1986A_3STACK),
1700 SND_PCI_QUIRK(0x17aa, 0x1011, "Lenovo M55", AD1986A_LAPTOP),
1701 @@ -1033,6 +1065,17 @@ static int patch_ad1986a(struct hda_codec *codec)
1702 break;
1703 case AD1986A_LAPTOP_EAPD:
1704 spec->mixers[0] = ad1986a_laptop_eapd_mixers;
1705 + spec->num_init_verbs = 2;
1706 + spec->init_verbs[1] = ad1986a_eapd_init_verbs;
1707 + spec->multiout.max_channels = 2;
1708 + spec->multiout.num_dacs = 1;
1709 + spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
1710 + if (!is_jack_available(codec, 0x25))
1711 + spec->multiout.dig_out_nid = 0;
1712 + spec->input_mux = &ad1986a_laptop_eapd_capture_source;
1713 + break;
1714 + case AD1986A_SAMSUNG:
1715 + spec->mixers[0] = ad1986a_samsung_mixers;
1716 spec->num_init_verbs = 3;
1717 spec->init_verbs[1] = ad1986a_eapd_init_verbs;
1718 spec->init_verbs[2] = ad1986a_automic_verbs;
1719 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1720 index a1a3a34..7225f0f 100644
1721 --- a/sound/pci/hda/patch_realtek.c
1722 +++ b/sound/pci/hda/patch_realtek.c
1723 @@ -9882,6 +9882,7 @@ static struct snd_pci_quirk alc262_cfg_tbl[] = {
1724 SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FUJITSU),
1725 SND_PCI_QUIRK(0x144d, 0xc032, "Samsung Q1 Ultra", ALC262_ULTRA),
1726 SND_PCI_QUIRK(0x144d, 0xc039, "Samsung Q1U EL", ALC262_ULTRA),
1727 + SND_PCI_QUIRK(0x144d, 0xc510, "Samsung Q45", ALC262_HIPPO),
1728 SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000 y410", ALC262_LENOVO_3000),
1729 SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_BENQ_ED8),
1730 SND_PCI_QUIRK(0x17ff, 0x058d, "Benq T31-16", ALC262_BENQ_T31),