Contents of /trunk/kernel-magellan/patches-3.9/0104-3.9.5-all-fixes.patch
Parent Directory | Revision Log
Revision 2197 -
(show annotations)
(download)
Mon Jun 10 18:41:52 2013 UTC (11 years, 3 months ago) by niro
File size: 176270 byte(s)
-linux-3.9.5
1 | diff --git a/Documentation/powerpc/transactional_memory.txt b/Documentation/powerpc/transactional_memory.txt |
2 | index c907be4..dc23e58 100644 |
3 | --- a/Documentation/powerpc/transactional_memory.txt |
4 | +++ b/Documentation/powerpc/transactional_memory.txt |
5 | @@ -147,6 +147,25 @@ Example signal handler: |
6 | fix_the_problem(ucp->dar); |
7 | } |
8 | |
9 | +When in an active transaction that takes a signal, we need to be careful with |
10 | +the stack. It's possible that the stack has moved back up after the tbegin. |
11 | +The obvious case here is when the tbegin is called inside a function that |
12 | +returns before a tend. In this case, the stack is part of the checkpointed |
13 | +transactional memory state. If we write over this non transactionally or in |
14 | +suspend, we are in trouble because if we get a tm abort, the program counter and |
15 | +stack pointer will be back at the tbegin but our in memory stack won't be valid |
16 | +anymore. |
17 | + |
18 | +To avoid this, when taking a signal in an active transaction, we need to use |
19 | +the stack pointer from the checkpointed state, rather than the speculated |
20 | +state. This ensures that the signal context (written tm suspended) will be |
21 | +written below the stack required for the rollback. The transaction is aborted |
22 | +because of the treclaim, so any memory written between the tbegin and the |
23 | +signal will be rolled back anyway. |
24 | + |
25 | +For signals taken in non-TM or suspended mode, we use the |
26 | +normal/non-checkpointed stack pointer. |
27 | + |
28 | |
29 | Failure cause codes used by kernel |
30 | ================================== |
31 | @@ -155,14 +174,18 @@ These are defined in <asm/reg.h>, and distinguish different reasons why the |
32 | kernel aborted a transaction: |
33 | |
34 | TM_CAUSE_RESCHED Thread was rescheduled. |
35 | + TM_CAUSE_TLBI Software TLB invalid. |
36 | TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap. |
37 | TM_CAUSE_SYSCALL Currently unused; future syscalls that must abort |
38 | transactions for consistency will use this. |
39 | TM_CAUSE_SIGNAL Signal delivered. |
40 | TM_CAUSE_MISC Currently unused. |
41 | + TM_CAUSE_ALIGNMENT Alignment fault. |
42 | + TM_CAUSE_EMULATE Emulation that touched memory. |
43 | |
44 | -These can be checked by the user program's abort handler as TEXASR[0:7]. |
45 | - |
46 | +These can be checked by the user program's abort handler as TEXASR[0:7]. If |
47 | +bit 7 is set, it indicates that the error is considered persistent. For example |
48 | +a TM_CAUSE_ALIGNMENT will be persistent while a TM_CAUSE_RESCHED will not. |
49 | |
50 | GDB |
51 | === |
52 | diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h |
53 | index b7e3668..8ca472c 100644 |
54 | --- a/arch/arc/include/asm/pgtable.h |
55 | +++ b/arch/arc/include/asm/pgtable.h |
56 | @@ -57,9 +57,9 @@ |
57 | |
58 | #define _PAGE_ACCESSED (1<<1) /* Page is accessed (S) */ |
59 | #define _PAGE_CACHEABLE (1<<2) /* Page is cached (H) */ |
60 | -#define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */ |
61 | -#define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */ |
62 | -#define _PAGE_READ (1<<5) /* Page has user read perm (H) */ |
63 | +#define _PAGE_U_EXECUTE (1<<3) /* Page has user execute perm (H) */ |
64 | +#define _PAGE_U_WRITE (1<<4) /* Page has user write perm (H) */ |
65 | +#define _PAGE_U_READ (1<<5) /* Page has user read perm (H) */ |
66 | #define _PAGE_K_EXECUTE (1<<6) /* Page has kernel execute perm (H) */ |
67 | #define _PAGE_K_WRITE (1<<7) /* Page has kernel write perm (H) */ |
68 | #define _PAGE_K_READ (1<<8) /* Page has kernel perm (H) */ |
69 | @@ -72,9 +72,9 @@ |
70 | |
71 | /* PD1 */ |
72 | #define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */ |
73 | -#define _PAGE_EXECUTE (1<<1) /* Page has user execute perm (H) */ |
74 | -#define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */ |
75 | -#define _PAGE_READ (1<<3) /* Page has user read perm (H) */ |
76 | +#define _PAGE_U_EXECUTE (1<<1) /* Page has user execute perm (H) */ |
77 | +#define _PAGE_U_WRITE (1<<2) /* Page has user write perm (H) */ |
78 | +#define _PAGE_U_READ (1<<3) /* Page has user read perm (H) */ |
79 | #define _PAGE_K_EXECUTE (1<<4) /* Page has kernel execute perm (H) */ |
80 | #define _PAGE_K_WRITE (1<<5) /* Page has kernel write perm (H) */ |
81 | #define _PAGE_K_READ (1<<6) /* Page has kernel perm (H) */ |
82 | @@ -93,7 +93,8 @@ |
83 | #endif |
84 | |
85 | /* Kernel allowed all permissions for all pages */ |
86 | -#define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ) |
87 | +#define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ | \ |
88 | + _PAGE_GLOBAL | _PAGE_PRESENT) |
89 | |
90 | #ifdef CONFIG_ARC_CACHE_PAGES |
91 | #define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE |
92 | @@ -106,7 +107,11 @@ |
93 | * -by default cached, unless config otherwise |
94 | * -present in memory |
95 | */ |
96 | -#define ___DEF (_PAGE_PRESENT | _K_PAGE_PERMS | _PAGE_DEF_CACHEABLE) |
97 | +#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE) |
98 | + |
99 | +#define _PAGE_READ (_PAGE_U_READ | _PAGE_K_READ) |
100 | +#define _PAGE_WRITE (_PAGE_U_WRITE | _PAGE_K_WRITE) |
101 | +#define _PAGE_EXECUTE (_PAGE_U_EXECUTE | _PAGE_K_EXECUTE) |
102 | |
103 | /* Set of bits not changed in pte_modify */ |
104 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED) |
105 | @@ -125,11 +130,10 @@ |
106 | * kernel vaddr space - visible in all addr spaces, but kernel mode only |
107 | * Thus Global, all-kernel-access, no-user-access, cached |
108 | */ |
109 | -#define PAGE_KERNEL __pgprot(___DEF | _PAGE_GLOBAL) |
110 | +#define PAGE_KERNEL __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE) |
111 | |
112 | /* ioremap */ |
113 | -#define PAGE_KERNEL_NO_CACHE __pgprot(_PAGE_PRESENT | _K_PAGE_PERMS | \ |
114 | - _PAGE_GLOBAL) |
115 | +#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS) |
116 | |
117 | /************************************************************************** |
118 | * Mapping of vm_flags (Generic VM) to PTE flags (arch specific) |
119 | diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h |
120 | index 3eb2ce0..5e0ee30 100644 |
121 | --- a/arch/arc/include/asm/tlb.h |
122 | +++ b/arch/arc/include/asm/tlb.h |
123 | @@ -16,7 +16,7 @@ |
124 | /* Masks for actual TLB "PD"s */ |
125 | #define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT) |
126 | #define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \ |
127 | - _PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \ |
128 | + _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \ |
129 | _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ) |
130 | |
131 | #ifndef __ASSEMBLY__ |
132 | diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S |
133 | index 9df765d..3357d26 100644 |
134 | --- a/arch/arc/mm/tlbex.S |
135 | +++ b/arch/arc/mm/tlbex.S |
136 | @@ -277,7 +277,7 @@ ARC_ENTRY EV_TLBMissI |
137 | ;---------------------------------------------------------------- |
138 | ; VERIFY_PTE: Check if PTE permissions approp for executing code |
139 | cmp_s r2, VMALLOC_START |
140 | - mov.lo r2, (_PAGE_PRESENT | _PAGE_READ | _PAGE_EXECUTE) |
141 | + mov.lo r2, (_PAGE_PRESENT | _PAGE_U_READ | _PAGE_U_EXECUTE) |
142 | mov.hs r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE) |
143 | |
144 | and r3, r0, r2 ; Mask out NON Flag bits from PTE |
145 | @@ -320,9 +320,9 @@ ARC_ENTRY EV_TLBMissD |
146 | mov_s r2, 0 |
147 | lr r3, [ecr] |
148 | btst_s r3, ECR_C_BIT_DTLB_LD_MISS ; Read Access |
149 | - or.nz r2, r2, _PAGE_READ ; chk for Read flag in PTE |
150 | + or.nz r2, r2, _PAGE_U_READ ; chk for Read flag in PTE |
151 | btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; Write Access |
152 | - or.nz r2, r2, _PAGE_WRITE ; chk for Write flag in PTE |
153 | + or.nz r2, r2, _PAGE_U_WRITE ; chk for Write flag in PTE |
154 | ; Above laddering takes care of XCHG access |
155 | ; which is both Read and Write |
156 | |
157 | diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi |
158 | index 02b70a4..755a61e 100644 |
159 | --- a/arch/arm/boot/dts/at91sam9260.dtsi |
160 | +++ b/arch/arm/boot/dts/at91sam9260.dtsi |
161 | @@ -264,7 +264,7 @@ |
162 | atmel,pins = |
163 | <0 10 0x2 0x0 /* PA10 periph B */ |
164 | 0 11 0x2 0x0 /* PA11 periph B */ |
165 | - 0 24 0x2 0x0 /* PA24 periph B */ |
166 | + 0 22 0x2 0x0 /* PA22 periph B */ |
167 | 0 25 0x2 0x0 /* PA25 periph B */ |
168 | 0 26 0x2 0x0 /* PA26 periph B */ |
169 | 0 27 0x2 0x0 /* PA27 periph B */ |
170 | diff --git a/arch/arm/boot/dts/at91sam9x25ek.dts b/arch/arm/boot/dts/at91sam9x25ek.dts |
171 | index af907ea..80015b0 100644 |
172 | --- a/arch/arm/boot/dts/at91sam9x25ek.dts |
173 | +++ b/arch/arm/boot/dts/at91sam9x25ek.dts |
174 | @@ -11,6 +11,6 @@ |
175 | /include/ "at91sam9x5ek.dtsi" |
176 | |
177 | / { |
178 | - model = "Atmel AT91SAM9G25-EK"; |
179 | + model = "Atmel AT91SAM9X25-EK"; |
180 | compatible = "atmel,at91sam9x25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9"; |
181 | }; |
182 | diff --git a/arch/arm/crypto/sha1-armv4-large.S b/arch/arm/crypto/sha1-armv4-large.S |
183 | index 92c6eed..99207c4 100644 |
184 | --- a/arch/arm/crypto/sha1-armv4-large.S |
185 | +++ b/arch/arm/crypto/sha1-armv4-large.S |
186 | @@ -195,6 +195,7 @@ ENTRY(sha1_block_data_order) |
187 | add r3,r3,r10 @ E+=F_00_19(B,C,D) |
188 | cmp r14,sp |
189 | bne .L_00_15 @ [((11+4)*5+2)*3] |
190 | + sub sp,sp,#25*4 |
191 | #if __ARM_ARCH__<7 |
192 | ldrb r10,[r1,#2] |
193 | ldrb r9,[r1,#3] |
194 | @@ -290,7 +291,6 @@ ENTRY(sha1_block_data_order) |
195 | add r3,r3,r10 @ E+=F_00_19(B,C,D) |
196 | |
197 | ldr r8,.LK_20_39 @ [+15+16*4] |
198 | - sub sp,sp,#25*4 |
199 | cmn sp,#0 @ [+3], clear carry to denote 20_39 |
200 | .L_20_39_or_60_79: |
201 | ldr r9,[r14,#15*4] |
202 | diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c |
203 | index 2acdff4..180b302 100644 |
204 | --- a/arch/arm/mach-at91/at91rm9200_time.c |
205 | +++ b/arch/arm/mach-at91/at91rm9200_time.c |
206 | @@ -174,6 +174,7 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev) |
207 | static struct clock_event_device clkevt = { |
208 | .name = "at91_tick", |
209 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, |
210 | + .shift = 32, |
211 | .rating = 150, |
212 | .set_next_event = clkevt32k_next_event, |
213 | .set_mode = clkevt32k_mode, |
214 | @@ -264,9 +265,11 @@ void __init at91rm9200_timer_init(void) |
215 | at91_st_write(AT91_ST_RTMR, 1); |
216 | |
217 | /* Setup timer clockevent, with minimum of two ticks (important!!) */ |
218 | + clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift); |
219 | + clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt); |
220 | + clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1; |
221 | clkevt.cpumask = cpumask_of(0); |
222 | - clockevents_config_and_register(&clkevt, AT91_SLOW_CLOCK, |
223 | - 2, AT91_ST_ALMV); |
224 | + clockevents_register_device(&clkevt); |
225 | |
226 | /* register clocksource */ |
227 | clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK); |
228 | diff --git a/arch/arm/mach-kirkwood/ts219-setup.c b/arch/arm/mach-kirkwood/ts219-setup.c |
229 | index 283abff..e1267d6 100644 |
230 | --- a/arch/arm/mach-kirkwood/ts219-setup.c |
231 | +++ b/arch/arm/mach-kirkwood/ts219-setup.c |
232 | @@ -124,7 +124,7 @@ static void __init qnap_ts219_init(void) |
233 | static int __init ts219_pci_init(void) |
234 | { |
235 | if (machine_is_ts219()) |
236 | - kirkwood_pcie_init(KW_PCIE0); |
237 | + kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0); |
238 | |
239 | return 0; |
240 | } |
241 | diff --git a/arch/arm/mach-omap2/cclock33xx_data.c b/arch/arm/mach-omap2/cclock33xx_data.c |
242 | index 476b820..8327721 100644 |
243 | --- a/arch/arm/mach-omap2/cclock33xx_data.c |
244 | +++ b/arch/arm/mach-omap2/cclock33xx_data.c |
245 | @@ -446,9 +446,29 @@ DEFINE_CLK_GATE(cefuse_fck, "sys_clkin_ck", &sys_clkin_ck, 0x0, |
246 | */ |
247 | DEFINE_CLK_FIXED_FACTOR(clkdiv32k_ck, "clk_24mhz", &clk_24mhz, 0x0, 1, 732); |
248 | |
249 | -DEFINE_CLK_GATE(clkdiv32k_ick, "clkdiv32k_ck", &clkdiv32k_ck, 0x0, |
250 | - AM33XX_CM_PER_CLKDIV32K_CLKCTRL, AM33XX_MODULEMODE_SWCTRL_SHIFT, |
251 | - 0x0, NULL); |
252 | +static struct clk clkdiv32k_ick; |
253 | + |
254 | +static const char *clkdiv32k_ick_parent_names[] = { |
255 | + "clkdiv32k_ck", |
256 | +}; |
257 | + |
258 | +static const struct clk_ops clkdiv32k_ick_ops = { |
259 | + .enable = &omap2_dflt_clk_enable, |
260 | + .disable = &omap2_dflt_clk_disable, |
261 | + .is_enabled = &omap2_dflt_clk_is_enabled, |
262 | + .init = &omap2_init_clk_clkdm, |
263 | +}; |
264 | + |
265 | +static struct clk_hw_omap clkdiv32k_ick_hw = { |
266 | + .hw = { |
267 | + .clk = &clkdiv32k_ick, |
268 | + }, |
269 | + .enable_reg = AM33XX_CM_PER_CLKDIV32K_CLKCTRL, |
270 | + .enable_bit = AM33XX_MODULEMODE_SWCTRL_SHIFT, |
271 | + .clkdm_name = "clk_24mhz_clkdm", |
272 | +}; |
273 | + |
274 | +DEFINE_STRUCT_CLK(clkdiv32k_ick, clkdiv32k_ick_parent_names, clkdiv32k_ick_ops); |
275 | |
276 | /* "usbotg_fck" is an additional clock and not really a modulemode */ |
277 | DEFINE_CLK_GATE(usbotg_fck, "dpll_per_ck", &dpll_per_ck, 0x0, |
278 | diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c |
279 | index a202a47..3a750de 100644 |
280 | --- a/arch/arm/mach-omap2/omap_hwmod.c |
281 | +++ b/arch/arm/mach-omap2/omap_hwmod.c |
282 | @@ -2066,7 +2066,7 @@ static int _omap4_get_context_lost(struct omap_hwmod *oh) |
283 | * do so is present in the hwmod data, then call it and pass along the |
284 | * return value; otherwise, return 0. |
285 | */ |
286 | -static int __init _enable_preprogram(struct omap_hwmod *oh) |
287 | +static int _enable_preprogram(struct omap_hwmod *oh) |
288 | { |
289 | if (!oh->class->enable_preprogram) |
290 | return 0; |
291 | diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c |
292 | index 2d4b641..7bb9619 100644 |
293 | --- a/arch/arm/plat-orion/common.c |
294 | +++ b/arch/arm/plat-orion/common.c |
295 | @@ -373,7 +373,7 @@ static struct resource orion_ge10_shared_resources[] = { |
296 | |
297 | static struct platform_device orion_ge10_shared = { |
298 | .name = MV643XX_ETH_SHARED_NAME, |
299 | - .id = 1, |
300 | + .id = 2, |
301 | .dev = { |
302 | .platform_data = &orion_ge10_shared_data, |
303 | }, |
304 | @@ -388,8 +388,8 @@ static struct resource orion_ge10_resources[] = { |
305 | |
306 | static struct platform_device orion_ge10 = { |
307 | .name = MV643XX_ETH_NAME, |
308 | - .id = 1, |
309 | - .num_resources = 2, |
310 | + .id = 2, |
311 | + .num_resources = 1, |
312 | .resource = orion_ge10_resources, |
313 | .dev = { |
314 | .coherent_dma_mask = DMA_BIT_MASK(32), |
315 | @@ -425,7 +425,7 @@ static struct resource orion_ge11_shared_resources[] = { |
316 | |
317 | static struct platform_device orion_ge11_shared = { |
318 | .name = MV643XX_ETH_SHARED_NAME, |
319 | - .id = 1, |
320 | + .id = 3, |
321 | .dev = { |
322 | .platform_data = &orion_ge11_shared_data, |
323 | }, |
324 | @@ -440,8 +440,8 @@ static struct resource orion_ge11_resources[] = { |
325 | |
326 | static struct platform_device orion_ge11 = { |
327 | .name = MV643XX_ETH_NAME, |
328 | - .id = 1, |
329 | - .num_resources = 2, |
330 | + .id = 3, |
331 | + .num_resources = 1, |
332 | .resource = orion_ge11_resources, |
333 | .dev = { |
334 | .coherent_dma_mask = DMA_BIT_MASK(32), |
335 | diff --git a/arch/arm/plat-samsung/setup-mipiphy.c b/arch/arm/plat-samsung/setup-mipiphy.c |
336 | index 1474593..66df315 100644 |
337 | --- a/arch/arm/plat-samsung/setup-mipiphy.c |
338 | +++ b/arch/arm/plat-samsung/setup-mipiphy.c |
339 | @@ -8,6 +8,7 @@ |
340 | * published by the Free Software Foundation. |
341 | */ |
342 | |
343 | +#include <linux/export.h> |
344 | #include <linux/kernel.h> |
345 | #include <linux/platform_device.h> |
346 | #include <linux/io.h> |
347 | @@ -50,8 +51,10 @@ int s5p_csis_phy_enable(int id, bool on) |
348 | { |
349 | return __s5p_mipi_phy_control(id, on, S5P_MIPI_DPHY_SRESETN); |
350 | } |
351 | +EXPORT_SYMBOL(s5p_csis_phy_enable); |
352 | |
353 | int s5p_dsim_phy_enable(struct platform_device *pdev, bool on) |
354 | { |
355 | return __s5p_mipi_phy_control(pdev->id, on, S5P_MIPI_DPHY_MRESETN); |
356 | } |
357 | +EXPORT_SYMBOL(s5p_dsim_phy_enable); |
358 | diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c |
359 | index b3c5f62..671136e 100644 |
360 | --- a/arch/arm64/kernel/traps.c |
361 | +++ b/arch/arm64/kernel/traps.c |
362 | @@ -317,14 +317,20 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs) |
363 | */ |
364 | asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) |
365 | { |
366 | + siginfo_t info; |
367 | + void __user *pc = (void __user *)instruction_pointer(regs); |
368 | console_verbose(); |
369 | |
370 | pr_crit("Bad mode in %s handler detected, code 0x%08x\n", |
371 | handler[reason], esr); |
372 | + __show_regs(regs); |
373 | + |
374 | + info.si_signo = SIGILL; |
375 | + info.si_errno = 0; |
376 | + info.si_code = ILL_ILLOPC; |
377 | + info.si_addr = pc; |
378 | |
379 | - die("Oops - bad mode", regs, 0); |
380 | - local_irq_disable(); |
381 | - panic("bad mode"); |
382 | + arm64_notify_die("Oops - bad mode", regs, &info, 0); |
383 | } |
384 | |
385 | void __pte_error(const char *file, int line, unsigned long val) |
386 | diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c |
387 | index 596f730..2c94129 100644 |
388 | --- a/arch/avr32/kernel/module.c |
389 | +++ b/arch/avr32/kernel/module.c |
390 | @@ -264,7 +264,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, |
391 | break; |
392 | case R_AVR32_GOT18SW: |
393 | if ((relocation & 0xfffe0003) != 0 |
394 | - && (relocation & 0xfffc0003) != 0xffff0000) |
395 | + && (relocation & 0xfffc0000) != 0xfffc0000) |
396 | return reloc_overflow(module, "R_AVR32_GOT18SW", |
397 | relocation); |
398 | relocation >>= 2; |
399 | diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S |
400 | index d197e7f..ac85f16 100644 |
401 | --- a/arch/m68k/kernel/head.S |
402 | +++ b/arch/m68k/kernel/head.S |
403 | @@ -2752,11 +2752,9 @@ func_return get_new_page |
404 | #ifdef CONFIG_MAC |
405 | |
406 | L(scc_initable_mac): |
407 | - .byte 9,12 /* Reset */ |
408 | .byte 4,0x44 /* x16, 1 stopbit, no parity */ |
409 | .byte 3,0xc0 /* receiver: 8 bpc */ |
410 | .byte 5,0xe2 /* transmitter: 8 bpc, assert dtr/rts */ |
411 | - .byte 9,0 /* no interrupts */ |
412 | .byte 10,0 /* NRZ */ |
413 | .byte 11,0x50 /* use baud rate generator */ |
414 | .byte 12,1,13,0 /* 38400 baud */ |
415 | @@ -2899,6 +2897,7 @@ func_start serial_init,%d0/%d1/%a0/%a1 |
416 | is_not_mac(L(serial_init_not_mac)) |
417 | |
418 | #ifdef SERIAL_DEBUG |
419 | + |
420 | /* You may define either or both of these. */ |
421 | #define MAC_USE_SCC_A /* Modem port */ |
422 | #define MAC_USE_SCC_B /* Printer port */ |
423 | @@ -2908,9 +2907,21 @@ func_start serial_init,%d0/%d1/%a0/%a1 |
424 | #define mac_scc_cha_b_data_offset 0x4 |
425 | #define mac_scc_cha_a_data_offset 0x6 |
426 | |
427 | +#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) |
428 | + movel %pc@(L(mac_sccbase)),%a0 |
429 | + /* Reset SCC device */ |
430 | + moveb #9,%a0@(mac_scc_cha_a_ctrl_offset) |
431 | + moveb #0xc0,%a0@(mac_scc_cha_a_ctrl_offset) |
432 | + /* Wait for 5 PCLK cycles, which is about 68 CPU cycles */ |
433 | + /* 5 / 3.6864 MHz = approx. 1.36 us = 68 / 50 MHz */ |
434 | + movel #35,%d0 |
435 | +5: |
436 | + subq #1,%d0 |
437 | + jne 5b |
438 | +#endif |
439 | + |
440 | #ifdef MAC_USE_SCC_A |
441 | /* Initialize channel A */ |
442 | - movel %pc@(L(mac_sccbase)),%a0 |
443 | lea %pc@(L(scc_initable_mac)),%a1 |
444 | 5: moveb %a1@+,%d0 |
445 | jmi 6f |
446 | @@ -2922,9 +2933,6 @@ func_start serial_init,%d0/%d1/%a0/%a1 |
447 | |
448 | #ifdef MAC_USE_SCC_B |
449 | /* Initialize channel B */ |
450 | -#ifndef MAC_USE_SCC_A /* Load mac_sccbase only if needed */ |
451 | - movel %pc@(L(mac_sccbase)),%a0 |
452 | -#endif /* MAC_USE_SCC_A */ |
453 | lea %pc@(L(scc_initable_mac)),%a1 |
454 | 7: moveb %a1@+,%d0 |
455 | jmi 8f |
456 | @@ -2933,6 +2941,7 @@ func_start serial_init,%d0/%d1/%a0/%a1 |
457 | jra 7b |
458 | 8: |
459 | #endif /* MAC_USE_SCC_B */ |
460 | + |
461 | #endif /* SERIAL_DEBUG */ |
462 | |
463 | jra L(serial_init_done) |
464 | @@ -3006,17 +3015,17 @@ func_start serial_putc,%d0/%d1/%a0/%a1 |
465 | |
466 | #ifdef SERIAL_DEBUG |
467 | |
468 | -#ifdef MAC_USE_SCC_A |
469 | +#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) |
470 | movel %pc@(L(mac_sccbase)),%a1 |
471 | +#endif |
472 | + |
473 | +#ifdef MAC_USE_SCC_A |
474 | 3: btst #2,%a1@(mac_scc_cha_a_ctrl_offset) |
475 | jeq 3b |
476 | moveb %d0,%a1@(mac_scc_cha_a_data_offset) |
477 | #endif /* MAC_USE_SCC_A */ |
478 | |
479 | #ifdef MAC_USE_SCC_B |
480 | -#ifndef MAC_USE_SCC_A /* Load mac_sccbase only if needed */ |
481 | - movel %pc@(L(mac_sccbase)),%a1 |
482 | -#endif /* MAC_USE_SCC_A */ |
483 | 4: btst #2,%a1@(mac_scc_cha_b_ctrl_offset) |
484 | jeq 4b |
485 | moveb %d0,%a1@(mac_scc_cha_b_data_offset) |
486 | diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h |
487 | index 7ff9eaa..a7b42ca 100644 |
488 | --- a/arch/powerpc/include/asm/processor.h |
489 | +++ b/arch/powerpc/include/asm/processor.h |
490 | @@ -407,21 +407,16 @@ static inline void prefetchw(const void *x) |
491 | #endif |
492 | |
493 | #ifdef CONFIG_PPC64 |
494 | -static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32) |
495 | +static inline unsigned long get_clean_sp(unsigned long sp, int is_32) |
496 | { |
497 | - unsigned long sp; |
498 | - |
499 | if (is_32) |
500 | - sp = regs->gpr[1] & 0x0ffffffffUL; |
501 | - else |
502 | - sp = regs->gpr[1]; |
503 | - |
504 | + return sp & 0x0ffffffffUL; |
505 | return sp; |
506 | } |
507 | #else |
508 | -static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32) |
509 | +static inline unsigned long get_clean_sp(unsigned long sp, int is_32) |
510 | { |
511 | - return regs->gpr[1]; |
512 | + return sp; |
513 | } |
514 | #endif |
515 | |
516 | diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h |
517 | index c9c67fc..3b097a8 100644 |
518 | --- a/arch/powerpc/include/asm/reg.h |
519 | +++ b/arch/powerpc/include/asm/reg.h |
520 | @@ -111,17 +111,6 @@ |
521 | #define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T) |
522 | #define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S) |
523 | |
524 | -/* Reason codes describing kernel causes for transaction aborts. By |
525 | - convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if |
526 | - the failure is persistent. |
527 | -*/ |
528 | -#define TM_CAUSE_RESCHED 0xfe |
529 | -#define TM_CAUSE_TLBI 0xfc |
530 | -#define TM_CAUSE_FAC_UNAV 0xfa |
531 | -#define TM_CAUSE_SYSCALL 0xf9 /* Persistent */ |
532 | -#define TM_CAUSE_MISC 0xf6 |
533 | -#define TM_CAUSE_SIGNAL 0xf4 |
534 | - |
535 | #if defined(CONFIG_PPC_BOOK3S_64) |
536 | #define MSR_64BIT MSR_SF |
537 | |
538 | diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h |
539 | index fbe66c4..9322c28 100644 |
540 | --- a/arch/powerpc/include/asm/signal.h |
541 | +++ b/arch/powerpc/include/asm/signal.h |
542 | @@ -3,5 +3,8 @@ |
543 | |
544 | #define __ARCH_HAS_SA_RESTORER |
545 | #include <uapi/asm/signal.h> |
546 | +#include <uapi/asm/ptrace.h> |
547 | + |
548 | +extern unsigned long get_tm_stackpointer(struct pt_regs *regs); |
549 | |
550 | #endif /* _ASM_POWERPC_SIGNAL_H */ |
551 | diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h |
552 | index 4b4449a..9dfbc34 100644 |
553 | --- a/arch/powerpc/include/asm/tm.h |
554 | +++ b/arch/powerpc/include/asm/tm.h |
555 | @@ -5,6 +5,8 @@ |
556 | * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation. |
557 | */ |
558 | |
559 | +#include <uapi/asm/tm.h> |
560 | + |
561 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
562 | extern void do_load_up_transact_fpu(struct thread_struct *thread); |
563 | extern void do_load_up_transact_altivec(struct thread_struct *thread); |
564 | diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild |
565 | index f7bca63..5182c86 100644 |
566 | --- a/arch/powerpc/include/uapi/asm/Kbuild |
567 | +++ b/arch/powerpc/include/uapi/asm/Kbuild |
568 | @@ -40,6 +40,7 @@ header-y += statfs.h |
569 | header-y += swab.h |
570 | header-y += termbits.h |
571 | header-y += termios.h |
572 | +header-y += tm.h |
573 | header-y += types.h |
574 | header-y += ucontext.h |
575 | header-y += unistd.h |
576 | diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h |
577 | new file mode 100644 |
578 | index 0000000..85059a0 |
579 | --- /dev/null |
580 | +++ b/arch/powerpc/include/uapi/asm/tm.h |
581 | @@ -0,0 +1,18 @@ |
582 | +#ifndef _ASM_POWERPC_TM_H |
583 | +#define _ASM_POWERPC_TM_H |
584 | + |
585 | +/* Reason codes describing kernel causes for transaction aborts. By |
586 | + * convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if |
587 | + * the failure is persistent. PAPR saves 0xff-0xe0 for the hypervisor. |
588 | + */ |
589 | +#define TM_CAUSE_PERSISTENT 0x01 |
590 | +#define TM_CAUSE_RESCHED 0xde |
591 | +#define TM_CAUSE_TLBI 0xdc |
592 | +#define TM_CAUSE_FAC_UNAV 0xda |
593 | +#define TM_CAUSE_SYSCALL 0xd8 /* future use */ |
594 | +#define TM_CAUSE_MISC 0xd6 /* future use */ |
595 | +#define TM_CAUSE_SIGNAL 0xd4 |
596 | +#define TM_CAUSE_ALIGNMENT 0xd2 |
597 | +#define TM_CAUSE_EMULATE 0xd0 |
598 | + |
599 | +#endif |
600 | diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S |
601 | index e514de5..4498467 100644 |
602 | --- a/arch/powerpc/kernel/entry_32.S |
603 | +++ b/arch/powerpc/kernel/entry_32.S |
604 | @@ -851,7 +851,7 @@ resume_kernel: |
605 | /* check current_thread_info, _TIF_EMULATE_STACK_STORE */ |
606 | CURRENT_THREAD_INFO(r9, r1) |
607 | lwz r8,TI_FLAGS(r9) |
608 | - andis. r8,r8,_TIF_EMULATE_STACK_STORE@h |
609 | + andis. r0,r8,_TIF_EMULATE_STACK_STORE@h |
610 | beq+ 1f |
611 | |
612 | addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */ |
613 | diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c |
614 | index cf12eae..78760b8 100644 |
615 | --- a/arch/powerpc/kernel/signal.c |
616 | +++ b/arch/powerpc/kernel/signal.c |
617 | @@ -17,6 +17,7 @@ |
618 | #include <asm/uaccess.h> |
619 | #include <asm/unistd.h> |
620 | #include <asm/debug.h> |
621 | +#include <asm/tm.h> |
622 | |
623 | #include "signal.h" |
624 | |
625 | @@ -29,13 +30,13 @@ int show_unhandled_signals = 0; |
626 | /* |
627 | * Allocate space for the signal frame |
628 | */ |
629 | -void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, |
630 | +void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp, |
631 | size_t frame_size, int is_32) |
632 | { |
633 | unsigned long oldsp, newsp; |
634 | |
635 | /* Default to using normal stack */ |
636 | - oldsp = get_clean_sp(regs, is_32); |
637 | + oldsp = get_clean_sp(sp, is_32); |
638 | |
639 | /* Check for alt stack */ |
640 | if ((ka->sa.sa_flags & SA_ONSTACK) && |
641 | @@ -170,3 +171,38 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) |
642 | tracehook_notify_resume(regs); |
643 | } |
644 | } |
645 | + |
646 | +unsigned long get_tm_stackpointer(struct pt_regs *regs) |
647 | +{ |
648 | + /* When in an active transaction that takes a signal, we need to be |
649 | + * careful with the stack. It's possible that the stack has moved back |
650 | + * up after the tbegin. The obvious case here is when the tbegin is |
651 | + * called inside a function that returns before a tend. In this case, |
652 | + * the stack is part of the checkpointed transactional memory state. |
653 | + * If we write over this non transactionally or in suspend, we are in |
654 | + * trouble because if we get a tm abort, the program counter and stack |
655 | + * pointer will be back at the tbegin but our in memory stack won't be |
656 | + * valid anymore. |
657 | + * |
658 | + * To avoid this, when taking a signal in an active transaction, we |
659 | + * need to use the stack pointer from the checkpointed state, rather |
660 | + * than the speculated state. This ensures that the signal context |
661 | + * (written tm suspended) will be written below the stack required for |
662 | + * the rollback. The transaction is aborted because of the treclaim, |
663 | + * so any memory written between the tbegin and the signal will be |
664 | + * rolled back anyway. |
665 | + * |
666 | + * For signals taken in non-TM or suspended mode, we use the |
667 | + * normal/non-checkpointed stack pointer. |
668 | + */ |
669 | + |
670 | +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
671 | + if (MSR_TM_ACTIVE(regs->msr)) { |
672 | + tm_enable(); |
673 | + tm_reclaim(¤t->thread, regs->msr, TM_CAUSE_SIGNAL); |
674 | + if (MSR_TM_TRANSACTIONAL(regs->msr)) |
675 | + return current->thread.ckpt_regs.gpr[1]; |
676 | + } |
677 | +#endif |
678 | + return regs->gpr[1]; |
679 | +} |
680 | diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h |
681 | index ec84c90..c69b9ae 100644 |
682 | --- a/arch/powerpc/kernel/signal.h |
683 | +++ b/arch/powerpc/kernel/signal.h |
684 | @@ -12,7 +12,7 @@ |
685 | |
686 | extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags); |
687 | |
688 | -extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, |
689 | +extern void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp, |
690 | size_t frame_size, int is_32); |
691 | |
692 | extern int handle_signal32(unsigned long sig, struct k_sigaction *ka, |
693 | diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c |
694 | index 95068bf..201385c 100644 |
695 | --- a/arch/powerpc/kernel/signal_32.c |
696 | +++ b/arch/powerpc/kernel/signal_32.c |
697 | @@ -503,12 +503,6 @@ static int save_tm_user_regs(struct pt_regs *regs, |
698 | { |
699 | unsigned long msr = regs->msr; |
700 | |
701 | - /* tm_reclaim rolls back all reg states, updating thread.ckpt_regs, |
702 | - * thread.transact_fpr[], thread.transact_vr[], etc. |
703 | - */ |
704 | - tm_enable(); |
705 | - tm_reclaim(¤t->thread, msr, TM_CAUSE_SIGNAL); |
706 | - |
707 | /* Make sure floating point registers are stored in regs */ |
708 | flush_fp_to_thread(current); |
709 | |
710 | @@ -965,7 +959,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, |
711 | |
712 | /* Set up Signal Frame */ |
713 | /* Put a Real Time Context onto stack */ |
714 | - rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1); |
715 | + rt_sf = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*rt_sf), 1); |
716 | addr = rt_sf; |
717 | if (unlikely(rt_sf == NULL)) |
718 | goto badframe; |
719 | @@ -1403,7 +1397,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, |
720 | unsigned long tramp; |
721 | |
722 | /* Set up Signal Frame */ |
723 | - frame = get_sigframe(ka, regs, sizeof(*frame), 1); |
724 | + frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 1); |
725 | if (unlikely(frame == NULL)) |
726 | goto badframe; |
727 | sc = (struct sigcontext __user *) &frame->sctx; |
728 | diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c |
729 | index c179428..3459473 100644 |
730 | --- a/arch/powerpc/kernel/signal_64.c |
731 | +++ b/arch/powerpc/kernel/signal_64.c |
732 | @@ -154,11 +154,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, |
733 | * As above, but Transactional Memory is in use, so deliver sigcontexts |
734 | * containing checkpointed and transactional register states. |
735 | * |
736 | - * To do this, we treclaim to gather both sets of registers and set up the |
737 | - * 'normal' sigcontext registers with rolled-back register values such that a |
738 | - * simple signal handler sees a correct checkpointed register state. |
739 | - * If interested, a TM-aware sighandler can examine the transactional registers |
740 | - * in the 2nd sigcontext to determine the real origin of the signal. |
741 | + * To do this, we treclaim (done before entering here) to gather both sets of |
742 | + * registers and set up the 'normal' sigcontext registers with rolled-back |
743 | + * register values such that a simple signal handler sees a correct |
744 | + * checkpointed register state. If interested, a TM-aware sighandler can |
745 | + * examine the transactional registers in the 2nd sigcontext to determine the |
746 | + * real origin of the signal. |
747 | */ |
748 | static long setup_tm_sigcontexts(struct sigcontext __user *sc, |
749 | struct sigcontext __user *tm_sc, |
750 | @@ -184,16 +185,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc, |
751 | |
752 | BUG_ON(!MSR_TM_ACTIVE(regs->msr)); |
753 | |
754 | - /* tm_reclaim rolls back all reg states, saving checkpointed (older) |
755 | - * GPRs to thread.ckpt_regs and (if used) FPRs to (newer) |
756 | - * thread.transact_fp and/or VRs to (newer) thread.transact_vr. |
757 | - * THEN we save out FP/VRs, if necessary, to the checkpointed (older) |
758 | - * thread.fr[]/vr[]s. The transactional (newer) GPRs are on the |
759 | - * stack, in *regs. |
760 | - */ |
761 | - tm_enable(); |
762 | - tm_reclaim(¤t->thread, msr, TM_CAUSE_SIGNAL); |
763 | - |
764 | flush_fp_to_thread(current); |
765 | |
766 | #ifdef CONFIG_ALTIVEC |
767 | @@ -711,7 +702,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, |
768 | unsigned long newsp = 0; |
769 | long err = 0; |
770 | |
771 | - frame = get_sigframe(ka, regs, sizeof(*frame), 0); |
772 | + frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 0); |
773 | if (unlikely(frame == NULL)) |
774 | goto badframe; |
775 | |
776 | diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c |
777 | index 83efa2f..1c22b2d 100644 |
778 | --- a/arch/powerpc/kernel/traps.c |
779 | +++ b/arch/powerpc/kernel/traps.c |
780 | @@ -52,6 +52,7 @@ |
781 | #ifdef CONFIG_PPC64 |
782 | #include <asm/firmware.h> |
783 | #include <asm/processor.h> |
784 | +#include <asm/tm.h> |
785 | #endif |
786 | #include <asm/kexec.h> |
787 | #include <asm/ppc-opcode.h> |
788 | @@ -913,6 +914,28 @@ static int emulate_isel(struct pt_regs *regs, u32 instword) |
789 | return 0; |
790 | } |
791 | |
792 | +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
793 | +static inline bool tm_abort_check(struct pt_regs *regs, int cause) |
794 | +{ |
795 | + /* If we're emulating a load/store in an active transaction, we cannot |
796 | + * emulate it as the kernel operates in transaction suspended context. |
797 | + * We need to abort the transaction. This creates a persistent TM |
798 | + * abort so tell the user what caused it with a new code. |
799 | + */ |
800 | + if (MSR_TM_TRANSACTIONAL(regs->msr)) { |
801 | + tm_enable(); |
802 | + tm_abort(cause); |
803 | + return true; |
804 | + } |
805 | + return false; |
806 | +} |
807 | +#else |
808 | +static inline bool tm_abort_check(struct pt_regs *regs, int reason) |
809 | +{ |
810 | + return false; |
811 | +} |
812 | +#endif |
813 | + |
814 | static int emulate_instruction(struct pt_regs *regs) |
815 | { |
816 | u32 instword; |
817 | @@ -952,6 +975,9 @@ static int emulate_instruction(struct pt_regs *regs) |
818 | |
819 | /* Emulate load/store string insn. */ |
820 | if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { |
821 | + if (tm_abort_check(regs, |
822 | + TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) |
823 | + return -EINVAL; |
824 | PPC_WARN_EMULATED(string, regs); |
825 | return emulate_string_inst(regs, instword); |
826 | } |
827 | @@ -1124,6 +1150,9 @@ void alignment_exception(struct pt_regs *regs) |
828 | if (!arch_irq_disabled_regs(regs)) |
829 | local_irq_enable(); |
830 | |
831 | + if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT)) |
832 | + goto bail; |
833 | + |
834 | /* we don't implement logging of alignment exceptions */ |
835 | if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) |
836 | fixed = fix_alignment(regs); |
837 | diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig |
838 | index 9a0941b..b9fd0d3 100644 |
839 | --- a/arch/powerpc/platforms/pseries/Kconfig |
840 | +++ b/arch/powerpc/platforms/pseries/Kconfig |
841 | @@ -18,6 +18,8 @@ config PPC_PSERIES |
842 | select PPC_PCI_CHOICE if EXPERT |
843 | select ZLIB_DEFLATE |
844 | select PPC_DOORBELL |
845 | + select HOTPLUG if SMP |
846 | + select HOTPLUG_CPU if SMP |
847 | default y |
848 | |
849 | config PPC_SPLPAR |
850 | diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h |
851 | index 3cb47cf..fd80015 100644 |
852 | --- a/arch/s390/include/asm/pgtable.h |
853 | +++ b/arch/s390/include/asm/pgtable.h |
854 | @@ -637,7 +637,7 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) |
855 | unsigned long address, bits; |
856 | unsigned char skey; |
857 | |
858 | - if (!pte_present(*ptep)) |
859 | + if (pte_val(*ptep) & _PAGE_INVALID) |
860 | return pgste; |
861 | address = pte_val(*ptep) & PAGE_MASK; |
862 | skey = page_get_storage_key(address); |
863 | @@ -671,7 +671,7 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste) |
864 | #ifdef CONFIG_PGSTE |
865 | int young; |
866 | |
867 | - if (!pte_present(*ptep)) |
868 | + if (pte_val(*ptep) & _PAGE_INVALID) |
869 | return pgste; |
870 | /* Get referenced bit from storage key */ |
871 | young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK); |
872 | @@ -697,7 +697,7 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry) |
873 | unsigned long address; |
874 | unsigned long okey, nkey; |
875 | |
876 | - if (!pte_present(entry)) |
877 | + if (pte_val(entry) & _PAGE_INVALID) |
878 | return; |
879 | address = pte_val(entry) & PAGE_MASK; |
880 | okey = nkey = page_get_storage_key(address); |
881 | @@ -1072,6 +1072,9 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, |
882 | pte = *ptep; |
883 | if (!mm_exclusive(mm)) |
884 | __ptep_ipte(address, ptep); |
885 | + |
886 | + if (mm_has_pgste(mm)) |
887 | + pgste = pgste_update_all(&pte, pgste); |
888 | return pte; |
889 | } |
890 | |
891 | @@ -1079,9 +1082,13 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm, |
892 | unsigned long address, |
893 | pte_t *ptep, pte_t pte) |
894 | { |
895 | + pgste_t pgste; |
896 | + |
897 | if (mm_has_pgste(mm)) { |
898 | + pgste = *(pgste_t *)(ptep + PTRS_PER_PTE); |
899 | + pgste_set_key(ptep, pgste, pte); |
900 | pgste_set_pte(ptep, pte); |
901 | - pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE)); |
902 | + pgste_set_unlock(ptep, pgste); |
903 | } else |
904 | *ptep = pte; |
905 | } |
906 | diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S |
907 | index c8335014..c18c398 100644 |
908 | --- a/arch/x86/crypto/crc32-pclmul_asm.S |
909 | +++ b/arch/x86/crypto/crc32-pclmul_asm.S |
910 | @@ -241,6 +241,6 @@ fold_64: |
911 | pand %xmm3, %xmm1 |
912 | PCLMULQDQ 0x00, CONSTANT, %xmm1 |
913 | pxor %xmm2, %xmm1 |
914 | - pextrd $0x01, %xmm1, %eax |
915 | + PEXTRD 0x01, %xmm1, %eax |
916 | |
917 | ret |
918 | diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h |
919 | index 280bf7f..3e11527 100644 |
920 | --- a/arch/x86/include/asm/inst.h |
921 | +++ b/arch/x86/include/asm/inst.h |
922 | @@ -9,12 +9,68 @@ |
923 | |
924 | #define REG_NUM_INVALID 100 |
925 | |
926 | -#define REG_TYPE_R64 0 |
927 | -#define REG_TYPE_XMM 1 |
928 | +#define REG_TYPE_R32 0 |
929 | +#define REG_TYPE_R64 1 |
930 | +#define REG_TYPE_XMM 2 |
931 | #define REG_TYPE_INVALID 100 |
932 | |
933 | + .macro R32_NUM opd r32 |
934 | + \opd = REG_NUM_INVALID |
935 | + .ifc \r32,%eax |
936 | + \opd = 0 |
937 | + .endif |
938 | + .ifc \r32,%ecx |
939 | + \opd = 1 |
940 | + .endif |
941 | + .ifc \r32,%edx |
942 | + \opd = 2 |
943 | + .endif |
944 | + .ifc \r32,%ebx |
945 | + \opd = 3 |
946 | + .endif |
947 | + .ifc \r32,%esp |
948 | + \opd = 4 |
949 | + .endif |
950 | + .ifc \r32,%ebp |
951 | + \opd = 5 |
952 | + .endif |
953 | + .ifc \r32,%esi |
954 | + \opd = 6 |
955 | + .endif |
956 | + .ifc \r32,%edi |
957 | + \opd = 7 |
958 | + .endif |
959 | +#ifdef CONFIG_X86_64 |
960 | + .ifc \r32,%r8d |
961 | + \opd = 8 |
962 | + .endif |
963 | + .ifc \r32,%r9d |
964 | + \opd = 9 |
965 | + .endif |
966 | + .ifc \r32,%r10d |
967 | + \opd = 10 |
968 | + .endif |
969 | + .ifc \r32,%r11d |
970 | + \opd = 11 |
971 | + .endif |
972 | + .ifc \r32,%r12d |
973 | + \opd = 12 |
974 | + .endif |
975 | + .ifc \r32,%r13d |
976 | + \opd = 13 |
977 | + .endif |
978 | + .ifc \r32,%r14d |
979 | + \opd = 14 |
980 | + .endif |
981 | + .ifc \r32,%r15d |
982 | + \opd = 15 |
983 | + .endif |
984 | +#endif |
985 | + .endm |
986 | + |
987 | .macro R64_NUM opd r64 |
988 | \opd = REG_NUM_INVALID |
989 | +#ifdef CONFIG_X86_64 |
990 | .ifc \r64,%rax |
991 | \opd = 0 |
992 | .endif |
993 | @@ -63,6 +119,7 @@ |
994 | .ifc \r64,%r15 |
995 | \opd = 15 |
996 | .endif |
997 | +#endif |
998 | .endm |
999 | |
1000 | .macro XMM_NUM opd xmm |
1001 | @@ -118,10 +175,13 @@ |
1002 | .endm |
1003 | |
1004 | .macro REG_TYPE type reg |
1005 | + R32_NUM reg_type_r32 \reg |
1006 | R64_NUM reg_type_r64 \reg |
1007 | XMM_NUM reg_type_xmm \reg |
1008 | .if reg_type_r64 <> REG_NUM_INVALID |
1009 | \type = REG_TYPE_R64 |
1010 | + .elseif reg_type_r32 <> REG_NUM_INVALID |
1011 | + \type = REG_TYPE_R32 |
1012 | .elseif reg_type_xmm <> REG_NUM_INVALID |
1013 | \type = REG_TYPE_XMM |
1014 | .else |
1015 | @@ -162,6 +222,16 @@ |
1016 | .byte \imm8 |
1017 | .endm |
1018 | |
1019 | + .macro PEXTRD imm8 xmm gpr |
1020 | + R32_NUM extrd_opd1 \gpr |
1021 | + XMM_NUM extrd_opd2 \xmm |
1022 | + PFX_OPD_SIZE |
1023 | + PFX_REX extrd_opd1 extrd_opd2 |
1024 | + .byte 0x0f, 0x3a, 0x16 |
1025 | + MODRM 0xc0 extrd_opd1 extrd_opd2 |
1026 | + .byte \imm8 |
1027 | + .endm |
1028 | + |
1029 | .macro AESKEYGENASSIST rcon xmm1 xmm2 |
1030 | XMM_NUM aeskeygen_opd1 \xmm1 |
1031 | XMM_NUM aeskeygen_opd2 \xmm2 |
1032 | diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S |
1033 | index 08f7e80..321d65e 100644 |
1034 | --- a/arch/x86/kernel/head_64.S |
1035 | +++ b/arch/x86/kernel/head_64.S |
1036 | @@ -115,8 +115,10 @@ startup_64: |
1037 | movq %rdi, %rax |
1038 | shrq $PUD_SHIFT, %rax |
1039 | andl $(PTRS_PER_PUD-1), %eax |
1040 | - movq %rdx, (4096+0)(%rbx,%rax,8) |
1041 | - movq %rdx, (4096+8)(%rbx,%rax,8) |
1042 | + movq %rdx, 4096(%rbx,%rax,8) |
1043 | + incl %eax |
1044 | + andl $(PTRS_PER_PUD-1), %eax |
1045 | + movq %rdx, 4096(%rbx,%rax,8) |
1046 | |
1047 | addq $8192, %rbx |
1048 | movq %rdi, %rax |
1049 | diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c |
1050 | index 245a71d..cb33909 100644 |
1051 | --- a/arch/x86/kernel/i387.c |
1052 | +++ b/arch/x86/kernel/i387.c |
1053 | @@ -22,23 +22,19 @@ |
1054 | /* |
1055 | * Were we in an interrupt that interrupted kernel mode? |
1056 | * |
1057 | - * For now, with eagerfpu we will return interrupted kernel FPU |
1058 | - * state as not-idle. TBD: Ideally we can change the return value |
1059 | - * to something like __thread_has_fpu(current). But we need to |
1060 | - * be careful of doing __thread_clear_has_fpu() before saving |
1061 | - * the FPU etc for supporting nested uses etc. For now, take |
1062 | - * the simple route! |
1063 | - * |
1064 | * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that |
1065 | * pair does nothing at all: the thread must not have fpu (so |
1066 | * that we don't try to save the FPU state), and TS must |
1067 | * be set (so that the clts/stts pair does nothing that is |
1068 | * visible in the interrupted kernel thread). |
1069 | + * |
1070 | + * Except for the eagerfpu case when we return 1 unless we've already |
1071 | + * been eager and saved the state in kernel_fpu_begin(). |
1072 | */ |
1073 | static inline bool interrupted_kernel_fpu_idle(void) |
1074 | { |
1075 | if (use_eager_fpu()) |
1076 | - return 0; |
1077 | + return __thread_has_fpu(current); |
1078 | |
1079 | return !__thread_has_fpu(current) && |
1080 | (read_cr0() & X86_CR0_TS); |
1081 | @@ -78,8 +74,8 @@ void __kernel_fpu_begin(void) |
1082 | struct task_struct *me = current; |
1083 | |
1084 | if (__thread_has_fpu(me)) { |
1085 | - __save_init_fpu(me); |
1086 | __thread_clear_has_fpu(me); |
1087 | + __save_init_fpu(me); |
1088 | /* We do 'stts()' in __kernel_fpu_end() */ |
1089 | } else if (!use_eager_fpu()) { |
1090 | this_cpu_write(fpu_owner_task, NULL); |
1091 | diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c |
1092 | index 59622c9..698eece 100644 |
1093 | --- a/arch/x86/kvm/emulate.c |
1094 | +++ b/arch/x86/kvm/emulate.c |
1095 | @@ -1239,9 +1239,12 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, |
1096 | ctxt->modrm_seg = VCPU_SREG_DS; |
1097 | |
1098 | if (ctxt->modrm_mod == 3) { |
1099 | + int highbyte_regs = ctxt->rex_prefix == 0; |
1100 | + |
1101 | op->type = OP_REG; |
1102 | op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; |
1103 | - op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp); |
1104 | + op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, |
1105 | + highbyte_regs && (ctxt->d & ByteOp)); |
1106 | if (ctxt->d & Sse) { |
1107 | op->type = OP_XMM; |
1108 | op->bytes = 16; |
1109 | @@ -3987,7 +3990,8 @@ static const struct opcode twobyte_table[256] = { |
1110 | DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, |
1111 | N, D(ImplicitOps | ModRM), N, N, |
1112 | /* 0x10 - 0x1F */ |
1113 | - N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N, |
1114 | + N, N, N, N, N, N, N, N, |
1115 | + D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM), |
1116 | /* 0x20 - 0x2F */ |
1117 | DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read), |
1118 | DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read), |
1119 | @@ -4825,6 +4829,7 @@ twobyte_insn: |
1120 | case 0x08: /* invd */ |
1121 | case 0x0d: /* GrpP (prefetch) */ |
1122 | case 0x18: /* Grp16 (prefetch/nop) */ |
1123 | + case 0x1f: /* nop */ |
1124 | break; |
1125 | case 0x20: /* mov cr, reg */ |
1126 | ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); |
1127 | diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile |
1128 | index 474fcfe..eb8278a 100644 |
1129 | --- a/drivers/acpi/Makefile |
1130 | +++ b/drivers/acpi/Makefile |
1131 | @@ -24,7 +24,7 @@ acpi-y += nvs.o |
1132 | # Power management related files |
1133 | acpi-y += wakeup.o |
1134 | acpi-y += sleep.o |
1135 | -acpi-$(CONFIG_PM) += device_pm.o |
1136 | +acpi-y += device_pm.o |
1137 | acpi-$(CONFIG_ACPI_SLEEP) += proc.o |
1138 | |
1139 | |
1140 | diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c |
1141 | index dd314ef..557e58d 100644 |
1142 | --- a/drivers/acpi/device_pm.c |
1143 | +++ b/drivers/acpi/device_pm.c |
1144 | @@ -37,68 +37,6 @@ |
1145 | #define _COMPONENT ACPI_POWER_COMPONENT |
1146 | ACPI_MODULE_NAME("device_pm"); |
1147 | |
1148 | -static DEFINE_MUTEX(acpi_pm_notifier_lock); |
1149 | - |
1150 | -/** |
1151 | - * acpi_add_pm_notifier - Register PM notifier for given ACPI device. |
1152 | - * @adev: ACPI device to add the notifier for. |
1153 | - * @context: Context information to pass to the notifier routine. |
1154 | - * |
1155 | - * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of |
1156 | - * PM wakeup events. For example, wakeup events may be generated for bridges |
1157 | - * if one of the devices below the bridge is signaling wakeup, even if the |
1158 | - * bridge itself doesn't have a wakeup GPE associated with it. |
1159 | - */ |
1160 | -acpi_status acpi_add_pm_notifier(struct acpi_device *adev, |
1161 | - acpi_notify_handler handler, void *context) |
1162 | -{ |
1163 | - acpi_status status = AE_ALREADY_EXISTS; |
1164 | - |
1165 | - mutex_lock(&acpi_pm_notifier_lock); |
1166 | - |
1167 | - if (adev->wakeup.flags.notifier_present) |
1168 | - goto out; |
1169 | - |
1170 | - status = acpi_install_notify_handler(adev->handle, |
1171 | - ACPI_SYSTEM_NOTIFY, |
1172 | - handler, context); |
1173 | - if (ACPI_FAILURE(status)) |
1174 | - goto out; |
1175 | - |
1176 | - adev->wakeup.flags.notifier_present = true; |
1177 | - |
1178 | - out: |
1179 | - mutex_unlock(&acpi_pm_notifier_lock); |
1180 | - return status; |
1181 | -} |
1182 | - |
1183 | -/** |
1184 | - * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device. |
1185 | - * @adev: ACPI device to remove the notifier from. |
1186 | - */ |
1187 | -acpi_status acpi_remove_pm_notifier(struct acpi_device *adev, |
1188 | - acpi_notify_handler handler) |
1189 | -{ |
1190 | - acpi_status status = AE_BAD_PARAMETER; |
1191 | - |
1192 | - mutex_lock(&acpi_pm_notifier_lock); |
1193 | - |
1194 | - if (!adev->wakeup.flags.notifier_present) |
1195 | - goto out; |
1196 | - |
1197 | - status = acpi_remove_notify_handler(adev->handle, |
1198 | - ACPI_SYSTEM_NOTIFY, |
1199 | - handler); |
1200 | - if (ACPI_FAILURE(status)) |
1201 | - goto out; |
1202 | - |
1203 | - adev->wakeup.flags.notifier_present = false; |
1204 | - |
1205 | - out: |
1206 | - mutex_unlock(&acpi_pm_notifier_lock); |
1207 | - return status; |
1208 | -} |
1209 | - |
1210 | /** |
1211 | * acpi_power_state_string - String representation of ACPI device power state. |
1212 | * @state: ACPI device power state to return the string representation of. |
1213 | @@ -376,6 +314,69 @@ bool acpi_bus_power_manageable(acpi_handle handle) |
1214 | } |
1215 | EXPORT_SYMBOL(acpi_bus_power_manageable); |
1216 | |
1217 | +#ifdef CONFIG_PM |
1218 | +static DEFINE_MUTEX(acpi_pm_notifier_lock); |
1219 | + |
1220 | +/** |
1221 | + * acpi_add_pm_notifier - Register PM notifier for given ACPI device. |
1222 | + * @adev: ACPI device to add the notifier for. |
1223 | + * @context: Context information to pass to the notifier routine. |
1224 | + * |
1225 | + * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of |
1226 | + * PM wakeup events. For example, wakeup events may be generated for bridges |
1227 | + * if one of the devices below the bridge is signaling wakeup, even if the |
1228 | + * bridge itself doesn't have a wakeup GPE associated with it. |
1229 | + */ |
1230 | +acpi_status acpi_add_pm_notifier(struct acpi_device *adev, |
1231 | + acpi_notify_handler handler, void *context) |
1232 | +{ |
1233 | + acpi_status status = AE_ALREADY_EXISTS; |
1234 | + |
1235 | + mutex_lock(&acpi_pm_notifier_lock); |
1236 | + |
1237 | + if (adev->wakeup.flags.notifier_present) |
1238 | + goto out; |
1239 | + |
1240 | + status = acpi_install_notify_handler(adev->handle, |
1241 | + ACPI_SYSTEM_NOTIFY, |
1242 | + handler, context); |
1243 | + if (ACPI_FAILURE(status)) |
1244 | + goto out; |
1245 | + |
1246 | + adev->wakeup.flags.notifier_present = true; |
1247 | + |
1248 | + out: |
1249 | + mutex_unlock(&acpi_pm_notifier_lock); |
1250 | + return status; |
1251 | +} |
1252 | + |
1253 | +/** |
1254 | + * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device. |
1255 | + * @adev: ACPI device to remove the notifier from. |
1256 | + */ |
1257 | +acpi_status acpi_remove_pm_notifier(struct acpi_device *adev, |
1258 | + acpi_notify_handler handler) |
1259 | +{ |
1260 | + acpi_status status = AE_BAD_PARAMETER; |
1261 | + |
1262 | + mutex_lock(&acpi_pm_notifier_lock); |
1263 | + |
1264 | + if (!adev->wakeup.flags.notifier_present) |
1265 | + goto out; |
1266 | + |
1267 | + status = acpi_remove_notify_handler(adev->handle, |
1268 | + ACPI_SYSTEM_NOTIFY, |
1269 | + handler); |
1270 | + if (ACPI_FAILURE(status)) |
1271 | + goto out; |
1272 | + |
1273 | + adev->wakeup.flags.notifier_present = false; |
1274 | + |
1275 | + out: |
1276 | + mutex_unlock(&acpi_pm_notifier_lock); |
1277 | + return status; |
1278 | +} |
1279 | + |
1280 | bool acpi_bus_can_wakeup(acpi_handle handle) |
1281 | { |
1282 | struct acpi_device *device; |
1283 | @@ -1014,3 +1015,4 @@ void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev) |
1284 | mutex_unlock(&adev->physical_node_lock); |
1285 | } |
1286 | EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent); |
1287 | +#endif /* CONFIG_PM */ |
1288 | diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c |
1289 | index 4ac2593..abcae69 100644 |
1290 | --- a/drivers/acpi/video_detect.c |
1291 | +++ b/drivers/acpi/video_detect.c |
1292 | @@ -164,6 +164,14 @@ static struct dmi_system_id video_detect_dmi_table[] = { |
1293 | DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"), |
1294 | }, |
1295 | }, |
1296 | + { |
1297 | + .callback = video_detect_force_vendor, |
1298 | + .ident = "Asus UL30A", |
1299 | + .matches = { |
1300 | + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), |
1301 | + DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), |
1302 | + }, |
1303 | + }, |
1304 | { }, |
1305 | }; |
1306 | |
1307 | diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c |
1308 | index 2f48123..93cb092 100644 |
1309 | --- a/drivers/ata/ata_piix.c |
1310 | +++ b/drivers/ata/ata_piix.c |
1311 | @@ -151,6 +151,7 @@ enum piix_controller_ids { |
1312 | piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ |
1313 | ich8_sata_snb, |
1314 | ich8_2port_sata_snb, |
1315 | + ich8_2port_sata_byt, |
1316 | }; |
1317 | |
1318 | struct piix_map_db { |
1319 | @@ -334,6 +335,9 @@ static const struct pci_device_id piix_pci_tbl[] = { |
1320 | { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, |
1321 | /* SATA Controller IDE (Wellsburg) */ |
1322 | { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, |
1323 | + /* SATA Controller IDE (BayTrail) */ |
1324 | + { 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt }, |
1325 | + { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt }, |
1326 | |
1327 | { } /* terminate list */ |
1328 | }; |
1329 | @@ -441,6 +445,7 @@ static const struct piix_map_db *piix_map_db_table[] = { |
1330 | [tolapai_sata] = &tolapai_map_db, |
1331 | [ich8_sata_snb] = &ich8_map_db, |
1332 | [ich8_2port_sata_snb] = &ich8_2port_map_db, |
1333 | + [ich8_2port_sata_byt] = &ich8_2port_map_db, |
1334 | }; |
1335 | |
1336 | static struct pci_bits piix_enable_bits[] = { |
1337 | @@ -1254,6 +1259,16 @@ static struct ata_port_info piix_port_info[] = { |
1338 | .udma_mask = ATA_UDMA6, |
1339 | .port_ops = &piix_sata_ops, |
1340 | }, |
1341 | + |
1342 | + [ich8_2port_sata_byt] = |
1343 | + { |
1344 | + .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16, |
1345 | + .pio_mask = ATA_PIO4, |
1346 | + .mwdma_mask = ATA_MWDMA2, |
1347 | + .udma_mask = ATA_UDMA6, |
1348 | + .port_ops = &piix_sata_ops, |
1349 | + }, |
1350 | + |
1351 | }; |
1352 | |
1353 | #define AHCI_PCI_BAR 5 |
1354 | diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c |
1355 | index 63c743b..cf15aee 100644 |
1356 | --- a/drivers/ata/libata-core.c |
1357 | +++ b/drivers/ata/libata-core.c |
1358 | @@ -1602,6 +1602,12 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, |
1359 | qc->tf = *tf; |
1360 | if (cdb) |
1361 | memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); |
1362 | + |
1363 | + /* some SATA bridges need us to indicate data xfer direction */ |
1364 | + if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) && |
1365 | + dma_dir == DMA_FROM_DEVICE) |
1366 | + qc->tf.feature |= ATAPI_DMADIR; |
1367 | + |
1368 | qc->flags |= ATA_QCFLAG_RESULT_TF; |
1369 | qc->dma_dir = dma_dir; |
1370 | if (dma_dir != DMA_NONE) { |
1371 | diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c |
1372 | index caf33f6..d7b77e0 100644 |
1373 | --- a/drivers/ata/sata_rcar.c |
1374 | +++ b/drivers/ata/sata_rcar.c |
1375 | @@ -548,6 +548,7 @@ static void sata_rcar_bmdma_start(struct ata_queued_cmd *qc) |
1376 | |
1377 | /* start host DMA transaction */ |
1378 | dmactl = ioread32(priv->base + ATAPI_CONTROL1_REG); |
1379 | + dmactl &= ~ATAPI_CONTROL1_STOP; |
1380 | dmactl |= ATAPI_CONTROL1_START; |
1381 | iowrite32(dmactl, priv->base + ATAPI_CONTROL1_REG); |
1382 | } |
1383 | @@ -617,17 +618,16 @@ static struct ata_port_operations sata_rcar_port_ops = { |
1384 | .bmdma_status = sata_rcar_bmdma_status, |
1385 | }; |
1386 | |
1387 | -static int sata_rcar_serr_interrupt(struct ata_port *ap) |
1388 | +static void sata_rcar_serr_interrupt(struct ata_port *ap) |
1389 | { |
1390 | struct sata_rcar_priv *priv = ap->host->private_data; |
1391 | struct ata_eh_info *ehi = &ap->link.eh_info; |
1392 | int freeze = 0; |
1393 | - int handled = 0; |
1394 | u32 serror; |
1395 | |
1396 | serror = ioread32(priv->base + SCRSERR_REG); |
1397 | if (!serror) |
1398 | - return 0; |
1399 | + return; |
1400 | |
1401 | DPRINTK("SError @host_intr: 0x%x\n", serror); |
1402 | |
1403 | @@ -640,7 +640,6 @@ static int sata_rcar_serr_interrupt(struct ata_port *ap) |
1404 | ata_ehi_push_desc(ehi, "%s", "hotplug"); |
1405 | |
1406 | freeze = serror & SERR_COMM_WAKE ? 0 : 1; |
1407 | - handled = 1; |
1408 | } |
1409 | |
1410 | /* freeze or abort */ |
1411 | @@ -648,11 +647,9 @@ static int sata_rcar_serr_interrupt(struct ata_port *ap) |
1412 | ata_port_freeze(ap); |
1413 | else |
1414 | ata_port_abort(ap); |
1415 | - |
1416 | - return handled; |
1417 | } |
1418 | |
1419 | -static int sata_rcar_ata_interrupt(struct ata_port *ap) |
1420 | +static void sata_rcar_ata_interrupt(struct ata_port *ap) |
1421 | { |
1422 | struct ata_queued_cmd *qc; |
1423 | int handled = 0; |
1424 | @@ -661,7 +658,9 @@ static int sata_rcar_ata_interrupt(struct ata_port *ap) |
1425 | if (qc) |
1426 | handled |= ata_bmdma_port_intr(ap, qc); |
1427 | |
1428 | - return handled; |
1429 | + /* be sure to clear ATA interrupt */ |
1430 | + if (!handled) |
1431 | + sata_rcar_check_status(ap); |
1432 | } |
1433 | |
1434 | static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance) |
1435 | @@ -676,20 +675,21 @@ static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance) |
1436 | spin_lock_irqsave(&host->lock, flags); |
1437 | |
1438 | sataintstat = ioread32(priv->base + SATAINTSTAT_REG); |
1439 | + sataintstat &= SATA_RCAR_INT_MASK; |
1440 | if (!sataintstat) |
1441 | goto done; |
1442 | /* ack */ |
1443 | - iowrite32(sataintstat & ~SATA_RCAR_INT_MASK, |
1444 | - priv->base + SATAINTSTAT_REG); |
1445 | + iowrite32(~sataintstat & 0x7ff, priv->base + SATAINTSTAT_REG); |
1446 | |
1447 | ap = host->ports[0]; |
1448 | |
1449 | if (sataintstat & SATAINTSTAT_ATA) |
1450 | - handled |= sata_rcar_ata_interrupt(ap); |
1451 | + sata_rcar_ata_interrupt(ap); |
1452 | |
1453 | if (sataintstat & SATAINTSTAT_SERR) |
1454 | - handled |= sata_rcar_serr_interrupt(ap); |
1455 | + sata_rcar_serr_interrupt(ap); |
1456 | |
1457 | + handled = 1; |
1458 | done: |
1459 | spin_unlock_irqrestore(&host->lock, flags); |
1460 | |
1461 | diff --git a/drivers/block/brd.c b/drivers/block/brd.c |
1462 | index 531ceb3..4e8213a 100644 |
1463 | --- a/drivers/block/brd.c |
1464 | +++ b/drivers/block/brd.c |
1465 | @@ -117,13 +117,13 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) |
1466 | |
1467 | spin_lock(&brd->brd_lock); |
1468 | idx = sector >> PAGE_SECTORS_SHIFT; |
1469 | + page->index = idx; |
1470 | if (radix_tree_insert(&brd->brd_pages, idx, page)) { |
1471 | __free_page(page); |
1472 | page = radix_tree_lookup(&brd->brd_pages, idx); |
1473 | BUG_ON(!page); |
1474 | BUG_ON(page->index != idx); |
1475 | - } else |
1476 | - page->index = idx; |
1477 | + } |
1478 | spin_unlock(&brd->brd_lock); |
1479 | |
1480 | radix_tree_preload_end(); |
1481 | diff --git a/drivers/char/random.c b/drivers/char/random.c |
1482 | index 32a6c57..eccd7cc 100644 |
1483 | --- a/drivers/char/random.c |
1484 | +++ b/drivers/char/random.c |
1485 | @@ -865,16 +865,24 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, |
1486 | if (r->entropy_count / 8 < min + reserved) { |
1487 | nbytes = 0; |
1488 | } else { |
1489 | + int entropy_count, orig; |
1490 | +retry: |
1491 | + entropy_count = orig = ACCESS_ONCE(r->entropy_count); |
1492 | /* If limited, never pull more than available */ |
1493 | - if (r->limit && nbytes + reserved >= r->entropy_count / 8) |
1494 | - nbytes = r->entropy_count/8 - reserved; |
1495 | - |
1496 | - if (r->entropy_count / 8 >= nbytes + reserved) |
1497 | - r->entropy_count -= nbytes*8; |
1498 | - else |
1499 | - r->entropy_count = reserved; |
1500 | + if (r->limit && nbytes + reserved >= entropy_count / 8) |
1501 | + nbytes = entropy_count/8 - reserved; |
1502 | + |
1503 | + if (entropy_count / 8 >= nbytes + reserved) { |
1504 | + entropy_count -= nbytes*8; |
1505 | + if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) |
1506 | + goto retry; |
1507 | + } else { |
1508 | + entropy_count = reserved; |
1509 | + if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) |
1510 | + goto retry; |
1511 | + } |
1512 | |
1513 | - if (r->entropy_count < random_write_wakeup_thresh) |
1514 | + if (entropy_count < random_write_wakeup_thresh) |
1515 | wakeup_write = 1; |
1516 | } |
1517 | |
1518 | @@ -957,10 +965,23 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, |
1519 | { |
1520 | ssize_t ret = 0, i; |
1521 | __u8 tmp[EXTRACT_SIZE]; |
1522 | + unsigned long flags; |
1523 | |
1524 | /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */ |
1525 | - if (fips_enabled && !r->last_data_init) |
1526 | - nbytes += EXTRACT_SIZE; |
1527 | + if (fips_enabled) { |
1528 | + spin_lock_irqsave(&r->lock, flags); |
1529 | + if (!r->last_data_init) { |
1530 | + r->last_data_init = true; |
1531 | + spin_unlock_irqrestore(&r->lock, flags); |
1532 | + trace_extract_entropy(r->name, EXTRACT_SIZE, |
1533 | + r->entropy_count, _RET_IP_); |
1534 | + xfer_secondary_pool(r, EXTRACT_SIZE); |
1535 | + extract_buf(r, tmp); |
1536 | + spin_lock_irqsave(&r->lock, flags); |
1537 | + memcpy(r->last_data, tmp, EXTRACT_SIZE); |
1538 | + } |
1539 | + spin_unlock_irqrestore(&r->lock, flags); |
1540 | + } |
1541 | |
1542 | trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_); |
1543 | xfer_secondary_pool(r, nbytes); |
1544 | @@ -970,19 +991,6 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, |
1545 | extract_buf(r, tmp); |
1546 | |
1547 | if (fips_enabled) { |
1548 | - unsigned long flags; |
1549 | - |
1550 | - |
1551 | - /* prime last_data value if need be, per fips 140-2 */ |
1552 | - if (!r->last_data_init) { |
1553 | - spin_lock_irqsave(&r->lock, flags); |
1554 | - memcpy(r->last_data, tmp, EXTRACT_SIZE); |
1555 | - r->last_data_init = true; |
1556 | - nbytes -= EXTRACT_SIZE; |
1557 | - spin_unlock_irqrestore(&r->lock, flags); |
1558 | - extract_buf(r, tmp); |
1559 | - } |
1560 | - |
1561 | spin_lock_irqsave(&r->lock, flags); |
1562 | if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) |
1563 | panic("Hardware RNG duplicated output!\n"); |
1564 | diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c |
1565 | index cf268b1..d482b12 100644 |
1566 | --- a/drivers/crypto/caam/caamalg.c |
1567 | +++ b/drivers/crypto/caam/caamalg.c |
1568 | @@ -1154,7 +1154,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, |
1569 | dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); |
1570 | |
1571 | sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, |
1572 | - DMA_BIDIRECTIONAL, assoc_chained); |
1573 | + DMA_TO_DEVICE, assoc_chained); |
1574 | if (likely(req->src == req->dst)) { |
1575 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, |
1576 | DMA_BIDIRECTIONAL, src_chained); |
1577 | @@ -1336,7 +1336,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request |
1578 | dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); |
1579 | |
1580 | sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, |
1581 | - DMA_BIDIRECTIONAL, assoc_chained); |
1582 | + DMA_TO_DEVICE, assoc_chained); |
1583 | if (likely(req->src == req->dst)) { |
1584 | sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, |
1585 | DMA_BIDIRECTIONAL, src_chained); |
1586 | diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c |
1587 | index e9b5789..49393e5 100644 |
1588 | --- a/drivers/gpu/drm/i915/i915_drv.c |
1589 | +++ b/drivers/gpu/drm/i915/i915_drv.c |
1590 | @@ -359,40 +359,64 @@ static const struct pci_device_id pciidlist[] = { /* aka */ |
1591 | INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ |
1592 | INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ |
1593 | INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ |
1594 | - INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */ |
1595 | + INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */ |
1596 | INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */ |
1597 | INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */ |
1598 | - INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */ |
1599 | + INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */ |
1600 | INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */ |
1601 | INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */ |
1602 | INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */ |
1603 | + INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */ |
1604 | + INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */ |
1605 | + INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */ |
1606 | + INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */ |
1607 | + INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */ |
1608 | + INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */ |
1609 | INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */ |
1610 | INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */ |
1611 | - INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */ |
1612 | + INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */ |
1613 | INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */ |
1614 | INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */ |
1615 | - INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */ |
1616 | + INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */ |
1617 | INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */ |
1618 | INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */ |
1619 | - INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */ |
1620 | + INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */ |
1621 | + INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */ |
1622 | + INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */ |
1623 | + INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */ |
1624 | + INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */ |
1625 | + INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */ |
1626 | + INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */ |
1627 | INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */ |
1628 | INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */ |
1629 | - INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */ |
1630 | + INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */ |
1631 | INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */ |
1632 | INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */ |
1633 | - INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */ |
1634 | + INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */ |
1635 | INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */ |
1636 | INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */ |
1637 | - INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */ |
1638 | + INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */ |
1639 | + INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */ |
1640 | + INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */ |
1641 | + INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */ |
1642 | + INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */ |
1643 | + INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */ |
1644 | + INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */ |
1645 | INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */ |
1646 | INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */ |
1647 | - INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */ |
1648 | + INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */ |
1649 | INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */ |
1650 | INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */ |
1651 | - INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */ |
1652 | + INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */ |
1653 | INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */ |
1654 | INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */ |
1655 | - INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */ |
1656 | + INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */ |
1657 | + INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */ |
1658 | + INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */ |
1659 | + INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */ |
1660 | + INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */ |
1661 | + INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */ |
1662 | + INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */ |
1663 | INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), |
1664 | INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), |
1665 | INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), |
1666 | diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c |
1667 | index 4393eb4..2391b1b 100644 |
1668 | --- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c |
1669 | +++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c |
1670 | @@ -138,7 +138,6 @@ nvc0_identify(struct nouveau_device *device) |
1671 | device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; |
1672 | device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; |
1673 | device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; |
1674 | - device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; |
1675 | device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; |
1676 | break; |
1677 | case 0xce: |
1678 | @@ -225,7 +224,6 @@ nvc0_identify(struct nouveau_device *device) |
1679 | device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; |
1680 | device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; |
1681 | device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; |
1682 | - device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; |
1683 | device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; |
1684 | break; |
1685 | case 0xc8: |
1686 | diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c |
1687 | index 44b8034..5073665 100644 |
1688 | --- a/drivers/gpu/drm/radeon/radeon_device.c |
1689 | +++ b/drivers/gpu/drm/radeon/radeon_device.c |
1690 | @@ -435,18 +435,17 @@ bool radeon_card_posted(struct radeon_device *rdev) |
1691 | return false; |
1692 | |
1693 | /* first check CRTCs */ |
1694 | - if (ASIC_IS_DCE41(rdev)) { |
1695 | + if (ASIC_IS_DCE4(rdev)) { |
1696 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | |
1697 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); |
1698 | - if (reg & EVERGREEN_CRTC_MASTER_EN) |
1699 | - return true; |
1700 | - } else if (ASIC_IS_DCE4(rdev)) { |
1701 | - reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | |
1702 | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | |
1703 | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | |
1704 | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | |
1705 | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | |
1706 | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); |
1707 | + if (rdev->num_crtc >= 4) { |
1708 | + reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | |
1709 | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); |
1710 | + } |
1711 | + if (rdev->num_crtc >= 6) { |
1712 | + reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | |
1713 | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); |
1714 | + } |
1715 | if (reg & EVERGREEN_CRTC_MASTER_EN) |
1716 | return true; |
1717 | } else if (ASIC_IS_AVIVO(rdev)) { |
1718 | diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c |
1719 | index 287248c..aefefd5 100644 |
1720 | --- a/drivers/gpu/drm/radeon/si.c |
1721 | +++ b/drivers/gpu/drm/radeon/si.c |
1722 | @@ -1645,7 +1645,7 @@ static void si_gpu_init(struct radeon_device *rdev) |
1723 | default: |
1724 | rdev->config.si.max_shader_engines = 1; |
1725 | rdev->config.si.max_tile_pipes = 4; |
1726 | - rdev->config.si.max_cu_per_sh = 2; |
1727 | + rdev->config.si.max_cu_per_sh = 5; |
1728 | rdev->config.si.max_sh_per_se = 2; |
1729 | rdev->config.si.max_backends_per_se = 4; |
1730 | rdev->config.si.max_texture_channel_caches = 4; |
1731 | diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c |
1732 | index ff1be16..421d607 100644 |
1733 | --- a/drivers/hv/channel_mgmt.c |
1734 | +++ b/drivers/hv/channel_mgmt.c |
1735 | @@ -318,7 +318,7 @@ static u32 get_vp_index(uuid_le *type_guid) |
1736 | return 0; |
1737 | } |
1738 | cur_cpu = (++next_vp % max_cpus); |
1739 | - return cur_cpu; |
1740 | + return hv_context.vp_index[cur_cpu]; |
1741 | } |
1742 | |
1743 | /* |
1744 | diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c |
1745 | index be1edb0..68ebb7f 100644 |
1746 | --- a/drivers/infiniband/ulp/iser/iser_memory.c |
1747 | +++ b/drivers/infiniband/ulp/iser/iser_memory.c |
1748 | @@ -416,8 +416,9 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task, |
1749 | for (i=0 ; i<ib_conn->page_vec->length ; i++) |
1750 | iser_err("page_vec[%d] = 0x%llx\n", i, |
1751 | (unsigned long long) ib_conn->page_vec->pages[i]); |
1752 | - return err; |
1753 | } |
1754 | + if (err) |
1755 | + return err; |
1756 | } |
1757 | return 0; |
1758 | } |
1759 | diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c |
1760 | index c09d41b..b4a76d1 100644 |
1761 | --- a/drivers/infiniband/ulp/srpt/ib_srpt.c |
1762 | +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c |
1763 | @@ -2227,6 +2227,27 @@ static void srpt_close_ch(struct srpt_rdma_ch *ch) |
1764 | } |
1765 | |
1766 | /** |
1767 | + * srpt_shutdown_session() - Whether or not a session may be shut down. |
1768 | + */ |
1769 | +static int srpt_shutdown_session(struct se_session *se_sess) |
1770 | +{ |
1771 | + struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr; |
1772 | + unsigned long flags; |
1773 | + |
1774 | + spin_lock_irqsave(&ch->spinlock, flags); |
1775 | + if (ch->in_shutdown) { |
1776 | + spin_unlock_irqrestore(&ch->spinlock, flags); |
1777 | + return true; |
1778 | + } |
1779 | + |
1780 | + ch->in_shutdown = true; |
1781 | + target_sess_cmd_list_set_waiting(se_sess); |
1782 | + spin_unlock_irqrestore(&ch->spinlock, flags); |
1783 | + |
1784 | + return true; |
1785 | +} |
1786 | + |
1787 | +/** |
1788 | * srpt_drain_channel() - Drain a channel by resetting the IB queue pair. |
1789 | * @cm_id: Pointer to the CM ID of the channel to be drained. |
1790 | * |
1791 | @@ -2264,6 +2285,9 @@ static void srpt_drain_channel(struct ib_cm_id *cm_id) |
1792 | spin_unlock_irq(&sdev->spinlock); |
1793 | |
1794 | if (do_reset) { |
1795 | + if (ch->sess) |
1796 | + srpt_shutdown_session(ch->sess); |
1797 | + |
1798 | ret = srpt_ch_qp_err(ch); |
1799 | if (ret < 0) |
1800 | printk(KERN_ERR "Setting queue pair in error state" |
1801 | @@ -3467,14 +3491,6 @@ static void srpt_release_cmd(struct se_cmd *se_cmd) |
1802 | } |
1803 | |
1804 | /** |
1805 | - * srpt_shutdown_session() - Whether or not a session may be shut down. |
1806 | - */ |
1807 | -static int srpt_shutdown_session(struct se_session *se_sess) |
1808 | -{ |
1809 | - return true; |
1810 | -} |
1811 | - |
1812 | -/** |
1813 | * srpt_close_session() - Forcibly close a session. |
1814 | * |
1815 | * Callback function invoked by the TCM core to clean up sessions associated |
1816 | diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h |
1817 | index 4caf55c..3dae156 100644 |
1818 | --- a/drivers/infiniband/ulp/srpt/ib_srpt.h |
1819 | +++ b/drivers/infiniband/ulp/srpt/ib_srpt.h |
1820 | @@ -325,6 +325,7 @@ struct srpt_rdma_ch { |
1821 | u8 sess_name[36]; |
1822 | struct work_struct release_work; |
1823 | struct completion *release_done; |
1824 | + bool in_shutdown; |
1825 | }; |
1826 | |
1827 | /** |
1828 | diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c |
1829 | index 17c9097..39f3df8 100644 |
1830 | --- a/drivers/input/touchscreen/egalax_ts.c |
1831 | +++ b/drivers/input/touchscreen/egalax_ts.c |
1832 | @@ -216,7 +216,7 @@ static int egalax_ts_probe(struct i2c_client *client, |
1833 | input_set_abs_params(input_dev, |
1834 | ABS_MT_POSITION_X, 0, EGALAX_MAX_X, 0, 0); |
1835 | input_set_abs_params(input_dev, |
1836 | - ABS_MT_POSITION_X, 0, EGALAX_MAX_Y, 0, 0); |
1837 | + ABS_MT_POSITION_Y, 0, EGALAX_MAX_Y, 0, 0); |
1838 | input_mt_init_slots(input_dev, MAX_SUPPORT_POINTS, 0); |
1839 | |
1840 | input_set_drvdata(input_dev, ts); |
1841 | diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c |
1842 | index cbb1645..1a5285b 100644 |
1843 | --- a/drivers/iommu/amd_iommu.c |
1844 | +++ b/drivers/iommu/amd_iommu.c |
1845 | @@ -700,11 +700,23 @@ retry: |
1846 | |
1847 | static void iommu_poll_events(struct amd_iommu *iommu) |
1848 | { |
1849 | - u32 head, tail; |
1850 | + u32 head, tail, status; |
1851 | unsigned long flags; |
1852 | |
1853 | spin_lock_irqsave(&iommu->lock, flags); |
1854 | |
1855 | + /* enable event interrupts again */ |
1856 | + do { |
1857 | + /* |
1858 | + * Workaround for Erratum ERBT1312 |
1859 | + * Clearing the EVT_INT bit may race in the hardware, so read |
1860 | + * it again and make sure it was really cleared |
1861 | + */ |
1862 | + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); |
1863 | + writel(MMIO_STATUS_EVT_INT_MASK, |
1864 | + iommu->mmio_base + MMIO_STATUS_OFFSET); |
1865 | + } while (status & MMIO_STATUS_EVT_INT_MASK); |
1866 | + |
1867 | head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); |
1868 | tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); |
1869 | |
1870 | @@ -741,16 +753,25 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) |
1871 | static void iommu_poll_ppr_log(struct amd_iommu *iommu) |
1872 | { |
1873 | unsigned long flags; |
1874 | - u32 head, tail; |
1875 | + u32 head, tail, status; |
1876 | |
1877 | if (iommu->ppr_log == NULL) |
1878 | return; |
1879 | |
1880 | - /* enable ppr interrupts again */ |
1881 | - writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); |
1882 | - |
1883 | spin_lock_irqsave(&iommu->lock, flags); |
1884 | |
1885 | + /* enable ppr interrupts again */ |
1886 | + do { |
1887 | + /* |
1888 | + * Workaround for Erratum ERBT1312 |
1889 | + * Clearing the PPR_INT bit may race in the hardware, so read |
1890 | + * it again and make sure it was really cleared |
1891 | + */ |
1892 | + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); |
1893 | + writel(MMIO_STATUS_PPR_INT_MASK, |
1894 | + iommu->mmio_base + MMIO_STATUS_OFFSET); |
1895 | + } while (status & MMIO_STATUS_PPR_INT_MASK); |
1896 | + |
1897 | head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); |
1898 | tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); |
1899 | |
1900 | diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h |
1901 | index e38ab43..083f98c 100644 |
1902 | --- a/drivers/iommu/amd_iommu_types.h |
1903 | +++ b/drivers/iommu/amd_iommu_types.h |
1904 | @@ -99,6 +99,7 @@ |
1905 | #define PASID_MASK 0x000fffff |
1906 | |
1907 | /* MMIO status bits */ |
1908 | +#define MMIO_STATUS_EVT_INT_MASK (1 << 1) |
1909 | #define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2) |
1910 | #define MMIO_STATUS_PPR_INT_MASK (1 << 6) |
1911 | |
1912 | diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c |
1913 | index a0d931b..b02b679 100644 |
1914 | --- a/drivers/leds/leds-gpio.c |
1915 | +++ b/drivers/leds/leds-gpio.c |
1916 | @@ -107,6 +107,10 @@ static int create_gpio_led(const struct gpio_led *template, |
1917 | return 0; |
1918 | } |
1919 | |
1920 | + ret = devm_gpio_request(parent, template->gpio, template->name); |
1921 | + if (ret < 0) |
1922 | + return ret; |
1923 | + |
1924 | led_dat->cdev.name = template->name; |
1925 | led_dat->cdev.default_trigger = template->default_trigger; |
1926 | led_dat->gpio = template->gpio; |
1927 | @@ -126,10 +130,7 @@ static int create_gpio_led(const struct gpio_led *template, |
1928 | if (!template->retain_state_suspended) |
1929 | led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; |
1930 | |
1931 | - ret = devm_gpio_request_one(parent, template->gpio, |
1932 | - (led_dat->active_low ^ state) ? |
1933 | - GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, |
1934 | - template->name); |
1935 | + ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state); |
1936 | if (ret < 0) |
1937 | return ret; |
1938 | |
1939 | diff --git a/drivers/leds/leds-ot200.c b/drivers/leds/leds-ot200.c |
1940 | index ee14662..98cae52 100644 |
1941 | --- a/drivers/leds/leds-ot200.c |
1942 | +++ b/drivers/leds/leds-ot200.c |
1943 | @@ -47,37 +47,37 @@ static struct ot200_led leds[] = { |
1944 | { |
1945 | .name = "led_1", |
1946 | .port = 0x49, |
1947 | - .mask = BIT(7), |
1948 | + .mask = BIT(6), |
1949 | }, |
1950 | { |
1951 | .name = "led_2", |
1952 | .port = 0x49, |
1953 | - .mask = BIT(6), |
1954 | + .mask = BIT(5), |
1955 | }, |
1956 | { |
1957 | .name = "led_3", |
1958 | .port = 0x49, |
1959 | - .mask = BIT(5), |
1960 | + .mask = BIT(4), |
1961 | }, |
1962 | { |
1963 | .name = "led_4", |
1964 | .port = 0x49, |
1965 | - .mask = BIT(4), |
1966 | + .mask = BIT(3), |
1967 | }, |
1968 | { |
1969 | .name = "led_5", |
1970 | .port = 0x49, |
1971 | - .mask = BIT(3), |
1972 | + .mask = BIT(2), |
1973 | }, |
1974 | { |
1975 | .name = "led_6", |
1976 | .port = 0x49, |
1977 | - .mask = BIT(2), |
1978 | + .mask = BIT(1), |
1979 | }, |
1980 | { |
1981 | .name = "led_7", |
1982 | .port = 0x49, |
1983 | - .mask = BIT(1), |
1984 | + .mask = BIT(0), |
1985 | } |
1986 | }; |
1987 | |
1988 | diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c |
1989 | index 45cb9f3..3b95465 100644 |
1990 | --- a/drivers/net/can/usb/kvaser_usb.c |
1991 | +++ b/drivers/net/can/usb/kvaser_usb.c |
1992 | @@ -136,6 +136,9 @@ |
1993 | #define KVASER_CTRL_MODE_SELFRECEPTION 3 |
1994 | #define KVASER_CTRL_MODE_OFF 4 |
1995 | |
1996 | +/* log message */ |
1997 | +#define KVASER_EXTENDED_FRAME BIT(31) |
1998 | + |
1999 | struct kvaser_msg_simple { |
2000 | u8 tid; |
2001 | u8 channel; |
2002 | @@ -817,8 +820,13 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, |
2003 | priv = dev->nets[channel]; |
2004 | stats = &priv->netdev->stats; |
2005 | |
2006 | - if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR | |
2007 | - MSG_FLAG_OVERRUN)) { |
2008 | + if ((msg->u.rx_can.flag & MSG_FLAG_ERROR_FRAME) && |
2009 | + (msg->id == CMD_LOG_MESSAGE)) { |
2010 | + kvaser_usb_rx_error(dev, msg); |
2011 | + return; |
2012 | + } else if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME | |
2013 | + MSG_FLAG_NERR | |
2014 | + MSG_FLAG_OVERRUN)) { |
2015 | kvaser_usb_rx_can_err(priv, msg); |
2016 | return; |
2017 | } else if (msg->u.rx_can.flag & ~MSG_FLAG_REMOTE_FRAME) { |
2018 | @@ -834,22 +842,40 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, |
2019 | return; |
2020 | } |
2021 | |
2022 | - cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) | |
2023 | - (msg->u.rx_can.msg[1] & 0x3f); |
2024 | - cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]); |
2025 | + if (msg->id == CMD_LOG_MESSAGE) { |
2026 | + cf->can_id = le32_to_cpu(msg->u.log_message.id); |
2027 | + if (cf->can_id & KVASER_EXTENDED_FRAME) |
2028 | + cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG; |
2029 | + else |
2030 | + cf->can_id &= CAN_SFF_MASK; |
2031 | |
2032 | - if (msg->id == CMD_RX_EXT_MESSAGE) { |
2033 | - cf->can_id <<= 18; |
2034 | - cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) | |
2035 | - ((msg->u.rx_can.msg[3] & 0xff) << 6) | |
2036 | - (msg->u.rx_can.msg[4] & 0x3f); |
2037 | - cf->can_id |= CAN_EFF_FLAG; |
2038 | - } |
2039 | + cf->can_dlc = get_can_dlc(msg->u.log_message.dlc); |
2040 | |
2041 | - if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME) |
2042 | - cf->can_id |= CAN_RTR_FLAG; |
2043 | - else |
2044 | - memcpy(cf->data, &msg->u.rx_can.msg[6], cf->can_dlc); |
2045 | + if (msg->u.log_message.flags & MSG_FLAG_REMOTE_FRAME) |
2046 | + cf->can_id |= CAN_RTR_FLAG; |
2047 | + else |
2048 | + memcpy(cf->data, &msg->u.log_message.data, |
2049 | + cf->can_dlc); |
2050 | + } else { |
2051 | + cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) | |
2052 | + (msg->u.rx_can.msg[1] & 0x3f); |
2053 | + |
2054 | + if (msg->id == CMD_RX_EXT_MESSAGE) { |
2055 | + cf->can_id <<= 18; |
2056 | + cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) | |
2057 | + ((msg->u.rx_can.msg[3] & 0xff) << 6) | |
2058 | + (msg->u.rx_can.msg[4] & 0x3f); |
2059 | + cf->can_id |= CAN_EFF_FLAG; |
2060 | + } |
2061 | + |
2062 | + cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]); |
2063 | + |
2064 | + if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME) |
2065 | + cf->can_id |= CAN_RTR_FLAG; |
2066 | + else |
2067 | + memcpy(cf->data, &msg->u.rx_can.msg[6], |
2068 | + cf->can_dlc); |
2069 | + } |
2070 | |
2071 | netif_rx(skb); |
2072 | |
2073 | @@ -911,6 +937,7 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev, |
2074 | |
2075 | case CMD_RX_STD_MESSAGE: |
2076 | case CMD_RX_EXT_MESSAGE: |
2077 | + case CMD_LOG_MESSAGE: |
2078 | kvaser_usb_rx_can_msg(dev, msg); |
2079 | break; |
2080 | |
2081 | @@ -919,11 +946,6 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev, |
2082 | kvaser_usb_rx_error(dev, msg); |
2083 | break; |
2084 | |
2085 | - case CMD_LOG_MESSAGE: |
2086 | - if (msg->u.log_message.flags & MSG_FLAG_ERROR_FRAME) |
2087 | - kvaser_usb_rx_error(dev, msg); |
2088 | - break; |
2089 | - |
2090 | case CMD_TX_ACKNOWLEDGE: |
2091 | kvaser_usb_tx_acknowledge(dev, msg); |
2092 | break; |
2093 | diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c |
2094 | index 17a9727..6f42e57 100644 |
2095 | --- a/drivers/net/ethernet/broadcom/tg3.c |
2096 | +++ b/drivers/net/ethernet/broadcom/tg3.c |
2097 | @@ -2921,6 +2921,31 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) |
2098 | return 0; |
2099 | } |
2100 | |
2101 | +static bool tg3_phy_power_bug(struct tg3 *tp) |
2102 | +{ |
2103 | + switch (tg3_asic_rev(tp)) { |
2104 | + case ASIC_REV_5700: |
2105 | + case ASIC_REV_5704: |
2106 | + return true; |
2107 | + case ASIC_REV_5780: |
2108 | + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) |
2109 | + return true; |
2110 | + return false; |
2111 | + case ASIC_REV_5717: |
2112 | + if (!tp->pci_fn) |
2113 | + return true; |
2114 | + return false; |
2115 | + case ASIC_REV_5719: |
2116 | + case ASIC_REV_5720: |
2117 | + if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && |
2118 | + !tp->pci_fn) |
2119 | + return true; |
2120 | + return false; |
2121 | + } |
2122 | + |
2123 | + return false; |
2124 | +} |
2125 | + |
2126 | static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) |
2127 | { |
2128 | u32 val; |
2129 | @@ -2977,12 +3002,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) |
2130 | /* The PHY should not be powered down on some chips because |
2131 | * of bugs. |
2132 | */ |
2133 | - if (tg3_asic_rev(tp) == ASIC_REV_5700 || |
2134 | - tg3_asic_rev(tp) == ASIC_REV_5704 || |
2135 | - (tg3_asic_rev(tp) == ASIC_REV_5780 && |
2136 | - (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) || |
2137 | - (tg3_asic_rev(tp) == ASIC_REV_5717 && |
2138 | - !tp->pci_fn)) |
2139 | + if (tg3_phy_power_bug(tp)) |
2140 | return; |
2141 | |
2142 | if (tg3_chip_rev(tp) == CHIPREV_5784_AX || |
2143 | @@ -7058,6 +7078,20 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) |
2144 | return (base > 0xffffdcc0) && (base + len + 8 < base); |
2145 | } |
2146 | |
2147 | +/* Test for TSO DMA buffers that cross into regions which are within MSS bytes |
2148 | + * of any 4GB boundaries: 4G, 8G, etc |
2149 | + */ |
2150 | +static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping, |
2151 | + u32 len, u32 mss) |
2152 | +{ |
2153 | + if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) { |
2154 | + u32 base = (u32) mapping & 0xffffffff; |
2155 | + |
2156 | + return ((base + len + (mss & 0x3fff)) < base); |
2157 | + } |
2158 | + return 0; |
2159 | +} |
2160 | + |
2161 | /* Test for DMA addresses > 40-bit */ |
2162 | static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, |
2163 | int len) |
2164 | @@ -7094,6 +7128,9 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, |
2165 | if (tg3_4g_overflow_test(map, len)) |
2166 | hwbug = true; |
2167 | |
2168 | + if (tg3_4g_tso_overflow_test(tp, map, len, mss)) |
2169 | + hwbug = true; |
2170 | + |
2171 | if (tg3_40bit_overflow_test(tp, map, len)) |
2172 | hwbug = true; |
2173 | |
2174 | @@ -9056,6 +9093,14 @@ static void tg3_rss_write_indir_tbl(struct tg3 *tp) |
2175 | } |
2176 | } |
2177 | |
2178 | +static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp) |
2179 | +{ |
2180 | + if (tg3_asic_rev(tp) == ASIC_REV_5719) |
2181 | + return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719; |
2182 | + else |
2183 | + return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720; |
2184 | +} |
2185 | + |
2186 | /* tp->lock is held. */ |
2187 | static int tg3_reset_hw(struct tg3 *tp, int reset_phy) |
2188 | { |
2189 | @@ -9735,16 +9780,17 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) |
2190 | tw32_f(RDMAC_MODE, rdmac_mode); |
2191 | udelay(40); |
2192 | |
2193 | - if (tg3_asic_rev(tp) == ASIC_REV_5719) { |
2194 | + if (tg3_asic_rev(tp) == ASIC_REV_5719 || |
2195 | + tg3_asic_rev(tp) == ASIC_REV_5720) { |
2196 | for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { |
2197 | if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) |
2198 | break; |
2199 | } |
2200 | if (i < TG3_NUM_RDMA_CHANNELS) { |
2201 | val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); |
2202 | - val |= TG3_LSO_RD_DMA_TX_LENGTH_WA; |
2203 | + val |= tg3_lso_rd_dma_workaround_bit(tp); |
2204 | tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); |
2205 | - tg3_flag_set(tp, 5719_RDMA_BUG); |
2206 | + tg3_flag_set(tp, 5719_5720_RDMA_BUG); |
2207 | } |
2208 | } |
2209 | |
2210 | @@ -10101,15 +10147,15 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp) |
2211 | TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); |
2212 | TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); |
2213 | TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); |
2214 | - if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) && |
2215 | + if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) && |
2216 | (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + |
2217 | sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) { |
2218 | u32 val; |
2219 | |
2220 | val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); |
2221 | - val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA; |
2222 | + val &= ~tg3_lso_rd_dma_workaround_bit(tp); |
2223 | tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); |
2224 | - tg3_flag_clear(tp, 5719_RDMA_BUG); |
2225 | + tg3_flag_clear(tp, 5719_5720_RDMA_BUG); |
2226 | } |
2227 | |
2228 | TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); |
2229 | diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h |
2230 | index 8d7d4c2..25309bf 100644 |
2231 | --- a/drivers/net/ethernet/broadcom/tg3.h |
2232 | +++ b/drivers/net/ethernet/broadcom/tg3.h |
2233 | @@ -1422,7 +1422,8 @@ |
2234 | #define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910 |
2235 | #define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000 |
2236 | #define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000 |
2237 | -#define TG3_LSO_RD_DMA_TX_LENGTH_WA 0x02000000 |
2238 | +#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5719 0x02000000 |
2239 | +#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5720 0x00200000 |
2240 | /* 0x4914 --> 0x4be0 unused */ |
2241 | |
2242 | #define TG3_NUM_RDMA_CHANNELS 4 |
2243 | @@ -3043,7 +3044,7 @@ enum TG3_FLAGS { |
2244 | TG3_FLAG_APE_HAS_NCSI, |
2245 | TG3_FLAG_TX_TSTAMP_EN, |
2246 | TG3_FLAG_4K_FIFO_LIMIT, |
2247 | - TG3_FLAG_5719_RDMA_BUG, |
2248 | + TG3_FLAG_5719_5720_RDMA_BUG, |
2249 | TG3_FLAG_RESET_TASK_PENDING, |
2250 | TG3_FLAG_PTP_CAPABLE, |
2251 | TG3_FLAG_5705_PLUS, |
2252 | diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c |
2253 | index f76c3ca..21fa267 100644 |
2254 | --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c |
2255 | +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c |
2256 | @@ -965,7 +965,7 @@ static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah, |
2257 | { |
2258 | int i; |
2259 | |
2260 | - if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah)) |
2261 | + if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah)) |
2262 | return; |
2263 | |
2264 | for (i = 0; i < AR9300_MAX_CHAINS; i++) { |
2265 | diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h |
2266 | index 54ba42f..874f657 100644 |
2267 | --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h |
2268 | +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h |
2269 | @@ -68,13 +68,16 @@ |
2270 | #define AR9300_BASE_ADDR 0x3ff |
2271 | #define AR9300_BASE_ADDR_512 0x1ff |
2272 | |
2273 | -#define AR9300_OTP_BASE (AR_SREV_9340(ah) ? 0x30000 : 0x14000) |
2274 | -#define AR9300_OTP_STATUS (AR_SREV_9340(ah) ? 0x30018 : 0x15f18) |
2275 | +#define AR9300_OTP_BASE \ |
2276 | + ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000) |
2277 | +#define AR9300_OTP_STATUS \ |
2278 | + ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30018 : 0x15f18) |
2279 | #define AR9300_OTP_STATUS_TYPE 0x7 |
2280 | #define AR9300_OTP_STATUS_VALID 0x4 |
2281 | #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2 |
2282 | #define AR9300_OTP_STATUS_SM_BUSY 0x1 |
2283 | -#define AR9300_OTP_READ_DATA (AR_SREV_9340(ah) ? 0x3001c : 0x15f1c) |
2284 | +#define AR9300_OTP_READ_DATA \ |
2285 | + ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3001c : 0x15f1c) |
2286 | |
2287 | enum targetPowerHTRates { |
2288 | HT_TARGET_RATE_0_8_16, |
2289 | diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h |
2290 | index 712f415..88ff1d7 100644 |
2291 | --- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h |
2292 | +++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h |
2293 | @@ -1020,7 +1020,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = { |
2294 | {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0}, |
2295 | {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, |
2296 | {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, |
2297 | - {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, |
2298 | + {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18}, |
2299 | {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982}, |
2300 | {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, |
2301 | {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, |
2302 | diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c |
2303 | index 3714b97..f25a320 100644 |
2304 | --- a/drivers/net/wireless/ath/ath9k/debug.c |
2305 | +++ b/drivers/net/wireless/ath/ath9k/debug.c |
2306 | @@ -2003,6 +2003,14 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw, |
2307 | WARN_ON(i != ATH9K_SSTATS_LEN); |
2308 | } |
2309 | |
2310 | +void ath9k_deinit_debug(struct ath_softc *sc) |
2311 | +{ |
2312 | + if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) { |
2313 | + relay_close(sc->rfs_chan_spec_scan); |
2314 | + sc->rfs_chan_spec_scan = NULL; |
2315 | + } |
2316 | +} |
2317 | + |
2318 | int ath9k_init_debug(struct ath_hw *ah) |
2319 | { |
2320 | struct ath_common *common = ath9k_hw_common(ah); |
2321 | diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h |
2322 | index 410d6d8..f939457 100644 |
2323 | --- a/drivers/net/wireless/ath/ath9k/debug.h |
2324 | +++ b/drivers/net/wireless/ath/ath9k/debug.h |
2325 | @@ -302,6 +302,7 @@ struct ath9k_debug { |
2326 | }; |
2327 | |
2328 | int ath9k_init_debug(struct ath_hw *ah); |
2329 | +void ath9k_deinit_debug(struct ath_softc *sc); |
2330 | |
2331 | void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status); |
2332 | void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, |
2333 | @@ -337,6 +338,10 @@ static inline int ath9k_init_debug(struct ath_hw *ah) |
2334 | return 0; |
2335 | } |
2336 | |
2337 | +static inline void ath9k_deinit_debug(struct ath_softc *sc) |
2338 | +{ |
2339 | +} |
2340 | + |
2341 | static inline void ath_debug_stat_interrupt(struct ath_softc *sc, |
2342 | enum ath9k_int status) |
2343 | { |
2344 | diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c |
2345 | index af932c9..26db547 100644 |
2346 | --- a/drivers/net/wireless/ath/ath9k/init.c |
2347 | +++ b/drivers/net/wireless/ath/ath9k/init.c |
2348 | @@ -885,7 +885,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, |
2349 | if (!ath_is_world_regd(reg)) { |
2350 | error = regulatory_hint(hw->wiphy, reg->alpha2); |
2351 | if (error) |
2352 | - goto unregister; |
2353 | + goto debug_cleanup; |
2354 | } |
2355 | |
2356 | ath_init_leds(sc); |
2357 | @@ -893,6 +893,8 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, |
2358 | |
2359 | return 0; |
2360 | |
2361 | +debug_cleanup: |
2362 | + ath9k_deinit_debug(sc); |
2363 | unregister: |
2364 | ieee80211_unregister_hw(hw); |
2365 | rx_cleanup: |
2366 | @@ -921,11 +923,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc) |
2367 | sc->dfs_detector->exit(sc->dfs_detector); |
2368 | |
2369 | ath9k_eeprom_release(sc); |
2370 | - |
2371 | - if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) { |
2372 | - relay_close(sc->rfs_chan_spec_scan); |
2373 | - sc->rfs_chan_spec_scan = NULL; |
2374 | - } |
2375 | } |
2376 | |
2377 | void ath9k_deinit_device(struct ath_softc *sc) |
2378 | @@ -939,6 +936,7 @@ void ath9k_deinit_device(struct ath_softc *sc) |
2379 | |
2380 | ath9k_ps_restore(sc); |
2381 | |
2382 | + ath9k_deinit_debug(sc); |
2383 | ieee80211_unregister_hw(hw); |
2384 | ath_rx_cleanup(sc); |
2385 | ath9k_deinit_softc(sc); |
2386 | diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c |
2387 | index db183b4..c3c13ce 100644 |
2388 | --- a/drivers/net/wireless/iwlwifi/dvm/sta.c |
2389 | +++ b/drivers/net/wireless/iwlwifi/dvm/sta.c |
2390 | @@ -735,7 +735,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx) |
2391 | memcpy(&lq, priv->stations[i].lq, |
2392 | sizeof(struct iwl_link_quality_cmd)); |
2393 | |
2394 | - if (!memcmp(&lq, &zero_lq, sizeof(lq))) |
2395 | + if (memcmp(&lq, &zero_lq, sizeof(lq))) |
2396 | send_lq = true; |
2397 | } |
2398 | spin_unlock_bh(&priv->sta_lock); |
2399 | diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h |
2400 | index 2adb61f..4464382 100644 |
2401 | --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h |
2402 | +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h |
2403 | @@ -165,6 +165,8 @@ enum { |
2404 | REPLY_DEBUG_CMD = 0xf0, |
2405 | DEBUG_LOG_MSG = 0xf7, |
2406 | |
2407 | + MCAST_FILTER_CMD = 0xd0, |
2408 | + |
2409 | /* D3 commands/notifications */ |
2410 | D3_CONFIG_CMD = 0xd3, |
2411 | PROT_OFFLOAD_CONFIG_CMD = 0xd4, |
2412 | @@ -951,4 +953,29 @@ struct iwl_set_calib_default_cmd { |
2413 | u8 data[0]; |
2414 | } __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */ |
2415 | |
2416 | +#define MAX_PORT_ID_NUM 2 |
2417 | + |
2418 | +/** |
2419 | + * struct iwl_mcast_filter_cmd - configure multicast filter. |
2420 | + * @filter_own: Set 1 to filter out multicast packets sent by station itself |
2421 | + * @port_id: Multicast MAC addresses array specifier. This is a strange way |
2422 | + * to identify network interface adopted in host-device IF. |
2423 | + * It is used by FW as index in array of addresses. This array has |
2424 | + * MAX_PORT_ID_NUM members. |
2425 | + * @count: Number of MAC addresses in the array |
2426 | + * @pass_all: Set 1 to pass all multicast packets. |
2427 | + * @bssid: current association BSSID. |
2428 | + * @addr_list: Place holder for array of MAC addresses. |
2429 | + * IMPORTANT: add padding if necessary to ensure DWORD alignment. |
2430 | + */ |
2431 | +struct iwl_mcast_filter_cmd { |
2432 | + u8 filter_own; |
2433 | + u8 port_id; |
2434 | + u8 count; |
2435 | + u8 pass_all; |
2436 | + u8 bssid[6]; |
2437 | + u8 reserved[2]; |
2438 | + u8 addr_list[0]; |
2439 | +} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ |
2440 | + |
2441 | #endif /* __fw_api_h__ */ |
2442 | diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c |
2443 | index 341dbc0..bf76b17 100644 |
2444 | --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c |
2445 | +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c |
2446 | @@ -586,10 +586,12 @@ static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm, |
2447 | */ |
2448 | static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm, |
2449 | struct ieee80211_vif *vif, |
2450 | - struct iwl_mac_data_sta *ctxt_sta) |
2451 | + struct iwl_mac_data_sta *ctxt_sta, |
2452 | + bool force_assoc_off) |
2453 | { |
2454 | /* We need the dtim_period to set the MAC as associated */ |
2455 | - if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) { |
2456 | + if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && |
2457 | + !force_assoc_off) { |
2458 | u32 dtim_offs; |
2459 | |
2460 | /* |
2461 | @@ -652,7 +654,8 @@ static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm, |
2462 | iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); |
2463 | |
2464 | /* Fill the data specific for station mode */ |
2465 | - iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta); |
2466 | + iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta, |
2467 | + action == FW_CTXT_ACTION_ADD); |
2468 | |
2469 | return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); |
2470 | } |
2471 | @@ -669,7 +672,8 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm, |
2472 | iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); |
2473 | |
2474 | /* Fill the data specific for station mode */ |
2475 | - iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta); |
2476 | + iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta, |
2477 | + action == FW_CTXT_ACTION_ADD); |
2478 | |
2479 | cmd.p2p_sta.ctwin = cpu_to_le32(vif->bss_conf.p2p_ctwindow); |
2480 | |
2481 | diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c |
2482 | index 7e169b0..8572358 100644 |
2483 | --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c |
2484 | +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c |
2485 | @@ -82,15 +82,6 @@ static const struct ieee80211_iface_limit iwl_mvm_limits[] = { |
2486 | .types = BIT(NL80211_IFTYPE_STATION) | |
2487 | BIT(NL80211_IFTYPE_AP), |
2488 | }, |
2489 | - { |
2490 | - .max = 1, |
2491 | - .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | |
2492 | - BIT(NL80211_IFTYPE_P2P_GO), |
2493 | - }, |
2494 | - { |
2495 | - .max = 1, |
2496 | - .types = BIT(NL80211_IFTYPE_P2P_DEVICE), |
2497 | - }, |
2498 | }; |
2499 | |
2500 | static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { |
2501 | @@ -136,10 +127,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) |
2502 | hw->chanctx_data_size = sizeof(struct iwl_mvm_phy_ctxt); |
2503 | |
2504 | hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | |
2505 | - BIT(NL80211_IFTYPE_P2P_CLIENT) | |
2506 | - BIT(NL80211_IFTYPE_AP) | |
2507 | - BIT(NL80211_IFTYPE_P2P_GO) | |
2508 | - BIT(NL80211_IFTYPE_P2P_DEVICE); |
2509 | + BIT(NL80211_IFTYPE_AP); |
2510 | |
2511 | hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | |
2512 | WIPHY_FLAG_DISABLE_BEACON_HINTS | |
2513 | @@ -657,6 +645,20 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, |
2514 | *total_flags = 0; |
2515 | } |
2516 | |
2517 | +static int iwl_mvm_configure_mcast_filter(struct iwl_mvm *mvm, |
2518 | + struct ieee80211_vif *vif) |
2519 | +{ |
2520 | + struct iwl_mcast_filter_cmd mcast_filter_cmd = { |
2521 | + .pass_all = 1, |
2522 | + }; |
2523 | + |
2524 | + memcpy(mcast_filter_cmd.bssid, vif->bss_conf.bssid, ETH_ALEN); |
2525 | + |
2526 | + return iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, |
2527 | + sizeof(mcast_filter_cmd), |
2528 | + &mcast_filter_cmd); |
2529 | +} |
2530 | + |
2531 | static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, |
2532 | struct ieee80211_vif *vif, |
2533 | struct ieee80211_bss_conf *bss_conf, |
2534 | @@ -677,6 +679,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, |
2535 | IWL_ERR(mvm, "failed to update quotas\n"); |
2536 | return; |
2537 | } |
2538 | + iwl_mvm_configure_mcast_filter(mvm, vif); |
2539 | } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { |
2540 | /* remove AP station now that the MAC is unassoc */ |
2541 | ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id); |
2542 | @@ -886,7 +889,7 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, |
2543 | |
2544 | switch (cmd) { |
2545 | case STA_NOTIFY_SLEEP: |
2546 | - if (atomic_read(&mvmsta->pending_frames) > 0) |
2547 | + if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0) |
2548 | ieee80211_sta_block_awake(hw, sta, true); |
2549 | /* |
2550 | * The fw updates the STA to be asleep. Tx packets on the Tx |
2551 | diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h |
2552 | index bdae700..dc59ef5 100644 |
2553 | --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h |
2554 | +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h |
2555 | @@ -293,6 +293,7 @@ struct iwl_mvm { |
2556 | struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; |
2557 | struct work_struct sta_drained_wk; |
2558 | unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; |
2559 | + atomic_t pending_frames[IWL_MVM_STATION_COUNT]; |
2560 | |
2561 | /* configured by mac80211 */ |
2562 | u32 rts_threshold; |
2563 | diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c |
2564 | index d0f9c1e..ddac833 100644 |
2565 | --- a/drivers/net/wireless/iwlwifi/mvm/ops.c |
2566 | +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c |
2567 | @@ -293,6 +293,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = { |
2568 | CMD(NET_DETECT_PROFILES_CMD), |
2569 | CMD(NET_DETECT_HOTSPOTS_CMD), |
2570 | CMD(NET_DETECT_HOTSPOTS_QUERY_CMD), |
2571 | + CMD(MCAST_FILTER_CMD), |
2572 | }; |
2573 | #undef CMD |
2574 | |
2575 | diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c |
2576 | index 9b21b92..5c52faa 100644 |
2577 | --- a/drivers/net/wireless/iwlwifi/mvm/scan.c |
2578 | +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c |
2579 | @@ -298,6 +298,12 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm, |
2580 | else |
2581 | cmd->type = cpu_to_le32(SCAN_TYPE_FORCED); |
2582 | |
2583 | + /* |
2584 | + * TODO: This is a WA due to a bug in the FW AUX framework that does not |
2585 | + * properly handle time events that fail to be scheduled |
2586 | + */ |
2587 | + cmd->type = cpu_to_le32(SCAN_TYPE_FORCED); |
2588 | + |
2589 | cmd->repeats = cpu_to_le32(1); |
2590 | |
2591 | /* |
2592 | diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c |
2593 | index 274f44e..7b8644e 100644 |
2594 | --- a/drivers/net/wireless/iwlwifi/mvm/sta.c |
2595 | +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c |
2596 | @@ -172,7 +172,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, |
2597 | mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; |
2598 | |
2599 | /* HW restart, don't assume the memory has been zeroed */ |
2600 | - atomic_set(&mvm_sta->pending_frames, 0); |
2601 | + atomic_set(&mvm->pending_frames[sta_id], 0); |
2602 | mvm_sta->tid_disable_agg = 0; |
2603 | mvm_sta->tfd_queue_msk = 0; |
2604 | for (i = 0; i < IEEE80211_NUM_ACS; i++) |
2605 | @@ -360,14 +360,21 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, |
2606 | } |
2607 | |
2608 | /* |
2609 | + * Make sure that the tx response code sees the station as -EBUSY and |
2610 | + * calls the drain worker. |
2611 | + */ |
2612 | + spin_lock_bh(&mvm_sta->lock); |
2613 | + /* |
2614 | * There are frames pending on the AC queues for this station. |
2615 | * We need to wait until all the frames are drained... |
2616 | */ |
2617 | - if (atomic_read(&mvm_sta->pending_frames)) { |
2618 | - ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); |
2619 | + if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) { |
2620 | rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], |
2621 | ERR_PTR(-EBUSY)); |
2622 | + spin_unlock_bh(&mvm_sta->lock); |
2623 | + ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); |
2624 | } else { |
2625 | + spin_unlock_bh(&mvm_sta->lock); |
2626 | ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); |
2627 | rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); |
2628 | } |
2629 | diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h |
2630 | index 896f88a..2dbf7ba 100644 |
2631 | --- a/drivers/net/wireless/iwlwifi/mvm/sta.h |
2632 | +++ b/drivers/net/wireless/iwlwifi/mvm/sta.h |
2633 | @@ -273,7 +273,6 @@ struct iwl_mvm_tid_data { |
2634 | * @max_agg_bufsize: the maximal size of the AGG buffer for this station |
2635 | * @lock: lock to protect the whole struct. Since %tid_data is access from Tx |
2636 | * and from Tx response flow, it needs a spinlock. |
2637 | - * @pending_frames: number of frames for this STA on the shared Tx queues. |
2638 | * @tid_data: per tid data. Look at %iwl_mvm_tid_data. |
2639 | * |
2640 | * When mac80211 creates a station it reserves some space (hw->sta_data_size) |
2641 | @@ -288,7 +287,6 @@ struct iwl_mvm_sta { |
2642 | u16 tid_disable_agg; |
2643 | u8 max_agg_bufsize; |
2644 | spinlock_t lock; |
2645 | - atomic_t pending_frames; |
2646 | struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT]; |
2647 | struct iwl_lq_sta lq_sta; |
2648 | struct ieee80211_vif *vif; |
2649 | diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c |
2650 | index 6645efe..44f26f4 100644 |
2651 | --- a/drivers/net/wireless/iwlwifi/mvm/tx.c |
2652 | +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c |
2653 | @@ -416,9 +416,8 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, |
2654 | |
2655 | spin_unlock(&mvmsta->lock); |
2656 | |
2657 | - if (mvmsta->vif->type == NL80211_IFTYPE_AP && |
2658 | - txq_id < IWL_FIRST_AMPDU_QUEUE) |
2659 | - atomic_inc(&mvmsta->pending_frames); |
2660 | + if (txq_id < IWL_FIRST_AMPDU_QUEUE) |
2661 | + atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); |
2662 | |
2663 | return 0; |
2664 | |
2665 | @@ -678,16 +677,41 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, |
2666 | /* |
2667 | * If the txq is not an AMPDU queue, there is no chance we freed |
2668 | * several skbs. Check that out... |
2669 | - * If there are no pending frames for this STA, notify mac80211 that |
2670 | - * this station can go to sleep in its STA table. |
2671 | */ |
2672 | - if (txq_id < IWL_FIRST_AMPDU_QUEUE && mvmsta && |
2673 | - !WARN_ON(skb_freed > 1) && |
2674 | - mvmsta->vif->type == NL80211_IFTYPE_AP && |
2675 | - atomic_sub_and_test(skb_freed, &mvmsta->pending_frames)) { |
2676 | - ieee80211_sta_block_awake(mvm->hw, sta, false); |
2677 | - set_bit(sta_id, mvm->sta_drained); |
2678 | - schedule_work(&mvm->sta_drained_wk); |
2679 | + if (txq_id < IWL_FIRST_AMPDU_QUEUE && !WARN_ON(skb_freed > 1) && |
2680 | + atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) { |
2681 | + if (mvmsta) { |
2682 | + /* |
2683 | + * If there are no pending frames for this STA, notify |
2684 | + * mac80211 that this station can go to sleep in its |
2685 | + * STA table. |
2686 | + */ |
2687 | + if (mvmsta->vif->type == NL80211_IFTYPE_AP) |
2688 | + ieee80211_sta_block_awake(mvm->hw, sta, false); |
2689 | + /* |
2690 | + * We might very well have taken mvmsta pointer while |
2691 | + * the station was being removed. The remove flow might |
2692 | + * have seen a pending_frame (because we didn't take |
2693 | + * the lock) even if now the queues are drained. So make |
2694 | + * really sure now that this the station is not being |
2695 | + * removed. If it is, run the drain worker to remove it. |
2696 | + */ |
2697 | + spin_lock_bh(&mvmsta->lock); |
2698 | + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); |
2699 | + if (IS_ERR_OR_NULL(sta)) { |
2700 | + /* |
2701 | + * Station disappeared in the meantime: |
2702 | + * so we are draining. |
2703 | + */ |
2704 | + set_bit(sta_id, mvm->sta_drained); |
2705 | + schedule_work(&mvm->sta_drained_wk); |
2706 | + } |
2707 | + spin_unlock_bh(&mvmsta->lock); |
2708 | + } else if (!mvmsta) { |
2709 | + /* Tx response without STA, so we are draining */ |
2710 | + set_bit(sta_id, mvm->sta_drained); |
2711 | + schedule_work(&mvm->sta_drained_wk); |
2712 | + } |
2713 | } |
2714 | |
2715 | rcu_read_unlock(); |
2716 | diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c |
2717 | index cffdf4f..2b49f48 100644 |
2718 | --- a/drivers/net/wireless/mac80211_hwsim.c |
2719 | +++ b/drivers/net/wireless/mac80211_hwsim.c |
2720 | @@ -2118,7 +2118,6 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = { |
2721 | #endif |
2722 | BIT(NL80211_IFTYPE_AP) | |
2723 | BIT(NL80211_IFTYPE_P2P_GO) }, |
2724 | - { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) }, |
2725 | }; |
2726 | |
2727 | static struct ieee80211_iface_combination hwsim_if_comb = { |
2728 | @@ -2230,8 +2229,7 @@ static int __init init_mac80211_hwsim(void) |
2729 | BIT(NL80211_IFTYPE_P2P_CLIENT) | |
2730 | BIT(NL80211_IFTYPE_P2P_GO) | |
2731 | BIT(NL80211_IFTYPE_ADHOC) | |
2732 | - BIT(NL80211_IFTYPE_MESH_POINT) | |
2733 | - BIT(NL80211_IFTYPE_P2P_DEVICE); |
2734 | + BIT(NL80211_IFTYPE_MESH_POINT); |
2735 | |
2736 | hw->flags = IEEE80211_HW_MFP_CAPABLE | |
2737 | IEEE80211_HW_SIGNAL_DBM | |
2738 | diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c |
2739 | index cd49ba9..8099e9d 100644 |
2740 | --- a/drivers/net/xen-netback/netback.c |
2741 | +++ b/drivers/net/xen-netback/netback.c |
2742 | @@ -47,11 +47,33 @@ |
2743 | #include <asm/xen/hypercall.h> |
2744 | #include <asm/xen/page.h> |
2745 | |
2746 | +/* |
2747 | + * This is the maximum slots a skb can have. If a guest sends a skb |
2748 | + * which exceeds this limit it is considered malicious. |
2749 | + */ |
2750 | +#define FATAL_SKB_SLOTS_DEFAULT 20 |
2751 | +static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT; |
2752 | +module_param(fatal_skb_slots, uint, 0444); |
2753 | + |
2754 | +/* |
2755 | + * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating |
2756 | + * the maximum slots a valid packet can use. Now this value is defined |
2757 | + * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by |
2758 | + * all backend. |
2759 | + */ |
2760 | +#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN |
2761 | + |
2762 | +typedef unsigned int pending_ring_idx_t; |
2763 | +#define INVALID_PENDING_RING_IDX (~0U) |
2764 | + |
2765 | struct pending_tx_info { |
2766 | - struct xen_netif_tx_request req; |
2767 | + struct xen_netif_tx_request req; /* coalesced tx request */ |
2768 | struct xenvif *vif; |
2769 | + pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX |
2770 | + * if it is head of one or more tx |
2771 | + * reqs |
2772 | + */ |
2773 | }; |
2774 | -typedef unsigned int pending_ring_idx_t; |
2775 | |
2776 | struct netbk_rx_meta { |
2777 | int id; |
2778 | @@ -102,7 +124,11 @@ struct xen_netbk { |
2779 | atomic_t netfront_count; |
2780 | |
2781 | struct pending_tx_info pending_tx_info[MAX_PENDING_REQS]; |
2782 | - struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS]; |
2783 | + /* Coalescing tx requests before copying makes number of grant |
2784 | + * copy ops greater or equal to number of slots required. In |
2785 | + * worst case a tx request consumes 2 gnttab_copy. |
2786 | + */ |
2787 | + struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS]; |
2788 | |
2789 | u16 pending_ring[MAX_PENDING_REQS]; |
2790 | |
2791 | @@ -118,6 +144,16 @@ struct xen_netbk { |
2792 | static struct xen_netbk *xen_netbk; |
2793 | static int xen_netbk_group_nr; |
2794 | |
2795 | +/* |
2796 | + * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of |
2797 | + * one or more merged tx requests, otherwise it is the continuation of |
2798 | + * previous tx request. |
2799 | + */ |
2800 | +static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx) |
2801 | +{ |
2802 | + return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX; |
2803 | +} |
2804 | + |
2805 | void xen_netbk_add_xenvif(struct xenvif *vif) |
2806 | { |
2807 | int i; |
2808 | @@ -250,6 +286,7 @@ static int max_required_rx_slots(struct xenvif *vif) |
2809 | { |
2810 | int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE); |
2811 | |
2812 | + /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */ |
2813 | if (vif->can_sg || vif->gso || vif->gso_prefix) |
2814 | max += MAX_SKB_FRAGS + 1; /* extra_info + frags */ |
2815 | |
2816 | @@ -657,6 +694,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk) |
2817 | __skb_queue_tail(&rxq, skb); |
2818 | |
2819 | /* Filled the batch queue? */ |
2820 | + /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */ |
2821 | if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE) |
2822 | break; |
2823 | } |
2824 | @@ -902,47 +940,99 @@ static int netbk_count_requests(struct xenvif *vif, |
2825 | int work_to_do) |
2826 | { |
2827 | RING_IDX cons = vif->tx.req_cons; |
2828 | - int frags = 0; |
2829 | + int slots = 0; |
2830 | + int drop_err = 0; |
2831 | + int more_data; |
2832 | |
2833 | if (!(first->flags & XEN_NETTXF_more_data)) |
2834 | return 0; |
2835 | |
2836 | do { |
2837 | - if (frags >= work_to_do) { |
2838 | - netdev_err(vif->dev, "Need more frags\n"); |
2839 | + struct xen_netif_tx_request dropped_tx = { 0 }; |
2840 | + |
2841 | + if (slots >= work_to_do) { |
2842 | + netdev_err(vif->dev, |
2843 | + "Asked for %d slots but exceeds this limit\n", |
2844 | + work_to_do); |
2845 | netbk_fatal_tx_err(vif); |
2846 | return -ENODATA; |
2847 | } |
2848 | |
2849 | - if (unlikely(frags >= MAX_SKB_FRAGS)) { |
2850 | - netdev_err(vif->dev, "Too many frags\n"); |
2851 | + /* This guest is really using too many slots and |
2852 | + * considered malicious. |
2853 | + */ |
2854 | + if (unlikely(slots >= fatal_skb_slots)) { |
2855 | + netdev_err(vif->dev, |
2856 | + "Malicious frontend using %d slots, threshold %u\n", |
2857 | + slots, fatal_skb_slots); |
2858 | netbk_fatal_tx_err(vif); |
2859 | return -E2BIG; |
2860 | } |
2861 | |
2862 | - memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags), |
2863 | + /* Xen network protocol had implicit dependency on |
2864 | + * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to |
2865 | + * the historical MAX_SKB_FRAGS value 18 to honor the |
2866 | + * same behavior as before. Any packet using more than |
2867 | + * 18 slots but less than fatal_skb_slots slots is |
2868 | + * dropped |
2869 | + */ |
2870 | + if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) { |
2871 | + if (net_ratelimit()) |
2872 | + netdev_dbg(vif->dev, |
2873 | + "Too many slots (%d) exceeding limit (%d), dropping packet\n", |
2874 | + slots, XEN_NETBK_LEGACY_SLOTS_MAX); |
2875 | + drop_err = -E2BIG; |
2876 | + } |
2877 | + |
2878 | + if (drop_err) |
2879 | + txp = &dropped_tx; |
2880 | + |
2881 | + memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots), |
2882 | sizeof(*txp)); |
2883 | - if (txp->size > first->size) { |
2884 | - netdev_err(vif->dev, "Frag is bigger than frame.\n"); |
2885 | - netbk_fatal_tx_err(vif); |
2886 | - return -EIO; |
2887 | + |
2888 | + /* If the guest submitted a frame >= 64 KiB then |
2889 | + * first->size overflowed and following slots will |
2890 | + * appear to be larger than the frame. |
2891 | + * |
2892 | + * This cannot be fatal error as there are buggy |
2893 | + * frontends that do this. |
2894 | + * |
2895 | + * Consume all slots and drop the packet. |
2896 | + */ |
2897 | + if (!drop_err && txp->size > first->size) { |
2898 | + if (net_ratelimit()) |
2899 | + netdev_dbg(vif->dev, |
2900 | + "Invalid tx request, slot size %u > remaining size %u\n", |
2901 | + txp->size, first->size); |
2902 | + drop_err = -EIO; |
2903 | } |
2904 | |
2905 | first->size -= txp->size; |
2906 | - frags++; |
2907 | + slots++; |
2908 | |
2909 | if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { |
2910 | - netdev_err(vif->dev, "txp->offset: %x, size: %u\n", |
2911 | + netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n", |
2912 | txp->offset, txp->size); |
2913 | netbk_fatal_tx_err(vif); |
2914 | return -EINVAL; |
2915 | } |
2916 | - } while ((txp++)->flags & XEN_NETTXF_more_data); |
2917 | - return frags; |
2918 | + |
2919 | + more_data = txp->flags & XEN_NETTXF_more_data; |
2920 | + |
2921 | + if (!drop_err) |
2922 | + txp++; |
2923 | + |
2924 | + } while (more_data); |
2925 | + |
2926 | + if (drop_err) { |
2927 | + netbk_tx_err(vif, first, cons + slots); |
2928 | + return drop_err; |
2929 | + } |
2930 | + |
2931 | + return slots; |
2932 | } |
2933 | |
2934 | static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk, |
2935 | - struct sk_buff *skb, |
2936 | u16 pending_idx) |
2937 | { |
2938 | struct page *page; |
2939 | @@ -963,48 +1053,114 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, |
2940 | struct skb_shared_info *shinfo = skb_shinfo(skb); |
2941 | skb_frag_t *frags = shinfo->frags; |
2942 | u16 pending_idx = *((u16 *)skb->data); |
2943 | - int i, start; |
2944 | + u16 head_idx = 0; |
2945 | + int slot, start; |
2946 | + struct page *page; |
2947 | + pending_ring_idx_t index, start_idx = 0; |
2948 | + uint16_t dst_offset; |
2949 | + unsigned int nr_slots; |
2950 | + struct pending_tx_info *first = NULL; |
2951 | + |
2952 | + /* At this point shinfo->nr_frags is in fact the number of |
2953 | + * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX. |
2954 | + */ |
2955 | + nr_slots = shinfo->nr_frags; |
2956 | |
2957 | /* Skip first skb fragment if it is on same page as header fragment. */ |
2958 | start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); |
2959 | |
2960 | - for (i = start; i < shinfo->nr_frags; i++, txp++) { |
2961 | - struct page *page; |
2962 | - pending_ring_idx_t index; |
2963 | + /* Coalesce tx requests, at this point the packet passed in |
2964 | + * should be <= 64K. Any packets larger than 64K have been |
2965 | + * handled in netbk_count_requests(). |
2966 | + */ |
2967 | + for (shinfo->nr_frags = slot = start; slot < nr_slots; |
2968 | + shinfo->nr_frags++) { |
2969 | struct pending_tx_info *pending_tx_info = |
2970 | netbk->pending_tx_info; |
2971 | |
2972 | - index = pending_index(netbk->pending_cons++); |
2973 | - pending_idx = netbk->pending_ring[index]; |
2974 | - page = xen_netbk_alloc_page(netbk, skb, pending_idx); |
2975 | + page = alloc_page(GFP_KERNEL|__GFP_COLD); |
2976 | if (!page) |
2977 | goto err; |
2978 | |
2979 | - gop->source.u.ref = txp->gref; |
2980 | - gop->source.domid = vif->domid; |
2981 | - gop->source.offset = txp->offset; |
2982 | - |
2983 | - gop->dest.u.gmfn = virt_to_mfn(page_address(page)); |
2984 | - gop->dest.domid = DOMID_SELF; |
2985 | - gop->dest.offset = txp->offset; |
2986 | - |
2987 | - gop->len = txp->size; |
2988 | - gop->flags = GNTCOPY_source_gref; |
2989 | + dst_offset = 0; |
2990 | + first = NULL; |
2991 | + while (dst_offset < PAGE_SIZE && slot < nr_slots) { |
2992 | + gop->flags = GNTCOPY_source_gref; |
2993 | + |
2994 | + gop->source.u.ref = txp->gref; |
2995 | + gop->source.domid = vif->domid; |
2996 | + gop->source.offset = txp->offset; |
2997 | + |
2998 | + gop->dest.domid = DOMID_SELF; |
2999 | + |
3000 | + gop->dest.offset = dst_offset; |
3001 | + gop->dest.u.gmfn = virt_to_mfn(page_address(page)); |
3002 | + |
3003 | + if (dst_offset + txp->size > PAGE_SIZE) { |
3004 | + /* This page can only merge a portion |
3005 | + * of tx request. Do not increment any |
3006 | + * pointer / counter here. The txp |
3007 | + * will be dealt with in future |
3008 | + * rounds, eventually hitting the |
3009 | + * `else` branch. |
3010 | + */ |
3011 | + gop->len = PAGE_SIZE - dst_offset; |
3012 | + txp->offset += gop->len; |
3013 | + txp->size -= gop->len; |
3014 | + dst_offset += gop->len; /* quit loop */ |
3015 | + } else { |
3016 | + /* This tx request can be merged in the page */ |
3017 | + gop->len = txp->size; |
3018 | + dst_offset += gop->len; |
3019 | + |
3020 | + index = pending_index(netbk->pending_cons++); |
3021 | + |
3022 | + pending_idx = netbk->pending_ring[index]; |
3023 | + |
3024 | + memcpy(&pending_tx_info[pending_idx].req, txp, |
3025 | + sizeof(*txp)); |
3026 | + xenvif_get(vif); |
3027 | + |
3028 | + pending_tx_info[pending_idx].vif = vif; |
3029 | + |
3030 | + /* Poison these fields, corresponding |
3031 | + * fields for head tx req will be set |
3032 | + * to correct values after the loop. |
3033 | + */ |
3034 | + netbk->mmap_pages[pending_idx] = (void *)(~0UL); |
3035 | + pending_tx_info[pending_idx].head = |
3036 | + INVALID_PENDING_RING_IDX; |
3037 | + |
3038 | + if (!first) { |
3039 | + first = &pending_tx_info[pending_idx]; |
3040 | + start_idx = index; |
3041 | + head_idx = pending_idx; |
3042 | + } |
3043 | + |
3044 | + txp++; |
3045 | + slot++; |
3046 | + } |
3047 | |
3048 | - gop++; |
3049 | + gop++; |
3050 | + } |
3051 | |
3052 | - memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp)); |
3053 | - xenvif_get(vif); |
3054 | - pending_tx_info[pending_idx].vif = vif; |
3055 | - frag_set_pending_idx(&frags[i], pending_idx); |
3056 | + first->req.offset = 0; |
3057 | + first->req.size = dst_offset; |
3058 | + first->head = start_idx; |
3059 | + set_page_ext(page, netbk, head_idx); |
3060 | + netbk->mmap_pages[head_idx] = page; |
3061 | + frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx); |
3062 | } |
3063 | |
3064 | + BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS); |
3065 | + |
3066 | return gop; |
3067 | err: |
3068 | /* Unwind, freeing all pages and sending error responses. */ |
3069 | - while (i-- > start) { |
3070 | - xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]), |
3071 | - XEN_NETIF_RSP_ERROR); |
3072 | + while (shinfo->nr_frags-- > start) { |
3073 | + xen_netbk_idx_release(netbk, |
3074 | + frag_get_pending_idx(&frags[shinfo->nr_frags]), |
3075 | + XEN_NETIF_RSP_ERROR); |
3076 | } |
3077 | /* The head too, if necessary. */ |
3078 | if (start) |
3079 | @@ -1020,8 +1176,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, |
3080 | struct gnttab_copy *gop = *gopp; |
3081 | u16 pending_idx = *((u16 *)skb->data); |
3082 | struct skb_shared_info *shinfo = skb_shinfo(skb); |
3083 | + struct pending_tx_info *tx_info; |
3084 | int nr_frags = shinfo->nr_frags; |
3085 | int i, err, start; |
3086 | + u16 peek; /* peek into next tx request */ |
3087 | |
3088 | /* Check status of header. */ |
3089 | err = gop->status; |
3090 | @@ -1033,11 +1191,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, |
3091 | |
3092 | for (i = start; i < nr_frags; i++) { |
3093 | int j, newerr; |
3094 | + pending_ring_idx_t head; |
3095 | |
3096 | pending_idx = frag_get_pending_idx(&shinfo->frags[i]); |
3097 | + tx_info = &netbk->pending_tx_info[pending_idx]; |
3098 | + head = tx_info->head; |
3099 | |
3100 | /* Check error status: if okay then remember grant handle. */ |
3101 | - newerr = (++gop)->status; |
3102 | + do { |
3103 | + newerr = (++gop)->status; |
3104 | + if (newerr) |
3105 | + break; |
3106 | + peek = netbk->pending_ring[pending_index(++head)]; |
3107 | + } while (!pending_tx_is_head(netbk, peek)); |
3108 | + |
3109 | if (likely(!newerr)) { |
3110 | /* Had a previous error? Invalidate this fragment. */ |
3111 | if (unlikely(err)) |
3112 | @@ -1262,11 +1429,12 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) |
3113 | struct sk_buff *skb; |
3114 | int ret; |
3115 | |
3116 | - while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) && |
3117 | + while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX |
3118 | + < MAX_PENDING_REQS) && |
3119 | !list_empty(&netbk->net_schedule_list)) { |
3120 | struct xenvif *vif; |
3121 | struct xen_netif_tx_request txreq; |
3122 | - struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS]; |
3123 | + struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; |
3124 | struct page *page; |
3125 | struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; |
3126 | u16 pending_idx; |
3127 | @@ -1354,7 +1522,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) |
3128 | pending_idx = netbk->pending_ring[index]; |
3129 | |
3130 | data_len = (txreq.size > PKT_PROT_LEN && |
3131 | - ret < MAX_SKB_FRAGS) ? |
3132 | + ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? |
3133 | PKT_PROT_LEN : txreq.size; |
3134 | |
3135 | skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN, |
3136 | @@ -1381,7 +1549,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) |
3137 | } |
3138 | |
3139 | /* XXX could copy straight to head */ |
3140 | - page = xen_netbk_alloc_page(netbk, skb, pending_idx); |
3141 | + page = xen_netbk_alloc_page(netbk, pending_idx); |
3142 | if (!page) { |
3143 | kfree_skb(skb); |
3144 | netbk_tx_err(vif, &txreq, idx); |
3145 | @@ -1404,6 +1572,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) |
3146 | memcpy(&netbk->pending_tx_info[pending_idx].req, |
3147 | &txreq, sizeof(txreq)); |
3148 | netbk->pending_tx_info[pending_idx].vif = vif; |
3149 | + netbk->pending_tx_info[pending_idx].head = index; |
3150 | *((u16 *)skb->data) = pending_idx; |
3151 | |
3152 | __skb_put(skb, data_len); |
3153 | @@ -1531,7 +1700,10 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, |
3154 | { |
3155 | struct xenvif *vif; |
3156 | struct pending_tx_info *pending_tx_info; |
3157 | - pending_ring_idx_t index; |
3158 | + pending_ring_idx_t head; |
3159 | + u16 peek; /* peek into next tx request */ |
3160 | + |
3161 | + BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL)); |
3162 | |
3163 | /* Already complete? */ |
3164 | if (netbk->mmap_pages[pending_idx] == NULL) |
3165 | @@ -1540,19 +1712,40 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, |
3166 | pending_tx_info = &netbk->pending_tx_info[pending_idx]; |
3167 | |
3168 | vif = pending_tx_info->vif; |
3169 | + head = pending_tx_info->head; |
3170 | + |
3171 | + BUG_ON(!pending_tx_is_head(netbk, head)); |
3172 | + BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx); |
3173 | |
3174 | - make_tx_response(vif, &pending_tx_info->req, status); |
3175 | + do { |
3176 | + pending_ring_idx_t index; |
3177 | + pending_ring_idx_t idx = pending_index(head); |
3178 | + u16 info_idx = netbk->pending_ring[idx]; |
3179 | |
3180 | - index = pending_index(netbk->pending_prod++); |
3181 | - netbk->pending_ring[index] = pending_idx; |
3182 | + pending_tx_info = &netbk->pending_tx_info[info_idx]; |
3183 | + make_tx_response(vif, &pending_tx_info->req, status); |
3184 | |
3185 | - xenvif_put(vif); |
3186 | + /* Setting any number other than |
3187 | + * INVALID_PENDING_RING_IDX indicates this slot is |
3188 | + * starting a new packet / ending a previous packet. |
3189 | + */ |
3190 | + pending_tx_info->head = 0; |
3191 | + |
3192 | + index = pending_index(netbk->pending_prod++); |
3193 | + netbk->pending_ring[index] = netbk->pending_ring[info_idx]; |
3194 | + |
3195 | + xenvif_put(vif); |
3196 | + |
3197 | + peek = netbk->pending_ring[pending_index(++head)]; |
3198 | + |
3199 | + } while (!pending_tx_is_head(netbk, peek)); |
3200 | |
3201 | netbk->mmap_pages[pending_idx]->mapping = 0; |
3202 | put_page(netbk->mmap_pages[pending_idx]); |
3203 | netbk->mmap_pages[pending_idx] = NULL; |
3204 | } |
3205 | |
3206 | + |
3207 | static void make_tx_response(struct xenvif *vif, |
3208 | struct xen_netif_tx_request *txp, |
3209 | s8 st) |
3210 | @@ -1605,8 +1798,9 @@ static inline int rx_work_todo(struct xen_netbk *netbk) |
3211 | static inline int tx_work_todo(struct xen_netbk *netbk) |
3212 | { |
3213 | |
3214 | - if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) && |
3215 | - !list_empty(&netbk->net_schedule_list)) |
3216 | + if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX |
3217 | + < MAX_PENDING_REQS) && |
3218 | + !list_empty(&netbk->net_schedule_list)) |
3219 | return 1; |
3220 | |
3221 | return 0; |
3222 | @@ -1689,6 +1883,13 @@ static int __init netback_init(void) |
3223 | if (!xen_domain()) |
3224 | return -ENODEV; |
3225 | |
3226 | + if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) { |
3227 | + printk(KERN_INFO |
3228 | + "xen-netback: fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n", |
3229 | + fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX); |
3230 | + fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX; |
3231 | + } |
3232 | + |
3233 | xen_netbk_group_nr = num_online_cpus(); |
3234 | xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr); |
3235 | if (!xen_netbk) |
3236 | diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c |
3237 | index 7ffa43b..1f57423 100644 |
3238 | --- a/drivers/net/xen-netfront.c |
3239 | +++ b/drivers/net/xen-netfront.c |
3240 | @@ -36,7 +36,7 @@ |
3241 | #include <linux/skbuff.h> |
3242 | #include <linux/ethtool.h> |
3243 | #include <linux/if_ether.h> |
3244 | -#include <linux/tcp.h> |
3245 | +#include <net/tcp.h> |
3246 | #include <linux/udp.h> |
3247 | #include <linux/moduleparam.h> |
3248 | #include <linux/mm.h> |
3249 | @@ -548,6 +548,16 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) |
3250 | unsigned int len = skb_headlen(skb); |
3251 | unsigned long flags; |
3252 | |
3253 | + /* If skb->len is too big for wire format, drop skb and alert |
3254 | + * user about misconfiguration. |
3255 | + */ |
3256 | + if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) { |
3257 | + net_alert_ratelimited( |
3258 | + "xennet: skb->len = %u, too big for wire format\n", |
3259 | + skb->len); |
3260 | + goto drop; |
3261 | + } |
3262 | + |
3263 | slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) + |
3264 | xennet_count_skb_frag_slots(skb); |
3265 | if (unlikely(slots > MAX_SKB_FRAGS + 1)) { |
3266 | @@ -1064,7 +1074,8 @@ err: |
3267 | |
3268 | static int xennet_change_mtu(struct net_device *dev, int mtu) |
3269 | { |
3270 | - int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN; |
3271 | + int max = xennet_can_sg(dev) ? |
3272 | + XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN; |
3273 | |
3274 | if (mtu > max) |
3275 | return -EINVAL; |
3276 | @@ -1368,6 +1379,8 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) |
3277 | SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops); |
3278 | SET_NETDEV_DEV(netdev, &dev->dev); |
3279 | |
3280 | + netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER); |
3281 | + |
3282 | np->netdev = netdev; |
3283 | |
3284 | netif_carrier_off(netdev); |
3285 | diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c |
3286 | index 6faba40..a8b2c23 100644 |
3287 | --- a/drivers/rapidio/devices/tsi721.c |
3288 | +++ b/drivers/rapidio/devices/tsi721.c |
3289 | @@ -471,6 +471,10 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr) |
3290 | u32 intval; |
3291 | u32 ch_inte; |
3292 | |
3293 | + /* For MSI mode disable all device-level interrupts */ |
3294 | + if (priv->flags & TSI721_USING_MSI) |
3295 | + iowrite32(0, priv->regs + TSI721_DEV_INTE); |
3296 | + |
3297 | dev_int = ioread32(priv->regs + TSI721_DEV_INT); |
3298 | if (!dev_int) |
3299 | return IRQ_NONE; |
3300 | @@ -560,6 +564,14 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr) |
3301 | } |
3302 | } |
3303 | #endif |
3304 | + |
3305 | + /* For MSI mode re-enable device-level interrupts */ |
3306 | + if (priv->flags & TSI721_USING_MSI) { |
3307 | + dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO | |
3308 | + TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH; |
3309 | + iowrite32(dev_int, priv->regs + TSI721_DEV_INTE); |
3310 | + } |
3311 | + |
3312 | return IRQ_HANDLED; |
3313 | } |
3314 | |
3315 | diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c |
3316 | index 39cf146..149fad4 100644 |
3317 | --- a/drivers/regulator/palmas-regulator.c |
3318 | +++ b/drivers/regulator/palmas-regulator.c |
3319 | @@ -677,7 +677,7 @@ static int palmas_probe(struct platform_device *pdev) |
3320 | pmic->desc[id].vsel_mask = SMPS10_VSEL; |
3321 | pmic->desc[id].enable_reg = |
3322 | PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, |
3323 | - PALMAS_SMPS10_STATUS); |
3324 | + PALMAS_SMPS10_CTRL); |
3325 | pmic->desc[id].enable_mask = SMPS10_BOOST_EN; |
3326 | pmic->desc[id].min_uV = 3750000; |
3327 | pmic->desc[id].uV_step = 1250000; |
3328 | diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c |
3329 | index 2197b57..7e64546 100644 |
3330 | --- a/drivers/scsi/ipr.c |
3331 | +++ b/drivers/scsi/ipr.c |
3332 | @@ -4777,7 +4777,7 @@ static int ipr_eh_host_reset(struct scsi_cmnd *cmd) |
3333 | ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; |
3334 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); |
3335 | |
3336 | - if (!ioa_cfg->in_reset_reload) { |
3337 | + if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { |
3338 | ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); |
3339 | dev_err(&ioa_cfg->pdev->dev, |
3340 | "Adapter being reset as a result of error recovery.\n"); |
3341 | @@ -6739,6 +6739,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg) |
3342 | static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) |
3343 | { |
3344 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
3345 | + int i; |
3346 | |
3347 | ENTER; |
3348 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { |
3349 | @@ -6750,6 +6751,13 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) |
3350 | |
3351 | ioa_cfg->in_reset_reload = 0; |
3352 | ioa_cfg->reset_retries = 0; |
3353 | + for (i = 0; i < ioa_cfg->hrrq_num; i++) { |
3354 | + spin_lock(&ioa_cfg->hrrq[i]._lock); |
3355 | + ioa_cfg->hrrq[i].ioa_is_dead = 1; |
3356 | + spin_unlock(&ioa_cfg->hrrq[i]._lock); |
3357 | + } |
3358 | + wmb(); |
3359 | + |
3360 | list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); |
3361 | wake_up_all(&ioa_cfg->reset_wait_q); |
3362 | LEAVE; |
3363 | @@ -8651,7 +8659,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev) |
3364 | spin_lock_irqsave(ioa_cfg->host->host_lock, flags); |
3365 | if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) |
3366 | ioa_cfg->sdt_state = ABORT_DUMP; |
3367 | - ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES; |
3368 | + ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; |
3369 | ioa_cfg->in_ioa_bringdown = 1; |
3370 | for (i = 0; i < ioa_cfg->hrrq_num; i++) { |
3371 | spin_lock(&ioa_cfg->hrrq[i]._lock); |
3372 | diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c |
3373 | index bc5e9da..a94e66f 100644 |
3374 | --- a/drivers/staging/vt6656/hostap.c |
3375 | +++ b/drivers/staging/vt6656/hostap.c |
3376 | @@ -133,7 +133,7 @@ static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked) |
3377 | DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n", |
3378 | pDevice->dev->name, pDevice->apdev->name); |
3379 | } |
3380 | - kfree(pDevice->apdev); |
3381 | + free_netdev(pDevice->apdev); |
3382 | pDevice->apdev = NULL; |
3383 | pDevice->bEnable8021x = false; |
3384 | pDevice->bEnableHostWEP = false; |
3385 | diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c |
3386 | index 69971f3..60b50d0 100644 |
3387 | --- a/drivers/staging/vt6656/iwctl.c |
3388 | +++ b/drivers/staging/vt6656/iwctl.c |
3389 | @@ -1348,9 +1348,12 @@ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info, |
3390 | return rc; |
3391 | } |
3392 | |
3393 | + spin_lock_irq(&pDevice->lock); |
3394 | + |
3395 | if (wrq->disabled) { |
3396 | pDevice->ePSMode = WMAC_POWER_CAM; |
3397 | PSvDisablePowerSaving(pDevice); |
3398 | + spin_unlock_irq(&pDevice->lock); |
3399 | return rc; |
3400 | } |
3401 | if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { |
3402 | @@ -1361,6 +1364,9 @@ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info, |
3403 | pDevice->ePSMode = WMAC_POWER_FAST; |
3404 | PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval); |
3405 | } |
3406 | + |
3407 | + spin_unlock_irq(&pDevice->lock); |
3408 | + |
3409 | switch (wrq->flags & IW_POWER_MODE) { |
3410 | case IW_POWER_UNICAST_R: |
3411 | DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_UNICAST_R \n"); |
3412 | diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c |
3413 | index ca2be40..93ae910 100644 |
3414 | --- a/drivers/target/iscsi/iscsi_target_parameters.c |
3415 | +++ b/drivers/target/iscsi/iscsi_target_parameters.c |
3416 | @@ -712,9 +712,9 @@ static int iscsi_add_notunderstood_response( |
3417 | } |
3418 | INIT_LIST_HEAD(&extra_response->er_list); |
3419 | |
3420 | - strncpy(extra_response->key, key, strlen(key) + 1); |
3421 | - strncpy(extra_response->value, NOTUNDERSTOOD, |
3422 | - strlen(NOTUNDERSTOOD) + 1); |
3423 | + strlcpy(extra_response->key, key, sizeof(extra_response->key)); |
3424 | + strlcpy(extra_response->value, NOTUNDERSTOOD, |
3425 | + sizeof(extra_response->value)); |
3426 | |
3427 | list_add_tail(&extra_response->er_list, |
3428 | ¶m_list->extra_response_list); |
3429 | @@ -1583,8 +1583,6 @@ int iscsi_decode_text_input( |
3430 | |
3431 | if (phase & PHASE_SECURITY) { |
3432 | if (iscsi_check_for_auth_key(key) > 0) { |
3433 | - char *tmpptr = key + strlen(key); |
3434 | - *tmpptr = '='; |
3435 | kfree(tmpbuf); |
3436 | return 1; |
3437 | } |
3438 | diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h |
3439 | index 1e1b750..2c536a0 100644 |
3440 | --- a/drivers/target/iscsi/iscsi_target_parameters.h |
3441 | +++ b/drivers/target/iscsi/iscsi_target_parameters.h |
3442 | @@ -1,8 +1,10 @@ |
3443 | #ifndef ISCSI_PARAMETERS_H |
3444 | #define ISCSI_PARAMETERS_H |
3445 | |
3446 | +#include <scsi/iscsi_proto.h> |
3447 | + |
3448 | struct iscsi_extra_response { |
3449 | - char key[64]; |
3450 | + char key[KEY_MAXLEN]; |
3451 | char value[32]; |
3452 | struct list_head er_list; |
3453 | } ____cacheline_aligned; |
3454 | diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c |
3455 | index ca4b219..12191d8 100644 |
3456 | --- a/drivers/target/target_core_file.c |
3457 | +++ b/drivers/target/target_core_file.c |
3458 | @@ -150,6 +150,7 @@ static int fd_configure_device(struct se_device *dev) |
3459 | if (S_ISBLK(inode->i_mode)) { |
3460 | unsigned long long dev_size; |
3461 | |
3462 | + fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev); |
3463 | /* |
3464 | * Determine the number of bytes from i_size_read() minus |
3465 | * one (1) logical sector from underlying struct block_device |
3466 | @@ -168,11 +169,11 @@ static int fd_configure_device(struct se_device *dev) |
3467 | " block_device\n"); |
3468 | goto fail; |
3469 | } |
3470 | - } |
3471 | |
3472 | - fd_dev->fd_block_size = dev->dev_attrib.hw_block_size; |
3473 | + fd_dev->fd_block_size = FD_BLOCKSIZE; |
3474 | + } |
3475 | |
3476 | - dev->dev_attrib.hw_block_size = FD_BLOCKSIZE; |
3477 | + dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; |
3478 | dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; |
3479 | dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; |
3480 | |
3481 | @@ -583,11 +584,12 @@ static sector_t fd_get_blocks(struct se_device *dev) |
3482 | * to handle underlying block_device resize operations. |
3483 | */ |
3484 | if (S_ISBLK(i->i_mode)) |
3485 | - dev_size = (i_size_read(i) - fd_dev->fd_block_size); |
3486 | + dev_size = i_size_read(i); |
3487 | else |
3488 | dev_size = fd_dev->fd_dev_size; |
3489 | |
3490 | - return div_u64(dev_size, dev->dev_attrib.block_size); |
3491 | + return div_u64(dev_size - dev->dev_attrib.block_size, |
3492 | + dev->dev_attrib.block_size); |
3493 | } |
3494 | |
3495 | static struct sbc_ops fd_sbc_ops = { |
3496 | diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c |
3497 | index 0d46276..fc9a5a0 100644 |
3498 | --- a/drivers/target/target_core_transport.c |
3499 | +++ b/drivers/target/target_core_transport.c |
3500 | @@ -222,6 +222,7 @@ struct se_session *transport_init_session(void) |
3501 | INIT_LIST_HEAD(&se_sess->sess_list); |
3502 | INIT_LIST_HEAD(&se_sess->sess_acl_list); |
3503 | INIT_LIST_HEAD(&se_sess->sess_cmd_list); |
3504 | + INIT_LIST_HEAD(&se_sess->sess_wait_list); |
3505 | spin_lock_init(&se_sess->sess_cmd_lock); |
3506 | kref_init(&se_sess->sess_kref); |
3507 | |
3508 | @@ -2252,11 +2253,14 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) |
3509 | unsigned long flags; |
3510 | |
3511 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
3512 | - |
3513 | - WARN_ON(se_sess->sess_tearing_down); |
3514 | + if (se_sess->sess_tearing_down) { |
3515 | + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
3516 | + return; |
3517 | + } |
3518 | se_sess->sess_tearing_down = 1; |
3519 | + list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); |
3520 | |
3521 | - list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) |
3522 | + list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) |
3523 | se_cmd->cmd_wait_set = 1; |
3524 | |
3525 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
3526 | @@ -2273,9 +2277,10 @@ void target_wait_for_sess_cmds( |
3527 | { |
3528 | struct se_cmd *se_cmd, *tmp_cmd; |
3529 | bool rc = false; |
3530 | + unsigned long flags; |
3531 | |
3532 | list_for_each_entry_safe(se_cmd, tmp_cmd, |
3533 | - &se_sess->sess_cmd_list, se_cmd_list) { |
3534 | + &se_sess->sess_wait_list, se_cmd_list) { |
3535 | list_del(&se_cmd->se_cmd_list); |
3536 | |
3537 | pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" |
3538 | @@ -2303,6 +2308,11 @@ void target_wait_for_sess_cmds( |
3539 | |
3540 | se_cmd->se_tfo->release_cmd(se_cmd); |
3541 | } |
3542 | + |
3543 | + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
3544 | + WARN_ON(!list_empty(&se_sess->sess_cmd_list)); |
3545 | + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
3546 | + |
3547 | } |
3548 | EXPORT_SYMBOL(target_wait_for_sess_cmds); |
3549 | |
3550 | diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c |
3551 | index 05e72be..1f8cba6 100644 |
3552 | --- a/drivers/tty/n_tty.c |
3553 | +++ b/drivers/tty/n_tty.c |
3554 | @@ -1588,6 +1588,14 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) |
3555 | ldata->real_raw = 0; |
3556 | } |
3557 | n_tty_set_room(tty); |
3558 | + /* |
3559 | + * Fix tty hang when I_IXON(tty) is cleared, but the tty |
3560 | + * been stopped by STOP_CHAR(tty) before it. |
3561 | + */ |
3562 | + if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) { |
3563 | + start_tty(tty); |
3564 | + } |
3565 | + |
3566 | /* The termios change make the tty ready for I/O */ |
3567 | wake_up_interruptible(&tty->write_wait); |
3568 | wake_up_interruptible(&tty->read_wait); |
3569 | diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c |
3570 | index b7eb86a..8a7eb77 100644 |
3571 | --- a/drivers/usb/atm/cxacru.c |
3572 | +++ b/drivers/usb/atm/cxacru.c |
3573 | @@ -686,7 +686,8 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ |
3574 | { |
3575 | int ret, len; |
3576 | __le32 *buf; |
3577 | - int offb, offd; |
3578 | + int offb; |
3579 | + unsigned int offd; |
3580 | const int stride = CMD_PACKET_SIZE / (4 * 2) - 1; |
3581 | int buflen = ((size - 1) / stride + 1 + size * 2) * 4; |
3582 | |
3583 | diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig |
3584 | index 608a2ae..b2df442 100644 |
3585 | --- a/drivers/usb/chipidea/Kconfig |
3586 | +++ b/drivers/usb/chipidea/Kconfig |
3587 | @@ -20,7 +20,7 @@ config USB_CHIPIDEA_UDC |
3588 | config USB_CHIPIDEA_HOST |
3589 | bool "ChipIdea host controller" |
3590 | depends on USB=y || USB=USB_CHIPIDEA |
3591 | - depends on USB_EHCI_HCD |
3592 | + depends on USB_EHCI_HCD=y |
3593 | select USB_EHCI_ROOT_HUB_TT |
3594 | help |
3595 | Say Y here to enable host controller functionality of the |
3596 | diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c |
3597 | index 3113c1d..e14346a 100644 |
3598 | --- a/drivers/usb/core/quirks.c |
3599 | +++ b/drivers/usb/core/quirks.c |
3600 | @@ -88,6 +88,9 @@ static const struct usb_device_id usb_quirk_list[] = { |
3601 | /* Edirol SD-20 */ |
3602 | { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, |
3603 | |
3604 | + /* Alcor Micro Corp. Hub */ |
3605 | + { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME }, |
3606 | + |
3607 | /* appletouch */ |
3608 | { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, |
3609 | |
3610 | diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c |
3611 | index 180a2b0..007137f 100644 |
3612 | --- a/drivers/usb/host/ohci-hcd.c |
3613 | +++ b/drivers/usb/host/ohci-hcd.c |
3614 | @@ -233,14 +233,14 @@ static int ohci_urb_enqueue ( |
3615 | urb->start_frame = frame; |
3616 | } |
3617 | } else if (ed->type == PIPE_ISOCHRONOUS) { |
3618 | - u16 next = ohci_frame_no(ohci) + 2; |
3619 | + u16 next = ohci_frame_no(ohci) + 1; |
3620 | u16 frame = ed->last_iso + ed->interval; |
3621 | |
3622 | /* Behind the scheduling threshold? */ |
3623 | if (unlikely(tick_before(frame, next))) { |
3624 | |
3625 | /* USB_ISO_ASAP: Round up to the first available slot */ |
3626 | - if (urb->transfer_flags & URB_ISO_ASAP) |
3627 | + if (urb->transfer_flags & URB_ISO_ASAP) { |
3628 | frame += (next - frame + ed->interval - 1) & |
3629 | -ed->interval; |
3630 | |
3631 | @@ -248,21 +248,25 @@ static int ohci_urb_enqueue ( |
3632 | * Not ASAP: Use the next slot in the stream. If |
3633 | * the entire URB falls before the threshold, fail. |
3634 | */ |
3635 | - else if (tick_before(frame + ed->interval * |
3636 | + } else { |
3637 | + if (tick_before(frame + ed->interval * |
3638 | (urb->number_of_packets - 1), next)) { |
3639 | - retval = -EXDEV; |
3640 | - usb_hcd_unlink_urb_from_ep(hcd, urb); |
3641 | - goto fail; |
3642 | - } |
3643 | + retval = -EXDEV; |
3644 | + usb_hcd_unlink_urb_from_ep(hcd, urb); |
3645 | + goto fail; |
3646 | + } |
3647 | |
3648 | - /* |
3649 | - * Some OHCI hardware doesn't handle late TDs |
3650 | - * correctly. After retiring them it proceeds to |
3651 | - * the next ED instead of the next TD. Therefore |
3652 | - * we have to omit the late TDs entirely. |
3653 | - */ |
3654 | - urb_priv->td_cnt = DIV_ROUND_UP(next - frame, |
3655 | - ed->interval); |
3656 | + /* |
3657 | + * Some OHCI hardware doesn't handle late TDs |
3658 | + * correctly. After retiring them it proceeds |
3659 | + * to the next ED instead of the next TD. |
3660 | + * Therefore we have to omit the late TDs |
3661 | + * entirely. |
3662 | + */ |
3663 | + urb_priv->td_cnt = DIV_ROUND_UP( |
3664 | + (u16) (next - frame), |
3665 | + ed->interval); |
3666 | + } |
3667 | } |
3668 | urb->start_frame = frame; |
3669 | } |
3670 | diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c |
3671 | index f87bee6..9189bc9 100644 |
3672 | --- a/drivers/usb/host/uhci-hub.c |
3673 | +++ b/drivers/usb/host/uhci-hub.c |
3674 | @@ -225,7 +225,8 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf) |
3675 | /* auto-stop if nothing connected for 1 second */ |
3676 | if (any_ports_active(uhci)) |
3677 | uhci->rh_state = UHCI_RH_RUNNING; |
3678 | - else if (time_after_eq(jiffies, uhci->auto_stop_time)) |
3679 | + else if (time_after_eq(jiffies, uhci->auto_stop_time) && |
3680 | + !uhci->wait_for_hp) |
3681 | suspend_rh(uhci, UHCI_RH_AUTO_STOPPED); |
3682 | break; |
3683 | |
3684 | diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c |
3685 | index f0976d8..041c6dd 100644 |
3686 | --- a/drivers/usb/host/uhci-q.c |
3687 | +++ b/drivers/usb/host/uhci-q.c |
3688 | @@ -1287,7 +1287,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb, |
3689 | return -EINVAL; /* Can't change the period */ |
3690 | |
3691 | } else { |
3692 | - next = uhci->frame_number + 2; |
3693 | + next = uhci->frame_number + 1; |
3694 | |
3695 | /* Find the next unused frame */ |
3696 | if (list_empty(&qh->queue)) { |
3697 | diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c |
3698 | index 6dc238c..fd26470e428 100644 |
3699 | --- a/drivers/usb/host/xhci-mem.c |
3700 | +++ b/drivers/usb/host/xhci-mem.c |
3701 | @@ -1423,15 +1423,17 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, |
3702 | ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep)); |
3703 | |
3704 | /* Set the max packet size and max burst */ |
3705 | + max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); |
3706 | + max_burst = 0; |
3707 | switch (udev->speed) { |
3708 | case USB_SPEED_SUPER: |
3709 | - max_packet = usb_endpoint_maxp(&ep->desc); |
3710 | - ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); |
3711 | /* dig out max burst from ep companion desc */ |
3712 | - max_packet = ep->ss_ep_comp.bMaxBurst; |
3713 | - ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet)); |
3714 | + max_burst = ep->ss_ep_comp.bMaxBurst; |
3715 | break; |
3716 | case USB_SPEED_HIGH: |
3717 | + /* Some devices get this wrong */ |
3718 | + if (usb_endpoint_xfer_bulk(&ep->desc)) |
3719 | + max_packet = 512; |
3720 | /* bits 11:12 specify the number of additional transaction |
3721 | * opportunities per microframe (USB 2.0, section 9.6.6) |
3722 | */ |
3723 | @@ -1439,17 +1441,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, |
3724 | usb_endpoint_xfer_int(&ep->desc)) { |
3725 | max_burst = (usb_endpoint_maxp(&ep->desc) |
3726 | & 0x1800) >> 11; |
3727 | - ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst)); |
3728 | } |
3729 | - /* Fall through */ |
3730 | + break; |
3731 | case USB_SPEED_FULL: |
3732 | case USB_SPEED_LOW: |
3733 | - max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); |
3734 | - ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); |
3735 | break; |
3736 | default: |
3737 | BUG(); |
3738 | } |
3739 | + ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) | |
3740 | + MAX_BURST(max_burst)); |
3741 | max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep); |
3742 | ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload)); |
3743 | |
3744 | diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c |
3745 | index be2dcb0..266ece7 100644 |
3746 | --- a/drivers/usb/serial/ftdi_sio.c |
3747 | +++ b/drivers/usb/serial/ftdi_sio.c |
3748 | @@ -191,6 +191,8 @@ static struct usb_device_id id_table_combined [] = { |
3749 | { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) }, |
3750 | { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) }, |
3751 | { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) }, |
3752 | + { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_CC_PID) }, |
3753 | + { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_AGP_PID) }, |
3754 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, |
3755 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, |
3756 | { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, |
3757 | diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h |
3758 | index 9852827..6dd7925 100644 |
3759 | --- a/drivers/usb/serial/ftdi_sio_ids.h |
3760 | +++ b/drivers/usb/serial/ftdi_sio_ids.h |
3761 | @@ -772,6 +772,8 @@ |
3762 | */ |
3763 | #define NEWPORT_VID 0x104D |
3764 | #define NEWPORT_AGILIS_PID 0x3000 |
3765 | +#define NEWPORT_CONEX_CC_PID 0x3002 |
3766 | +#define NEWPORT_CONEX_AGP_PID 0x3006 |
3767 | |
3768 | /* Interbiometrics USB I/O Board */ |
3769 | /* Developed for Interbiometrics by Rudolf Gugler */ |
3770 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
3771 | index bff059a..87181be 100644 |
3772 | --- a/drivers/usb/serial/option.c |
3773 | +++ b/drivers/usb/serial/option.c |
3774 | @@ -196,6 +196,7 @@ static void option_instat_callback(struct urb *urb); |
3775 | |
3776 | #define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */ |
3777 | #define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */ |
3778 | +#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */ |
3779 | |
3780 | #define KYOCERA_VENDOR_ID 0x0c88 |
3781 | #define KYOCERA_PRODUCT_KPC650 0x17da |
3782 | @@ -341,8 +342,8 @@ static void option_instat_callback(struct urb *urb); |
3783 | #define CINTERION_PRODUCT_EU3_E 0x0051 |
3784 | #define CINTERION_PRODUCT_EU3_P 0x0052 |
3785 | #define CINTERION_PRODUCT_PH8 0x0053 |
3786 | -#define CINTERION_PRODUCT_AH6 0x0055 |
3787 | -#define CINTERION_PRODUCT_PLS8 0x0060 |
3788 | +#define CINTERION_PRODUCT_AHXX 0x0055 |
3789 | +#define CINTERION_PRODUCT_PLXX 0x0060 |
3790 | |
3791 | /* Olivetti products */ |
3792 | #define OLIVETTI_VENDOR_ID 0x0b3c |
3793 | @@ -771,6 +772,7 @@ static const struct usb_device_id option_ids[] = { |
3794 | { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ |
3795 | { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) }, |
3796 | { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) }, |
3797 | + { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) }, |
3798 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ |
3799 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, |
3800 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, |
3801 | @@ -966,6 +968,8 @@ static const struct usb_device_id option_ids[] = { |
3802 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
3803 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) }, |
3804 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) }, |
3805 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */ |
3806 | + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
3807 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) }, |
3808 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) }, |
3809 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff), |
3810 | @@ -1264,8 +1268,9 @@ static const struct usb_device_id option_ids[] = { |
3811 | { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, |
3812 | { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, |
3813 | { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) }, |
3814 | - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) }, |
3815 | - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) }, |
3816 | + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) }, |
3817 | + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX), |
3818 | + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
3819 | { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, |
3820 | { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, |
3821 | { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, |
3822 | diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c |
3823 | index 210fce2..47c1155 100644 |
3824 | --- a/fs/cifs/cifs_dfs_ref.c |
3825 | +++ b/fs/cifs/cifs_dfs_ref.c |
3826 | @@ -18,6 +18,7 @@ |
3827 | #include <linux/slab.h> |
3828 | #include <linux/vfs.h> |
3829 | #include <linux/fs.h> |
3830 | +#include <linux/inet.h> |
3831 | #include "cifsglob.h" |
3832 | #include "cifsproto.h" |
3833 | #include "cifsfs.h" |
3834 | @@ -150,7 +151,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata, |
3835 | * assuming that we have 'unc=' and 'ip=' in |
3836 | * the original sb_mountdata |
3837 | */ |
3838 | - md_len = strlen(sb_mountdata) + rc + strlen(ref->node_name) + 12; |
3839 | + md_len = strlen(sb_mountdata) + rc + strlen(ref->node_name) + 12 + |
3840 | + INET6_ADDRSTRLEN; |
3841 | mountdata = kzalloc(md_len+1, GFP_KERNEL); |
3842 | if (mountdata == NULL) { |
3843 | rc = -ENOMEM; |
3844 | diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c |
3845 | index 20887bf..cb88429 100644 |
3846 | --- a/fs/cifs/inode.c |
3847 | +++ b/fs/cifs/inode.c |
3848 | @@ -169,7 +169,8 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) |
3849 | |
3850 | if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL) |
3851 | inode->i_flags |= S_AUTOMOUNT; |
3852 | - cifs_set_ops(inode); |
3853 | + if (inode->i_state & I_NEW) |
3854 | + cifs_set_ops(inode); |
3855 | } |
3856 | |
3857 | void |
3858 | diff --git a/fs/fat/inode.c b/fs/fat/inode.c |
3859 | index acf6e47..e7a7fde 100644 |
3860 | --- a/fs/fat/inode.c |
3861 | +++ b/fs/fat/inode.c |
3862 | @@ -1223,6 +1223,19 @@ static int fat_read_root(struct inode *inode) |
3863 | return 0; |
3864 | } |
3865 | |
3866 | +static unsigned long calc_fat_clusters(struct super_block *sb) |
3867 | +{ |
3868 | + struct msdos_sb_info *sbi = MSDOS_SB(sb); |
3869 | + |
3870 | + /* Divide first to avoid overflow */ |
3871 | + if (sbi->fat_bits != 12) { |
3872 | + unsigned long ent_per_sec = sb->s_blocksize * 8 / sbi->fat_bits; |
3873 | + return ent_per_sec * sbi->fat_length; |
3874 | + } |
3875 | + |
3876 | + return sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits; |
3877 | +} |
3878 | + |
3879 | /* |
3880 | * Read the super block of an MS-DOS FS. |
3881 | */ |
3882 | @@ -1427,7 +1440,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, |
3883 | sbi->dirty = b->fat16.state & FAT_STATE_DIRTY; |
3884 | |
3885 | /* check that FAT table does not overflow */ |
3886 | - fat_clusters = sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits; |
3887 | + fat_clusters = calc_fat_clusters(sb); |
3888 | total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT); |
3889 | if (total_clusters > MAX_FAT(sb)) { |
3890 | if (!silent) |
3891 | diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c |
3892 | index ff15522..185c479 100644 |
3893 | --- a/fs/fuse/dir.c |
3894 | +++ b/fs/fuse/dir.c |
3895 | @@ -180,6 +180,8 @@ u64 fuse_get_attr_version(struct fuse_conn *fc) |
3896 | static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) |
3897 | { |
3898 | struct inode *inode; |
3899 | + struct dentry *parent; |
3900 | + struct fuse_conn *fc; |
3901 | |
3902 | inode = ACCESS_ONCE(entry->d_inode); |
3903 | if (inode && is_bad_inode(inode)) |
3904 | @@ -187,10 +189,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) |
3905 | else if (fuse_dentry_time(entry) < get_jiffies_64()) { |
3906 | int err; |
3907 | struct fuse_entry_out outarg; |
3908 | - struct fuse_conn *fc; |
3909 | struct fuse_req *req; |
3910 | struct fuse_forget_link *forget; |
3911 | - struct dentry *parent; |
3912 | u64 attr_version; |
3913 | |
3914 | /* For negative dentries, always do a fresh lookup */ |
3915 | @@ -241,8 +241,14 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) |
3916 | entry_attr_timeout(&outarg), |
3917 | attr_version); |
3918 | fuse_change_entry_timeout(entry, &outarg); |
3919 | + } else if (inode) { |
3920 | + fc = get_fuse_conn(inode); |
3921 | + if (fc->readdirplus_auto) { |
3922 | + parent = dget_parent(entry); |
3923 | + fuse_advise_use_readdirplus(parent->d_inode); |
3924 | + dput(parent); |
3925 | + } |
3926 | } |
3927 | - fuse_advise_use_readdirplus(inode); |
3928 | return 1; |
3929 | } |
3930 | |
3931 | diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c |
3932 | index 137185c..a215d22 100644 |
3933 | --- a/fs/fuse/inode.c |
3934 | +++ b/fs/fuse/inode.c |
3935 | @@ -864,10 +864,11 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) |
3936 | fc->dont_mask = 1; |
3937 | if (arg->flags & FUSE_AUTO_INVAL_DATA) |
3938 | fc->auto_inval_data = 1; |
3939 | - if (arg->flags & FUSE_DO_READDIRPLUS) |
3940 | + if (arg->flags & FUSE_DO_READDIRPLUS) { |
3941 | fc->do_readdirplus = 1; |
3942 | - if (arg->flags & FUSE_READDIRPLUS_AUTO) |
3943 | - fc->readdirplus_auto = 1; |
3944 | + if (arg->flags & FUSE_READDIRPLUS_AUTO) |
3945 | + fc->readdirplus_auto = 1; |
3946 | + } |
3947 | } else { |
3948 | ra_pages = fc->max_read / PAGE_CACHE_SIZE; |
3949 | fc->no_lock = 1; |
3950 | diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c |
3951 | index b7dc47b..77554b6 100644 |
3952 | --- a/fs/jfs/inode.c |
3953 | +++ b/fs/jfs/inode.c |
3954 | @@ -125,7 +125,7 @@ int jfs_write_inode(struct inode *inode, struct writeback_control *wbc) |
3955 | { |
3956 | int wait = wbc->sync_mode == WB_SYNC_ALL; |
3957 | |
3958 | - if (test_cflag(COMMIT_Nolink, inode)) |
3959 | + if (inode->i_nlink == 0) |
3960 | return 0; |
3961 | /* |
3962 | * If COMMIT_DIRTY is not set, the inode isn't really dirty. |
3963 | diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c |
3964 | index 2eb952c..cbe48ea 100644 |
3965 | --- a/fs/jfs/jfs_logmgr.c |
3966 | +++ b/fs/jfs/jfs_logmgr.c |
3967 | @@ -1058,7 +1058,8 @@ static int lmLogSync(struct jfs_log * log, int hard_sync) |
3968 | */ |
3969 | void jfs_syncpt(struct jfs_log *log, int hard_sync) |
3970 | { LOG_LOCK(log); |
3971 | - lmLogSync(log, hard_sync); |
3972 | + if (!test_bit(log_QUIESCE, &log->flag)) |
3973 | + lmLogSync(log, hard_sync); |
3974 | LOG_UNLOCK(log); |
3975 | } |
3976 | |
3977 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
3978 | index 0086401..261e9b9 100644 |
3979 | --- a/fs/nfs/nfs4proc.c |
3980 | +++ b/fs/nfs/nfs4proc.c |
3981 | @@ -1022,7 +1022,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) |
3982 | struct nfs4_state *state = opendata->state; |
3983 | struct nfs_inode *nfsi = NFS_I(state->inode); |
3984 | struct nfs_delegation *delegation; |
3985 | - int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC); |
3986 | + int open_mode = opendata->o_arg.open_flags; |
3987 | fmode_t fmode = opendata->o_arg.fmode; |
3988 | nfs4_stateid stateid; |
3989 | int ret = -EAGAIN; |
3990 | diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c |
3991 | index 6b49f14..734c93f 100644 |
3992 | --- a/fs/nilfs2/inode.c |
3993 | +++ b/fs/nilfs2/inode.c |
3994 | @@ -202,13 +202,32 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc) |
3995 | |
3996 | static int nilfs_set_page_dirty(struct page *page) |
3997 | { |
3998 | - int ret = __set_page_dirty_buffers(page); |
3999 | + int ret = __set_page_dirty_nobuffers(page); |
4000 | |
4001 | - if (ret) { |
4002 | + if (page_has_buffers(page)) { |
4003 | struct inode *inode = page->mapping->host; |
4004 | - unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits); |
4005 | + unsigned nr_dirty = 0; |
4006 | + struct buffer_head *bh, *head; |
4007 | |
4008 | - nilfs_set_file_dirty(inode, nr_dirty); |
4009 | + /* |
4010 | + * This page is locked by callers, and no other thread |
4011 | + * concurrently marks its buffers dirty since they are |
4012 | + * only dirtied through routines in fs/buffer.c in |
4013 | + * which call sites of mark_buffer_dirty are protected |
4014 | + * by page lock. |
4015 | + */ |
4016 | + bh = head = page_buffers(page); |
4017 | + do { |
4018 | + /* Do not mark hole blocks dirty */ |
4019 | + if (buffer_dirty(bh) || !buffer_mapped(bh)) |
4020 | + continue; |
4021 | + |
4022 | + set_buffer_dirty(bh); |
4023 | + nr_dirty++; |
4024 | + } while (bh = bh->b_this_page, bh != head); |
4025 | + |
4026 | + if (nr_dirty) |
4027 | + nilfs_set_file_dirty(inode, nr_dirty); |
4028 | } |
4029 | return ret; |
4030 | } |
4031 | diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c |
4032 | index 1c39efb..2487116 100644 |
4033 | --- a/fs/ocfs2/extent_map.c |
4034 | +++ b/fs/ocfs2/extent_map.c |
4035 | @@ -790,7 +790,7 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
4036 | &hole_size, &rec, &is_last); |
4037 | if (ret) { |
4038 | mlog_errno(ret); |
4039 | - goto out; |
4040 | + goto out_unlock; |
4041 | } |
4042 | |
4043 | if (rec.e_blkno == 0ULL) { |
4044 | diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c |
4045 | index 66c53b6..6c2d136 100644 |
4046 | --- a/fs/reiserfs/dir.c |
4047 | +++ b/fs/reiserfs/dir.c |
4048 | @@ -204,6 +204,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent, |
4049 | next_pos = deh_offset(deh) + 1; |
4050 | |
4051 | if (item_moved(&tmp_ih, &path_to_entry)) { |
4052 | + set_cpu_key_k_offset(&pos_key, |
4053 | + next_pos); |
4054 | goto research; |
4055 | } |
4056 | } /* for */ |
4057 | diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c |
4058 | index ea5061f..c3a9de6 100644 |
4059 | --- a/fs/reiserfs/inode.c |
4060 | +++ b/fs/reiserfs/inode.c |
4061 | @@ -1810,11 +1810,16 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, |
4062 | TYPE_STAT_DATA, SD_SIZE, MAX_US_INT); |
4063 | memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE); |
4064 | args.dirid = le32_to_cpu(ih.ih_key.k_dir_id); |
4065 | - if (insert_inode_locked4(inode, args.objectid, |
4066 | - reiserfs_find_actor, &args) < 0) { |
4067 | + |
4068 | + reiserfs_write_unlock(inode->i_sb); |
4069 | + err = insert_inode_locked4(inode, args.objectid, |
4070 | + reiserfs_find_actor, &args); |
4071 | + reiserfs_write_lock(inode->i_sb); |
4072 | + if (err) { |
4073 | err = -EINVAL; |
4074 | goto out_bad_inode; |
4075 | } |
4076 | + |
4077 | if (old_format_only(sb)) |
4078 | /* not a perfect generation count, as object ids can be reused, but |
4079 | ** this is as good as reiserfs can do right now. |
4080 | diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c |
4081 | index 4cce1d9..821bcf7 100644 |
4082 | --- a/fs/reiserfs/xattr.c |
4083 | +++ b/fs/reiserfs/xattr.c |
4084 | @@ -318,7 +318,19 @@ static int delete_one_xattr(struct dentry *dentry, void *data) |
4085 | static int chown_one_xattr(struct dentry *dentry, void *data) |
4086 | { |
4087 | struct iattr *attrs = data; |
4088 | - return reiserfs_setattr(dentry, attrs); |
4089 | + int ia_valid = attrs->ia_valid; |
4090 | + int err; |
4091 | + |
4092 | + /* |
4093 | + * We only want the ownership bits. Otherwise, we'll do |
4094 | + * things like change a directory to a regular file if |
4095 | + * ATTR_MODE is set. |
4096 | + */ |
4097 | + attrs->ia_valid &= (ATTR_UID|ATTR_GID); |
4098 | + err = reiserfs_setattr(dentry, attrs); |
4099 | + attrs->ia_valid = ia_valid; |
4100 | + |
4101 | + return err; |
4102 | } |
4103 | |
4104 | /* No i_mutex, but the inode is unconnected. */ |
4105 | diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c |
4106 | index d7c01ef..6c8767f 100644 |
4107 | --- a/fs/reiserfs/xattr_acl.c |
4108 | +++ b/fs/reiserfs/xattr_acl.c |
4109 | @@ -443,6 +443,9 @@ int reiserfs_acl_chmod(struct inode *inode) |
4110 | int depth; |
4111 | int error; |
4112 | |
4113 | + if (IS_PRIVATE(inode)) |
4114 | + return 0; |
4115 | + |
4116 | if (S_ISLNK(inode->i_mode)) |
4117 | return -EOPNOTSUPP; |
4118 | |
4119 | diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c |
4120 | index d82efaa..ca9ecaa 100644 |
4121 | --- a/fs/xfs/xfs_iops.c |
4122 | +++ b/fs/xfs/xfs_iops.c |
4123 | @@ -455,6 +455,28 @@ xfs_vn_getattr( |
4124 | return 0; |
4125 | } |
4126 | |
4127 | +static void |
4128 | +xfs_setattr_mode( |
4129 | + struct xfs_trans *tp, |
4130 | + struct xfs_inode *ip, |
4131 | + struct iattr *iattr) |
4132 | +{ |
4133 | + struct inode *inode = VFS_I(ip); |
4134 | + umode_t mode = iattr->ia_mode; |
4135 | + |
4136 | + ASSERT(tp); |
4137 | + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
4138 | + |
4139 | + if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) |
4140 | + mode &= ~S_ISGID; |
4141 | + |
4142 | + ip->i_d.di_mode &= S_IFMT; |
4143 | + ip->i_d.di_mode |= mode & ~S_IFMT; |
4144 | + |
4145 | + inode->i_mode &= S_IFMT; |
4146 | + inode->i_mode |= mode & ~S_IFMT; |
4147 | +} |
4148 | + |
4149 | int |
4150 | xfs_setattr_nonsize( |
4151 | struct xfs_inode *ip, |
4152 | @@ -606,18 +628,8 @@ xfs_setattr_nonsize( |
4153 | /* |
4154 | * Change file access modes. |
4155 | */ |
4156 | - if (mask & ATTR_MODE) { |
4157 | - umode_t mode = iattr->ia_mode; |
4158 | - |
4159 | - if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) |
4160 | - mode &= ~S_ISGID; |
4161 | - |
4162 | - ip->i_d.di_mode &= S_IFMT; |
4163 | - ip->i_d.di_mode |= mode & ~S_IFMT; |
4164 | - |
4165 | - inode->i_mode &= S_IFMT; |
4166 | - inode->i_mode |= mode & ~S_IFMT; |
4167 | - } |
4168 | + if (mask & ATTR_MODE) |
4169 | + xfs_setattr_mode(tp, ip, iattr); |
4170 | |
4171 | /* |
4172 | * Change file access or modified times. |
4173 | @@ -714,9 +726,8 @@ xfs_setattr_size( |
4174 | return XFS_ERROR(error); |
4175 | |
4176 | ASSERT(S_ISREG(ip->i_d.di_mode)); |
4177 | - ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| |
4178 | - ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID| |
4179 | - ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); |
4180 | + ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| |
4181 | + ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); |
4182 | |
4183 | if (!(flags & XFS_ATTR_NOLOCK)) { |
4184 | lock_flags |= XFS_IOLOCK_EXCL; |
4185 | @@ -860,6 +871,12 @@ xfs_setattr_size( |
4186 | xfs_inode_clear_eofblocks_tag(ip); |
4187 | } |
4188 | |
4189 | + /* |
4190 | + * Change file access modes. |
4191 | + */ |
4192 | + if (mask & ATTR_MODE) |
4193 | + xfs_setattr_mode(tp, ip, iattr); |
4194 | + |
4195 | if (mask & ATTR_CTIME) { |
4196 | inode->i_ctime = iattr->ia_ctime; |
4197 | ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; |
4198 | diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h |
4199 | index 22ba56e..fc93bd3d 100644 |
4200 | --- a/include/acpi/acpi_bus.h |
4201 | +++ b/include/acpi/acpi_bus.h |
4202 | @@ -352,7 +352,6 @@ acpi_status acpi_bus_get_status_handle(acpi_handle handle, |
4203 | unsigned long long *sta); |
4204 | int acpi_bus_get_status(struct acpi_device *device); |
4205 | |
4206 | -#ifdef CONFIG_PM |
4207 | int acpi_bus_set_power(acpi_handle handle, int state); |
4208 | const char *acpi_power_state_string(int state); |
4209 | int acpi_device_get_power(struct acpi_device *device, int *state); |
4210 | @@ -360,41 +359,12 @@ int acpi_device_set_power(struct acpi_device *device, int state); |
4211 | int acpi_bus_init_power(struct acpi_device *device); |
4212 | int acpi_bus_update_power(acpi_handle handle, int *state_p); |
4213 | bool acpi_bus_power_manageable(acpi_handle handle); |
4214 | + |
4215 | +#ifdef CONFIG_PM |
4216 | bool acpi_bus_can_wakeup(acpi_handle handle); |
4217 | -#else /* !CONFIG_PM */ |
4218 | -static inline int acpi_bus_set_power(acpi_handle handle, int state) |
4219 | -{ |
4220 | - return 0; |
4221 | -} |
4222 | -static inline const char *acpi_power_state_string(int state) |
4223 | -{ |
4224 | - return "D0"; |
4225 | -} |
4226 | -static inline int acpi_device_get_power(struct acpi_device *device, int *state) |
4227 | -{ |
4228 | - return 0; |
4229 | -} |
4230 | -static inline int acpi_device_set_power(struct acpi_device *device, int state) |
4231 | -{ |
4232 | - return 0; |
4233 | -} |
4234 | -static inline int acpi_bus_init_power(struct acpi_device *device) |
4235 | -{ |
4236 | - return 0; |
4237 | -} |
4238 | -static inline int acpi_bus_update_power(acpi_handle handle, int *state_p) |
4239 | -{ |
4240 | - return 0; |
4241 | -} |
4242 | -static inline bool acpi_bus_power_manageable(acpi_handle handle) |
4243 | -{ |
4244 | - return false; |
4245 | -} |
4246 | -static inline bool acpi_bus_can_wakeup(acpi_handle handle) |
4247 | -{ |
4248 | - return false; |
4249 | -} |
4250 | -#endif /* !CONFIG_PM */ |
4251 | +#else |
4252 | +static inline bool acpi_bus_can_wakeup(acpi_handle handle) { return false; } |
4253 | +#endif |
4254 | |
4255 | #ifdef CONFIG_ACPI_PROC_EVENT |
4256 | int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data); |
4257 | diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h |
4258 | index 16d4d09..6ae7d2c 100644 |
4259 | --- a/include/linux/cgroup.h |
4260 | +++ b/include/linux/cgroup.h |
4261 | @@ -570,7 +570,7 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos); |
4262 | * |
4263 | * If a subsystem synchronizes against the parent in its ->css_online() and |
4264 | * before starting iterating, and synchronizes against @pos on each |
4265 | - * iteration, any descendant cgroup which finished ->css_offline() is |
4266 | + * iteration, any descendant cgroup which finished ->css_online() is |
4267 | * guaranteed to be visible in the future iterations. |
4268 | * |
4269 | * In other words, the following guarantees that a descendant can't escape |
4270 | diff --git a/include/linux/wait.h b/include/linux/wait.h |
4271 | index 7cb64d4..30194a6 100644 |
4272 | --- a/include/linux/wait.h |
4273 | +++ b/include/linux/wait.h |
4274 | @@ -217,6 +217,8 @@ do { \ |
4275 | if (!ret) \ |
4276 | break; \ |
4277 | } \ |
4278 | + if (!ret && (condition)) \ |
4279 | + ret = 1; \ |
4280 | finish_wait(&wq, &__wait); \ |
4281 | } while (0) |
4282 | |
4283 | @@ -233,8 +235,9 @@ do { \ |
4284 | * wake_up() has to be called after changing any variable that could |
4285 | * change the result of the wait condition. |
4286 | * |
4287 | - * The function returns 0 if the @timeout elapsed, and the remaining |
4288 | - * jiffies if the condition evaluated to true before the timeout elapsed. |
4289 | + * The function returns 0 if the @timeout elapsed, or the remaining |
4290 | + * jiffies (at least 1) if the @condition evaluated to %true before |
4291 | + * the @timeout elapsed. |
4292 | */ |
4293 | #define wait_event_timeout(wq, condition, timeout) \ |
4294 | ({ \ |
4295 | @@ -302,6 +305,8 @@ do { \ |
4296 | ret = -ERESTARTSYS; \ |
4297 | break; \ |
4298 | } \ |
4299 | + if (!ret && (condition)) \ |
4300 | + ret = 1; \ |
4301 | finish_wait(&wq, &__wait); \ |
4302 | } while (0) |
4303 | |
4304 | @@ -318,9 +323,10 @@ do { \ |
4305 | * wake_up() has to be called after changing any variable that could |
4306 | * change the result of the wait condition. |
4307 | * |
4308 | - * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it |
4309 | - * was interrupted by a signal, and the remaining jiffies otherwise |
4310 | - * if the condition evaluated to true before the timeout elapsed. |
4311 | + * Returns: |
4312 | + * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by |
4313 | + * a signal, or the remaining jiffies (at least 1) if the @condition |
4314 | + * evaluated to %true before the @timeout elapsed. |
4315 | */ |
4316 | #define wait_event_interruptible_timeout(wq, condition, timeout) \ |
4317 | ({ \ |
4318 | diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h |
4319 | index c4af592..f8640f3 100644 |
4320 | --- a/include/target/target_core_base.h |
4321 | +++ b/include/target/target_core_base.h |
4322 | @@ -544,6 +544,7 @@ struct se_session { |
4323 | struct list_head sess_list; |
4324 | struct list_head sess_acl_list; |
4325 | struct list_head sess_cmd_list; |
4326 | + struct list_head sess_wait_list; |
4327 | spinlock_t sess_cmd_lock; |
4328 | struct kref sess_kref; |
4329 | }; |
4330 | diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h |
4331 | index 9dfc120..3ef3fe0 100644 |
4332 | --- a/include/xen/interface/io/netif.h |
4333 | +++ b/include/xen/interface/io/netif.h |
4334 | @@ -13,6 +13,24 @@ |
4335 | #include <xen/interface/grant_table.h> |
4336 | |
4337 | /* |
4338 | + * Older implementation of Xen network frontend / backend has an |
4339 | + * implicit dependency on the MAX_SKB_FRAGS as the maximum number of |
4340 | + * ring slots a skb can use. Netfront / netback may not work as |
4341 | + * expected when frontend and backend have different MAX_SKB_FRAGS. |
4342 | + * |
4343 | + * A better approach is to add mechanism for netfront / netback to |
4344 | + * negotiate this value. However we cannot fix all possible |
4345 | + * frontends, so we need to define a value which states the minimum |
4346 | + * slots backend must support. |
4347 | + * |
4348 | + * The minimum value derives from older Linux kernel's MAX_SKB_FRAGS |
4349 | + * (18), which is proved to work with most frontends. Any new backend |
4350 | + * which doesn't negotiate with frontend should expect frontend to |
4351 | + * send a valid packet using slots up to this value. |
4352 | + */ |
4353 | +#define XEN_NETIF_NR_SLOTS_MIN 18 |
4354 | + |
4355 | +/* |
4356 | * Notifications after enqueuing any type of message should be conditional on |
4357 | * the appropriate req_event or rsp_event field in the shared ring. |
4358 | * If the client sends notification for rx requests then it should specify |
4359 | @@ -47,6 +65,7 @@ |
4360 | #define _XEN_NETTXF_extra_info (3) |
4361 | #define XEN_NETTXF_extra_info (1U<<_XEN_NETTXF_extra_info) |
4362 | |
4363 | +#define XEN_NETIF_MAX_TX_SIZE 0xFFFF |
4364 | struct xen_netif_tx_request { |
4365 | grant_ref_t gref; /* Reference to buffer page */ |
4366 | uint16_t offset; /* Offset within buffer page */ |
4367 | diff --git a/kernel/cgroup.c b/kernel/cgroup.c |
4368 | index ba1f977..a48de6a 100644 |
4369 | --- a/kernel/cgroup.c |
4370 | +++ b/kernel/cgroup.c |
4371 | @@ -2747,13 +2747,14 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys, |
4372 | goto out; |
4373 | } |
4374 | |
4375 | + cfe->type = (void *)cft; |
4376 | + cfe->dentry = dentry; |
4377 | + dentry->d_fsdata = cfe; |
4378 | + simple_xattrs_init(&cfe->xattrs); |
4379 | + |
4380 | mode = cgroup_file_mode(cft); |
4381 | error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb); |
4382 | if (!error) { |
4383 | - cfe->type = (void *)cft; |
4384 | - cfe->dentry = dentry; |
4385 | - dentry->d_fsdata = cfe; |
4386 | - simple_xattrs_init(&cfe->xattrs); |
4387 | list_add_tail(&cfe->node, &parent->files); |
4388 | cfe = NULL; |
4389 | } |
4390 | @@ -2999,11 +3000,8 @@ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos, |
4391 | WARN_ON_ONCE(!rcu_read_lock_held()); |
4392 | |
4393 | /* if first iteration, pretend we just visited @cgroup */ |
4394 | - if (!pos) { |
4395 | - if (list_empty(&cgroup->children)) |
4396 | - return NULL; |
4397 | + if (!pos) |
4398 | pos = cgroup; |
4399 | - } |
4400 | |
4401 | /* visit the first child if exists */ |
4402 | next = list_first_or_null_rcu(&pos->children, struct cgroup, sibling); |
4403 | @@ -3011,14 +3009,14 @@ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos, |
4404 | return next; |
4405 | |
4406 | /* no child, visit my or the closest ancestor's next sibling */ |
4407 | - do { |
4408 | + while (pos != cgroup) { |
4409 | next = list_entry_rcu(pos->sibling.next, struct cgroup, |
4410 | sibling); |
4411 | if (&next->sibling != &pos->parent->children) |
4412 | return next; |
4413 | |
4414 | pos = pos->parent; |
4415 | - } while (pos != cgroup); |
4416 | + } |
4417 | |
4418 | return NULL; |
4419 | } |
4420 | diff --git a/kernel/module.c b/kernel/module.c |
4421 | index 0925c9a..97f202c 100644 |
4422 | --- a/kernel/module.c |
4423 | +++ b/kernel/module.c |
4424 | @@ -1861,12 +1861,12 @@ static void free_module(struct module *mod) |
4425 | { |
4426 | trace_module_free(mod); |
4427 | |
4428 | - /* Delete from various lists */ |
4429 | - mutex_lock(&module_mutex); |
4430 | - stop_machine(__unlink_module, mod, NULL); |
4431 | - mutex_unlock(&module_mutex); |
4432 | mod_sysfs_teardown(mod); |
4433 | |
4434 | + /* We leave it in list to prevent duplicate loads, but make sure |
4435 | + * that noone uses it while it's being deconstructed. */ |
4436 | + mod->state = MODULE_STATE_UNFORMED; |
4437 | + |
4438 | /* Remove dynamic debug info */ |
4439 | ddebug_remove_module(mod->name); |
4440 | |
4441 | @@ -1879,6 +1879,11 @@ static void free_module(struct module *mod) |
4442 | /* Free any allocated parameters. */ |
4443 | destroy_params(mod->kp, mod->num_kp); |
4444 | |
4445 | + /* Now we can delete it from the lists */ |
4446 | + mutex_lock(&module_mutex); |
4447 | + stop_machine(__unlink_module, mod, NULL); |
4448 | + mutex_unlock(&module_mutex); |
4449 | + |
4450 | /* This may be NULL, but that's OK */ |
4451 | unset_module_init_ro_nx(mod); |
4452 | module_free(mod, mod->module_init); |
4453 | diff --git a/kernel/range.c b/kernel/range.c |
4454 | index 9b8ae2d..98883ed 100644 |
4455 | --- a/kernel/range.c |
4456 | +++ b/kernel/range.c |
4457 | @@ -48,9 +48,11 @@ int add_range_with_merge(struct range *range, int az, int nr_range, |
4458 | final_start = min(range[i].start, start); |
4459 | final_end = max(range[i].end, end); |
4460 | |
4461 | - range[i].start = final_start; |
4462 | - range[i].end = final_end; |
4463 | - return nr_range; |
4464 | + /* clear it and add it back for further merge */ |
4465 | + range[i].start = 0; |
4466 | + range[i].end = 0; |
4467 | + return add_range_with_merge(range, az, nr_range, |
4468 | + final_start, final_end); |
4469 | } |
4470 | |
4471 | /* Need to add it: */ |
4472 | diff --git a/lib/klist.c b/lib/klist.c |
4473 | index 0874e41..358a368 100644 |
4474 | --- a/lib/klist.c |
4475 | +++ b/lib/klist.c |
4476 | @@ -193,10 +193,10 @@ static void klist_release(struct kref *kref) |
4477 | if (waiter->node != n) |
4478 | continue; |
4479 | |
4480 | + list_del(&waiter->list); |
4481 | waiter->woken = 1; |
4482 | mb(); |
4483 | wake_up_process(waiter->process); |
4484 | - list_del(&waiter->list); |
4485 | } |
4486 | spin_unlock(&klist_remove_lock); |
4487 | knode_set_klist(n, NULL); |
4488 | diff --git a/mm/huge_memory.c b/mm/huge_memory.c |
4489 | index e2f7f5aa..a4510d4 100644 |
4490 | --- a/mm/huge_memory.c |
4491 | +++ b/mm/huge_memory.c |
4492 | @@ -2318,7 +2318,12 @@ static void collapse_huge_page(struct mm_struct *mm, |
4493 | pte_unmap(pte); |
4494 | spin_lock(&mm->page_table_lock); |
4495 | BUG_ON(!pmd_none(*pmd)); |
4496 | - set_pmd_at(mm, address, pmd, _pmd); |
4497 | + /* |
4498 | + * We can only use set_pmd_at when establishing |
4499 | + * hugepmds and never for establishing regular pmds that |
4500 | + * points to regular pagetables. Use pmd_populate for that |
4501 | + */ |
4502 | + pmd_populate(mm, pmd, pmd_pgtable(_pmd)); |
4503 | spin_unlock(&mm->page_table_lock); |
4504 | anon_vma_unlock_write(vma->anon_vma); |
4505 | goto out; |
4506 | diff --git a/mm/memcontrol.c b/mm/memcontrol.c |
4507 | index 2b55222..9630d58 100644 |
4508 | --- a/mm/memcontrol.c |
4509 | +++ b/mm/memcontrol.c |
4510 | @@ -3991,8 +3991,6 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype, |
4511 | if (mem_cgroup_disabled()) |
4512 | return NULL; |
4513 | |
4514 | - VM_BUG_ON(PageSwapCache(page)); |
4515 | - |
4516 | if (PageTransHuge(page)) { |
4517 | nr_pages <<= compound_order(page); |
4518 | VM_BUG_ON(!PageTransHuge(page)); |
4519 | @@ -4088,6 +4086,18 @@ void mem_cgroup_uncharge_page(struct page *page) |
4520 | if (page_mapped(page)) |
4521 | return; |
4522 | VM_BUG_ON(page->mapping && !PageAnon(page)); |
4523 | + /* |
4524 | + * If the page is in swap cache, uncharge should be deferred |
4525 | + * to the swap path, which also properly accounts swap usage |
4526 | + * and handles memcg lifetime. |
4527 | + * |
4528 | + * Note that this check is not stable and reclaim may add the |
4529 | + * page to swap cache at any time after this. However, if the |
4530 | + * page is not in swap cache by the time page->mapcount hits |
4531 | + * 0, there won't be any page table references to the swap |
4532 | + * slot, and reclaim will free it and not actually write the |
4533 | + * page to disk. |
4534 | + */ |
4535 | if (PageSwapCache(page)) |
4536 | return; |
4537 | __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false); |
4538 | diff --git a/mm/migrate.c b/mm/migrate.c |
4539 | index 3bbaf5d..22ed5c1 100644 |
4540 | --- a/mm/migrate.c |
4541 | +++ b/mm/migrate.c |
4542 | @@ -165,7 +165,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, |
4543 | pte = arch_make_huge_pte(pte, vma, new, 0); |
4544 | } |
4545 | #endif |
4546 | - flush_cache_page(vma, addr, pte_pfn(pte)); |
4547 | + flush_dcache_page(new); |
4548 | set_pte_at(mm, addr, ptep, pte); |
4549 | |
4550 | if (PageHuge(new)) { |
4551 | diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c |
4552 | index be04122..6725ff1 100644 |
4553 | --- a/mm/mmu_notifier.c |
4554 | +++ b/mm/mmu_notifier.c |
4555 | @@ -40,48 +40,44 @@ void __mmu_notifier_release(struct mm_struct *mm) |
4556 | int id; |
4557 | |
4558 | /* |
4559 | - * srcu_read_lock() here will block synchronize_srcu() in |
4560 | - * mmu_notifier_unregister() until all registered |
4561 | - * ->release() callouts this function makes have |
4562 | - * returned. |
4563 | + * SRCU here will block mmu_notifier_unregister until |
4564 | + * ->release returns. |
4565 | */ |
4566 | id = srcu_read_lock(&srcu); |
4567 | + hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) |
4568 | + /* |
4569 | + * If ->release runs before mmu_notifier_unregister it must be |
4570 | + * handled, as it's the only way for the driver to flush all |
4571 | + * existing sptes and stop the driver from establishing any more |
4572 | + * sptes before all the pages in the mm are freed. |
4573 | + */ |
4574 | + if (mn->ops->release) |
4575 | + mn->ops->release(mn, mm); |
4576 | + srcu_read_unlock(&srcu, id); |
4577 | + |
4578 | spin_lock(&mm->mmu_notifier_mm->lock); |
4579 | while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) { |
4580 | mn = hlist_entry(mm->mmu_notifier_mm->list.first, |
4581 | struct mmu_notifier, |
4582 | hlist); |
4583 | - |
4584 | /* |
4585 | - * Unlink. This will prevent mmu_notifier_unregister() |
4586 | - * from also making the ->release() callout. |
4587 | + * We arrived before mmu_notifier_unregister so |
4588 | + * mmu_notifier_unregister will do nothing other than to wait |
4589 | + * for ->release to finish and for mmu_notifier_unregister to |
4590 | + * return. |
4591 | */ |
4592 | hlist_del_init_rcu(&mn->hlist); |
4593 | - spin_unlock(&mm->mmu_notifier_mm->lock); |
4594 | - |
4595 | - /* |
4596 | - * Clear sptes. (see 'release' description in mmu_notifier.h) |
4597 | - */ |
4598 | - if (mn->ops->release) |
4599 | - mn->ops->release(mn, mm); |
4600 | - |
4601 | - spin_lock(&mm->mmu_notifier_mm->lock); |
4602 | } |
4603 | spin_unlock(&mm->mmu_notifier_mm->lock); |
4604 | |
4605 | /* |
4606 | - * All callouts to ->release() which we have done are complete. |
4607 | - * Allow synchronize_srcu() in mmu_notifier_unregister() to complete |
4608 | - */ |
4609 | - srcu_read_unlock(&srcu, id); |
4610 | - |
4611 | - /* |
4612 | - * mmu_notifier_unregister() may have unlinked a notifier and may |
4613 | - * still be calling out to it. Additionally, other notifiers |
4614 | - * may have been active via vmtruncate() et. al. Block here |
4615 | - * to ensure that all notifier callouts for this mm have been |
4616 | - * completed and the sptes are really cleaned up before returning |
4617 | - * to exit_mmap(). |
4618 | + * synchronize_srcu here prevents mmu_notifier_release from returning to |
4619 | + * exit_mmap (which would proceed with freeing all pages in the mm) |
4620 | + * until the ->release method returns, if it was invoked by |
4621 | + * mmu_notifier_unregister. |
4622 | + * |
4623 | + * The mmu_notifier_mm can't go away from under us because one mm_count |
4624 | + * is held by exit_mmap. |
4625 | */ |
4626 | synchronize_srcu(&srcu); |
4627 | } |
4628 | @@ -292,31 +288,34 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm) |
4629 | { |
4630 | BUG_ON(atomic_read(&mm->mm_count) <= 0); |
4631 | |
4632 | - spin_lock(&mm->mmu_notifier_mm->lock); |
4633 | if (!hlist_unhashed(&mn->hlist)) { |
4634 | + /* |
4635 | + * SRCU here will force exit_mmap to wait for ->release to |
4636 | + * finish before freeing the pages. |
4637 | + */ |
4638 | int id; |
4639 | |
4640 | + id = srcu_read_lock(&srcu); |
4641 | /* |
4642 | - * Ensure we synchronize up with __mmu_notifier_release(). |
4643 | + * exit_mmap will block in mmu_notifier_release to guarantee |
4644 | + * that ->release is called before freeing the pages. |
4645 | */ |
4646 | - id = srcu_read_lock(&srcu); |
4647 | - |
4648 | - hlist_del_rcu(&mn->hlist); |
4649 | - spin_unlock(&mm->mmu_notifier_mm->lock); |
4650 | - |
4651 | if (mn->ops->release) |
4652 | mn->ops->release(mn, mm); |
4653 | + srcu_read_unlock(&srcu, id); |
4654 | |
4655 | + spin_lock(&mm->mmu_notifier_mm->lock); |
4656 | /* |
4657 | - * Allow __mmu_notifier_release() to complete. |
4658 | + * Can not use list_del_rcu() since __mmu_notifier_release |
4659 | + * can delete it before we hold the lock. |
4660 | */ |
4661 | - srcu_read_unlock(&srcu, id); |
4662 | - } else |
4663 | + hlist_del_init_rcu(&mn->hlist); |
4664 | spin_unlock(&mm->mmu_notifier_mm->lock); |
4665 | + } |
4666 | |
4667 | /* |
4668 | - * Wait for any running method to finish, including ->release() if it |
4669 | - * was run by __mmu_notifier_release() instead of us. |
4670 | + * Wait for any running method to finish, of course including |
4671 | + * ->release if it was run by mmu_notifier_relase instead of us. |
4672 | */ |
4673 | synchronize_srcu(&srcu); |
4674 | |
4675 | diff --git a/mm/pagewalk.c b/mm/pagewalk.c |
4676 | index 35aa294..5da2cbc 100644 |
4677 | --- a/mm/pagewalk.c |
4678 | +++ b/mm/pagewalk.c |
4679 | @@ -127,28 +127,7 @@ static int walk_hugetlb_range(struct vm_area_struct *vma, |
4680 | return 0; |
4681 | } |
4682 | |
4683 | -static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk) |
4684 | -{ |
4685 | - struct vm_area_struct *vma; |
4686 | - |
4687 | - /* We don't need vma lookup at all. */ |
4688 | - if (!walk->hugetlb_entry) |
4689 | - return NULL; |
4690 | - |
4691 | - VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem)); |
4692 | - vma = find_vma(walk->mm, addr); |
4693 | - if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma)) |
4694 | - return vma; |
4695 | - |
4696 | - return NULL; |
4697 | -} |
4698 | - |
4699 | #else /* CONFIG_HUGETLB_PAGE */ |
4700 | -static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk) |
4701 | -{ |
4702 | - return NULL; |
4703 | -} |
4704 | - |
4705 | static int walk_hugetlb_range(struct vm_area_struct *vma, |
4706 | unsigned long addr, unsigned long end, |
4707 | struct mm_walk *walk) |
4708 | @@ -198,30 +177,53 @@ int walk_page_range(unsigned long addr, unsigned long end, |
4709 | if (!walk->mm) |
4710 | return -EINVAL; |
4711 | |
4712 | + VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem)); |
4713 | + |
4714 | pgd = pgd_offset(walk->mm, addr); |
4715 | do { |
4716 | - struct vm_area_struct *vma; |
4717 | + struct vm_area_struct *vma = NULL; |
4718 | |
4719 | next = pgd_addr_end(addr, end); |
4720 | |
4721 | /* |
4722 | - * handle hugetlb vma individually because pagetable walk for |
4723 | - * the hugetlb page is dependent on the architecture and |
4724 | - * we can't handled it in the same manner as non-huge pages. |
4725 | + * This function was not intended to be vma based. |
4726 | + * But there are vma special cases to be handled: |
4727 | + * - hugetlb vma's |
4728 | + * - VM_PFNMAP vma's |
4729 | */ |
4730 | - vma = hugetlb_vma(addr, walk); |
4731 | + vma = find_vma(walk->mm, addr); |
4732 | if (vma) { |
4733 | - if (vma->vm_end < next) |
4734 | + /* |
4735 | + * There are no page structures backing a VM_PFNMAP |
4736 | + * range, so do not allow split_huge_page_pmd(). |
4737 | + */ |
4738 | + if ((vma->vm_start <= addr) && |
4739 | + (vma->vm_flags & VM_PFNMAP)) { |
4740 | next = vma->vm_end; |
4741 | + pgd = pgd_offset(walk->mm, next); |
4742 | + continue; |
4743 | + } |
4744 | /* |
4745 | - * Hugepage is very tightly coupled with vma, so |
4746 | - * walk through hugetlb entries within a given vma. |
4747 | + * Handle hugetlb vma individually because pagetable |
4748 | + * walk for the hugetlb page is dependent on the |
4749 | + * architecture and we can't handled it in the same |
4750 | + * manner as non-huge pages. |
4751 | */ |
4752 | - err = walk_hugetlb_range(vma, addr, next, walk); |
4753 | - if (err) |
4754 | - break; |
4755 | - pgd = pgd_offset(walk->mm, next); |
4756 | - continue; |
4757 | + if (walk->hugetlb_entry && (vma->vm_start <= addr) && |
4758 | + is_vm_hugetlb_page(vma)) { |
4759 | + if (vma->vm_end < next) |
4760 | + next = vma->vm_end; |
4761 | + /* |
4762 | + * Hugepage is very tightly coupled with vma, |
4763 | + * so walk through hugetlb entries within a |
4764 | + * given vma. |
4765 | + */ |
4766 | + err = walk_hugetlb_range(vma, addr, next, walk); |
4767 | + if (err) |
4768 | + break; |
4769 | + pgd = pgd_offset(walk->mm, next); |
4770 | + continue; |
4771 | + } |
4772 | } |
4773 | |
4774 | if (pgd_none_or_clear_bad(pgd)) { |
4775 | diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c |
4776 | index d51ca9d..9cbebc2 100644 |
4777 | --- a/net/mac80211/iface.c |
4778 | +++ b/net/mac80211/iface.c |
4779 | @@ -1649,6 +1649,15 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local) |
4780 | |
4781 | ASSERT_RTNL(); |
4782 | |
4783 | + /* |
4784 | + * Close all AP_VLAN interfaces first, as otherwise they |
4785 | + * might be closed while the AP interface they belong to |
4786 | + * is closed, causing unregister_netdevice_many() to crash. |
4787 | + */ |
4788 | + list_for_each_entry(sdata, &local->interfaces, list) |
4789 | + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) |
4790 | + dev_close(sdata->dev); |
4791 | + |
4792 | mutex_lock(&local->iflist_mtx); |
4793 | list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { |
4794 | list_del(&sdata->list); |
4795 | diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c |
4796 | index 346ad4c..0a60f40 100644 |
4797 | --- a/net/mac80211/mlme.c |
4798 | +++ b/net/mac80211/mlme.c |
4799 | @@ -3182,10 +3182,6 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) |
4800 | if (WARN_ON_ONCE(!auth_data)) |
4801 | return -EINVAL; |
4802 | |
4803 | - if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) |
4804 | - tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS | |
4805 | - IEEE80211_TX_INTFL_MLME_CONN_TX; |
4806 | - |
4807 | auth_data->tries++; |
4808 | |
4809 | if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) { |
4810 | @@ -3219,6 +3215,10 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) |
4811 | auth_data->expected_transaction = trans; |
4812 | } |
4813 | |
4814 | + if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) |
4815 | + tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS | |
4816 | + IEEE80211_TX_INTFL_MLME_CONN_TX; |
4817 | + |
4818 | ieee80211_send_auth(sdata, trans, auth_data->algorithm, status, |
4819 | auth_data->data, auth_data->data_len, |
4820 | auth_data->bss->bssid, |
4821 | @@ -3242,12 +3242,12 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) |
4822 | * will not answer to direct packet in unassociated state. |
4823 | */ |
4824 | ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1], |
4825 | - NULL, 0, (u32) -1, true, tx_flags, |
4826 | + NULL, 0, (u32) -1, true, 0, |
4827 | auth_data->bss->channel, false); |
4828 | rcu_read_unlock(); |
4829 | } |
4830 | |
4831 | - if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) { |
4832 | + if (tx_flags == 0) { |
4833 | auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; |
4834 | ifmgd->auth_data->timeout_started = true; |
4835 | run_again(ifmgd, auth_data->timeout); |
4836 | diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c |
4837 | index c6844ad..bb0b457 100644 |
4838 | --- a/net/mac80211/rx.c |
4839 | +++ b/net/mac80211/rx.c |
4840 | @@ -3032,6 +3032,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx, |
4841 | * and location updates. Note that mac80211 |
4842 | * itself never looks at these frames. |
4843 | */ |
4844 | + if (!multicast && |
4845 | + !ether_addr_equal(sdata->vif.addr, hdr->addr1)) |
4846 | + return 0; |
4847 | if (ieee80211_is_public_action(hdr, skb->len)) |
4848 | return 1; |
4849 | if (!ieee80211_is_beacon(hdr->frame_control)) |
4850 | diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c |
4851 | index 3ed801d..124b1fd 100644 |
4852 | --- a/net/mac80211/tkip.c |
4853 | +++ b/net/mac80211/tkip.c |
4854 | @@ -208,10 +208,10 @@ void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf, |
4855 | u32 iv32 = get_unaligned_le32(&data[4]); |
4856 | u16 iv16 = data[2] | (data[0] << 8); |
4857 | |
4858 | - spin_lock_bh(&key->u.tkip.txlock); |
4859 | + spin_lock(&key->u.tkip.txlock); |
4860 | ieee80211_compute_tkip_p1k(key, iv32); |
4861 | tkip_mixing_phase2(tk, ctx, iv16, p2k); |
4862 | - spin_unlock_bh(&key->u.tkip.txlock); |
4863 | + spin_unlock(&key->u.tkip.txlock); |
4864 | } |
4865 | EXPORT_SYMBOL(ieee80211_get_tkip_p2k); |
4866 | |
4867 | diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c |
4868 | index f8529fc..5356b12 100644 |
4869 | --- a/net/sunrpc/sched.c |
4870 | +++ b/net/sunrpc/sched.c |
4871 | @@ -324,11 +324,17 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task); |
4872 | * Note: If the task is ASYNC, and is being made runnable after sitting on an |
4873 | * rpc_wait_queue, this must be called with the queue spinlock held to protect |
4874 | * the wait queue operation. |
4875 | + * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(), |
4876 | + * which is needed to ensure that __rpc_execute() doesn't loop (due to the |
4877 | + * lockless RPC_IS_QUEUED() test) before we've had a chance to test |
4878 | + * the RPC_TASK_RUNNING flag. |
4879 | */ |
4880 | static void rpc_make_runnable(struct rpc_task *task) |
4881 | { |
4882 | + bool need_wakeup = !rpc_test_and_set_running(task); |
4883 | + |
4884 | rpc_clear_queued(task); |
4885 | - if (rpc_test_and_set_running(task)) |
4886 | + if (!need_wakeup) |
4887 | return; |
4888 | if (RPC_IS_ASYNC(task)) { |
4889 | INIT_WORK(&task->u.tk_work, rpc_async_schedule); |
4890 | diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c |
4891 | index c3f9e1e..06bdf5a 100644 |
4892 | --- a/net/sunrpc/svcauth_unix.c |
4893 | +++ b/net/sunrpc/svcauth_unix.c |
4894 | @@ -810,11 +810,15 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp) |
4895 | goto badcred; |
4896 | argv->iov_base = (void*)((__be32*)argv->iov_base + slen); /* skip machname */ |
4897 | argv->iov_len -= slen*4; |
4898 | - |
4899 | + /* |
4900 | + * Note: we skip uid_valid()/gid_valid() checks here for |
4901 | + * backwards compatibility with clients that use -1 id's. |
4902 | + * Instead, -1 uid or gid is later mapped to the |
4903 | + * (export-specific) anonymous id by nfsd_setuser. |
4904 | + * Supplementary gid's will be left alone. |
4905 | + */ |
4906 | cred->cr_uid = make_kuid(&init_user_ns, svc_getnl(argv)); /* uid */ |
4907 | cred->cr_gid = make_kgid(&init_user_ns, svc_getnl(argv)); /* gid */ |
4908 | - if (!uid_valid(cred->cr_uid) || !gid_valid(cred->cr_gid)) |
4909 | - goto badcred; |
4910 | slen = svc_getnl(argv); /* gids length */ |
4911 | if (slen > 16 || (len -= (slen + 2)*4) < 0) |
4912 | goto badcred; |
4913 | @@ -823,8 +827,6 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp) |
4914 | return SVC_CLOSE; |
4915 | for (i = 0; i < slen; i++) { |
4916 | kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv)); |
4917 | - if (!gid_valid(kgid)) |
4918 | - goto badcred; |
4919 | GROUP_AT(cred->cr_group_info, i) = kgid; |
4920 | } |
4921 | if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { |
4922 | diff --git a/net/wireless/core.c b/net/wireless/core.c |
4923 | index 6ddf74f..ed56e2b 100644 |
4924 | --- a/net/wireless/core.c |
4925 | +++ b/net/wireless/core.c |
4926 | @@ -638,17 +638,21 @@ int wiphy_register(struct wiphy *wiphy) |
4927 | * cfg80211_mutex lock |
4928 | */ |
4929 | res = rfkill_register(rdev->rfkill); |
4930 | - if (res) |
4931 | - goto out_rm_dev; |
4932 | + if (res) { |
4933 | + device_del(&rdev->wiphy.dev); |
4934 | + |
4935 | + mutex_lock(&cfg80211_mutex); |
4936 | + debugfs_remove_recursive(rdev->wiphy.debugfsdir); |
4937 | + list_del_rcu(&rdev->list); |
4938 | + wiphy_regulatory_deregister(wiphy); |
4939 | + mutex_unlock(&cfg80211_mutex); |
4940 | + return res; |
4941 | + } |
4942 | |
4943 | rtnl_lock(); |
4944 | rdev->wiphy.registered = true; |
4945 | rtnl_unlock(); |
4946 | return 0; |
4947 | - |
4948 | -out_rm_dev: |
4949 | - device_del(&rdev->wiphy.dev); |
4950 | - return res; |
4951 | } |
4952 | EXPORT_SYMBOL(wiphy_register); |
4953 | |
4954 | diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c |
4955 | index 58e13a8..34ef522 100644 |
4956 | --- a/net/wireless/nl80211.c |
4957 | +++ b/net/wireless/nl80211.c |
4958 | @@ -7177,6 +7177,8 @@ static int nl80211_send_wowlan_tcp(struct sk_buff *msg, |
4959 | &tcp->payload_tok)) |
4960 | return -ENOBUFS; |
4961 | |
4962 | + nla_nest_end(msg, nl_tcp); |
4963 | + |
4964 | return 0; |
4965 | } |
4966 | |
4967 | diff --git a/net/wireless/sme.c b/net/wireless/sme.c |
4968 | index 482c70e..5b2d0a0 100644 |
4969 | --- a/net/wireless/sme.c |
4970 | +++ b/net/wireless/sme.c |
4971 | @@ -227,6 +227,9 @@ void cfg80211_conn_work(struct work_struct *work) |
4972 | mutex_lock(&rdev->sched_scan_mtx); |
4973 | |
4974 | list_for_each_entry(wdev, &rdev->wdev_list, list) { |
4975 | + if (!wdev->netdev) |
4976 | + continue; |
4977 | + |
4978 | wdev_lock(wdev); |
4979 | if (!netif_running(wdev->netdev)) { |
4980 | wdev_unlock(wdev); |
4981 | diff --git a/net/wireless/trace.h b/net/wireless/trace.h |
4982 | index 7586de7..3cdf17c 100644 |
4983 | --- a/net/wireless/trace.h |
4984 | +++ b/net/wireless/trace.h |
4985 | @@ -2386,6 +2386,7 @@ TRACE_EVENT(cfg80211_report_wowlan_wakeup, |
4986 | TP_STRUCT__entry( |
4987 | WIPHY_ENTRY |
4988 | WDEV_ENTRY |
4989 | + __field(bool, non_wireless) |
4990 | __field(bool, disconnect) |
4991 | __field(bool, magic_pkt) |
4992 | __field(bool, gtk_rekey_failure) |
4993 | @@ -2394,20 +2395,22 @@ TRACE_EVENT(cfg80211_report_wowlan_wakeup, |
4994 | __field(bool, rfkill_release) |
4995 | __field(s32, pattern_idx) |
4996 | __field(u32, packet_len) |
4997 | - __dynamic_array(u8, packet, wakeup->packet_present_len) |
4998 | + __dynamic_array(u8, packet, |
4999 | + wakeup ? wakeup->packet_present_len : 0) |
5000 | ), |
5001 | TP_fast_assign( |
5002 | WIPHY_ASSIGN; |
5003 | WDEV_ASSIGN; |
5004 | - __entry->disconnect = wakeup->disconnect; |
5005 | - __entry->magic_pkt = wakeup->magic_pkt; |
5006 | - __entry->gtk_rekey_failure = wakeup->gtk_rekey_failure; |
5007 | - __entry->eap_identity_req = wakeup->eap_identity_req; |
5008 | - __entry->four_way_handshake = wakeup->four_way_handshake; |
5009 | - __entry->rfkill_release = wakeup->rfkill_release; |
5010 | - __entry->pattern_idx = wakeup->pattern_idx; |
5011 | - __entry->packet_len = wakeup->packet_len; |
5012 | - if (wakeup->packet && wakeup->packet_present_len) |
5013 | + __entry->non_wireless = !wakeup; |
5014 | + __entry->disconnect = wakeup ? wakeup->disconnect : false; |
5015 | + __entry->magic_pkt = wakeup ? wakeup->magic_pkt : false; |
5016 | + __entry->gtk_rekey_failure = wakeup ? wakeup->gtk_rekey_failure : false; |
5017 | + __entry->eap_identity_req = wakeup ? wakeup->eap_identity_req : false; |
5018 | + __entry->four_way_handshake = wakeup ? wakeup->four_way_handshake : false; |
5019 | + __entry->rfkill_release = wakeup ? wakeup->rfkill_release : false; |
5020 | + __entry->pattern_idx = wakeup ? wakeup->pattern_idx : false; |
5021 | + __entry->packet_len = wakeup ? wakeup->packet_len : false; |
5022 | + if (wakeup && wakeup->packet && wakeup->packet_present_len) |
5023 | memcpy(__get_dynamic_array(packet), wakeup->packet, |
5024 | wakeup->packet_present_len); |
5025 | ), |
5026 | diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c |
5027 | index 0f6f481..c92a056 100644 |
5028 | --- a/sound/soc/codecs/cs42l52.c |
5029 | +++ b/sound/soc/codecs/cs42l52.c |
5030 | @@ -86,7 +86,7 @@ static const struct reg_default cs42l52_reg_defaults[] = { |
5031 | { CS42L52_BEEP_VOL, 0x00 }, /* r1D Beep Volume off Time */ |
5032 | { CS42L52_BEEP_TONE_CTL, 0x00 }, /* r1E Beep Tone Cfg. */ |
5033 | { CS42L52_TONE_CTL, 0x00 }, /* r1F Tone Ctl */ |
5034 | - { CS42L52_MASTERA_VOL, 0x88 }, /* r20 Master A Volume */ |
5035 | + { CS42L52_MASTERA_VOL, 0x00 }, /* r20 Master A Volume */ |
5036 | { CS42L52_MASTERB_VOL, 0x00 }, /* r21 Master B Volume */ |
5037 | { CS42L52_HPA_VOL, 0x00 }, /* r22 Headphone A Volume */ |
5038 | { CS42L52_HPB_VOL, 0x00 }, /* r23 Headphone B Volume */ |
5039 | diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c |
5040 | index cdeb301..eaeab83 100644 |
5041 | --- a/sound/soc/codecs/wm5110.c |
5042 | +++ b/sound/soc/codecs/wm5110.c |
5043 | @@ -190,7 +190,7 @@ ARIZONA_MIXER_CONTROLS("DSP2R", ARIZONA_DSP2RMIX_INPUT_1_SOURCE), |
5044 | ARIZONA_MIXER_CONTROLS("DSP3L", ARIZONA_DSP3LMIX_INPUT_1_SOURCE), |
5045 | ARIZONA_MIXER_CONTROLS("DSP3R", ARIZONA_DSP3RMIX_INPUT_1_SOURCE), |
5046 | ARIZONA_MIXER_CONTROLS("DSP4L", ARIZONA_DSP4LMIX_INPUT_1_SOURCE), |
5047 | -ARIZONA_MIXER_CONTROLS("DSP5R", ARIZONA_DSP4RMIX_INPUT_1_SOURCE), |
5048 | +ARIZONA_MIXER_CONTROLS("DSP4R", ARIZONA_DSP4RMIX_INPUT_1_SOURCE), |
5049 | |
5050 | ARIZONA_MIXER_CONTROLS("Mic", ARIZONA_MICMIX_INPUT_1_SOURCE), |
5051 | ARIZONA_MIXER_CONTROLS("Noise", ARIZONA_NOISEMIX_INPUT_1_SOURCE), |
5052 | diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c |
5053 | index 9321e5c..4eee59d 100644 |
5054 | --- a/sound/soc/davinci/davinci-mcasp.c |
5055 | +++ b/sound/soc/davinci/davinci-mcasp.c |
5056 | @@ -626,7 +626,8 @@ static int davinci_config_channel_size(struct davinci_audio_dev *dev, |
5057 | int word_length) |
5058 | { |
5059 | u32 fmt; |
5060 | - u32 rotate = (word_length / 4) & 0x7; |
5061 | + u32 tx_rotate = (word_length / 4) & 0x7; |
5062 | + u32 rx_rotate = (32 - word_length) / 4; |
5063 | u32 mask = (1ULL << word_length) - 1; |
5064 | |
5065 | /* |
5066 | @@ -647,9 +648,9 @@ static int davinci_config_channel_size(struct davinci_audio_dev *dev, |
5067 | RXSSZ(fmt), RXSSZ(0x0F)); |
5068 | mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, |
5069 | TXSSZ(fmt), TXSSZ(0x0F)); |
5070 | - mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXROT(rotate), |
5071 | + mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXROT(tx_rotate), |
5072 | TXROT(7)); |
5073 | - mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG, RXROT(rotate), |
5074 | + mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG, RXROT(rx_rotate), |
5075 | RXROT(7)); |
5076 | mcasp_set_reg(dev->base + DAVINCI_MCASP_TXMASK_REG, mask); |
5077 | mcasp_set_reg(dev->base + DAVINCI_MCASP_RXMASK_REG, mask); |
5078 | diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py |
5079 | index a4ffc95..4c11605 100755 |
5080 | --- a/tools/perf/scripts/python/net_dropmonitor.py |
5081 | +++ b/tools/perf/scripts/python/net_dropmonitor.py |
5082 | @@ -40,9 +40,9 @@ def get_kallsyms_table(): |
5083 | |
5084 | def get_sym(sloc): |
5085 | loc = int(sloc) |
5086 | - for i in kallsyms: |
5087 | - if (i['loc'] >= loc): |
5088 | - return (i['name'], i['loc']-loc) |
5089 | + for i in kallsyms[::-1]: |
5090 | + if loc >= i['loc']: |
5091 | + return (i['name'], loc - i['loc']) |
5092 | return (None, 0) |
5093 | |
5094 | def print_drop_table(): |
5095 | @@ -64,7 +64,7 @@ def trace_end(): |
5096 | |
5097 | # called from perf, when it finds a correspoinding event |
5098 | def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, |
5099 | - skbaddr, protocol, location): |
5100 | + skbaddr, location, protocol): |
5101 | slocation = str(location) |
5102 | try: |
5103 | drop_log[slocation] = drop_log[slocation] + 1 |