Annotation of /trunk/kernel-lts/patches-3.4/0149-3.4.50-all-fixes.patch
Parent Directory | Revision Log
Revision 2225 -
(hide annotations)
(download)
Mon Jul 1 09:56:21 2013 UTC (11 years, 2 months ago) by niro
File size: 51192 byte(s)
-linux-3.4.50
1 | niro | 2225 | diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h |
2 | index d58fc4e..6afb13a 100644 | ||
3 | --- a/arch/powerpc/include/asm/exception-64s.h | ||
4 | +++ b/arch/powerpc/include/asm/exception-64s.h | ||
5 | @@ -320,7 +320,7 @@ label##_common: \ | ||
6 | */ | ||
7 | #define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \ | ||
8 | EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \ | ||
9 | - FINISH_NAP;RUNLATCH_ON;DISABLE_INTS) | ||
10 | + FINISH_NAP;DISABLE_INTS;RUNLATCH_ON) | ||
11 | |||
12 | /* | ||
13 | * When the idle code in power4_idle puts the CPU into NAP mode, | ||
14 | diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c | ||
15 | index d7ebc58..071592b 100644 | ||
16 | --- a/arch/powerpc/kernel/irq.c | ||
17 | +++ b/arch/powerpc/kernel/irq.c | ||
18 | @@ -162,7 +162,7 @@ notrace unsigned int __check_irq_replay(void) | ||
19 | * in case we also had a rollover while hard disabled | ||
20 | */ | ||
21 | local_paca->irq_happened &= ~PACA_IRQ_DEC; | ||
22 | - if (decrementer_check_overflow()) | ||
23 | + if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow()) | ||
24 | return 0x900; | ||
25 | |||
26 | /* Finally check if an external interrupt happened */ | ||
27 | diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c | ||
28 | index 94178e5..c1aef40 100644 | ||
29 | --- a/arch/powerpc/kernel/process.c | ||
30 | +++ b/arch/powerpc/kernel/process.c | ||
31 | @@ -1218,7 +1218,7 @@ EXPORT_SYMBOL(dump_stack); | ||
32 | |||
33 | #ifdef CONFIG_PPC64 | ||
34 | /* Called with hard IRQs off */ | ||
35 | -void __ppc64_runlatch_on(void) | ||
36 | +void notrace __ppc64_runlatch_on(void) | ||
37 | { | ||
38 | struct thread_info *ti = current_thread_info(); | ||
39 | unsigned long ctrl; | ||
40 | @@ -1231,7 +1231,7 @@ void __ppc64_runlatch_on(void) | ||
41 | } | ||
42 | |||
43 | /* Called with hard IRQs off */ | ||
44 | -void __ppc64_runlatch_off(void) | ||
45 | +void notrace __ppc64_runlatch_off(void) | ||
46 | { | ||
47 | struct thread_info *ti = current_thread_info(); | ||
48 | unsigned long ctrl; | ||
49 | diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S | ||
50 | index 7a6f3b3..f2bb9c9 100644 | ||
51 | --- a/arch/x86/kernel/relocate_kernel_64.S | ||
52 | +++ b/arch/x86/kernel/relocate_kernel_64.S | ||
53 | @@ -160,7 +160,7 @@ identity_mapped: | ||
54 | xorq %rbp, %rbp | ||
55 | xorq %r8, %r8 | ||
56 | xorq %r9, %r9 | ||
57 | - xorq %r10, %r9 | ||
58 | + xorq %r10, %r10 | ||
59 | xorq %r11, %r11 | ||
60 | xorq %r12, %r12 | ||
61 | xorq %r13, %r13 | ||
62 | diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c | ||
63 | index b0f553b..d3446f6 100644 | ||
64 | --- a/drivers/block/cciss.c | ||
65 | +++ b/drivers/block/cciss.c | ||
66 | @@ -161,8 +161,6 @@ static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id); | ||
67 | static int cciss_open(struct block_device *bdev, fmode_t mode); | ||
68 | static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode); | ||
69 | static int cciss_release(struct gendisk *disk, fmode_t mode); | ||
70 | -static int do_ioctl(struct block_device *bdev, fmode_t mode, | ||
71 | - unsigned int cmd, unsigned long arg); | ||
72 | static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | ||
73 | unsigned int cmd, unsigned long arg); | ||
74 | static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); | ||
75 | @@ -229,7 +227,7 @@ static const struct block_device_operations cciss_fops = { | ||
76 | .owner = THIS_MODULE, | ||
77 | .open = cciss_unlocked_open, | ||
78 | .release = cciss_release, | ||
79 | - .ioctl = do_ioctl, | ||
80 | + .ioctl = cciss_ioctl, | ||
81 | .getgeo = cciss_getgeo, | ||
82 | #ifdef CONFIG_COMPAT | ||
83 | .compat_ioctl = cciss_compat_ioctl, | ||
84 | @@ -1140,16 +1138,6 @@ static int cciss_release(struct gendisk *disk, fmode_t mode) | ||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | -static int do_ioctl(struct block_device *bdev, fmode_t mode, | ||
89 | - unsigned cmd, unsigned long arg) | ||
90 | -{ | ||
91 | - int ret; | ||
92 | - mutex_lock(&cciss_mutex); | ||
93 | - ret = cciss_ioctl(bdev, mode, cmd, arg); | ||
94 | - mutex_unlock(&cciss_mutex); | ||
95 | - return ret; | ||
96 | -} | ||
97 | - | ||
98 | #ifdef CONFIG_COMPAT | ||
99 | |||
100 | static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, | ||
101 | @@ -1176,7 +1164,7 @@ static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode, | ||
102 | case CCISS_REGNEWD: | ||
103 | case CCISS_RESCANDISK: | ||
104 | case CCISS_GETLUNINFO: | ||
105 | - return do_ioctl(bdev, mode, cmd, arg); | ||
106 | + return cciss_ioctl(bdev, mode, cmd, arg); | ||
107 | |||
108 | case CCISS_PASSTHRU32: | ||
109 | return cciss_ioctl32_passthru(bdev, mode, cmd, arg); | ||
110 | @@ -1216,7 +1204,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, | ||
111 | if (err) | ||
112 | return -EFAULT; | ||
113 | |||
114 | - err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p); | ||
115 | + err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p); | ||
116 | if (err) | ||
117 | return err; | ||
118 | err |= | ||
119 | @@ -1258,7 +1246,7 @@ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode, | ||
120 | if (err) | ||
121 | return -EFAULT; | ||
122 | |||
123 | - err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p); | ||
124 | + err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p); | ||
125 | if (err) | ||
126 | return err; | ||
127 | err |= | ||
128 | @@ -1308,11 +1296,14 @@ static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp) | ||
129 | static int cciss_getintinfo(ctlr_info_t *h, void __user *argp) | ||
130 | { | ||
131 | cciss_coalint_struct intinfo; | ||
132 | + unsigned long flags; | ||
133 | |||
134 | if (!argp) | ||
135 | return -EINVAL; | ||
136 | + spin_lock_irqsave(&h->lock, flags); | ||
137 | intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay); | ||
138 | intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount); | ||
139 | + spin_unlock_irqrestore(&h->lock, flags); | ||
140 | if (copy_to_user | ||
141 | (argp, &intinfo, sizeof(cciss_coalint_struct))) | ||
142 | return -EFAULT; | ||
143 | @@ -1353,12 +1344,15 @@ static int cciss_setintinfo(ctlr_info_t *h, void __user *argp) | ||
144 | static int cciss_getnodename(ctlr_info_t *h, void __user *argp) | ||
145 | { | ||
146 | NodeName_type NodeName; | ||
147 | + unsigned long flags; | ||
148 | int i; | ||
149 | |||
150 | if (!argp) | ||
151 | return -EINVAL; | ||
152 | + spin_lock_irqsave(&h->lock, flags); | ||
153 | for (i = 0; i < 16; i++) | ||
154 | NodeName[i] = readb(&h->cfgtable->ServerName[i]); | ||
155 | + spin_unlock_irqrestore(&h->lock, flags); | ||
156 | if (copy_to_user(argp, NodeName, sizeof(NodeName_type))) | ||
157 | return -EFAULT; | ||
158 | return 0; | ||
159 | @@ -1395,10 +1389,13 @@ static int cciss_setnodename(ctlr_info_t *h, void __user *argp) | ||
160 | static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp) | ||
161 | { | ||
162 | Heartbeat_type heartbeat; | ||
163 | + unsigned long flags; | ||
164 | |||
165 | if (!argp) | ||
166 | return -EINVAL; | ||
167 | + spin_lock_irqsave(&h->lock, flags); | ||
168 | heartbeat = readl(&h->cfgtable->HeartBeat); | ||
169 | + spin_unlock_irqrestore(&h->lock, flags); | ||
170 | if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type))) | ||
171 | return -EFAULT; | ||
172 | return 0; | ||
173 | @@ -1407,10 +1404,13 @@ static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp) | ||
174 | static int cciss_getbustypes(ctlr_info_t *h, void __user *argp) | ||
175 | { | ||
176 | BusTypes_type BusTypes; | ||
177 | + unsigned long flags; | ||
178 | |||
179 | if (!argp) | ||
180 | return -EINVAL; | ||
181 | + spin_lock_irqsave(&h->lock, flags); | ||
182 | BusTypes = readl(&h->cfgtable->BusTypes); | ||
183 | + spin_unlock_irqrestore(&h->lock, flags); | ||
184 | if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type))) | ||
185 | return -EFAULT; | ||
186 | return 0; | ||
187 | diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c | ||
188 | index be84559..1ee297b 100644 | ||
189 | --- a/drivers/gpu/drm/gma500/cdv_intel_display.c | ||
190 | +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c | ||
191 | @@ -1439,6 +1439,19 @@ static void cdv_intel_crtc_destroy(struct drm_crtc *crtc) | ||
192 | kfree(psb_intel_crtc); | ||
193 | } | ||
194 | |||
195 | +static void cdv_intel_crtc_disable(struct drm_crtc *crtc) | ||
196 | +{ | ||
197 | + struct gtt_range *gt; | ||
198 | + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
199 | + | ||
200 | + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); | ||
201 | + | ||
202 | + if (crtc->fb) { | ||
203 | + gt = to_psb_fb(crtc->fb)->gtt; | ||
204 | + psb_gtt_unpin(gt); | ||
205 | + } | ||
206 | +} | ||
207 | + | ||
208 | const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = { | ||
209 | .dpms = cdv_intel_crtc_dpms, | ||
210 | .mode_fixup = cdv_intel_crtc_mode_fixup, | ||
211 | @@ -1446,6 +1459,7 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = { | ||
212 | .mode_set_base = cdv_intel_pipe_set_base, | ||
213 | .prepare = cdv_intel_crtc_prepare, | ||
214 | .commit = cdv_intel_crtc_commit, | ||
215 | + .disable = cdv_intel_crtc_disable, | ||
216 | }; | ||
217 | |||
218 | const struct drm_crtc_funcs cdv_intel_crtc_funcs = { | ||
219 | diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c | ||
220 | index 2616558..454a9d8 100644 | ||
221 | --- a/drivers/gpu/drm/gma500/psb_intel_display.c | ||
222 | +++ b/drivers/gpu/drm/gma500/psb_intel_display.c | ||
223 | @@ -1262,6 +1262,19 @@ void psb_intel_crtc_destroy(struct drm_crtc *crtc) | ||
224 | kfree(psb_intel_crtc); | ||
225 | } | ||
226 | |||
227 | +static void psb_intel_crtc_disable(struct drm_crtc *crtc) | ||
228 | +{ | ||
229 | + struct gtt_range *gt; | ||
230 | + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | ||
231 | + | ||
232 | + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); | ||
233 | + | ||
234 | + if (crtc->fb) { | ||
235 | + gt = to_psb_fb(crtc->fb)->gtt; | ||
236 | + psb_gtt_unpin(gt); | ||
237 | + } | ||
238 | +} | ||
239 | + | ||
240 | const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { | ||
241 | .dpms = psb_intel_crtc_dpms, | ||
242 | .mode_fixup = psb_intel_crtc_mode_fixup, | ||
243 | @@ -1269,6 +1282,7 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { | ||
244 | .mode_set_base = psb_intel_pipe_set_base, | ||
245 | .prepare = psb_intel_crtc_prepare, | ||
246 | .commit = psb_intel_crtc_commit, | ||
247 | + .disable = psb_intel_crtc_disable, | ||
248 | }; | ||
249 | |||
250 | const struct drm_crtc_funcs psb_intel_crtc_funcs = { | ||
251 | diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c | ||
252 | index aeb9d6e..c0ba260 100644 | ||
253 | --- a/drivers/gpu/drm/i915/intel_sdvo.c | ||
254 | +++ b/drivers/gpu/drm/i915/intel_sdvo.c | ||
255 | @@ -1582,10 +1582,13 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | ||
256 | * arranged in priority order. | ||
257 | */ | ||
258 | intel_ddc_get_modes(connector, &intel_sdvo->ddc); | ||
259 | - if (list_empty(&connector->probed_modes) == false) | ||
260 | - goto end; | ||
261 | |||
262 | - /* Fetch modes from VBT */ | ||
263 | + /* | ||
264 | + * Fetch modes from VBT. For SDVO prefer the VBT mode since some | ||
265 | + * SDVO->LVDS transcoders can't cope with the EDID mode. Since | ||
266 | + * drm_mode_probed_add adds the mode at the head of the list we add it | ||
267 | + * last. | ||
268 | + */ | ||
269 | if (dev_priv->sdvo_lvds_vbt_mode != NULL) { | ||
270 | newmode = drm_mode_duplicate(connector->dev, | ||
271 | dev_priv->sdvo_lvds_vbt_mode); | ||
272 | @@ -1597,7 +1600,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | ||
273 | } | ||
274 | } | ||
275 | |||
276 | -end: | ||
277 | list_for_each_entry(newmode, &connector->probed_modes, head) { | ||
278 | if (newmode->type & DRM_MODE_TYPE_PREFERRED) { | ||
279 | intel_sdvo->sdvo_lvds_fixed_mode = | ||
280 | diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c | ||
281 | index df44509..b424a20 100644 | ||
282 | --- a/drivers/md/raid1.c | ||
283 | +++ b/drivers/md/raid1.c | ||
284 | @@ -413,7 +413,17 @@ static void raid1_end_write_request(struct bio *bio, int error) | ||
285 | |||
286 | r1_bio->bios[mirror] = NULL; | ||
287 | to_put = bio; | ||
288 | - set_bit(R1BIO_Uptodate, &r1_bio->state); | ||
289 | + /* | ||
290 | + * Do not set R1BIO_Uptodate if the current device is | ||
291 | + * rebuilding or Faulty. This is because we cannot use | ||
292 | + * such device for properly reading the data back (we could | ||
293 | + * potentially use it, if the current write would have felt | ||
294 | + * before rdev->recovery_offset, but for simplicity we don't | ||
295 | + * check this here. | ||
296 | + */ | ||
297 | + if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) && | ||
298 | + !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)) | ||
299 | + set_bit(R1BIO_Uptodate, &r1_bio->state); | ||
300 | |||
301 | /* Maybe we can clear some bad blocks. */ | ||
302 | if (is_badblock(conf->mirrors[mirror].rdev, | ||
303 | diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c | ||
304 | index 6137d00..0cc7985 100644 | ||
305 | --- a/drivers/md/raid10.c | ||
306 | +++ b/drivers/md/raid10.c | ||
307 | @@ -452,7 +452,17 @@ static void raid10_end_write_request(struct bio *bio, int error) | ||
308 | sector_t first_bad; | ||
309 | int bad_sectors; | ||
310 | |||
311 | - set_bit(R10BIO_Uptodate, &r10_bio->state); | ||
312 | + /* | ||
313 | + * Do not set R10BIO_Uptodate if the current device is | ||
314 | + * rebuilding or Faulty. This is because we cannot use | ||
315 | + * such device for properly reading the data back (we could | ||
316 | + * potentially use it, if the current write would have felt | ||
317 | + * before rdev->recovery_offset, but for simplicity we don't | ||
318 | + * check this here. | ||
319 | + */ | ||
320 | + if (test_bit(In_sync, &rdev->flags) && | ||
321 | + !test_bit(Faulty, &rdev->flags)) | ||
322 | + set_bit(R10BIO_Uptodate, &r10_bio->state); | ||
323 | |||
324 | /* Maybe we can clear some bad blocks. */ | ||
325 | if (is_badblock(rdev, | ||
326 | diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig | ||
327 | index e507e78..3b8c930 100644 | ||
328 | --- a/drivers/net/wireless/ath/ath9k/Kconfig | ||
329 | +++ b/drivers/net/wireless/ath/ath9k/Kconfig | ||
330 | @@ -89,13 +89,17 @@ config ATH9K_MAC_DEBUG | ||
331 | This option enables collection of statistics for Rx/Tx status | ||
332 | data and some other MAC related statistics | ||
333 | |||
334 | -config ATH9K_RATE_CONTROL | ||
335 | +config ATH9K_LEGACY_RATE_CONTROL | ||
336 | bool "Atheros ath9k rate control" | ||
337 | depends on ATH9K | ||
338 | - default y | ||
339 | + default n | ||
340 | ---help--- | ||
341 | Say Y, if you want to use the ath9k specific rate control | ||
342 | - module instead of minstrel_ht. | ||
343 | + module instead of minstrel_ht. Be warned that there are various | ||
344 | + issues with the ath9k RC and minstrel is a more robust algorithm. | ||
345 | + Note that even if this option is selected, "ath9k_rate_control" | ||
346 | + has to be passed to mac80211 using the module parameter, | ||
347 | + ieee80211_default_rc_algo. | ||
348 | |||
349 | config ATH9K_HTC | ||
350 | tristate "Atheros HTC based wireless cards support" | ||
351 | diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile | ||
352 | index 27d95fe..24ae2e6 100644 | ||
353 | --- a/drivers/net/wireless/ath/ath9k/Makefile | ||
354 | +++ b/drivers/net/wireless/ath/ath9k/Makefile | ||
355 | @@ -6,7 +6,7 @@ ath9k-y += beacon.o \ | ||
356 | xmit.o | ||
357 | |||
358 | ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o | ||
359 | -ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o | ||
360 | +ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o | ||
361 | ath9k-$(CONFIG_ATH9K_PCI) += pci.o | ||
362 | ath9k-$(CONFIG_ATH9K_AHB) += ahb.o | ||
363 | ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o | ||
364 | diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c | ||
365 | index cb00645..cac5b25 100644 | ||
366 | --- a/drivers/net/wireless/ath/ath9k/init.c | ||
367 | +++ b/drivers/net/wireless/ath/ath9k/init.c | ||
368 | @@ -671,8 +671,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) | ||
369 | BIT(NL80211_IFTYPE_ADHOC) | | ||
370 | BIT(NL80211_IFTYPE_MESH_POINT); | ||
371 | |||
372 | - if (AR_SREV_5416(sc->sc_ah)) | ||
373 | - hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; | ||
374 | + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; | ||
375 | |||
376 | hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; | ||
377 | hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; | ||
378 | @@ -695,10 +694,6 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) | ||
379 | sc->ant_rx = hw->wiphy->available_antennas_rx; | ||
380 | sc->ant_tx = hw->wiphy->available_antennas_tx; | ||
381 | |||
382 | -#ifdef CONFIG_ATH9K_RATE_CONTROL | ||
383 | - hw->rate_control_algorithm = "ath9k_rate_control"; | ||
384 | -#endif | ||
385 | - | ||
386 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) | ||
387 | hw->wiphy->bands[IEEE80211_BAND_2GHZ] = | ||
388 | &sc->sbands[IEEE80211_BAND_2GHZ]; | ||
389 | diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h | ||
390 | index 75f8e9b..12cf122 100644 | ||
391 | --- a/drivers/net/wireless/ath/ath9k/rc.h | ||
392 | +++ b/drivers/net/wireless/ath/ath9k/rc.h | ||
393 | @@ -219,7 +219,7 @@ struct ath_rate_priv { | ||
394 | struct ath_rc_stats rcstats[RATE_TABLE_SIZE]; | ||
395 | }; | ||
396 | |||
397 | -#ifdef CONFIG_ATH9K_RATE_CONTROL | ||
398 | +#ifdef CONFIG_ATH9K_LEGACY_RATE_CONTROL | ||
399 | int ath_rate_control_register(void); | ||
400 | void ath_rate_control_unregister(void); | ||
401 | #else | ||
402 | diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c | ||
403 | index b54c750..f8c4499 100644 | ||
404 | --- a/drivers/net/wireless/b43/main.c | ||
405 | +++ b/drivers/net/wireless/b43/main.c | ||
406 | @@ -2449,7 +2449,7 @@ static void b43_request_firmware(struct work_struct *work) | ||
407 | for (i = 0; i < B43_NR_FWTYPES; i++) { | ||
408 | errmsg = ctx->errors[i]; | ||
409 | if (strlen(errmsg)) | ||
410 | - b43err(dev->wl, errmsg); | ||
411 | + b43err(dev->wl, "%s", errmsg); | ||
412 | } | ||
413 | b43_print_fw_helptext(dev->wl, 1); | ||
414 | goto out; | ||
415 | diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c | ||
416 | index 63ccc0f..c5ce195 100644 | ||
417 | --- a/drivers/rtc/rtc-twl.c | ||
418 | +++ b/drivers/rtc/rtc-twl.c | ||
419 | @@ -523,6 +523,7 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev) | ||
420 | } | ||
421 | |||
422 | platform_set_drvdata(pdev, rtc); | ||
423 | + device_init_wakeup(&pdev->dev, 1); | ||
424 | return 0; | ||
425 | |||
426 | out2: | ||
427 | diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c | ||
428 | index 80576d05..bb5fb3d 100644 | ||
429 | --- a/fs/ceph/locks.c | ||
430 | +++ b/fs/ceph/locks.c | ||
431 | @@ -191,27 +191,23 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count) | ||
432 | } | ||
433 | |||
434 | /** | ||
435 | - * Encode the flock and fcntl locks for the given inode into the pagelist. | ||
436 | - * Format is: #fcntl locks, sequential fcntl locks, #flock locks, | ||
437 | - * sequential flock locks. | ||
438 | - * Must be called with lock_flocks() already held. | ||
439 | - * If we encounter more of a specific lock type than expected, | ||
440 | - * we return the value 1. | ||
441 | + * Encode the flock and fcntl locks for the given inode into the ceph_filelock | ||
442 | + * array. Must be called with lock_flocks() already held. | ||
443 | + * If we encounter more of a specific lock type than expected, return -ENOSPC. | ||
444 | */ | ||
445 | -int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist, | ||
446 | - int num_fcntl_locks, int num_flock_locks) | ||
447 | +int ceph_encode_locks_to_buffer(struct inode *inode, | ||
448 | + struct ceph_filelock *flocks, | ||
449 | + int num_fcntl_locks, int num_flock_locks) | ||
450 | { | ||
451 | struct file_lock *lock; | ||
452 | - struct ceph_filelock cephlock; | ||
453 | int err = 0; | ||
454 | int seen_fcntl = 0; | ||
455 | int seen_flock = 0; | ||
456 | + int l = 0; | ||
457 | |||
458 | dout("encoding %d flock and %d fcntl locks", num_flock_locks, | ||
459 | num_fcntl_locks); | ||
460 | - err = ceph_pagelist_append(pagelist, &num_fcntl_locks, sizeof(u32)); | ||
461 | - if (err) | ||
462 | - goto fail; | ||
463 | + | ||
464 | for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { | ||
465 | if (lock->fl_flags & FL_POSIX) { | ||
466 | ++seen_fcntl; | ||
467 | @@ -219,19 +215,12 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist, | ||
468 | err = -ENOSPC; | ||
469 | goto fail; | ||
470 | } | ||
471 | - err = lock_to_ceph_filelock(lock, &cephlock); | ||
472 | + err = lock_to_ceph_filelock(lock, &flocks[l]); | ||
473 | if (err) | ||
474 | goto fail; | ||
475 | - err = ceph_pagelist_append(pagelist, &cephlock, | ||
476 | - sizeof(struct ceph_filelock)); | ||
477 | + ++l; | ||
478 | } | ||
479 | - if (err) | ||
480 | - goto fail; | ||
481 | } | ||
482 | - | ||
483 | - err = ceph_pagelist_append(pagelist, &num_flock_locks, sizeof(u32)); | ||
484 | - if (err) | ||
485 | - goto fail; | ||
486 | for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) { | ||
487 | if (lock->fl_flags & FL_FLOCK) { | ||
488 | ++seen_flock; | ||
489 | @@ -239,19 +228,51 @@ int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist, | ||
490 | err = -ENOSPC; | ||
491 | goto fail; | ||
492 | } | ||
493 | - err = lock_to_ceph_filelock(lock, &cephlock); | ||
494 | + err = lock_to_ceph_filelock(lock, &flocks[l]); | ||
495 | if (err) | ||
496 | goto fail; | ||
497 | - err = ceph_pagelist_append(pagelist, &cephlock, | ||
498 | - sizeof(struct ceph_filelock)); | ||
499 | + ++l; | ||
500 | } | ||
501 | - if (err) | ||
502 | - goto fail; | ||
503 | } | ||
504 | fail: | ||
505 | return err; | ||
506 | } | ||
507 | |||
508 | +/** | ||
509 | + * Copy the encoded flock and fcntl locks into the pagelist. | ||
510 | + * Format is: #fcntl locks, sequential fcntl locks, #flock locks, | ||
511 | + * sequential flock locks. | ||
512 | + * Returns zero on success. | ||
513 | + */ | ||
514 | +int ceph_locks_to_pagelist(struct ceph_filelock *flocks, | ||
515 | + struct ceph_pagelist *pagelist, | ||
516 | + int num_fcntl_locks, int num_flock_locks) | ||
517 | +{ | ||
518 | + int err = 0; | ||
519 | + __le32 nlocks; | ||
520 | + | ||
521 | + nlocks = cpu_to_le32(num_fcntl_locks); | ||
522 | + err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks)); | ||
523 | + if (err) | ||
524 | + goto out_fail; | ||
525 | + | ||
526 | + err = ceph_pagelist_append(pagelist, flocks, | ||
527 | + num_fcntl_locks * sizeof(*flocks)); | ||
528 | + if (err) | ||
529 | + goto out_fail; | ||
530 | + | ||
531 | + nlocks = cpu_to_le32(num_flock_locks); | ||
532 | + err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks)); | ||
533 | + if (err) | ||
534 | + goto out_fail; | ||
535 | + | ||
536 | + err = ceph_pagelist_append(pagelist, | ||
537 | + &flocks[num_fcntl_locks], | ||
538 | + num_flock_locks * sizeof(*flocks)); | ||
539 | +out_fail: | ||
540 | + return err; | ||
541 | +} | ||
542 | + | ||
543 | /* | ||
544 | * Given a pointer to a lock, convert it to a ceph filelock | ||
545 | */ | ||
546 | diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c | ||
547 | index 3fd08ad..cf1b9e0 100644 | ||
548 | --- a/fs/ceph/mds_client.c | ||
549 | +++ b/fs/ceph/mds_client.c | ||
550 | @@ -335,9 +335,9 @@ void ceph_put_mds_session(struct ceph_mds_session *s) | ||
551 | atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); | ||
552 | if (atomic_dec_and_test(&s->s_ref)) { | ||
553 | if (s->s_auth.authorizer) | ||
554 | - s->s_mdsc->fsc->client->monc.auth->ops->destroy_authorizer( | ||
555 | - s->s_mdsc->fsc->client->monc.auth, | ||
556 | - s->s_auth.authorizer); | ||
557 | + ceph_auth_destroy_authorizer( | ||
558 | + s->s_mdsc->fsc->client->monc.auth, | ||
559 | + s->s_auth.authorizer); | ||
560 | kfree(s); | ||
561 | } | ||
562 | } | ||
563 | @@ -2455,39 +2455,44 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, | ||
564 | |||
565 | if (recon_state->flock) { | ||
566 | int num_fcntl_locks, num_flock_locks; | ||
567 | - struct ceph_pagelist_cursor trunc_point; | ||
568 | - | ||
569 | - ceph_pagelist_set_cursor(pagelist, &trunc_point); | ||
570 | - do { | ||
571 | - lock_flocks(); | ||
572 | - ceph_count_locks(inode, &num_fcntl_locks, | ||
573 | - &num_flock_locks); | ||
574 | - rec.v2.flock_len = (2*sizeof(u32) + | ||
575 | - (num_fcntl_locks+num_flock_locks) * | ||
576 | - sizeof(struct ceph_filelock)); | ||
577 | - unlock_flocks(); | ||
578 | - | ||
579 | - /* pre-alloc pagelist */ | ||
580 | - ceph_pagelist_truncate(pagelist, &trunc_point); | ||
581 | - err = ceph_pagelist_append(pagelist, &rec, reclen); | ||
582 | - if (!err) | ||
583 | - err = ceph_pagelist_reserve(pagelist, | ||
584 | - rec.v2.flock_len); | ||
585 | - | ||
586 | - /* encode locks */ | ||
587 | - if (!err) { | ||
588 | - lock_flocks(); | ||
589 | - err = ceph_encode_locks(inode, | ||
590 | - pagelist, | ||
591 | - num_fcntl_locks, | ||
592 | - num_flock_locks); | ||
593 | - unlock_flocks(); | ||
594 | - } | ||
595 | - } while (err == -ENOSPC); | ||
596 | + struct ceph_filelock *flocks; | ||
597 | + | ||
598 | +encode_again: | ||
599 | + lock_flocks(); | ||
600 | + ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks); | ||
601 | + unlock_flocks(); | ||
602 | + flocks = kmalloc((num_fcntl_locks+num_flock_locks) * | ||
603 | + sizeof(struct ceph_filelock), GFP_NOFS); | ||
604 | + if (!flocks) { | ||
605 | + err = -ENOMEM; | ||
606 | + goto out_free; | ||
607 | + } | ||
608 | + lock_flocks(); | ||
609 | + err = ceph_encode_locks_to_buffer(inode, flocks, | ||
610 | + num_fcntl_locks, | ||
611 | + num_flock_locks); | ||
612 | + unlock_flocks(); | ||
613 | + if (err) { | ||
614 | + kfree(flocks); | ||
615 | + if (err == -ENOSPC) | ||
616 | + goto encode_again; | ||
617 | + goto out_free; | ||
618 | + } | ||
619 | + /* | ||
620 | + * number of encoded locks is stable, so copy to pagelist | ||
621 | + */ | ||
622 | + rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) + | ||
623 | + (num_fcntl_locks+num_flock_locks) * | ||
624 | + sizeof(struct ceph_filelock)); | ||
625 | + err = ceph_pagelist_append(pagelist, &rec, reclen); | ||
626 | + if (!err) | ||
627 | + err = ceph_locks_to_pagelist(flocks, pagelist, | ||
628 | + num_fcntl_locks, | ||
629 | + num_flock_locks); | ||
630 | + kfree(flocks); | ||
631 | } else { | ||
632 | err = ceph_pagelist_append(pagelist, &rec, reclen); | ||
633 | } | ||
634 | - | ||
635 | out_free: | ||
636 | kfree(path); | ||
637 | out_dput: | ||
638 | @@ -3414,13 +3419,17 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, | ||
639 | struct ceph_auth_handshake *auth = &s->s_auth; | ||
640 | |||
641 | if (force_new && auth->authorizer) { | ||
642 | - if (ac->ops && ac->ops->destroy_authorizer) | ||
643 | - ac->ops->destroy_authorizer(ac, auth->authorizer); | ||
644 | + ceph_auth_destroy_authorizer(ac, auth->authorizer); | ||
645 | auth->authorizer = NULL; | ||
646 | } | ||
647 | - if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) { | ||
648 | - int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_MDS, | ||
649 | - auth); | ||
650 | + if (!auth->authorizer) { | ||
651 | + int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS, | ||
652 | + auth); | ||
653 | + if (ret) | ||
654 | + return ERR_PTR(ret); | ||
655 | + } else { | ||
656 | + int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS, | ||
657 | + auth); | ||
658 | if (ret) | ||
659 | return ERR_PTR(ret); | ||
660 | } | ||
661 | @@ -3436,7 +3445,7 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len) | ||
662 | struct ceph_mds_client *mdsc = s->s_mdsc; | ||
663 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; | ||
664 | |||
665 | - return ac->ops->verify_authorizer_reply(ac, s->s_auth.authorizer, len); | ||
666 | + return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len); | ||
667 | } | ||
668 | |||
669 | static int invalidate_authorizer(struct ceph_connection *con) | ||
670 | @@ -3445,8 +3454,7 @@ static int invalidate_authorizer(struct ceph_connection *con) | ||
671 | struct ceph_mds_client *mdsc = s->s_mdsc; | ||
672 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; | ||
673 | |||
674 | - if (ac->ops->invalidate_authorizer) | ||
675 | - ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); | ||
676 | + ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); | ||
677 | |||
678 | return ceph_monc_validate_auth(&mdsc->fsc->client->monc); | ||
679 | } | ||
680 | diff --git a/fs/ceph/super.c b/fs/ceph/super.c | ||
681 | index f363918..f4fa5cf 100644 | ||
682 | --- a/fs/ceph/super.c | ||
683 | +++ b/fs/ceph/super.c | ||
684 | @@ -70,8 +70,14 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) | ||
685 | /* | ||
686 | * express utilization in terms of large blocks to avoid | ||
687 | * overflow on 32-bit machines. | ||
688 | + * | ||
689 | + * NOTE: for the time being, we make bsize == frsize to humor | ||
690 | + * not-yet-ancient versions of glibc that are broken. | ||
691 | + * Someday, we will probably want to report a real block | ||
692 | + * size... whatever that may mean for a network file system! | ||
693 | */ | ||
694 | buf->f_bsize = 1 << CEPH_BLOCK_SHIFT; | ||
695 | + buf->f_frsize = 1 << CEPH_BLOCK_SHIFT; | ||
696 | buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10); | ||
697 | buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10); | ||
698 | buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10); | ||
699 | @@ -79,7 +85,6 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) | ||
700 | buf->f_files = le64_to_cpu(st.num_objects); | ||
701 | buf->f_ffree = -1; | ||
702 | buf->f_namelen = NAME_MAX; | ||
703 | - buf->f_frsize = PAGE_CACHE_SIZE; | ||
704 | |||
705 | /* leave fsid little-endian, regardless of host endianness */ | ||
706 | fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1); | ||
707 | diff --git a/fs/ceph/super.h b/fs/ceph/super.h | ||
708 | index fc35036..d2e01a6 100644 | ||
709 | --- a/fs/ceph/super.h | ||
710 | +++ b/fs/ceph/super.h | ||
711 | @@ -21,7 +21,7 @@ | ||
712 | |||
713 | /* large granularity for statfs utilization stats to facilitate | ||
714 | * large volume sizes on 32-bit machines. */ | ||
715 | -#define CEPH_BLOCK_SHIFT 20 /* 1 MB */ | ||
716 | +#define CEPH_BLOCK_SHIFT 22 /* 4 MB */ | ||
717 | #define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT) | ||
718 | |||
719 | #define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */ | ||
720 | @@ -847,8 +847,13 @@ extern const struct export_operations ceph_export_ops; | ||
721 | extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl); | ||
722 | extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl); | ||
723 | extern void ceph_count_locks(struct inode *inode, int *p_num, int *f_num); | ||
724 | -extern int ceph_encode_locks(struct inode *i, struct ceph_pagelist *p, | ||
725 | - int p_locks, int f_locks); | ||
726 | +extern int ceph_encode_locks_to_buffer(struct inode *inode, | ||
727 | + struct ceph_filelock *flocks, | ||
728 | + int num_fcntl_locks, | ||
729 | + int num_flock_locks); | ||
730 | +extern int ceph_locks_to_pagelist(struct ceph_filelock *flocks, | ||
731 | + struct ceph_pagelist *pagelist, | ||
732 | + int num_fcntl_locks, int num_flock_locks); | ||
733 | extern int lock_to_ceph_filelock(struct file_lock *fl, struct ceph_filelock *c); | ||
734 | |||
735 | /* debugfs.c */ | ||
736 | diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h | ||
737 | index d4080f3..5f33868 100644 | ||
738 | --- a/include/linux/ceph/auth.h | ||
739 | +++ b/include/linux/ceph/auth.h | ||
740 | @@ -52,6 +52,9 @@ struct ceph_auth_client_ops { | ||
741 | */ | ||
742 | int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type, | ||
743 | struct ceph_auth_handshake *auth); | ||
744 | + /* ensure that an existing authorizer is up to date */ | ||
745 | + int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type, | ||
746 | + struct ceph_auth_handshake *auth); | ||
747 | int (*verify_authorizer_reply)(struct ceph_auth_client *ac, | ||
748 | struct ceph_authorizer *a, size_t len); | ||
749 | void (*destroy_authorizer)(struct ceph_auth_client *ac, | ||
750 | @@ -75,6 +78,8 @@ struct ceph_auth_client { | ||
751 | u64 global_id; /* our unique id in system */ | ||
752 | const struct ceph_crypto_key *key; /* our secret key */ | ||
753 | unsigned want_keys; /* which services we want */ | ||
754 | + | ||
755 | + struct mutex mutex; | ||
756 | }; | ||
757 | |||
758 | extern struct ceph_auth_client *ceph_auth_init(const char *name, | ||
759 | @@ -94,5 +99,18 @@ extern int ceph_build_auth(struct ceph_auth_client *ac, | ||
760 | void *msg_buf, size_t msg_len); | ||
761 | |||
762 | extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac); | ||
763 | +extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac, | ||
764 | + int peer_type, | ||
765 | + struct ceph_auth_handshake *auth); | ||
766 | +extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, | ||
767 | + struct ceph_authorizer *a); | ||
768 | +extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac, | ||
769 | + int peer_type, | ||
770 | + struct ceph_auth_handshake *a); | ||
771 | +extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, | ||
772 | + struct ceph_authorizer *a, | ||
773 | + size_t len); | ||
774 | +extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, | ||
775 | + int peer_type); | ||
776 | |||
777 | #endif | ||
778 | diff --git a/include/linux/cpu.h b/include/linux/cpu.h | ||
779 | index 78ed62f..25fd741 100644 | ||
780 | --- a/include/linux/cpu.h | ||
781 | +++ b/include/linux/cpu.h | ||
782 | @@ -177,6 +177,8 @@ extern struct bus_type cpu_subsys; | ||
783 | |||
784 | extern void get_online_cpus(void); | ||
785 | extern void put_online_cpus(void); | ||
786 | +extern void cpu_hotplug_disable(void); | ||
787 | +extern void cpu_hotplug_enable(void); | ||
788 | #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) | ||
789 | #define register_hotcpu_notifier(nb) register_cpu_notifier(nb) | ||
790 | #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) | ||
791 | @@ -199,6 +201,8 @@ static inline void cpu_hotplug_driver_unlock(void) | ||
792 | |||
793 | #define get_online_cpus() do { } while (0) | ||
794 | #define put_online_cpus() do { } while (0) | ||
795 | +#define cpu_hotplug_disable() do { } while (0) | ||
796 | +#define cpu_hotplug_enable() do { } while (0) | ||
797 | #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) | ||
798 | /* These aren't inline functions due to a GCC bug. */ | ||
799 | #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) | ||
800 | diff --git a/include/linux/swapops.h b/include/linux/swapops.h | ||
801 | index 47ead51..c5fd30d 100644 | ||
802 | --- a/include/linux/swapops.h | ||
803 | +++ b/include/linux/swapops.h | ||
804 | @@ -137,6 +137,7 @@ static inline void make_migration_entry_read(swp_entry_t *entry) | ||
805 | |||
806 | extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, | ||
807 | unsigned long address); | ||
808 | +extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte); | ||
809 | #else | ||
810 | |||
811 | #define make_migration_entry(page, write) swp_entry(0, 0) | ||
812 | @@ -148,6 +149,8 @@ static inline int is_migration_entry(swp_entry_t swp) | ||
813 | static inline void make_migration_entry_read(swp_entry_t *entryp) { } | ||
814 | static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, | ||
815 | unsigned long address) { } | ||
816 | +static inline void migration_entry_wait_huge(struct mm_struct *mm, | ||
817 | + pte_t *pte) { } | ||
818 | static inline int is_write_migration_entry(swp_entry_t entry) | ||
819 | { | ||
820 | return 0; | ||
821 | diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h | ||
822 | index db1c5df..4886c11 100644 | ||
823 | --- a/include/net/bluetooth/hci_core.h | ||
824 | +++ b/include/net/bluetooth/hci_core.h | ||
825 | @@ -976,6 +976,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event); | ||
826 | int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len); | ||
827 | int mgmt_index_added(struct hci_dev *hdev); | ||
828 | int mgmt_index_removed(struct hci_dev *hdev); | ||
829 | +int mgmt_set_powered_failed(struct hci_dev *hdev, int err); | ||
830 | int mgmt_powered(struct hci_dev *hdev, u8 powered); | ||
831 | int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable); | ||
832 | int mgmt_connectable(struct hci_dev *hdev, u8 connectable); | ||
833 | diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h | ||
834 | index ebfd91f..8098e87 100644 | ||
835 | --- a/include/net/bluetooth/mgmt.h | ||
836 | +++ b/include/net/bluetooth/mgmt.h | ||
837 | @@ -42,6 +42,7 @@ | ||
838 | #define MGMT_STATUS_NOT_POWERED 0x0f | ||
839 | #define MGMT_STATUS_CANCELLED 0x10 | ||
840 | #define MGMT_STATUS_INVALID_INDEX 0x11 | ||
841 | +#define MGMT_STATUS_RFKILLED 0x12 | ||
842 | |||
843 | struct mgmt_hdr { | ||
844 | __le16 opcode; | ||
845 | diff --git a/kernel/cpu.c b/kernel/cpu.c | ||
846 | index 2060c6e..26feaa9 100644 | ||
847 | --- a/kernel/cpu.c | ||
848 | +++ b/kernel/cpu.c | ||
849 | @@ -124,6 +124,27 @@ static void cpu_hotplug_done(void) | ||
850 | mutex_unlock(&cpu_hotplug.lock); | ||
851 | } | ||
852 | |||
853 | +/* | ||
854 | + * Wait for currently running CPU hotplug operations to complete (if any) and | ||
855 | + * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects | ||
856 | + * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the | ||
857 | + * hotplug path before performing hotplug operations. So acquiring that lock | ||
858 | + * guarantees mutual exclusion from any currently running hotplug operations. | ||
859 | + */ | ||
860 | +void cpu_hotplug_disable(void) | ||
861 | +{ | ||
862 | + cpu_maps_update_begin(); | ||
863 | + cpu_hotplug_disabled = 1; | ||
864 | + cpu_maps_update_done(); | ||
865 | +} | ||
866 | + | ||
867 | +void cpu_hotplug_enable(void) | ||
868 | +{ | ||
869 | + cpu_maps_update_begin(); | ||
870 | + cpu_hotplug_disabled = 0; | ||
871 | + cpu_maps_update_done(); | ||
872 | +} | ||
873 | + | ||
874 | #else /* #if CONFIG_HOTPLUG_CPU */ | ||
875 | static void cpu_hotplug_begin(void) {} | ||
876 | static void cpu_hotplug_done(void) {} | ||
877 | @@ -479,36 +500,6 @@ static int __init alloc_frozen_cpus(void) | ||
878 | core_initcall(alloc_frozen_cpus); | ||
879 | |||
880 | /* | ||
881 | - * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU | ||
882 | - * hotplug when tasks are about to be frozen. Also, don't allow the freezer | ||
883 | - * to continue until any currently running CPU hotplug operation gets | ||
884 | - * completed. | ||
885 | - * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the | ||
886 | - * 'cpu_add_remove_lock'. And this same lock is also taken by the regular | ||
887 | - * CPU hotplug path and released only after it is complete. Thus, we | ||
888 | - * (and hence the freezer) will block here until any currently running CPU | ||
889 | - * hotplug operation gets completed. | ||
890 | - */ | ||
891 | -void cpu_hotplug_disable_before_freeze(void) | ||
892 | -{ | ||
893 | - cpu_maps_update_begin(); | ||
894 | - cpu_hotplug_disabled = 1; | ||
895 | - cpu_maps_update_done(); | ||
896 | -} | ||
897 | - | ||
898 | - | ||
899 | -/* | ||
900 | - * When tasks have been thawed, re-enable regular CPU hotplug (which had been | ||
901 | - * disabled while beginning to freeze tasks). | ||
902 | - */ | ||
903 | -void cpu_hotplug_enable_after_thaw(void) | ||
904 | -{ | ||
905 | - cpu_maps_update_begin(); | ||
906 | - cpu_hotplug_disabled = 0; | ||
907 | - cpu_maps_update_done(); | ||
908 | -} | ||
909 | - | ||
910 | -/* | ||
911 | * When callbacks for CPU hotplug notifications are being executed, we must | ||
912 | * ensure that the state of the system with respect to the tasks being frozen | ||
913 | * or not, as reported by the notification, remains unchanged *throughout the | ||
914 | @@ -527,12 +518,12 @@ cpu_hotplug_pm_callback(struct notifier_block *nb, | ||
915 | |||
916 | case PM_SUSPEND_PREPARE: | ||
917 | case PM_HIBERNATION_PREPARE: | ||
918 | - cpu_hotplug_disable_before_freeze(); | ||
919 | + cpu_hotplug_disable(); | ||
920 | break; | ||
921 | |||
922 | case PM_POST_SUSPEND: | ||
923 | case PM_POST_HIBERNATION: | ||
924 | - cpu_hotplug_enable_after_thaw(); | ||
925 | + cpu_hotplug_enable(); | ||
926 | break; | ||
927 | |||
928 | default: | ||
929 | diff --git a/kernel/sys.c b/kernel/sys.c | ||
930 | index 6a74b83..3449d26 100644 | ||
931 | --- a/kernel/sys.c | ||
932 | +++ b/kernel/sys.c | ||
933 | @@ -353,6 +353,29 @@ int unregister_reboot_notifier(struct notifier_block *nb) | ||
934 | } | ||
935 | EXPORT_SYMBOL(unregister_reboot_notifier); | ||
936 | |||
937 | +/* Add backwards compatibility for stable trees. */ | ||
938 | +#ifndef PF_NO_SETAFFINITY | ||
939 | +#define PF_NO_SETAFFINITY PF_THREAD_BOUND | ||
940 | +#endif | ||
941 | + | ||
942 | +static void migrate_to_reboot_cpu(void) | ||
943 | +{ | ||
944 | + /* The boot cpu is always logical cpu 0 */ | ||
945 | + int cpu = 0; | ||
946 | + | ||
947 | + cpu_hotplug_disable(); | ||
948 | + | ||
949 | + /* Make certain the cpu I'm about to reboot on is online */ | ||
950 | + if (!cpu_online(cpu)) | ||
951 | + cpu = cpumask_first(cpu_online_mask); | ||
952 | + | ||
953 | + /* Prevent races with other tasks migrating this task */ | ||
954 | + current->flags |= PF_NO_SETAFFINITY; | ||
955 | + | ||
956 | + /* Make certain I only run on the appropriate processor */ | ||
957 | + set_cpus_allowed_ptr(current, cpumask_of(cpu)); | ||
958 | +} | ||
959 | + | ||
960 | /** | ||
961 | * kernel_restart - reboot the system | ||
962 | * @cmd: pointer to buffer containing command to execute for restart | ||
963 | @@ -364,7 +387,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier); | ||
964 | void kernel_restart(char *cmd) | ||
965 | { | ||
966 | kernel_restart_prepare(cmd); | ||
967 | - disable_nonboot_cpus(); | ||
968 | + migrate_to_reboot_cpu(); | ||
969 | syscore_shutdown(); | ||
970 | if (!cmd) | ||
971 | printk(KERN_EMERG "Restarting system.\n"); | ||
972 | @@ -391,7 +414,7 @@ static void kernel_shutdown_prepare(enum system_states state) | ||
973 | void kernel_halt(void) | ||
974 | { | ||
975 | kernel_shutdown_prepare(SYSTEM_HALT); | ||
976 | - disable_nonboot_cpus(); | ||
977 | + migrate_to_reboot_cpu(); | ||
978 | syscore_shutdown(); | ||
979 | printk(KERN_EMERG "System halted.\n"); | ||
980 | kmsg_dump(KMSG_DUMP_HALT); | ||
981 | @@ -410,7 +433,7 @@ void kernel_power_off(void) | ||
982 | kernel_shutdown_prepare(SYSTEM_POWER_OFF); | ||
983 | if (pm_power_off_prepare) | ||
984 | pm_power_off_prepare(); | ||
985 | - disable_nonboot_cpus(); | ||
986 | + migrate_to_reboot_cpu(); | ||
987 | syscore_shutdown(); | ||
988 | printk(KERN_EMERG "Power down.\n"); | ||
989 | kmsg_dump(KMSG_DUMP_POWEROFF); | ||
990 | diff --git a/mm/hugetlb.c b/mm/hugetlb.c | ||
991 | index 69b21bb..a692439 100644 | ||
992 | --- a/mm/hugetlb.c | ||
993 | +++ b/mm/hugetlb.c | ||
994 | @@ -2768,7 +2768,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, | ||
995 | if (ptep) { | ||
996 | entry = huge_ptep_get(ptep); | ||
997 | if (unlikely(is_hugetlb_entry_migration(entry))) { | ||
998 | - migration_entry_wait(mm, (pmd_t *)ptep, address); | ||
999 | + migration_entry_wait_huge(mm, ptep); | ||
1000 | return 0; | ||
1001 | } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) | ||
1002 | return VM_FAULT_HWPOISON_LARGE | | ||
1003 | diff --git a/mm/migrate.c b/mm/migrate.c | ||
1004 | index 37cd07b..5f588b1 100644 | ||
1005 | --- a/mm/migrate.c | ||
1006 | +++ b/mm/migrate.c | ||
1007 | @@ -180,15 +180,14 @@ static void remove_migration_ptes(struct page *old, struct page *new) | ||
1008 | * get to the page and wait until migration is finished. | ||
1009 | * When we return from this function the fault will be retried. | ||
1010 | */ | ||
1011 | -void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, | ||
1012 | - unsigned long address) | ||
1013 | +static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, | ||
1014 | + spinlock_t *ptl) | ||
1015 | { | ||
1016 | - pte_t *ptep, pte; | ||
1017 | - spinlock_t *ptl; | ||
1018 | + pte_t pte; | ||
1019 | swp_entry_t entry; | ||
1020 | struct page *page; | ||
1021 | |||
1022 | - ptep = pte_offset_map_lock(mm, pmd, address, &ptl); | ||
1023 | + spin_lock(ptl); | ||
1024 | pte = *ptep; | ||
1025 | if (!is_swap_pte(pte)) | ||
1026 | goto out; | ||
1027 | @@ -216,6 +215,20 @@ out: | ||
1028 | pte_unmap_unlock(ptep, ptl); | ||
1029 | } | ||
1030 | |||
1031 | +void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, | ||
1032 | + unsigned long address) | ||
1033 | +{ | ||
1034 | + spinlock_t *ptl = pte_lockptr(mm, pmd); | ||
1035 | + pte_t *ptep = pte_offset_map(pmd, address); | ||
1036 | + __migration_entry_wait(mm, ptep, ptl); | ||
1037 | +} | ||
1038 | + | ||
1039 | +void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte) | ||
1040 | +{ | ||
1041 | + spinlock_t *ptl = &(mm)->page_table_lock; | ||
1042 | + __migration_entry_wait(mm, pte, ptl); | ||
1043 | +} | ||
1044 | + | ||
1045 | #ifdef CONFIG_BLOCK | ||
1046 | /* Returns true if all buffers are successfully locked */ | ||
1047 | static bool buffer_migrate_lock_buffers(struct buffer_head *head, | ||
1048 | diff --git a/mm/swap_state.c b/mm/swap_state.c | ||
1049 | index 4c5ff7f..1fa9220 100644 | ||
1050 | --- a/mm/swap_state.c | ||
1051 | +++ b/mm/swap_state.c | ||
1052 | @@ -313,8 +313,24 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, | ||
1053 | * Swap entry may have been freed since our caller observed it. | ||
1054 | */ | ||
1055 | err = swapcache_prepare(entry); | ||
1056 | - if (err == -EEXIST) { /* seems racy */ | ||
1057 | + if (err == -EEXIST) { | ||
1058 | radix_tree_preload_end(); | ||
1059 | + /* | ||
1060 | + * We might race against get_swap_page() and stumble | ||
1061 | + * across a SWAP_HAS_CACHE swap_map entry whose page | ||
1062 | + * has not been brought into the swapcache yet, while | ||
1063 | + * the other end is scheduled away waiting on discard | ||
1064 | + * I/O completion at scan_swap_map(). | ||
1065 | + * | ||
1066 | + * In order to avoid turning this transitory state | ||
1067 | + * into a permanent loop around this -EEXIST case | ||
1068 | + * if !CONFIG_PREEMPT and the I/O completion happens | ||
1069 | + * to be waiting on the CPU waitqueue where we are now | ||
1070 | + * busy looping, we just conditionally invoke the | ||
1071 | + * scheduler here, if there are some more important | ||
1072 | + * tasks to run. | ||
1073 | + */ | ||
1074 | + cond_resched(); | ||
1075 | continue; | ||
1076 | } | ||
1077 | if (err) { /* swp entry is obsolete ? */ | ||
1078 | diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c | ||
1079 | index 13b6c28..9197ae7 100644 | ||
1080 | --- a/net/bluetooth/hci_core.c | ||
1081 | +++ b/net/bluetooth/hci_core.c | ||
1082 | @@ -1120,11 +1120,15 @@ EXPORT_SYMBOL(hci_free_dev); | ||
1083 | static void hci_power_on(struct work_struct *work) | ||
1084 | { | ||
1085 | struct hci_dev *hdev = container_of(work, struct hci_dev, power_on); | ||
1086 | + int err; | ||
1087 | |||
1088 | BT_DBG("%s", hdev->name); | ||
1089 | |||
1090 | - if (hci_dev_open(hdev->id) < 0) | ||
1091 | + err = hci_dev_open(hdev->id); | ||
1092 | + if (err < 0) { | ||
1093 | + mgmt_set_powered_failed(hdev, err); | ||
1094 | return; | ||
1095 | + } | ||
1096 | |||
1097 | if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) | ||
1098 | schedule_delayed_work(&hdev->power_off, | ||
1099 | diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c | ||
1100 | index 8f3d9dc..9f2f206 100644 | ||
1101 | --- a/net/bluetooth/mgmt.c | ||
1102 | +++ b/net/bluetooth/mgmt.c | ||
1103 | @@ -2833,6 +2833,27 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered) | ||
1104 | return err; | ||
1105 | } | ||
1106 | |||
1107 | +int mgmt_set_powered_failed(struct hci_dev *hdev, int err) | ||
1108 | +{ | ||
1109 | + struct pending_cmd *cmd; | ||
1110 | + u8 status; | ||
1111 | + | ||
1112 | + cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev); | ||
1113 | + if (!cmd) | ||
1114 | + return -ENOENT; | ||
1115 | + | ||
1116 | + if (err == -ERFKILL) | ||
1117 | + status = MGMT_STATUS_RFKILLED; | ||
1118 | + else | ||
1119 | + status = MGMT_STATUS_FAILED; | ||
1120 | + | ||
1121 | + err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status); | ||
1122 | + | ||
1123 | + mgmt_pending_remove(cmd); | ||
1124 | + | ||
1125 | + return err; | ||
1126 | +} | ||
1127 | + | ||
1128 | int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable) | ||
1129 | { | ||
1130 | struct cmd_lookup match = { NULL, hdev }; | ||
1131 | diff --git a/net/ceph/auth.c b/net/ceph/auth.c | ||
1132 | index b4bf4ac..6b923bc 100644 | ||
1133 | --- a/net/ceph/auth.c | ||
1134 | +++ b/net/ceph/auth.c | ||
1135 | @@ -47,6 +47,7 @@ struct ceph_auth_client *ceph_auth_init(const char *name, const struct ceph_cryp | ||
1136 | if (!ac) | ||
1137 | goto out; | ||
1138 | |||
1139 | + mutex_init(&ac->mutex); | ||
1140 | ac->negotiating = true; | ||
1141 | if (name) | ||
1142 | ac->name = name; | ||
1143 | @@ -73,10 +74,12 @@ void ceph_auth_destroy(struct ceph_auth_client *ac) | ||
1144 | */ | ||
1145 | void ceph_auth_reset(struct ceph_auth_client *ac) | ||
1146 | { | ||
1147 | + mutex_lock(&ac->mutex); | ||
1148 | dout("auth_reset %p\n", ac); | ||
1149 | if (ac->ops && !ac->negotiating) | ||
1150 | ac->ops->reset(ac); | ||
1151 | ac->negotiating = true; | ||
1152 | + mutex_unlock(&ac->mutex); | ||
1153 | } | ||
1154 | |||
1155 | int ceph_entity_name_encode(const char *name, void **p, void *end) | ||
1156 | @@ -102,6 +105,7 @@ int ceph_auth_build_hello(struct ceph_auth_client *ac, void *buf, size_t len) | ||
1157 | int i, num; | ||
1158 | int ret; | ||
1159 | |||
1160 | + mutex_lock(&ac->mutex); | ||
1161 | dout("auth_build_hello\n"); | ||
1162 | monhdr->have_version = 0; | ||
1163 | monhdr->session_mon = cpu_to_le16(-1); | ||
1164 | @@ -122,15 +126,19 @@ int ceph_auth_build_hello(struct ceph_auth_client *ac, void *buf, size_t len) | ||
1165 | |||
1166 | ret = ceph_entity_name_encode(ac->name, &p, end); | ||
1167 | if (ret < 0) | ||
1168 | - return ret; | ||
1169 | + goto out; | ||
1170 | ceph_decode_need(&p, end, sizeof(u64), bad); | ||
1171 | ceph_encode_64(&p, ac->global_id); | ||
1172 | |||
1173 | ceph_encode_32(&lenp, p - lenp - sizeof(u32)); | ||
1174 | - return p - buf; | ||
1175 | + ret = p - buf; | ||
1176 | +out: | ||
1177 | + mutex_unlock(&ac->mutex); | ||
1178 | + return ret; | ||
1179 | |||
1180 | bad: | ||
1181 | - return -ERANGE; | ||
1182 | + ret = -ERANGE; | ||
1183 | + goto out; | ||
1184 | } | ||
1185 | |||
1186 | static int ceph_build_auth_request(struct ceph_auth_client *ac, | ||
1187 | @@ -151,11 +159,13 @@ static int ceph_build_auth_request(struct ceph_auth_client *ac, | ||
1188 | if (ret < 0) { | ||
1189 | pr_err("error %d building auth method %s request\n", ret, | ||
1190 | ac->ops->name); | ||
1191 | - return ret; | ||
1192 | + goto out; | ||
1193 | } | ||
1194 | dout(" built request %d bytes\n", ret); | ||
1195 | ceph_encode_32(&p, ret); | ||
1196 | - return p + ret - msg_buf; | ||
1197 | + ret = p + ret - msg_buf; | ||
1198 | +out: | ||
1199 | + return ret; | ||
1200 | } | ||
1201 | |||
1202 | /* | ||
1203 | @@ -176,6 +186,7 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac, | ||
1204 | int result_msg_len; | ||
1205 | int ret = -EINVAL; | ||
1206 | |||
1207 | + mutex_lock(&ac->mutex); | ||
1208 | dout("handle_auth_reply %p %p\n", p, end); | ||
1209 | ceph_decode_need(&p, end, sizeof(u32) * 3 + sizeof(u64), bad); | ||
1210 | protocol = ceph_decode_32(&p); | ||
1211 | @@ -227,33 +238,103 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac, | ||
1212 | |||
1213 | ret = ac->ops->handle_reply(ac, result, payload, payload_end); | ||
1214 | if (ret == -EAGAIN) { | ||
1215 | - return ceph_build_auth_request(ac, reply_buf, reply_len); | ||
1216 | + ret = ceph_build_auth_request(ac, reply_buf, reply_len); | ||
1217 | } else if (ret) { | ||
1218 | pr_err("auth method '%s' error %d\n", ac->ops->name, ret); | ||
1219 | - return ret; | ||
1220 | } | ||
1221 | - return 0; | ||
1222 | |||
1223 | -bad: | ||
1224 | - pr_err("failed to decode auth msg\n"); | ||
1225 | out: | ||
1226 | + mutex_unlock(&ac->mutex); | ||
1227 | return ret; | ||
1228 | + | ||
1229 | +bad: | ||
1230 | + pr_err("failed to decode auth msg\n"); | ||
1231 | + ret = -EINVAL; | ||
1232 | + goto out; | ||
1233 | } | ||
1234 | |||
1235 | int ceph_build_auth(struct ceph_auth_client *ac, | ||
1236 | void *msg_buf, size_t msg_len) | ||
1237 | { | ||
1238 | + int ret = 0; | ||
1239 | + | ||
1240 | + mutex_lock(&ac->mutex); | ||
1241 | if (!ac->protocol) | ||
1242 | - return ceph_auth_build_hello(ac, msg_buf, msg_len); | ||
1243 | - BUG_ON(!ac->ops); | ||
1244 | - if (ac->ops->should_authenticate(ac)) | ||
1245 | - return ceph_build_auth_request(ac, msg_buf, msg_len); | ||
1246 | - return 0; | ||
1247 | + ret = ceph_auth_build_hello(ac, msg_buf, msg_len); | ||
1248 | + else if (ac->ops->should_authenticate(ac)) | ||
1249 | + ret = ceph_build_auth_request(ac, msg_buf, msg_len); | ||
1250 | + mutex_unlock(&ac->mutex); | ||
1251 | + return ret; | ||
1252 | } | ||
1253 | |||
1254 | int ceph_auth_is_authenticated(struct ceph_auth_client *ac) | ||
1255 | { | ||
1256 | - if (!ac->ops) | ||
1257 | - return 0; | ||
1258 | - return ac->ops->is_authenticated(ac); | ||
1259 | + int ret = 0; | ||
1260 | + | ||
1261 | + mutex_lock(&ac->mutex); | ||
1262 | + if (ac->ops) | ||
1263 | + ret = ac->ops->is_authenticated(ac); | ||
1264 | + mutex_unlock(&ac->mutex); | ||
1265 | + return ret; | ||
1266 | +} | ||
1267 | +EXPORT_SYMBOL(ceph_auth_is_authenticated); | ||
1268 | + | ||
1269 | +int ceph_auth_create_authorizer(struct ceph_auth_client *ac, | ||
1270 | + int peer_type, | ||
1271 | + struct ceph_auth_handshake *auth) | ||
1272 | +{ | ||
1273 | + int ret = 0; | ||
1274 | + | ||
1275 | + mutex_lock(&ac->mutex); | ||
1276 | + if (ac->ops && ac->ops->create_authorizer) | ||
1277 | + ret = ac->ops->create_authorizer(ac, peer_type, auth); | ||
1278 | + mutex_unlock(&ac->mutex); | ||
1279 | + return ret; | ||
1280 | +} | ||
1281 | +EXPORT_SYMBOL(ceph_auth_create_authorizer); | ||
1282 | + | ||
1283 | +void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, | ||
1284 | + struct ceph_authorizer *a) | ||
1285 | +{ | ||
1286 | + mutex_lock(&ac->mutex); | ||
1287 | + if (ac->ops && ac->ops->destroy_authorizer) | ||
1288 | + ac->ops->destroy_authorizer(ac, a); | ||
1289 | + mutex_unlock(&ac->mutex); | ||
1290 | +} | ||
1291 | +EXPORT_SYMBOL(ceph_auth_destroy_authorizer); | ||
1292 | + | ||
1293 | +int ceph_auth_update_authorizer(struct ceph_auth_client *ac, | ||
1294 | + int peer_type, | ||
1295 | + struct ceph_auth_handshake *a) | ||
1296 | +{ | ||
1297 | + int ret = 0; | ||
1298 | + | ||
1299 | + mutex_lock(&ac->mutex); | ||
1300 | + if (ac->ops && ac->ops->update_authorizer) | ||
1301 | + ret = ac->ops->update_authorizer(ac, peer_type, a); | ||
1302 | + mutex_unlock(&ac->mutex); | ||
1303 | + return ret; | ||
1304 | +} | ||
1305 | +EXPORT_SYMBOL(ceph_auth_update_authorizer); | ||
1306 | + | ||
1307 | +int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, | ||
1308 | + struct ceph_authorizer *a, size_t len) | ||
1309 | +{ | ||
1310 | + int ret = 0; | ||
1311 | + | ||
1312 | + mutex_lock(&ac->mutex); | ||
1313 | + if (ac->ops && ac->ops->verify_authorizer_reply) | ||
1314 | + ret = ac->ops->verify_authorizer_reply(ac, a, len); | ||
1315 | + mutex_unlock(&ac->mutex); | ||
1316 | + return ret; | ||
1317 | +} | ||
1318 | +EXPORT_SYMBOL(ceph_auth_verify_authorizer_reply); | ||
1319 | + | ||
1320 | +void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type) | ||
1321 | +{ | ||
1322 | + mutex_lock(&ac->mutex); | ||
1323 | + if (ac->ops && ac->ops->invalidate_authorizer) | ||
1324 | + ac->ops->invalidate_authorizer(ac, peer_type); | ||
1325 | + mutex_unlock(&ac->mutex); | ||
1326 | } | ||
1327 | +EXPORT_SYMBOL(ceph_auth_invalidate_authorizer); | ||
1328 | diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c | ||
1329 | index a16bf14..96238ba 100644 | ||
1330 | --- a/net/ceph/auth_x.c | ||
1331 | +++ b/net/ceph/auth_x.c | ||
1332 | @@ -298,6 +298,7 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac, | ||
1333 | return -ENOMEM; | ||
1334 | } | ||
1335 | au->service = th->service; | ||
1336 | + au->secret_id = th->secret_id; | ||
1337 | |||
1338 | msg_a = au->buf->vec.iov_base; | ||
1339 | msg_a->struct_v = 1; | ||
1340 | @@ -555,6 +556,26 @@ static int ceph_x_create_authorizer( | ||
1341 | return 0; | ||
1342 | } | ||
1343 | |||
1344 | +static int ceph_x_update_authorizer( | ||
1345 | + struct ceph_auth_client *ac, int peer_type, | ||
1346 | + struct ceph_auth_handshake *auth) | ||
1347 | +{ | ||
1348 | + struct ceph_x_authorizer *au; | ||
1349 | + struct ceph_x_ticket_handler *th; | ||
1350 | + | ||
1351 | + th = get_ticket_handler(ac, peer_type); | ||
1352 | + if (IS_ERR(th)) | ||
1353 | + return PTR_ERR(th); | ||
1354 | + | ||
1355 | + au = (struct ceph_x_authorizer *)auth->authorizer; | ||
1356 | + if (au->secret_id < th->secret_id) { | ||
1357 | + dout("ceph_x_update_authorizer service %u secret %llu < %llu\n", | ||
1358 | + au->service, au->secret_id, th->secret_id); | ||
1359 | + return ceph_x_build_authorizer(ac, th, au); | ||
1360 | + } | ||
1361 | + return 0; | ||
1362 | +} | ||
1363 | + | ||
1364 | static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac, | ||
1365 | struct ceph_authorizer *a, size_t len) | ||
1366 | { | ||
1367 | @@ -630,7 +651,7 @@ static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac, | ||
1368 | |||
1369 | th = get_ticket_handler(ac, peer_type); | ||
1370 | if (!IS_ERR(th)) | ||
1371 | - remove_ticket_handler(ac, th); | ||
1372 | + memset(&th->validity, 0, sizeof(th->validity)); | ||
1373 | } | ||
1374 | |||
1375 | |||
1376 | @@ -641,6 +662,7 @@ static const struct ceph_auth_client_ops ceph_x_ops = { | ||
1377 | .build_request = ceph_x_build_request, | ||
1378 | .handle_reply = ceph_x_handle_reply, | ||
1379 | .create_authorizer = ceph_x_create_authorizer, | ||
1380 | + .update_authorizer = ceph_x_update_authorizer, | ||
1381 | .verify_authorizer_reply = ceph_x_verify_authorizer_reply, | ||
1382 | .destroy_authorizer = ceph_x_destroy_authorizer, | ||
1383 | .invalidate_authorizer = ceph_x_invalidate_authorizer, | ||
1384 | diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h | ||
1385 | index e02da7a..5c2ad4e 100644 | ||
1386 | --- a/net/ceph/auth_x.h | ||
1387 | +++ b/net/ceph/auth_x.h | ||
1388 | @@ -29,6 +29,7 @@ struct ceph_x_authorizer { | ||
1389 | struct ceph_buffer *buf; | ||
1390 | unsigned service; | ||
1391 | u64 nonce; | ||
1392 | + u64 secret_id; | ||
1393 | char reply_buf[128]; /* big enough for encrypted blob */ | ||
1394 | }; | ||
1395 | |||
1396 | diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c | ||
1397 | index ba1037c..7f703ae 100644 | ||
1398 | --- a/net/ceph/messenger.c | ||
1399 | +++ b/net/ceph/messenger.c | ||
1400 | @@ -1542,7 +1542,6 @@ static int process_connect(struct ceph_connection *con) | ||
1401 | con->error_msg = "connect authorization failure"; | ||
1402 | return -1; | ||
1403 | } | ||
1404 | - con->auth_retry = 1; | ||
1405 | con_out_kvec_reset(con); | ||
1406 | ret = prepare_write_connect(con); | ||
1407 | if (ret < 0) | ||
1408 | @@ -1627,7 +1626,7 @@ static int process_connect(struct ceph_connection *con) | ||
1409 | |||
1410 | WARN_ON(con->state != CON_STATE_NEGOTIATING); | ||
1411 | con->state = CON_STATE_OPEN; | ||
1412 | - | ||
1413 | + con->auth_retry = 0; /* we authenticated; clear flag */ | ||
1414 | con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq); | ||
1415 | con->connect_seq++; | ||
1416 | con->peer_features = server_feat; | ||
1417 | diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c | ||
1418 | index 89a6409..6765da3 100644 | ||
1419 | --- a/net/ceph/mon_client.c | ||
1420 | +++ b/net/ceph/mon_client.c | ||
1421 | @@ -737,7 +737,7 @@ static void delayed_work(struct work_struct *work) | ||
1422 | |||
1423 | __validate_auth(monc); | ||
1424 | |||
1425 | - if (monc->auth->ops->is_authenticated(monc->auth)) | ||
1426 | + if (ceph_auth_is_authenticated(monc->auth)) | ||
1427 | __send_subscribe(monc); | ||
1428 | } | ||
1429 | __schedule_delayed(monc); | ||
1430 | @@ -893,8 +893,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc, | ||
1431 | |||
1432 | mutex_lock(&monc->mutex); | ||
1433 | had_debugfs_info = have_debugfs_info(monc); | ||
1434 | - if (monc->auth->ops) | ||
1435 | - was_auth = monc->auth->ops->is_authenticated(monc->auth); | ||
1436 | + was_auth = ceph_auth_is_authenticated(monc->auth); | ||
1437 | monc->pending_auth = 0; | ||
1438 | ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base, | ||
1439 | msg->front.iov_len, | ||
1440 | @@ -905,7 +904,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc, | ||
1441 | wake_up_all(&monc->client->auth_wq); | ||
1442 | } else if (ret > 0) { | ||
1443 | __send_prepared_auth_request(monc, ret); | ||
1444 | - } else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) { | ||
1445 | + } else if (!was_auth && ceph_auth_is_authenticated(monc->auth)) { | ||
1446 | dout("authenticated, starting session\n"); | ||
1447 | |||
1448 | monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT; | ||
1449 | diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c | ||
1450 | index b16dfa2..8e3aa4d 100644 | ||
1451 | --- a/net/ceph/osd_client.c | ||
1452 | +++ b/net/ceph/osd_client.c | ||
1453 | @@ -671,8 +671,7 @@ static void put_osd(struct ceph_osd *osd) | ||
1454 | if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) { | ||
1455 | struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth; | ||
1456 | |||
1457 | - if (ac->ops && ac->ops->destroy_authorizer) | ||
1458 | - ac->ops->destroy_authorizer(ac, osd->o_auth.authorizer); | ||
1459 | + ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer); | ||
1460 | kfree(osd); | ||
1461 | } | ||
1462 | } | ||
1463 | @@ -1337,13 +1336,13 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend) | ||
1464 | __register_request(osdc, req); | ||
1465 | __unregister_linger_request(osdc, req); | ||
1466 | } | ||
1467 | + reset_changed_osds(osdc); | ||
1468 | mutex_unlock(&osdc->request_mutex); | ||
1469 | |||
1470 | if (needmap) { | ||
1471 | dout("%d requests for down osds, need new map\n", needmap); | ||
1472 | ceph_monc_request_next_osdmap(&osdc->client->monc); | ||
1473 | } | ||
1474 | - reset_changed_osds(osdc); | ||
1475 | } | ||
1476 | |||
1477 | |||
1478 | @@ -2127,13 +2126,17 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, | ||
1479 | struct ceph_auth_handshake *auth = &o->o_auth; | ||
1480 | |||
1481 | if (force_new && auth->authorizer) { | ||
1482 | - if (ac->ops && ac->ops->destroy_authorizer) | ||
1483 | - ac->ops->destroy_authorizer(ac, auth->authorizer); | ||
1484 | + ceph_auth_destroy_authorizer(ac, auth->authorizer); | ||
1485 | auth->authorizer = NULL; | ||
1486 | } | ||
1487 | - if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) { | ||
1488 | - int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_OSD, | ||
1489 | - auth); | ||
1490 | + if (!auth->authorizer) { | ||
1491 | + int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD, | ||
1492 | + auth); | ||
1493 | + if (ret) | ||
1494 | + return ERR_PTR(ret); | ||
1495 | + } else { | ||
1496 | + int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD, | ||
1497 | + auth); | ||
1498 | if (ret) | ||
1499 | return ERR_PTR(ret); | ||
1500 | } | ||
1501 | @@ -2149,11 +2152,7 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len) | ||
1502 | struct ceph_osd_client *osdc = o->o_osdc; | ||
1503 | struct ceph_auth_client *ac = osdc->client->monc.auth; | ||
1504 | |||
1505 | - /* | ||
1506 | - * XXX If ac->ops or ac->ops->verify_authorizer_reply is null, | ||
1507 | - * XXX which do we do: succeed or fail? | ||
1508 | - */ | ||
1509 | - return ac->ops->verify_authorizer_reply(ac, o->o_auth.authorizer, len); | ||
1510 | + return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len); | ||
1511 | } | ||
1512 | |||
1513 | static int invalidate_authorizer(struct ceph_connection *con) | ||
1514 | @@ -2162,9 +2161,7 @@ static int invalidate_authorizer(struct ceph_connection *con) | ||
1515 | struct ceph_osd_client *osdc = o->o_osdc; | ||
1516 | struct ceph_auth_client *ac = osdc->client->monc.auth; | ||
1517 | |||
1518 | - if (ac->ops && ac->ops->invalidate_authorizer) | ||
1519 | - ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD); | ||
1520 | - | ||
1521 | + ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD); | ||
1522 | return ceph_monc_validate_auth(&osdc->client->monc); | ||
1523 | } | ||
1524 |