Magellan Linux

Annotation of /trunk/kernel-lts/patches-3.4/0144-3.4.45-all-fixes.patch



Revision 2204
Thu Jun 13 10:38:19 2013 UTC by niro
File size: 34603 byte(s)
-linux-3.4.45
1 niro 2204 diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
2     index d81f994..762f7a6 100644
3     --- a/arch/powerpc/include/asm/ppc-opcode.h
4     +++ b/arch/powerpc/include/asm/ppc-opcode.h
5     @@ -45,6 +45,10 @@
6     #define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
7     #define PPC_INST_MTSPR_DSCR 0x7c1103a6
8     #define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
9     +#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
10     +#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff
11     +#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
12     +#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff
13     #define PPC_INST_SLBFEE 0x7c0007a7
14    
15     #define PPC_INST_STRING 0x7c00042a
16     diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
17     index ae0843f..3bb7197 100644
18     --- a/arch/powerpc/kernel/traps.c
19     +++ b/arch/powerpc/kernel/traps.c
20     @@ -960,7 +960,10 @@ static int emulate_instruction(struct pt_regs *regs)
21    
22     #ifdef CONFIG_PPC64
23     /* Emulate the mfspr rD, DSCR. */
24     - if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) &&
25     + if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
26     + PPC_INST_MFSPR_DSCR_USER) ||
27     + ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
28     + PPC_INST_MFSPR_DSCR)) &&
29     cpu_has_feature(CPU_FTR_DSCR)) {
30     PPC_WARN_EMULATED(mfdscr, regs);
31     rd = (instword >> 21) & 0x1f;
32     @@ -968,7 +971,10 @@ static int emulate_instruction(struct pt_regs *regs)
33     return 0;
34     }
35     /* Emulate the mtspr DSCR, rD. */
36     - if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) &&
37     + if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
38     + PPC_INST_MTSPR_DSCR_USER) ||
39     + ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
40     + PPC_INST_MTSPR_DSCR)) &&
41     cpu_has_feature(CPU_FTR_DSCR)) {
42     PPC_WARN_EMULATED(mtdscr, regs);
43     rd = (instword >> 21) & 0x1f;
44     diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
45     index 6e8f677..6130719 100644
46     --- a/arch/powerpc/mm/numa.c
47     +++ b/arch/powerpc/mm/numa.c
48     @@ -201,7 +201,7 @@ int __node_distance(int a, int b)
49     int distance = LOCAL_DISTANCE;
50    
51     if (!form1_affinity)
52     - return distance;
53     + return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
54    
55     for (i = 0; i < distance_ref_points_depth; i++) {
56     if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
57     diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
58     index 520b426..fd1a099 100644
59     --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
60     +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
61     @@ -310,7 +310,7 @@ void intel_pmu_lbr_read(void)
62     * - in case there is no HW filter
63     * - in case the HW filter has errata or limitations
64     */
65     -static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
66     +static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
67     {
68     u64 br_type = event->attr.branch_sample_type;
69     int mask = 0;
70     @@ -318,8 +318,11 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
71     if (br_type & PERF_SAMPLE_BRANCH_USER)
72     mask |= X86_BR_USER;
73    
74     - if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
75     + if (br_type & PERF_SAMPLE_BRANCH_KERNEL) {
76     + if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
77     + return -EACCES;
78     mask |= X86_BR_KERNEL;
79     + }
80    
81     /* we ignore BRANCH_HV here */
82    
83     @@ -339,6 +342,8 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
84     * be used by fixup code for some CPU
85     */
86     event->hw.branch_reg.reg = mask;
87     +
88     + return 0;
89     }
90    
91     /*
92     @@ -386,7 +391,9 @@ int intel_pmu_setup_lbr_filter(struct perf_event *event)
93     /*
94     * setup SW LBR filter
95     */
96     - intel_pmu_setup_sw_lbr_filter(event);
97     + ret = intel_pmu_setup_sw_lbr_filter(event);
98     + if (ret)
99     + return ret;
100    
101     /*
102     * setup HW LBR filter, if any
103     @@ -442,8 +449,18 @@ static int branch_type(unsigned long from, unsigned long to)
104     return X86_BR_NONE;
105    
106     addr = buf;
107     - } else
108     - addr = (void *)from;
109     + } else {
110     + /*
111     + * The LBR logs any address in the IP, even if the IP just
112     + * faulted. This means userspace can control the from address.
113     + * Ensure we don't blindly read any address by validating it is
114     + * a known text address.
115     + */
116     + if (kernel_text_address(from))
117     + addr = (void *)from;
118     + else
119     + return X86_BR_NONE;
120     + }
121    
122     /*
123     * decoder needs to know the ABI especially
124     diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
125     index fae7090..71d37f5 100644
126     --- a/arch/x86/mm/init.c
127     +++ b/arch/x86/mm/init.c
128     @@ -45,11 +45,15 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
129     int i;
130     unsigned long puds = 0, pmds = 0, ptes = 0, tables;
131     unsigned long start = 0, good_end;
132     + unsigned long pgd_extra = 0;
133     phys_addr_t base;
134    
135     for (i = 0; i < nr_range; i++) {
136     unsigned long range, extra;
137    
138     + if ((mr[i].end >> PGDIR_SHIFT) - (mr[i].start >> PGDIR_SHIFT))
139     + pgd_extra++;
140     +
141     range = mr[i].end - mr[i].start;
142     puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
143    
144     @@ -74,6 +78,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
145     tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
146     tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
147     tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
148     + tables += (pgd_extra * PAGE_SIZE);
149    
150     #ifdef CONFIG_X86_32
151     /* for fixmap */
152     diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
153     index 53ddbc7..0bf5bd1 100644
154     --- a/drivers/cpufreq/longhaul.c
155     +++ b/drivers/cpufreq/longhaul.c
156     @@ -77,7 +77,7 @@ static unsigned int longhaul_index;
157     static int scale_voltage;
158     static int disable_acpi_c3;
159     static int revid_errata;
160     -
161     +static int enable;
162    
163     /* Clock ratios multiplied by 10 */
164     static int mults[32];
165     @@ -965,6 +965,10 @@ static int __init longhaul_init(void)
166     if (!x86_match_cpu(longhaul_id))
167     return -ENODEV;
168    
169     + if (!enable) {
170     + printk(KERN_ERR PFX "Option \"enable\" not set. Aborting.\n");
171     + return -ENODEV;
172     + }
173     #ifdef CONFIG_SMP
174     if (num_online_cpus() > 1) {
175     printk(KERN_ERR PFX "More than 1 CPU detected, "
176     @@ -1021,6 +1025,10 @@ MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
177     * such. */
178     module_param(revid_errata, int, 0644);
179     MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
180     +/* By default driver is disabled to prevent incompatible
181     + * system freeze. */
182     +module_param(enable, int, 0644);
183     +MODULE_PARM_DESC(enable, "Enable driver");
184    
185     MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
186     MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors.");
187     diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
188     index 020a7d7..69bea56 100644
189     --- a/drivers/gpu/drm/i915/intel_dvo.c
190     +++ b/drivers/gpu/drm/i915/intel_dvo.c
191     @@ -370,6 +370,7 @@ void intel_dvo_init(struct drm_device *dev)
192     const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
193     struct i2c_adapter *i2c;
194     int gpio;
195     + bool dvoinit;
196    
197     /* Allow the I2C driver info to specify the GPIO to be used in
198     * special cases, but otherwise default to what's defined
199     @@ -389,7 +390,17 @@ void intel_dvo_init(struct drm_device *dev)
200     i2c = &dev_priv->gmbus[gpio].adapter;
201    
202     intel_dvo->dev = *dvo;
203     - if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
204     +
205     + /* GMBUS NAK handling seems to be unstable, hence let the
206     + * transmitter detection run in bit banging mode for now.
207     + */
208     + intel_gmbus_force_bit(i2c, true);
209     +
210     + dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
211     +
212     + intel_gmbus_force_bit(i2c, false);
213     +
214     + if (!dvoinit)
215     continue;
216    
217     intel_encoder->type = INTEL_OUTPUT_DVO;
218     diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
219     index a8b28c4..1ad5906 100644
220     --- a/drivers/gpu/drm/i915/intel_lvds.c
221     +++ b/drivers/gpu/drm/i915/intel_lvds.c
222     @@ -793,6 +793,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
223     DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
224     },
225     },
226     + {
227     + .callback = intel_no_lvds_dmi_callback,
228     + .ident = "Fujitsu Esprimo Q900",
229     + .matches = {
230     + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
231     + DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
232     + },
233     + },
234    
235     { } /* terminating entry */
236     };
237     diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
238     index 5ce9bf5..43672b6 100644
239     --- a/drivers/gpu/drm/radeon/atom.c
240     +++ b/drivers/gpu/drm/radeon/atom.c
241     @@ -1389,10 +1389,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
242     firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
243    
244     DRM_DEBUG("atom firmware requested %08x %dkb\n",
245     - firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
246     - firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
247     + le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
248     + le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
249    
250     - usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
251     + usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
252     }
253     ctx->scratch_size_bytes = 0;
254     if (usage_bytes == 0)
255     diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
256     index c62132c..e458acb 100644
257     --- a/drivers/gpu/drm/radeon/evergreen.c
258     +++ b/drivers/gpu/drm/radeon/evergreen.c
259     @@ -445,6 +445,16 @@ void evergreen_hpd_init(struct radeon_device *rdev)
260    
261     list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
262     struct radeon_connector *radeon_connector = to_radeon_connector(connector);
263     +
264     + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
265     + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
266     + /* don't try to enable hpd on eDP or LVDS avoid breaking the
267     + * aux dp channel on imac and help (but not completely fix)
268     + * https://bugzilla.redhat.com/show_bug.cgi?id=726143
269     + * also avoid interrupt storms during dpms.
270     + */
271     + continue;
272     + }
273     switch (radeon_connector->hpd.hpd) {
274     case RADEON_HPD_1:
275     WREG32(DC_HPD1_CONTROL, tmp);
276     @@ -1146,17 +1156,16 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
277     tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
278     if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
279     radeon_wait_for_vblank(rdev, i);
280     - tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
281     WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
282     + tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
283     WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
284     - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
285     }
286     } else {
287     tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
288     if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
289     radeon_wait_for_vblank(rdev, i);
290     - tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
291     WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
292     + tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
293     WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
294     WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
295     }
296     @@ -1168,6 +1177,15 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
297     break;
298     udelay(1);
299     }
300     +
301     + /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
302     + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
303     + tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
304     + tmp &= ~EVERGREEN_CRTC_MASTER_EN;
305     + WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
306     + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
307     + save->crtc_enabled[i] = false;
308     + /* ***** */
309     } else {
310     save->crtc_enabled[i] = false;
311     }
312     @@ -1185,6 +1203,22 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
313     }
314     /* wait for the MC to settle */
315     udelay(100);
316     +
317     + /* lock double buffered regs */
318     + for (i = 0; i < rdev->num_crtc; i++) {
319     + if (save->crtc_enabled[i]) {
320     + tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
321     + if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
322     + tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
323     + WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
324     + }
325     + tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
326     + if (!(tmp & 1)) {
327     + tmp |= 1;
328     + WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
329     + }
330     + }
331     + }
332     }
333    
334     void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
335     @@ -1206,6 +1240,33 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
336     WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
337     WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
338    
339     + /* unlock regs and wait for update */
340     + for (i = 0; i < rdev->num_crtc; i++) {
341     + if (save->crtc_enabled[i]) {
342     + tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
343     + if ((tmp & 0x3) != 0) {
344     + tmp &= ~0x3;
345     + WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
346     + }
347     + tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
348     + if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
349     + tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
350     + WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
351     + }
352     + tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
353     + if (tmp & 1) {
354     + tmp &= ~1;
355     + WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
356     + }
357     + for (j = 0; j < rdev->usec_timeout; j++) {
358     + tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
359     + if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
360     + break;
361     + udelay(1);
362     + }
363     + }
364     + }
365     +
366     /* unblackout the MC */
367     tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
368     tmp &= ~BLACKOUT_MODE_MASK;
369     diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
370     index 34a0e85..e534e5d 100644
371     --- a/drivers/gpu/drm/radeon/evergreen_reg.h
372     +++ b/drivers/gpu/drm/radeon/evergreen_reg.h
373     @@ -225,6 +225,8 @@
374     #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
375     #define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
376     #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
377     +#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
378     +#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
379    
380     #define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
381     #define EVERGREEN_DC_GPIO_HPD_A 0x64b4
382     diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
383     index 7dffc57..d706da8 100644
384     --- a/drivers/gpu/drm/radeon/ni.c
385     +++ b/drivers/gpu/drm/radeon/ni.c
386     @@ -668,7 +668,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
387     (rdev->pdev->device == 0x990F) ||
388     (rdev->pdev->device == 0x9910) ||
389     (rdev->pdev->device == 0x9917) ||
390     - (rdev->pdev->device == 0x9999)) {
391     + (rdev->pdev->device == 0x9999) ||
392     + (rdev->pdev->device == 0x999C)) {
393     rdev->config.cayman.max_simds_per_se = 6;
394     rdev->config.cayman.max_backends_per_se = 2;
395     } else if ((rdev->pdev->device == 0x9903) ||
396     @@ -677,7 +678,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
397     (rdev->pdev->device == 0x990D) ||
398     (rdev->pdev->device == 0x990E) ||
399     (rdev->pdev->device == 0x9913) ||
400     - (rdev->pdev->device == 0x9918)) {
401     + (rdev->pdev->device == 0x9918) ||
402     + (rdev->pdev->device == 0x999D)) {
403     rdev->config.cayman.max_simds_per_se = 4;
404     rdev->config.cayman.max_backends_per_se = 2;
405     } else if ((rdev->pdev->device == 0x9919) ||
406     @@ -911,6 +913,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
407     WREG32(GB_BACKEND_MAP, gb_backend_map);
408     WREG32(GB_ADDR_CONFIG, gb_addr_config);
409     WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
410     + if (ASIC_IS_DCE6(rdev))
411     + WREG32(DMIF_ADDR_CALC, gb_addr_config);
412     WREG32(HDP_ADDR_CONFIG, gb_addr_config);
413    
414     /* primary versions */
415     diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
416     index 2aa7046..d90b8b7 100644
417     --- a/drivers/gpu/drm/radeon/nid.h
418     +++ b/drivers/gpu/drm/radeon/nid.h
419     @@ -42,6 +42,10 @@
420     #define CAYMAN_MAX_TCC_MASK 0xFF
421    
422     #define DMIF_ADDR_CONFIG 0xBD4
423     +
424     +/* DCE6 only */
425     +#define DMIF_ADDR_CALC 0xC00
426     +
427     #define SRBM_GFX_CNTL 0x0E44
428     #define RINGID(x) (((x) & 0x3) << 0)
429     #define VMID(x) (((x) & 0x7) << 0)
430     diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
431     index 5e30e12..38d87e1 100644
432     --- a/drivers/gpu/drm/radeon/radeon_atombios.c
433     +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
434     @@ -1998,6 +1998,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
435     num_modes = power_info->info.ucNumOfPowerModeEntries;
436     if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
437     num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
438     + if (num_modes == 0)
439     + return state_index;
440     rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
441     if (!rdev->pm.power_state)
442     return state_index;
443     @@ -2396,6 +2398,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
444     power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
445    
446     radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
447     + if (power_info->pplib.ucNumStates == 0)
448     + return state_index;
449     rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
450     power_info->pplib.ucNumStates, GFP_KERNEL);
451     if (!rdev->pm.power_state)
452     @@ -2478,6 +2482,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
453     int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
454     u16 data_offset;
455     u8 frev, crev;
456     + u8 *power_state_offset;
457    
458     if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
459     &frev, &crev, &data_offset))
460     @@ -2494,15 +2499,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
461     non_clock_info_array = (struct _NonClockInfoArray *)
462     (mode_info->atom_context->bios + data_offset +
463     le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
464     + if (state_array->ucNumEntries == 0)
465     + return state_index;
466     rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
467     state_array->ucNumEntries, GFP_KERNEL);
468     if (!rdev->pm.power_state)
469     return state_index;
470     + power_state_offset = (u8 *)state_array->states;
471     for (i = 0; i < state_array->ucNumEntries; i++) {
472     mode_index = 0;
473     - power_state = (union pplib_power_state *)&state_array->states[i];
474     - /* XXX this might be an inagua bug... */
475     - non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
476     + power_state = (union pplib_power_state *)power_state_offset;
477     + non_clock_array_index = power_state->v2.nonClockInfoIndex;
478     non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
479     &non_clock_info_array->nonClockInfo[non_clock_array_index];
480     rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
481     @@ -2514,9 +2521,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
482     if (power_state->v2.ucNumDPMLevels) {
483     for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
484     clock_array_index = power_state->v2.clockInfoIndex[j];
485     - /* XXX this might be an inagua bug... */
486     - if (clock_array_index >= clock_info_array->ucNumEntries)
487     - continue;
488     clock_info = (union pplib_clock_info *)
489     &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
490     valid = radeon_atombios_parse_pplib_clock_info(rdev,
491     @@ -2538,6 +2542,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
492     non_clock_info);
493     state_index++;
494     }
495     + power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
496     }
497     /* if multiple clock modes, mark the lowest as no display */
498     for (i = 0; i < state_index; i++) {
499     @@ -2584,7 +2589,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
500     default:
501     break;
502     }
503     - } else {
504     + }
505     +
506     + if (state_index == 0) {
507     rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
508     if (rdev->pm.power_state) {
509     rdev->pm.power_state[0].clock_info =
510     diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
511     index b8459bd..bf6ca2d 100644
512     --- a/drivers/gpu/drm/radeon/radeon_pm.c
513     +++ b/drivers/gpu/drm/radeon/radeon_pm.c
514     @@ -872,7 +872,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
515     struct radeon_device *rdev = dev->dev_private;
516    
517     seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
518     - seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
519     + /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
520     + if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
521     + seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
522     + else
523     + seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
524     seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
525     if (rdev->asic->pm.get_memory_clock)
526     seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
527     diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
528     index 1197f21..5508ad7 100644
529     --- a/drivers/gpu/drm/radeon/si.c
530     +++ b/drivers/gpu/drm/radeon/si.c
531     @@ -1799,6 +1799,7 @@ static void si_gpu_init(struct radeon_device *rdev)
532     rdev->config.si.backend_map = gb_backend_map;
533     WREG32(GB_ADDR_CONFIG, gb_addr_config);
534     WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
535     + WREG32(DMIF_ADDR_CALC, gb_addr_config);
536     WREG32(HDP_ADDR_CONFIG, gb_addr_config);
537    
538     /* primary versions */
539     diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
540     index 2c2bc63..45e240d 100644
541     --- a/drivers/gpu/drm/radeon/sid.h
542     +++ b/drivers/gpu/drm/radeon/sid.h
543     @@ -55,6 +55,8 @@
544    
545     #define DMIF_ADDR_CONFIG 0xBD4
546    
547     +#define DMIF_ADDR_CALC 0xC00
548     +
549     #define SRBM_STATUS 0xE50
550    
551     #define CC_SYS_RB_BACKEND_DISABLE 0xe80
552     diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
553     index b68d28a..33a1760 100644
554     --- a/drivers/net/ethernet/ibm/ibmveth.c
555     +++ b/drivers/net/ethernet/ibm/ibmveth.c
556     @@ -1327,7 +1327,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
557     static int __devinit ibmveth_probe(struct vio_dev *dev,
558     const struct vio_device_id *id)
559     {
560     - int rc, i;
561     + int rc, i, mac_len;
562     struct net_device *netdev;
563     struct ibmveth_adapter *adapter;
564     unsigned char *mac_addr_p;
565     @@ -1337,11 +1337,19 @@ static int __devinit ibmveth_probe(struct vio_dev *dev,
566     dev->unit_address);
567    
568     mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
569     - NULL);
570     + &mac_len);
571     if (!mac_addr_p) {
572     dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
573     return -EINVAL;
574     }
575     + /* Workaround for old/broken pHyp */
576     + if (mac_len == 8)
577     + mac_addr_p += 2;
578     + else if (mac_len != 6) {
579     + dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
580     + mac_len);
581     + return -EINVAL;
582     + }
583    
584     mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
585     VETH_MCAST_FILTER_SIZE, NULL);
586     @@ -1366,17 +1374,6 @@ static int __devinit ibmveth_probe(struct vio_dev *dev,
587    
588     netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
589    
590     - /*
591     - * Some older boxes running PHYP non-natively have an OF that returns
592     - * a 8-byte local-mac-address field (and the first 2 bytes have to be
593     - * ignored) while newer boxes' OF return a 6-byte field. Note that
594     - * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
595     - * The RPA doc specifies that the first byte must be 10b, so we'll
596     - * just look for it to solve this 8 vs. 6 byte field issue
597     - */
598     - if ((*mac_addr_p & 0x3) != 0x02)
599     - mac_addr_p += 2;
600     -
601     adapter->mac_addr = 0;
602     memcpy(&adapter->mac_addr, mac_addr_p, 6);
603    
604     diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
605     index 978af21..dd037dd 100644
606     --- a/drivers/net/ethernet/realtek/r8169.c
607     +++ b/drivers/net/ethernet/realtek/r8169.c
608     @@ -5168,6 +5168,14 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
609     goto err_stop_0;
610     }
611    
612     + /* 8168evl does not automatically pad to minimum length. */
613     + if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
614     + skb->len < ETH_ZLEN)) {
615     + if (skb_padto(skb, ETH_ZLEN))
616     + goto err_update_stats;
617     + skb_put(skb, ETH_ZLEN - skb->len);
618     + }
619     +
620     if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
621     goto err_stop_0;
622    
623     @@ -5239,6 +5247,7 @@ err_dma_1:
624     rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
625     err_dma_0:
626     dev_kfree_skb(skb);
627     +err_update_stats:
628     dev->stats.tx_dropped++;
629     return NETDEV_TX_OK;
630    
631     diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
632     index a3c9374..843b3e8 100644
633     --- a/drivers/usb/host/xhci-ring.c
634     +++ b/drivers/usb/host/xhci-ring.c
635     @@ -2459,14 +2459,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
636     * TD list.
637     */
638     if (list_empty(&ep_ring->td_list)) {
639     - xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
640     - "with no TDs queued?\n",
641     - TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
642     - ep_index);
643     - xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
644     - (le32_to_cpu(event->flags) &
645     - TRB_TYPE_BITMASK)>>10);
646     - xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
647     + /*
648     + * A stopped endpoint may generate an extra completion
649     + * event if the device was suspended. Don't print
650     + * warnings.
651     + */
652     + if (!(trb_comp_code == COMP_STOP ||
653     + trb_comp_code == COMP_STOP_INVAL)) {
654     + xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
655     + TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
656     + ep_index);
657     + xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
658     + (le32_to_cpu(event->flags) &
659     + TRB_TYPE_BITMASK)>>10);
660     + xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
661     + }
662     if (ep->skip) {
663     ep->skip = false;
664     xhci_dbg(xhci, "td_list is empty while skip "
665     diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
666     index 1feb68e..b1cdb0a 100644
667     --- a/fs/autofs4/expire.c
668     +++ b/fs/autofs4/expire.c
669     @@ -61,15 +61,6 @@ static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
670     /* This is an autofs submount, we can't expire it */
671     if (autofs_type_indirect(sbi->type))
672     goto done;
673     -
674     - /*
675     - * Otherwise it's an offset mount and we need to check
676     - * if we can umount its mount, if there is one.
677     - */
678     - if (!d_mountpoint(path.dentry)) {
679     - status = 0;
680     - goto done;
681     - }
682     }
683    
684     /* Update the expiry counter if fs is busy */
685     diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
686     index 9202b22..86a8c88 100644
687     --- a/fs/ext4/resize.c
688     +++ b/fs/ext4/resize.c
689     @@ -1630,6 +1630,10 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
690     return 0;
691    
692     ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &offset);
693     + if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
694     + ext4_warning(sb, "resize would cause inodes_count overflow");
695     + return -EINVAL;
696     + }
697     ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);
698    
699     n_desc_blocks = (n_group + EXT4_DESC_PER_BLOCK(sb)) /
700     diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
701     index d9928c1..1a13caa 100644
702     --- a/include/drm/drm_pciids.h
703     +++ b/include/drm/drm_pciids.h
704     @@ -231,6 +231,7 @@
705     {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
706     {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
707     {0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
708     + {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
709     {0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
710     {0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
711     {0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
712     @@ -238,11 +239,13 @@
713     {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
714     {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
715     {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
716     + {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
717     {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
718     {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
719     {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
720     {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
721     {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
722     + {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
723     {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
724     {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
725     {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
726     @@ -594,6 +597,8 @@
727     {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
728     {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
729     {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
730     + {0x1002, 0x999C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
731     + {0x1002, 0x999D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
732     {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
733     {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
734     {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
735     diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
736     index 31fdc48..0caf1f8 100644
737     --- a/kernel/audit_tree.c
738     +++ b/kernel/audit_tree.c
739     @@ -608,9 +608,9 @@ void audit_trim_trees(void)
740     }
741     spin_unlock(&hash_lock);
742     trim_marked(tree);
743     - put_tree(tree);
744     drop_collected_mounts(root_mnt);
745     skip_it:
746     + put_tree(tree);
747     mutex_lock(&audit_filter_mutex);
748     }
749     list_del(&cursor);
750     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
751     index b29ebd3..75c11bf 100644
752     --- a/kernel/trace/trace.c
753     +++ b/kernel/trace/trace.c
754     @@ -4855,36 +4855,32 @@ void trace_init_global_iter(struct trace_iterator *iter)
755     iter->cpu_file = TRACE_PIPE_ALL_CPU;
756     }
757    
758     -static void
759     -__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
760     +void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
761     {
762     - static arch_spinlock_t ftrace_dump_lock =
763     - (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
764     /* use static because iter can be a bit big for the stack */
765     static struct trace_iterator iter;
766     + static atomic_t dump_running;
767     unsigned int old_userobj;
768     - static int dump_ran;
769     unsigned long flags;
770     int cnt = 0, cpu;
771    
772     - /* only one dump */
773     - local_irq_save(flags);
774     - arch_spin_lock(&ftrace_dump_lock);
775     - if (dump_ran)
776     - goto out;
777     -
778     - dump_ran = 1;
779     + /* Only allow one dump user at a time. */
780     + if (atomic_inc_return(&dump_running) != 1) {
781     + atomic_dec(&dump_running);
782     + return;
783     + }
784    
785     + /*
786     + * Always turn off tracing when we dump.
787     + * We don't need to show trace output of what happens
788     + * between multiple crashes.
789     + *
790     + * If the user does a sysrq-z, then they can re-enable
791     + * tracing with echo 1 > tracing_on.
792     + */
793     tracing_off();
794    
795     - /* Did function tracer already get disabled? */
796     - if (ftrace_is_dead()) {
797     - printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
798     - printk("# MAY BE MISSING FUNCTION EVENTS\n");
799     - }
800     -
801     - if (disable_tracing)
802     - ftrace_kill();
803     + local_irq_save(flags);
804    
805     trace_init_global_iter(&iter);
806    
807     @@ -4917,6 +4913,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
808    
809     printk(KERN_TRACE "Dumping ftrace buffer:\n");
810    
811     + /* Did function tracer already get disabled? */
812     + if (ftrace_is_dead()) {
813     + printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
814     + printk("# MAY BE MISSING FUNCTION EVENTS\n");
815     + }
816     +
817     /*
818     * We need to stop all tracing on all CPUS to read the
819     * the next buffer. This is a bit expensive, but is
820     @@ -4956,26 +4958,14 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
821     printk(KERN_TRACE "---------------------------------\n");
822    
823     out_enable:
824     - /* Re-enable tracing if requested */
825     - if (!disable_tracing) {
826     - trace_flags |= old_userobj;
827     + trace_flags |= old_userobj;
828    
829     - for_each_tracing_cpu(cpu) {
830     - atomic_dec(&iter.tr->data[cpu]->disabled);
831     - }
832     - tracing_on();
833     + for_each_tracing_cpu(cpu) {
834     + atomic_dec(&iter.tr->data[cpu]->disabled);
835     }
836     -
837     - out:
838     - arch_spin_unlock(&ftrace_dump_lock);
839     + atomic_dec(&dump_running);
840     local_irq_restore(flags);
841     }
842     -
843     -/* By default: disable tracing after the dump */
844     -void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
845     -{
846     - __ftrace_dump(true, oops_dump_mode);
847     -}
848     EXPORT_SYMBOL_GPL(ftrace_dump);
849    
850     __init static int tracer_alloc_buffers(void)
851     diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
852     index 288541f..09fd98a 100644
853     --- a/kernel/trace/trace_selftest.c
854     +++ b/kernel/trace/trace_selftest.c
855     @@ -461,8 +461,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
856     /* Maximum number of functions to trace before diagnosing a hang */
857     #define GRAPH_MAX_FUNC_TEST 100000000
858    
859     -static void
860     -__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
861     static unsigned int graph_hang_thresh;
862    
863     /* Wrap the real function entry probe to avoid possible hanging */
864     @@ -472,8 +470,11 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
865     if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
866     ftrace_graph_stop();
867     printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
868     - if (ftrace_dump_on_oops)
869     - __ftrace_dump(false, DUMP_ALL);
870     + if (ftrace_dump_on_oops) {
871     + ftrace_dump(DUMP_ALL);
872     + /* ftrace_dump() disables tracing */
873     + tracing_on();
874     + }
875     return 0;
876     }
877    
878     diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
879     index 1aa5cac..55add93 100644
880     --- a/net/netfilter/ipvs/ip_vs_pe_sip.c
881     +++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
882     @@ -37,14 +37,10 @@ static int get_callid(const char *dptr, unsigned int dataoff,
883     if (ret > 0)
884     break;
885     if (!ret)
886     - return 0;
887     + return -EINVAL;
888     dataoff += *matchoff;
889     }
890    
891     - /* Empty callid is useless */
892     - if (!*matchlen)
893     - return -EINVAL;
894     -
895     /* Too large is useless */
896     if (*matchlen > IP_VS_PEDATA_MAXLEN)
897     return -EINVAL;