Magellan Linux

Contents of /trunk/ati-drivers/patches/fglrx-3.9.0-regparm.patch

Revision 144
Tue May 8 20:06:05 2007 UTC by niro
File size: 58125 bytes
Log message: -import
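
This patch prepares the fglrx 3.9.0 kernel module glue (firegl_public.c / firegl_public.h) for kernels built with -mregparm: every function crossing the boundary between the open glue code and ATI's pre-built object gets the ATI_API_CALL calling-convention annotation (definitions, extern declarations, and callback-typed parameters alike), and small ip_* trampolines (ip_firegl_open, ip_drm_vm_open, and friends) are introduced so that the file_operations and vm_operations_struct tables handed to the kernel keep pointing at functions with the kernel's own calling convention. The standalone sketch below illustrates the trampoline idea only; it assumes ATI_API_CALL expands to __attribute__((regparm(0))) on i386 (the macro itself is defined elsewhere in the driver headers, not in this patch), and every name in it (CALLBACK_API, backend_open, ip_open, struct ops) is illustrative rather than taken from the driver.

/* Minimal userspace sketch of the trampoline pattern used by this patch.
 * Assumption: ATI_API_CALL forces stack-based argument passing, here
 * modeled as __attribute__((regparm(0))) on i386. */
#include <stdio.h>

#if defined(__i386__)
#define CALLBACK_API __attribute__((regparm(0)))   /* stand-in for ATI_API_CALL */
#else
#define CALLBACK_API                                /* other arches: no effect */
#endif

/* "Pre-built object" side: compiled with the fixed, stack-based convention. */
static int CALLBACK_API backend_open(int minor, unsigned long flags)
{
    printf("backend_open(minor=%d, flags=%#lx)\n", minor, flags);
    return 0;
}

/* Callback table consumed by the "kernel" side, which may have been built
 * with -mregparm=3 and therefore uses its default calling convention. */
struct ops {
    int (*open)(int minor, unsigned long flags);
};

/* Trampoline: default-convention entry point that forwards to the
 * fixed-convention implementation, mirroring ip_firegl_open() in the patch. */
static int ip_open(int minor, unsigned long flags)
{
    return backend_open(minor, flags);
}

static struct ops fops = { .open = ip_open };

int main(void)
{
    return fops.open(3, 0x2UL);
}

With a kernel compiled at -mregparm=3, the table entry (ip_open here, ip_firegl_open in the patch) is called register-style by the kernel, while the forwarded call uses the fixed stack-based convention the separately built object expects; keeping the two roles in separate symbols is what lets the mixed build link and run.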

1 diff -u --recursive build_mod.old/firegl_public.c build_mod/firegl_public.c
2 --- build_mod.old/firegl_public.c 2004-05-28 17:37:27.247262000 +0300
3 +++ build_mod/firegl_public.c 2004-05-28 17:45:56.029817228 +0300
4 @@ -238,15 +238,24 @@
5
6 // ============================================================
7 /* global structures */
8 +int ip_firegl_open(struct inode* inode, struct file* filp)
9 +{ return firegl_open(inode, filp); }
10 +int ip_firegl_release(struct inode* inode, struct file* filp)
11 +{ return firegl_release(inode, filp); }
12 +int ip_firegl_ioctl(struct inode* inode, struct file* filp, unsigned int cmd, unsigned long arg)
13 +{ return firegl_ioctl(inode, filp, cmd, arg); }
14 +int ip_firegl_mmap(struct file* filp, struct vm_area_struct* vma)
15 +{ return firegl_mmap(filp, vma); }
16 +
17 static struct file_operations firegl_fops =
18 {
19 #ifdef THIS_MODULE
20 owner: THIS_MODULE,
21 #endif
22 - open: firegl_open,
23 - release: firegl_release,
24 - ioctl: firegl_ioctl,
25 - mmap: firegl_mmap,
26 + open: ip_firegl_open,
27 + release: ip_firegl_release,
28 + ioctl: ip_firegl_ioctl,
29 + mmap: ip_firegl_mmap,
30 };
31
32 typedef struct {
33 @@ -674,7 +683,7 @@
34 typedef wait_queue_t* wait_queue_head_t;
35 #endif
36
37 -__ke_wait_queue_head_t* __ke_alloc_wait_queue_head_struct(void)
38 +__ke_wait_queue_head_t* ATI_API_CALL __ke_alloc_wait_queue_head_struct(void)
39 {
40 __ke_wait_queue_head_t* queue_head;
41 queue_head = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
42 @@ -691,13 +700,13 @@
43 return queue_head;
44 }
45
46 -void __ke_free_wait_queue_head_struct(__ke_wait_queue_head_t* queue_head)
47 +void ATI_API_CALL __ke_free_wait_queue_head_struct(__ke_wait_queue_head_t* queue_head)
48 {
49 if (queue_head)
50 kfree(queue_head);
51 }
52
53 -__ke_wait_queue_t* __ke_alloc_wait_queue_struct(void)
54 +__ke_wait_queue_t* ATI_API_CALL __ke_alloc_wait_queue_struct(void)
55 {
56 __ke_wait_queue_t* queue;
57 queue = kmalloc(sizeof(wait_queue_t), GFP_KERNEL);
58 @@ -705,18 +714,18 @@
59 return queue;
60 }
61
62 -void __ke_free_wait_queue_struct(__ke_wait_queue_t* queue)
63 +void ATI_API_CALL __ke_free_wait_queue_struct(__ke_wait_queue_t* queue)
64 {
65 if (queue)
66 kfree(queue);
67 }
68
69 -void __ke_wake_up_interruptible(__ke_wait_queue_head_t* queue_head)
70 +void ATI_API_CALL __ke_wake_up_interruptible(__ke_wait_queue_head_t* queue_head)
71 {
72 wake_up_interruptible((wait_queue_head_t*)(void *)queue_head);
73 }
74
75 -void __ke_add_wait_queue(__ke_wait_queue_head_t* queue_head, __ke_wait_queue_t* entry)
76 +void ATI_API_CALL __ke_add_wait_queue(__ke_wait_queue_head_t* queue_head, __ke_wait_queue_t* entry)
77 {
78 // initialisation (delayed)
79 #ifdef __WAITQUEUE_INITIALIZER
80 @@ -744,7 +753,7 @@
81 add_wait_queue((wait_queue_head_t*)(void *)queue_head, (wait_queue_t*)(void *)entry);
82 }
83
84 -void __ke_remove_wait_queue(__ke_wait_queue_head_t* queue_head, __ke_wait_queue_t* entry)
85 +void ATI_API_CALL __ke_remove_wait_queue(__ke_wait_queue_head_t* queue_head, __ke_wait_queue_t* entry)
86 {
87 // current->state = TASK_RUNNING;
88 remove_wait_queue((wait_queue_head_t*)(void *)queue_head,
89 @@ -752,27 +761,27 @@
90 }
91
92 // sheduler
93 -void __ke_schedule(void)
94 +void ATI_API_CALL __ke_schedule(void)
95 {
96 schedule();
97 }
98
99 -int __ke_signal_pending(void)
100 +int ATI_API_CALL __ke_signal_pending(void)
101 {
102 return signal_pending(current);
103 }
104
105 -void __ke_set_current_state_task_interruptible(void)
106 +void ATI_API_CALL __ke_set_current_state_task_interruptible(void)
107 {
108 current->state = TASK_INTERRUPTIBLE;
109 }
110
111 -void __ke_set_current_state_task_running(void)
112 +void ATI_API_CALL __ke_set_current_state_task_running(void)
113 {
114 current->state = TASK_RUNNING;
115 }
116
117 -void __ke_configure_sigmask(__ke_sigset_t *pSigMask)
118 +void ATI_API_CALL __ke_configure_sigmask(__ke_sigset_t *pSigMask)
119 {
120 sigemptyset((sigset_t*)(void *)pSigMask);
121 sigaddset((sigset_t*)(void *)pSigMask, SIGSTOP);
122 @@ -781,14 +790,14 @@
123 sigaddset((sigset_t*)(void *)pSigMask, SIGTTOU);
124 }
125
126 -void __ke_block_all_signals(int (*notifier)(void *priv), void *pPriv, __ke_sigset_t *pSigMask)
127 +void ATI_API_CALL __ke_block_all_signals(int (*ATI_API_CALL notifier)(void *priv), void *pPriv, __ke_sigset_t *pSigMask)
128 {
129 #if LINUX_VERSION_CODE >= 0x020400
130 block_all_signals(notifier,pPriv,(sigset_t*)(void *)pSigMask);
131 #endif
132 }
133
134 -void __ke_unblock_all_signals(void)
135 +void ATI_API_CALL __ke_unblock_all_signals(void)
136 {
137 #if LINUX_VERSION_CODE >= 0x020400
138 unblock_all_signals();
139 @@ -830,7 +839,7 @@
140 #endif
141
142 #if !defined(__ia64__)
143 -unsigned long __ke__cmpxchg(volatile void *ptr, unsigned long old,
144 +unsigned long ATI_API_CALL __ke__cmpxchg(volatile void *ptr, unsigned long old,
145 unsigned long new, int size)
146 {
147 #ifndef __HAVE_ARCH_CMPXCHG
148 @@ -843,19 +852,19 @@
149
150 /*****************************************************************************/
151
152 -__ke_dev_t __ke_getdevice(__ke_device_t *dev)
153 +__ke_dev_t ATI_API_CALL __ke_getdevice(__ke_device_t *dev)
154 {
155 return ((device_t*)dev)->device;
156 }
157
158 -const char* __ke_module_parm(void)
159 +const char* ATI_API_CALL __ke_module_parm(void)
160 {
161 return firegl;
162 }
163
164 /*****************************************************************************/
165
166 -int __ke_inode_rdev_minor(struct inode* inode)
167 +int ATI_API_CALL __ke_inode_rdev_minor(struct inode* inode)
168 {
169 #ifndef MINOR
170 return minor(inode->i_rdev);
171 @@ -866,27 +875,27 @@
172
173 /*****************************************************************************/
174
175 -void* __ke_get_file_priv(struct file* filp)
176 +void* ATI_API_CALL __ke_get_file_priv(struct file* filp)
177 {
178 return filp->private_data;
179 }
180
181 -void __ke_set_file_priv(struct file* filp, void* private_data)
182 +void ATI_API_CALL __ke_set_file_priv(struct file* filp, void* private_data)
183 {
184 filp->private_data = private_data;
185 }
186
187 -int __ke_file_excl_open(struct file* filp)
188 +int ATI_API_CALL __ke_file_excl_open(struct file* filp)
189 {
190 return (filp->f_flags & O_EXCL) != 0;
191 }
192
193 -int __ke_file_rw_open(struct file* filp)
194 +int ATI_API_CALL __ke_file_rw_open(struct file* filp)
195 {
196 return (filp->f_flags & 3) != 0;
197 }
198
199 -unsigned int __ke_file_counter(struct file *filp)
200 +unsigned int ATI_API_CALL __ke_file_counter(struct file *filp)
201 {
202 #if LINUX_VERSION_CODE >= 0x020400
203 return filp->f_count.counter;
204 @@ -897,24 +906,24 @@
205
206 /*****************************************************************************/
207
208 -int __ke_getpid(void)
209 +int ATI_API_CALL __ke_getpid(void)
210 {
211 return current->pid;
212 }
213
214 -int __ke_geteuid(void)
215 +int ATI_API_CALL __ke_geteuid(void)
216 {
217 return current->euid;
218 }
219
220 /*****************************************************************************/
221
222 -unsigned long __ke_jiffies(void)
223 +unsigned long ATI_API_CALL __ke_jiffies(void)
224 {
225 return jiffies;
226 }
227
228 -void __ke_udelay(unsigned long usecs) // delay in usec
229 +void ATI_API_CALL __ke_udelay(unsigned long usecs) // delay in usec
230 {
231 unsigned long start;
232 unsigned long stop;
233 @@ -950,7 +959,7 @@
234 udelay(usecs); /* delay value might get checked once again */
235 }
236
237 -void __ke_mdelay(unsigned long msecs) // delay in msec
238 +void ATI_API_CALL __ke_mdelay(unsigned long msecs) // delay in msec
239 {
240 mdelay(msecs);
241 }
242 @@ -958,33 +967,33 @@
243 /*****************************************************************************/
244 // TODO: These here get obsolete in future, use the ia64 code below
245 // Johannes
246 -unsigned long __ke_virt_to_bus(void* address)
247 +unsigned long ATI_API_CALL __ke_virt_to_bus(void* address)
248 {
249 return virt_to_bus(address);
250 }
251
252 -unsigned long __ke_virt_to_phys(void* address)
253 +unsigned long ATI_API_CALL __ke_virt_to_phys(void* address)
254 {
255 return virt_to_phys(address);
256 }
257
258 -void* __ke_high_memory(void)
259 +void* ATI_API_CALL __ke_high_memory(void)
260 {
261 return high_memory;
262 }
263
264 -int __ke_pci_enable_device(__ke_pci_dev_t* dev)
265 +int ATI_API_CALL __ke_pci_enable_device(__ke_pci_dev_t* dev)
266 {
267 return (pci_enable_device( (struct pci_dev*)(void *)dev ));
268 }
269
270 #if defined(__x86_64__) || defined(__ia64__)
271 -void* __ke_pci_alloc_consistent(__ke_pci_dev_t* dev, int size, void *dma_handle)
272 +void* ATI_API_CALL __ke_pci_alloc_consistent(__ke_pci_dev_t* dev, int size, void *dma_handle)
273 {
274 return (pci_alloc_consistent( (struct pci_dev*)(void *)dev, size, dma_handle));
275 }
276
277 -void __ke_pci_free_consistent(__ke_pci_dev_t* dev, int size, unsigned long cpu_addr,
278 +void ATI_API_CALL __ke_pci_free_consistent(__ke_pci_dev_t* dev, int size, unsigned long cpu_addr,
279 unsigned int dma_handle)
280 {
281 pci_free_consistent( (struct pci_dev*)(void *)dev, size, (void *)cpu_addr,
282 @@ -994,7 +1003,7 @@
283
284 /*****************************************************************************/
285
286 -int __ke_error_code(enum __ke_error_num errcode)
287 +int ATI_API_CALL __ke_error_code(enum __ke_error_num errcode)
288 {
289 switch (errcode)
290 {
291 @@ -1029,7 +1038,7 @@
292
293 /*****************************************************************************/
294
295 -void __ke_mod_inc_use_count(void)
296 +void ATI_API_CALL __ke_mod_inc_use_count(void)
297 {
298 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
299 __module_get(THIS_MODULE);
300 @@ -1038,7 +1047,7 @@
301 #endif
302 }
303
304 -void __ke_mod_dec_use_count(void)
305 +void ATI_API_CALL __ke_mod_dec_use_count(void)
306 {
307 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
308 module_put(THIS_MODULE);
309 @@ -1049,86 +1058,86 @@
310
311 /*****************************************************************************/
312
313 -void __ke_down_struct_sem(__ke_device_t *dev, int index)
314 +void ATI_API_CALL __ke_down_struct_sem(__ke_device_t *dev, int index)
315 {
316 down(&(((device_t*)dev)->struct_sem[index]));
317 }
318
319 -void __ke_up_struct_sem(__ke_device_t *dev, int index)
320 +void ATI_API_CALL __ke_up_struct_sem(__ke_device_t *dev, int index)
321 {
322 up(&(((device_t*)dev)->struct_sem[index]));
323 }
324
325 -void __ke_sema_init(struct semaphore* sem, int value)
326 +void ATI_API_CALL __ke_sema_init(struct semaphore* sem, int value)
327 {
328 sema_init(sem, value);
329 }
330
331 -__ke_size_t __ke_sema_size(void)
332 +__ke_size_t ATI_API_CALL __ke_sema_size(void)
333 {
334 return sizeof(struct semaphore);
335 }
336
337 -void __ke_down(struct semaphore* sem)
338 +void ATI_API_CALL __ke_down(struct semaphore* sem)
339 {
340 down(sem);
341 }
342
343 -void __ke_up(struct semaphore* sem)
344 +void ATI_API_CALL __ke_up(struct semaphore* sem)
345 {
346 up(sem);
347 }
348
349 /*****************************************************************************/
350
351 -void __ke_atomic_inc(void* v)
352 +void ATI_API_CALL __ke_atomic_inc(void* v)
353 {
354 atomic_inc((atomic_t*)v);
355 }
356
357 -void __ke_atomic_dec(void* v)
358 +void ATI_API_CALL __ke_atomic_dec(void* v)
359 {
360 atomic_dec((atomic_t*)v);
361 }
362
363 -void __ke_atomic_add(int val, void* v)
364 +void ATI_API_CALL __ke_atomic_add(int val, void* v)
365 {
366 atomic_add(val, (atomic_t*)v);
367 }
368
369 -void __ke_atomic_sub(int val, void* v)
370 +void ATI_API_CALL __ke_atomic_sub(int val, void* v)
371 {
372 atomic_sub(val, (atomic_t*)v);
373 }
374
375 -int __ke_atomic_read(void* v)
376 +int ATI_API_CALL __ke_atomic_read(void* v)
377 {
378 return atomic_read((atomic_t*)v);
379 }
380
381 -void __ke_atomic_set(void* v, int val)
382 +void ATI_API_CALL __ke_atomic_set(void* v, int val)
383 {
384 atomic_set((atomic_t*)v, val);
385 }
386
387 /*****************************************************************************/
388
389 -void __ke_spin_lock(__ke_device_t *dev, int ndx)
390 +void ATI_API_CALL __ke_spin_lock(__ke_device_t *dev, int ndx)
391 {
392 spin_lock(&(((device_t*)dev)->spinlock[ndx]));
393 }
394
395 -void __ke_spin_unlock(__ke_device_t *dev __attribute__((unused)), int ndx __attribute__((unused)))
396 +void ATI_API_CALL __ke_spin_unlock(__ke_device_t *dev __attribute__((unused)), int ndx __attribute__((unused)))
397 {
398 spin_unlock(&(((device_t*)dev)->spinlock[ndx]));
399 }
400
401 -void __ke_lock_kernel(void)
402 +void ATI_API_CALL __ke_lock_kernel(void)
403 {
404 lock_kernel();
405 }
406
407 -void __ke_unlock_kernel(void)
408 +void ATI_API_CALL __ke_unlock_kernel(void)
409 {
410 unlock_kernel();
411 }
412 @@ -1143,7 +1152,7 @@
413 typedef int (*PFNMUNLOCK)(unsigned long start, __ke_size_t len);
414
415
416 -int __ke_sys_mlock(unsigned long start, __ke_size_t len)
417 +int ATI_API_CALL __ke_sys_mlock(unsigned long start, __ke_size_t len)
418 {
419 #ifdef FGL_USE_SCT
420 PFNMLOCK sys_mlock = (PFNMLOCK)sys_call_table[__NR_mlock];
421 @@ -1158,7 +1167,7 @@
422 #endif
423 }
424
425 -int __ke_sys_munlock(unsigned long start, __ke_size_t len)
426 +int ATI_API_CALL __ke_sys_munlock(unsigned long start, __ke_size_t len)
427 {
428 #ifdef FGL_USE_SCT
429 PFNMUNLOCK sys_munlock = (PFNMUNLOCK)sys_call_table[__NR_munlock];
430 @@ -1176,7 +1185,7 @@
431
432 typedef int (*PFNMODIFYLDT)(int func, void *ptr, unsigned long bytecount);
433
434 -int __ke_sys_modify_ldt(int func, void *ptr, unsigned long bytecount)
435 +int ATI_API_CALL __ke_sys_modify_ldt(int func, void *ptr, unsigned long bytecount)
436 {
437 #ifdef FGL_USE_SCT
438 PFNMODIFYLDT sys_modify_ldt = (PFNMODIFYLDT)sys_call_table[__NR_modify_ldt];
439 @@ -1201,7 +1210,7 @@
440 #ifdef __KE_NO_VSPRINTF
441
442 #if LINUX_VERSION_CODE >= 0x020400
443 -void __ke_printk(const char* fmt, ...)
444 +void ATI_API_CALL __ke_printk(const char* fmt, ...)
445 {
446 char buffer[256];
447 va_list marker;
448 @@ -1232,7 +1241,7 @@
449
450 #else
451
452 -void __ke_print_info(const char* fmt, ...)
453 +void ATI_API_CALL __ke_print_info(const char* fmt, ...)
454 {
455 char msg[256] = KERN_INFO;
456 va_list marker;
457 @@ -1242,7 +1251,7 @@
458 va_end(marker);
459 }
460
461 -void __ke_print_error(const char* fmt, ...)
462 +void ATI_API_CALL __ke_print_error(const char* fmt, ...)
463 {
464 char msg[256] = KERN_ERR;
465 va_list marker;
466 @@ -1252,7 +1261,7 @@
467 va_end(marker);
468 }
469
470 -void __ke_print_debug(const char* fmt, ...)
471 +void ATI_API_CALL __ke_print_debug(const char* fmt, ...)
472 {
473 char msg[256] = KERN_DEBUG;
474 va_list marker;
475 @@ -1266,7 +1275,7 @@
476
477 /*****************************************************************************/
478
479 -int __ke_capable(enum __ke_cap cap)
480 +int ATI_API_CALL __ke_capable(enum __ke_cap cap)
481 {
482 switch (cap)
483 {
484 @@ -1282,7 +1291,7 @@
485 return capable(cap);
486 }
487
488 -void __ke_cap_effective_raise(enum __ke_cap cap)
489 +void ATI_API_CALL __ke_cap_effective_raise(enum __ke_cap cap)
490 {
491 switch (cap)
492 {
493 @@ -1298,17 +1307,17 @@
494 cap_raise(current->cap_effective, cap);
495 }
496
497 -__ke_u32 __ke_get_cap_effective()
498 +__ke_u32 ATI_API_CALL __ke_get_cap_effective()
499 {
500 return cap_t(current->cap_effective);
501 }
502
503 -void __ke_set_cap_effective(__ke_u32 cap)
504 +void ATI_API_CALL __ke_set_cap_effective(__ke_u32 cap)
505 {
506 cap_t(current->cap_effective) = cap;
507 }
508
509 -unsigned long __ke_ram_available(void)
510 +unsigned long ATI_API_CALL __ke_ram_available(void)
511 {
512 struct sysinfo si;
513
514 @@ -1321,22 +1330,22 @@
515 #endif
516 }
517
518 -int __ke_copy_from_user(void* to, const void* from, __ke_size_t size)
519 +int ATI_API_CALL __ke_copy_from_user(void* to, const void* from, __ke_size_t size)
520 {
521 return copy_from_user(to, from, size);
522 }
523
524 -int __ke_copy_to_user(void* to, const void* from, __ke_size_t size)
525 +int ATI_API_CALL __ke_copy_to_user(void* to, const void* from, __ke_size_t size)
526 {
527 return copy_to_user(to, from, size);
528 }
529
530 -int __ke_verify_area(int type, const void * addr, unsigned long size)
531 +int ATI_API_CALL __ke_verify_area(int type, const void * addr, unsigned long size)
532 {
533 return verify_area(type, addr, size);
534 }
535
536 -int __ke_get_pci_device_info(__ke_pci_dev_t* dev, __ke_pci_device_info_t *pinfo)
537 +int ATI_API_CALL __ke_get_pci_device_info(__ke_pci_dev_t* dev, __ke_pci_device_info_t *pinfo)
538 {
539 if ( dev )
540 {
541 @@ -1348,7 +1357,7 @@
542 return -EINVAL;
543 }
544
545 -int __ke_check_pci(int busnum, int devnum, int funcnum, __ke_u16* vendor, __ke_u16* device, unsigned int* irq)
546 +int ATI_API_CALL __ke_check_pci(int busnum, int devnum, int funcnum, __ke_u16* vendor, __ke_u16* device, unsigned int* irq)
547 {
548 struct pci_dev* pci_dev;
549
550 @@ -1368,7 +1377,7 @@
551 return 1;
552 }
553
554 -int __ke_pci_get_irq(__ke_pci_dev_t *dev, unsigned int* irq)
555 +int ATI_API_CALL __ke_pci_get_irq(__ke_pci_dev_t *dev, unsigned int* irq)
556 {
557 if (!dev)
558 return 0;
559 @@ -1379,52 +1388,52 @@
560 return 1;
561 }
562
563 -__ke_pci_dev_t* __ke_pci_find_device (unsigned int vendor, unsigned int dev, __ke_pci_dev_t* from)
564 +__ke_pci_dev_t* ATI_API_CALL __ke_pci_find_device (unsigned int vendor, unsigned int dev, __ke_pci_dev_t* from)
565 {
566 return (__ke_pci_dev_t*)pci_find_device( vendor, dev, (struct pci_dev *)(void *)from );
567 }
568
569 -void* __ke_malloc(__ke_size_t size)
570 +void* ATI_API_CALL __ke_malloc(__ke_size_t size)
571 {
572 return kmalloc(size, GFP_KERNEL);
573 }
574
575 -void __ke_free_s(void* p, __ke_size_t size)
576 +void ATI_API_CALL __ke_free_s(void* p, __ke_size_t size)
577 {
578 kfree(p);
579 }
580
581 -void* __ke_vmalloc(__ke_size_t size)
582 +void* ATI_API_CALL __ke_vmalloc(__ke_size_t size)
583 {
584 return vmalloc(size);
585 }
586
587 -void __ke_vfree(void* p)
588 +void ATI_API_CALL __ke_vfree(void* p)
589 {
590 return vfree(p);
591 }
592
593 -void* __ke_get_free_page(void)
594 +void* ATI_API_CALL __ke_get_free_page(void)
595 {
596 return (void*)__get_free_page(GFP_KERNEL);
597 }
598
599 -void* __ke_get_free_pages(int order)
600 +void* ATI_API_CALL __ke_get_free_pages(int order)
601 {
602 return (void*)__get_free_pages(GFP_KERNEL, order);
603 }
604
605 -void __ke_free_page(void* pt)
606 +void ATI_API_CALL __ke_free_page(void* pt)
607 {
608 free_page((unsigned long)pt);
609 }
610
611 -void __ke_free_pages(void* pt, int order)
612 +void ATI_API_CALL __ke_free_pages(void* pt, int order)
613 {
614 free_pages((unsigned long)pt, order);
615 }
616
617 -void __ke_mem_map_reserve(void* pt)
618 +void ATI_API_CALL __ke_mem_map_reserve(void* pt)
619 {
620 #if LINUX_VERSION_CODE < 0x020400
621 mem_map_reserve(MAP_NR((unsigned long)pt));
622 @@ -1437,7 +1446,7 @@
623 #endif
624 }
625
626 -void __ke_mem_map_unreserve(void* pt)
627 +void ATI_API_CALL __ke_mem_map_unreserve(void* pt)
628 {
629 #if LINUX_VERSION_CODE < 0x020400
630 mem_map_unreserve(MAP_NR((unsigned long)pt));
631 @@ -1450,7 +1459,7 @@
632 #endif
633 }
634
635 -void __ke_virt_reserve(void* virt)
636 +void ATI_API_CALL __ke_virt_reserve(void* virt)
637 {
638 #if LINUX_VERSION_CODE < 0x020400
639 set_bit(PG_reserved,
640 @@ -1461,7 +1470,7 @@
641 #endif
642 }
643
644 -void __ke_virt_unreserve(void* virt)
645 +void ATI_API_CALL __ke_virt_unreserve(void* virt)
646 {
647 #if LINUX_VERSION_CODE < 0x020400
648 clear_bit(PG_reserved,
649 @@ -1473,48 +1482,48 @@
650 }
651
652 #ifdef __ia64__
653 -void* __ke_get_vmptr( struct _agp_memory* memory )
654 +void* ATI_API_CALL __ke_get_vmptr( struct _agp_memory* memory )
655 {
656 return memory->vmptr;
657 }
658 #endif
659
660 -void* __ke_ioremap(unsigned long offset, unsigned long size)
661 +void* ATI_API_CALL __ke_ioremap(unsigned long offset, unsigned long size)
662 {
663 return ioremap(offset, size);
664 }
665
666 -void* __ke_ioremap_nocache(unsigned long offset, unsigned long size)
667 +void* ATI_API_CALL __ke_ioremap_nocache(unsigned long offset, unsigned long size)
668 {
669 return ioremap_nocache(offset, size);
670 }
671
672 -void __ke_iounmap(void* pt)
673 +void ATI_API_CALL __ke_iounmap(void* pt)
674 {
675 iounmap(pt);
676 }
677
678 -int __ke_verify_read_access(void* addr, __ke_size_t size)
679 +int ATI_API_CALL __ke_verify_read_access(void* addr, __ke_size_t size)
680 {
681 return access_ok(VERIFY_READ, addr, size) ? 0 : -EFAULT;
682 }
683
684 -int __ke_verify_write_access(void* addr, __ke_size_t size)
685 +int ATI_API_CALL __ke_verify_write_access(void* addr, __ke_size_t size)
686 {
687 return access_ok(VERIFY_WRITE, addr, size) ? 0 : -EFAULT;
688 }
689
690 -struct mm_struct* __ke_init_mm(void)
691 +struct mm_struct* ATI_API_CALL __ke_init_mm(void)
692 {
693 return &init_mm;
694 }
695
696 -struct mm_struct* __ke_current_mm(void)
697 +struct mm_struct* ATI_API_CALL __ke_current_mm(void)
698 {
699 return current->mm;
700 }
701
702 -unsigned long __ke_get_vm_phys_addr(struct mm_struct* mm, unsigned long virtual_addr)
703 +unsigned long ATI_API_CALL __ke_get_vm_phys_addr(struct mm_struct* mm, unsigned long virtual_addr)
704 {
705 unsigned long pte_linear;
706 pgd_t* pgd_p;
707 @@ -1549,7 +1558,7 @@
708 #endif /* LINUX_VERSION_CODE < 0x020400 */
709 }
710
711 -unsigned long* __ke_get_vm_phys_addr_list(struct mm_struct* mm, unsigned long virtual_addr, unsigned long pages)
712 +unsigned long* ATI_API_CALL __ke_get_vm_phys_addr_list(struct mm_struct* mm, unsigned long virtual_addr, unsigned long pages)
713 {
714 unsigned long *phys_table, *pt;
715 unsigned long n, va;
716 @@ -1571,42 +1580,42 @@
717 return phys_table;
718 }
719
720 -void* __ke_memset(void* s, int c, __ke_size_t count)
721 +void* ATI_API_CALL __ke_memset(void* s, int c, __ke_size_t count)
722 {
723 return memset(s, c, count);
724 }
725
726 -void* __ke_memcpy(void* d, const void* s, __ke_size_t count)
727 +void* ATI_API_CALL __ke_memcpy(void* d, const void* s, __ke_size_t count)
728 {
729 return memcpy(d, s, count);
730 }
731
732 -__ke_size_t __ke_strlen(const char *s)
733 +__ke_size_t ATI_API_CALL __ke_strlen(const char *s)
734 {
735 return strlen(s);
736 }
737
738 -char* __ke_strcpy(char* d, const char* s)
739 +char* ATI_API_CALL __ke_strcpy(char* d, const char* s)
740 {
741 return strcpy(d, s);
742 }
743
744 -char* __ke_strncpy(char* d, const char* s, __ke_size_t count)
745 +char* ATI_API_CALL __ke_strncpy(char* d, const char* s, __ke_size_t count)
746 {
747 return strncpy(d, s, count);
748 }
749
750 -int __ke_strcmp(const char* string1, const char* string2)
751 +int ATI_API_CALL __ke_strcmp(const char* string1, const char* string2)
752 {
753 return strcmp(string1, string2);
754 }
755
756 -int __ke_strncmp(const char* string1, const char* string2, __ke_size_t count)
757 +int ATI_API_CALL __ke_strncmp(const char* string1, const char* string2, __ke_size_t count)
758 {
759 return strncmp(string1, string2, count);
760 }
761
762 -int __ke_sprintf(char* buf, const char* fmt, ...)
763 +int ATI_API_CALL __ke_sprintf(char* buf, const char* fmt, ...)
764 {
765 va_list marker;
766
767 @@ -1619,12 +1628,12 @@
768
769 /*****************************************************************************/
770
771 -void __ke_set_bit(int nr, volatile void * addr)
772 +void ATI_API_CALL __ke_set_bit(int nr, volatile void * addr)
773 {
774 set_bit(nr, addr);
775 }
776
777 -void __ke_clear_bit(int nr, volatile void * addr)
778 +void ATI_API_CALL __ke_clear_bit(int nr, volatile void * addr)
779 {
780 clear_bit(nr, addr);
781 }
782 @@ -1649,7 +1658,7 @@
783 }
784 #endif /* __SMP__ */
785
786 -int __ke_flush_cache(void)
787 +int ATI_API_CALL __ke_flush_cache(void)
788 {
789 #ifdef __SMP__
790 #if LINUX_VERSION_CODE < 0x020501
791 @@ -1685,7 +1694,7 @@
792
793 /*****************************************************************************/
794
795 -int __ke_config_mtrr(void)
796 +int ATI_API_CALL __ke_config_mtrr(void)
797 {
798 #ifdef CONFIG_MTRR
799 return 1;
800 @@ -1694,7 +1703,7 @@
801 #endif /* !CONFIG_MTRR */
802 }
803
804 -int __ke_mtrr_add_wc(unsigned long base, unsigned long size)
805 +int ATI_API_CALL __ke_mtrr_add_wc(unsigned long base, unsigned long size)
806 {
807 #ifdef CONFIG_MTRR
808 return mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1);
809 @@ -1703,7 +1712,7 @@
810 #endif /* !CONFIG_MTRR */
811 }
812
813 -int __ke_mtrr_del(int reg, unsigned long base, unsigned long size)
814 +int ATI_API_CALL __ke_mtrr_del(int reg, unsigned long base, unsigned long size)
815 {
816 #ifdef CONFIG_MTRR
817 return mtrr_del(reg, base, size);
818 @@ -1714,64 +1723,64 @@
819
820 /*****************************************************************************/
821
822 -int __ke_pci_read_config_byte(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u8 *val)
823 +int ATI_API_CALL __ke_pci_read_config_byte(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u8 *val)
824 {
825 return pci_read_config_byte((struct pci_dev*)(void *)dev, where, val);
826 }
827
828 -int __ke_pci_read_config_word(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u16 *val)
829 +int ATI_API_CALL __ke_pci_read_config_word(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u16 *val)
830 {
831 return pci_read_config_word((struct pci_dev*)(void *)dev, where, val);
832 }
833
834 -int __ke_pci_read_config_dword(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u32 *val)
835 +int ATI_API_CALL __ke_pci_read_config_dword(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u32 *val)
836 {
837 return pci_read_config_dword((struct pci_dev*)(void *)dev, where, val);
838 }
839
840 -int __ke_pci_write_config_byte(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u8 val)
841 +int ATI_API_CALL __ke_pci_write_config_byte(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u8 val)
842 {
843 return pci_write_config_byte((struct pci_dev*)(void *)dev, where, val);
844 }
845
846 -int __ke_pci_write_config_word(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u16 val)
847 +int ATI_API_CALL __ke_pci_write_config_word(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u16 val)
848 {
849 return pci_write_config_word((struct pci_dev*)(void *)dev, where, val);
850 }
851
852 -int __ke_pci_write_config_dword(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u32 val)
853 +int ATI_API_CALL __ke_pci_write_config_dword(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u32 val)
854 {
855 return pci_write_config_dword((struct pci_dev*)(void *)dev, where, val);
856 }
857
858 /*****************************************************************************/
859
860 -void __ke_outb(unsigned char value, unsigned short port)
861 +void ATI_API_CALL __ke_outb(unsigned char value, unsigned short port)
862 {
863 outb(value, port);
864 }
865
866 -void __ke_outw(unsigned short value, unsigned short port)
867 +void ATI_API_CALL __ke_outw(unsigned short value, unsigned short port)
868 {
869 outw(value, port);
870 }
871
872 -void __ke_outl(unsigned int value, unsigned short port)
873 +void ATI_API_CALL __ke_outl(unsigned int value, unsigned short port)
874 {
875 outl(value, port);
876 }
877
878 -char __ke_inb(unsigned short port)
879 +char ATI_API_CALL __ke_inb(unsigned short port)
880 {
881 return inb(port);
882 }
883
884 -short __ke_inw(unsigned short port)
885 +short ATI_API_CALL __ke_inw(unsigned short port)
886 {
887 return inw(port);
888 }
889
890 -int __ke_inl(unsigned short port)
891 +int ATI_API_CALL __ke_inl(unsigned short port)
892 {
893 return inl(port);
894 }
895 @@ -1779,18 +1788,18 @@
896 /*****************************************************************************/
897 // Interrupt support
898
899 -void __ke_enable_irq(int irq)
900 +void ATI_API_CALL __ke_enable_irq(int irq)
901 {
902 enable_irq( irq );
903 }
904
905 -void __ke_disable_irq(int irq)
906 +void ATI_API_CALL __ke_disable_irq(int irq)
907 {
908 disable_irq( irq );
909 }
910
911 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71)
912 -int __ke_request_irq(unsigned int irq,
913 +int ATI_API_CALL __ke_request_irq(unsigned int irq,
914 void (*handler)(int, void *, void *),
915 const char *dev_name, void *dev_id)
916 {
917 @@ -1799,7 +1808,7 @@
918 SA_SHIRQ, dev_name, dev_id);
919 }
920
921 -void __ke_free_irq(unsigned int irq, void *dev_id)
922 +void ATI_API_CALL __ke_free_irq(unsigned int irq, void *dev_id)
923 {
924 free_irq(irq, dev_id);
925 }
926 @@ -1812,8 +1821,8 @@
927 return IRQ_HANDLED;
928 }
929
930 -int __ke_request_irq(unsigned int irq,
931 - void (*handler)(int, void *, void *),
932 +int ATI_API_CALL __ke_request_irq(unsigned int irq,
933 + void (*ATI_API_CALL handler)(int, void *, void *),
934 const char *dev_name, void *dev_id)
935 {
936 irq_handler_func = handler;
937 @@ -1822,7 +1831,7 @@
938 SA_SHIRQ, dev_name, dev_id);
939 }
940
941 -void __ke_free_irq(unsigned int irq, void *dev_id)
942 +void ATI_API_CALL __ke_free_irq(unsigned int irq, void *dev_id)
943 {
944 free_irq(irq, dev_id);
945 irq_handler_func = NULL;
946 @@ -2188,22 +2197,22 @@
947
948 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) */
949
950 -void* __ke_vma_file_priv(struct vm_area_struct* vma)
951 +void* ATI_API_CALL __ke_vma_file_priv(struct vm_area_struct* vma)
952 {
953 return vma->vm_file->private_data;
954 }
955
956 -unsigned long __ke_vm_start(struct vm_area_struct* vma)
957 +unsigned long ATI_API_CALL __ke_vm_start(struct vm_area_struct* vma)
958 {
959 return vma->vm_start;
960 }
961
962 -unsigned long __ke_vm_end(struct vm_area_struct* vma)
963 +unsigned long ATI_API_CALL __ke_vm_end(struct vm_area_struct* vma)
964 {
965 return vma->vm_end;
966 }
967
968 -unsigned long __ke_vm_offset(struct vm_area_struct* vma)
969 +unsigned long ATI_API_CALL __ke_vm_offset(struct vm_area_struct* vma)
970 {
971 #if LINUX_VERSION_CODE < 0x020319
972 return vma->vm_offset;
973 @@ -2212,7 +2221,7 @@
974 #endif/* LINUX_VERSION_CODE >= 0x020319 */
975 }
976
977 -char* __ke_vm_flags_str(struct vm_area_struct* vma, char* buf)
978 +char* ATI_API_CALL __ke_vm_flags_str(struct vm_area_struct* vma, char* buf)
979 {
980 *(buf + 0) = vma->vm_flags & VM_READ ? 'r' : '-';
981 *(buf + 1) = vma->vm_flags & VM_WRITE ? 'w' : '-';
982 @@ -2224,7 +2233,7 @@
983 return buf;
984 }
985
986 -char* __ke_vm_page_prot_str(struct vm_area_struct* vma, char* buf)
987 +char* ATI_API_CALL __ke_vm_page_prot_str(struct vm_area_struct* vma, char* buf)
988 {
989 int i = 0;
990
991 @@ -2251,7 +2260,7 @@
992 return buf;
993 }
994
995 -char* __ke_vm_phys_addr_str(struct vm_area_struct* vma,
996 +char* ATI_API_CALL __ke_vm_phys_addr_str(struct vm_area_struct* vma,
997 char* buf,
998 unsigned long virtual_addr,
999 unsigned long* phys_address)
1000 @@ -2304,11 +2313,16 @@
1001 return buf;
1002 }
1003
1004 +void ip_drm_vm_open(struct vm_area_struct* vma)
1005 +{ drm_vm_open(vma); }
1006 +void ip_drm_vm_close(struct vm_area_struct* vma)
1007 +{ drm_vm_close(vma); }
1008 +
1009 static struct vm_operations_struct vm_ops =
1010 {
1011 nopage: vm_nopage,
1012 - open: drm_vm_open,
1013 - close: drm_vm_close,
1014 + open: ip_drm_vm_open,
1015 + close: ip_drm_vm_close,
1016 };
1017
1018 #ifdef __AGP__BUILTIN__
1019 @@ -2316,8 +2330,8 @@
1020 static struct vm_operations_struct vm_cant_ops =
1021 {
1022 nopage: vm_cant_nopage,
1023 - open: drm_vm_open,
1024 - close: drm_vm_close,
1025 + open: ip_drm_vm_open,
1026 + close: ip_drm_vm_close,
1027 };
1028 #endif /* __ia64_ */
1029 #endif /* __AGP__BUILTIN__ */
1030 @@ -2325,22 +2339,22 @@
1031 static struct vm_operations_struct vm_shm_ops =
1032 {
1033 nopage: vm_shm_nopage,
1034 - open: drm_vm_open,
1035 - close: drm_vm_close,
1036 + open: ip_drm_vm_open,
1037 + close: ip_drm_vm_close,
1038 };
1039
1040 static struct vm_operations_struct vm_pci_bq_ops =
1041 {
1042 nopage: vm_dma_nopage,
1043 - open: drm_vm_open,
1044 - close: drm_vm_close,
1045 + open: ip_drm_vm_open,
1046 + close: ip_drm_vm_close,
1047 };
1048
1049 static struct vm_operations_struct vm_ctx_ops =
1050 {
1051 nopage: vm_dma_nopage,
1052 - open: drm_vm_open,
1053 - close: drm_vm_close,
1054 + open: ip_drm_vm_open,
1055 + close: ip_drm_vm_close,
1056 };
1057
1058 #ifdef __AGP__BUILTIN__
1059 @@ -2348,20 +2362,20 @@
1060 static struct vm_operations_struct vm_agp_bq_ops =
1061 {
1062 nopage: vm_nopage,
1063 - open: drm_vm_open,
1064 - close: drm_vm_close,
1065 + open: ip_drm_vm_open,
1066 + close: ip_drm_vm_close,
1067 };
1068 #else
1069 static struct vm_operations_struct vm_cant_agp_bq_ops =
1070 {
1071 nopage: vm_cant_nopage,
1072 - open: drm_vm_open,
1073 - close: drm_vm_close,
1074 + open: ip_drm_vm_open,
1075 + close: ip_drm_vm_close,
1076 };
1077 #endif /* __ia64_ */
1078 #endif /* __AGP__BUILTIN__ */
1079
1080 -int __ke_vm_map(struct file* filp,
1081 +int ATI_API_CALL __ke_vm_map(struct file* filp,
1082 struct vm_area_struct* vma,
1083 enum __ke_vm_maptype type,
1084 int readonly)
1085 @@ -2640,7 +2654,7 @@
1086 #endif // !USE_FIREGL_AGPGART_IMPLEMENTATION
1087
1088 static
1089 -int __ke_firegl_agpgart_available(void)
1090 +int ATI_API_CALL __ke_firegl_agpgart_available(void)
1091 {
1092 int retval;
1093
1094 @@ -2663,7 +2677,7 @@
1095 }
1096
1097 static
1098 -int __ke_agpgart_available(void)
1099 +int ATI_API_CALL __ke_agpgart_available(void)
1100 {
1101 #ifdef __AGP__
1102 unsigned int found = 0;
1103 @@ -2771,7 +2785,7 @@
1104 return 0; /* failed */
1105 }
1106
1107 -int __ke_agp_available(int use_internal)
1108 +int ATI_API_CALL __ke_agp_available(int use_internal)
1109 {
1110 int available = 0;
1111
1112 @@ -2787,7 +2801,7 @@
1113 return available;
1114 }
1115
1116 -void __ke_agp_uninit(void)
1117 +void ATI_API_CALL __ke_agp_uninit(void)
1118 {
1119 if (firegl_agp)
1120 {
1121 @@ -2816,7 +2830,7 @@
1122 }
1123
1124 #ifdef FGL
1125 -struct _agp_memory* __ke_agp_allocate_memory_phys_list(__ke_size_t pages, unsigned long type, unsigned long * phys_addr)
1126 +struct _agp_memory* ATI_API_CALL __ke_agp_allocate_memory_phys_list(__ke_size_t pages, unsigned long type, unsigned long * phys_addr)
1127 {
1128 #if 0
1129 #ifdef __AGP__
1130 @@ -2830,7 +2844,7 @@
1131 }
1132 #endif
1133
1134 -void __ke_agp_free_memory(struct _agp_memory* handle)
1135 +void ATI_API_CALL __ke_agp_free_memory(struct _agp_memory* handle)
1136 {
1137 #ifdef __AGP__
1138 if (AGP_AVAILABLE(free_memory))
1139 @@ -2840,7 +2854,7 @@
1140 return FIREGL_agp_free_memory((FIREGL_agp_memory*)handle);
1141 }
1142
1143 -struct _agp_memory* __ke_agp_allocate_memory(__ke_size_t pages, unsigned long type)
1144 +struct _agp_memory* ATI_API_CALL __ke_agp_allocate_memory(__ke_size_t pages, unsigned long type)
1145 {
1146 #ifdef __AGP__
1147 if (AGP_AVAILABLE(allocate_memory))
1148 @@ -2851,7 +2865,7 @@
1149 return NULL;
1150 }
1151
1152 -int __ke_agp_bind_memory(struct _agp_memory* handle, __ke_off_t start)
1153 +int ATI_API_CALL __ke_agp_bind_memory(struct _agp_memory* handle, __ke_off_t start)
1154 {
1155 #ifdef __AGP__
1156 if (AGP_AVAILABLE(bind_memory))
1157 @@ -2862,7 +2876,7 @@
1158 return -EINVAL;
1159 }
1160
1161 -int __ke_agp_unbind_memory(struct _agp_memory* handle)
1162 +int ATI_API_CALL __ke_agp_unbind_memory(struct _agp_memory* handle)
1163 {
1164 #ifdef __AGP__
1165 if (AGP_AVAILABLE(unbind_memory))
1166 @@ -2873,7 +2887,7 @@
1167 return -EINVAL;
1168 }
1169
1170 -int __ke_agp_enable(unsigned long mode)
1171 +int ATI_API_CALL __ke_agp_enable(unsigned long mode)
1172 {
1173 #ifdef __AGP__
1174 if (AGP_AVAILABLE(enable))
1175 @@ -2890,7 +2904,7 @@
1176 return -EINVAL;
1177 }
1178
1179 -int __ke_read_agp_caps_registers(__ke_pci_dev_t* dev, unsigned int *caps)
1180 +int ATI_API_CALL __ke_read_agp_caps_registers(__ke_pci_dev_t* dev, unsigned int *caps)
1181 {
1182 u8 capndx;
1183 u32 cap_id;
1184 @@ -2921,7 +2935,7 @@
1185 return -ENODATA;
1186 }
1187
1188 -int __ke_agp_acquire(void)
1189 +int ATI_API_CALL __ke_agp_acquire(void)
1190 {
1191 #ifdef __AGP__
1192 if (AGP_AVAILABLE(acquire))
1193 @@ -2932,7 +2946,7 @@
1194 return -EINVAL;
1195 }
1196
1197 -void __ke_agp_release(void)
1198 +void ATI_API_CALL __ke_agp_release(void)
1199 {
1200 #ifdef __AGP__
1201 if (AGP_AVAILABLE(release))
1202 @@ -2942,7 +2956,7 @@
1203 FIREGL_agp_backend_release();
1204 }
1205
1206 -void __ke_agp_copy_info(__ke_agp_kern_info_t* info)
1207 +void ATI_API_CALL __ke_agp_copy_info(__ke_agp_kern_info_t* info)
1208 {
1209 struct pci_dev *device = NULL;
1210
1211 @@ -3029,7 +3043,7 @@
1212 }
1213 }
1214
1215 -unsigned long __ke_agp_memory_handle(struct _agp_memory* handle)
1216 +unsigned long ATI_API_CALL __ke_agp_memory_handle(struct _agp_memory* handle)
1217 {
1218 if (firegl_agp)
1219 #ifdef USE_FIREGL_AGPGART_IMPLEMENTATION
1220 @@ -3045,7 +3059,7 @@
1221 #endif /* !__AGP__ */
1222 }
1223
1224 -unsigned long __ke_agp_memory_page_count(struct _agp_memory* handle)
1225 +unsigned long ATI_API_CALL __ke_agp_memory_page_count(struct _agp_memory* handle)
1226 {
1227 if (firegl_agp)
1228 #ifdef USE_FIREGL_AGPGART_IMPLEMENTATION
1229 @@ -3061,13 +3075,13 @@
1230 #endif /* !__AGP__ */
1231 }
1232
1233 -int __ke_smp_processor_id(void)
1234 +int ATI_API_CALL __ke_smp_processor_id(void)
1235 {
1236 return (int)(smp_processor_id());
1237 }
1238
1239
1240 -void __ke_smp_call_function( void (*func)(void *info) )
1241 +void ATI_API_CALL __ke_smp_call_function( void (*ATI_API_CALL func)(void *info) )
1242 {
1243 smp_call_function( func, NULL, 0, 1 );
1244 }
1245 @@ -3097,7 +3111,7 @@
1246 return 0; // does not match
1247 }
1248
1249 -int __ke_is_athlon(void)
1250 +int ATI_API_CALL __ke_is_athlon(void)
1251 {
1252 register int bAthlon;
1253 __asm
1254 @@ -3184,7 +3198,7 @@
1255 #endif
1256
1257
1258 -int __ke_amd_adv_spec_cache_feature(void)
1259 +int ATI_API_CALL __ke_amd_adv_spec_cache_feature(void)
1260 {
1261 #if ( (PAGE_ATTR_FIX == 1) || (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,19)) )
1262 /* the kernel already does provide a fix for the AMD Athlon
1263 @@ -3226,7 +3240,7 @@
1264 return 0;
1265 }
1266
1267 -int __ke_has_PSE(void)
1268 +int ATI_API_CALL __ke_has_PSE(void)
1269 {
1270 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71)
1271 if (test_bit(X86_FEATURE_PSE, &boot_cpu_data.x86_capability))
1272 diff -u --recursive build_mod.old/firegl_public.h build_mod/firegl_public.h
1273 --- build_mod.old/firegl_public.h 2004-03-18 00:00:29.000000000 +0200
1274 +++ build_mod/firegl_public.h 2004-05-28 17:45:19.923107330 +0300
1275 @@ -75,7 +75,7 @@
1276 typedef struct { int uniqe4; } __ke_pci_dev_t;
1277 typedef struct { int uniqe5; } __ke_priv_device_t;
1278
1279 -typedef int (*__ke_read_proc_t)(
1280 +typedef int (*ATI_API_CALL __ke_read_proc_t)(
1281 char* page, char** start, __ke_off_t off, int count, int* eof, void* data);
1282
1283 typedef struct {
1284 @@ -133,25 +133,25 @@
1285
1286 /*****************************************************************************/
1287
1288 -extern __ke_wait_queue_head_t* __ke_alloc_wait_queue_head_struct(void);
1289 -extern void __ke_free_wait_queue_head_struct(__ke_wait_queue_head_t* queue_head);
1290 -extern __ke_wait_queue_t* __ke_alloc_wait_queue_struct(void);
1291 -extern void __ke_free_wait_queue_struct(__ke_wait_queue_t* queue);
1292 -
1293 -extern void __ke_wake_up_interruptible(__ke_wait_queue_head_t* queue_head);
1294 -extern void __ke_add_wait_queue(__ke_wait_queue_head_t* queue_head, __ke_wait_queue_t* entry);
1295 -extern void __ke_remove_wait_queue(__ke_wait_queue_head_t* queue_head, __ke_wait_queue_t* entry);
1296 -
1297 -extern void __ke_schedule(void);
1298 -extern int __ke_signal_pending(void);
1299 -
1300 -extern void __ke_set_current_state_task_interruptible(void);
1301 -extern void __ke_set_current_state_task_running(void);
1302 -extern void __ke_configure_sigmask(__ke_sigset_t *pSigMask);
1303 -extern void __ke_block_all_signals(int (*notifier)(void *priv), void *pPriv, __ke_sigset_t *pSigMask);
1304 -extern void __ke_unblock_all_signals(void);
1305 +extern __ke_wait_queue_head_t* ATI_API_CALL __ke_alloc_wait_queue_head_struct(void);
1306 +extern void ATI_API_CALL __ke_free_wait_queue_head_struct(__ke_wait_queue_head_t* queue_head);
1307 +extern __ke_wait_queue_t* ATI_API_CALL __ke_alloc_wait_queue_struct(void);
1308 +extern void ATI_API_CALL __ke_free_wait_queue_struct(__ke_wait_queue_t* queue);
1309 +
1310 +extern void ATI_API_CALL __ke_wake_up_interruptible(__ke_wait_queue_head_t* queue_head);
1311 +extern void ATI_API_CALL __ke_add_wait_queue(__ke_wait_queue_head_t* queue_head, __ke_wait_queue_t* entry);
1312 +extern void ATI_API_CALL __ke_remove_wait_queue(__ke_wait_queue_head_t* queue_head, __ke_wait_queue_t* entry);
1313 +
1314 +extern void ATI_API_CALL __ke_schedule(void);
1315 +extern int ATI_API_CALL __ke_signal_pending(void);
1316 +
1317 +extern void ATI_API_CALL __ke_set_current_state_task_interruptible(void);
1318 +extern void ATI_API_CALL __ke_set_current_state_task_running(void);
1319 +extern void ATI_API_CALL __ke_configure_sigmask(__ke_sigset_t *pSigMask);
1320 +extern void ATI_API_CALL __ke_block_all_signals(int (*notifier)(void *priv), void *pPriv, __ke_sigset_t *pSigMask);
1321 +extern void ATI_API_CALL __ke_unblock_all_signals(void);
1322
1323 -extern unsigned long __ke__cmpxchg(volatile void *ptr, unsigned long old,
1324 +extern unsigned long ATI_API_CALL __ke__cmpxchg(volatile void *ptr, unsigned long old,
1325 unsigned long new, int size);
1326
1327 #define __ke_cmpxchg(ptr,o,n) \
1328 @@ -160,25 +160,25 @@
1329
1330 /*****************************************************************************/
1331
1332 -extern __ke_dev_t __ke_getdevice(__ke_device_t *dev);
1333 -extern const char* __ke_module_parm(void);
1334 -extern int __ke_inode_rdev_minor(struct inode* inode);
1335 -extern void* __ke_get_file_priv(struct file* filp);
1336 -extern void __ke_set_file_priv(struct file* filp, void* private_data);
1337 -extern int __ke_file_excl_open(struct file* filp);
1338 -extern int __ke_file_rw_open(struct file* filp);
1339 -extern unsigned int __ke_file_counter(struct file* filp);
1340 -extern int __ke_getpid(void);
1341 -extern int __ke_geteuid(void);
1342 -extern unsigned long __ke_jiffies(void);
1343 -extern void __ke_udelay(unsigned long usecs);
1344 -extern void __ke_mdelay(unsigned long msecs);
1345 -extern unsigned long __ke_virt_to_bus(void* address);
1346 -extern unsigned long __ke_virt_to_phys(void* address);
1347 -extern void* __ke_high_memory(void);
1348 +extern __ke_dev_t ATI_API_CALL __ke_getdevice(__ke_device_t *dev);
1349 +extern const char* ATI_API_CALL __ke_module_parm(void);
1350 +extern int ATI_API_CALL __ke_inode_rdev_minor(struct inode* inode);
1351 +extern void* ATI_API_CALL __ke_get_file_priv(struct file* filp);
1352 +extern void ATI_API_CALL __ke_set_file_priv(struct file* filp, void* private_data);
1353 +extern int ATI_API_CALL __ke_file_excl_open(struct file* filp);
1354 +extern int ATI_API_CALL __ke_file_rw_open(struct file* filp);
1355 +extern unsigned int ATI_API_CALL __ke_file_counter(struct file* filp);
1356 +extern int ATI_API_CALL __ke_getpid(void);
1357 +extern int ATI_API_CALL __ke_geteuid(void);
1358 +extern unsigned long ATI_API_CALL __ke_jiffies(void);
1359 +extern void ATI_API_CALL __ke_udelay(unsigned long usecs);
1360 +extern void ATI_API_CALL __ke_mdelay(unsigned long msecs);
1361 +extern unsigned long ATI_API_CALL __ke_virt_to_bus(void* address);
1362 +extern unsigned long ATI_API_CALL __ke_virt_to_phys(void* address);
1363 +extern void* ATI_API_CALL __ke_high_memory(void);
1364 #if defined(__x86_64__) || defined(__ia64__)
1365 -void* __ke_pci_alloc_consistent(__ke_pci_dev_t* dev, int size, void *dma_handle);
1366 -void __ke_pci_free_consistent(__ke_pci_dev_t* dev, int size, unsigned long cpu_addr,
1367 +void* ATI_API_CALL __ke_pci_alloc_consistent(__ke_pci_dev_t* dev, int size, void *dma_handle);
1368 +void ATI_API_CALL __ke_pci_free_consistent(__ke_pci_dev_t* dev, int size, unsigned long cpu_addr,
1369 unsigned int dma_handle);
1370 #endif
1371
1372 @@ -198,39 +198,38 @@
1373 __KE_ERESTARTSYS,
1374 __KE_ELIBBAD,
1375 };
1376 -extern int __ke_error_code(enum __ke_error_num errcode);
1377 +extern int ATI_API_CALL __ke_error_code(enum __ke_error_num errcode);
1378
1379 -extern void __ke_mod_inc_use_count(void);
1380 -extern void __ke_mod_dec_use_count(void);
1381 +extern void ATI_API_CALL __ke_mod_inc_use_count(void);
1382 +extern void ATI_API_CALL __ke_mod_dec_use_count(void);
1383
1384 -extern void __ke_down_struct_sem(__ke_device_t *dev, int idx);
1385 -extern void __ke_up_struct_sem(__ke_device_t *dev, int idx);
1386 +extern void ATI_API_CALL __ke_down_struct_sem(__ke_device_t *dev, int idx);
1387 +extern void ATI_API_CALL __ke_up_struct_sem(__ke_device_t *dev, int idx);
1388 #define __KE_MAX_SEMAPHORES 2
1389 -extern void __ke_sema_init(struct semaphore* sem, int value);
1390 -extern __ke_size_t __ke_sema_size(void);
1391 -extern void __ke_down(struct semaphore* sem);
1392 -extern void __ke_up(struct semaphore* sem);
1393 -extern void __ke_atomic_inc(void* v);
1394 -extern void __ke_atomic_dec(void* v);
1395 -extern void __ke_atomic_add(int val, void* v);
1396 -extern void __ke_atomic_sub(int val, void* v);
1397 -extern int __ke_atomic_read(void* v);
1398 -extern void __ke_atomic_set(void* v, int val);
1399 -extern void __ke_spin_lock(__ke_device_t *dev, int ndx);
1400 -extern void __ke_spin_unlock(__ke_device_t *dev, int ndx);
1401 +extern void ATI_API_CALL __ke_sema_init(struct semaphore* sem, int value);
1402 +extern __ke_size_t ATI_API_CALL __ke_sema_size(void);
1403 +extern void ATI_API_CALL __ke_down(struct semaphore* sem);
1404 +extern void ATI_API_CALL __ke_up(struct semaphore* sem);
1405 +extern void ATI_API_CALL __ke_atomic_inc(void* v);
1406 +extern void ATI_API_CALL __ke_atomic_dec(void* v);
1407 +extern void ATI_API_CALL __ke_atomic_add(int val, void* v);
1408 +extern void ATI_API_CALL __ke_atomic_sub(int val, void* v);
1409 +extern int ATI_API_CALL __ke_atomic_read(void* v);
1410 +extern void ATI_API_CALL __ke_atomic_set(void* v, int val);
1411 +extern void ATI_API_CALL __ke_spin_lock(__ke_device_t *dev, int ndx);
1412 +extern void ATI_API_CALL __ke_spin_unlock(__ke_device_t *dev, int ndx);
1413 #define __KE_MAX_SPINLOCKS 6
1414 -extern void __ke_lock_kernel(void);
1415 -extern void __ke_unlock_kernel(void);
1416 -extern int __ke_sys_mlock(unsigned long start, __ke_size_t len);
1417 -extern int __ke_sys_mlock(unsigned long start, __ke_size_t len);
1418 -extern int __ke_sys_munlock(unsigned long start, __ke_size_t len);
1419 -extern int __ke_sys_modify_ldt(int func, void *ptr, unsigned long bytecount);
1420 +extern void ATI_API_CALL __ke_lock_kernel(void);
1421 +extern void ATI_API_CALL __ke_unlock_kernel(void);
1422 +extern int ATI_API_CALL __ke_sys_mlock(unsigned long start, __ke_size_t len);
1423 +extern int ATI_API_CALL __ke_sys_munlock(unsigned long start, __ke_size_t len);
1424 +extern int ATI_API_CALL __ke_sys_modify_ldt(int func, void *ptr, unsigned long bytecount);
1425 #ifdef __KE_NO_VSPRINTF
1426 -extern void __ke_printk(const char* fmt, ...);
1427 +extern void ATI_API_CALL __ke_printk(const char* fmt, ...);
1428 #else // !__KE_NO_VSPRINTF
1429 -extern void __ke_print_info(const char* fmt, ...);
1430 -extern void __ke_print_error(const char* fmt, ...);
1431 -extern void __ke_print_debug(const char* fmt, ...);
1432 +extern void ATI_API_CALL __ke_print_info(const char* fmt, ...);
1433 +extern void ATI_API_CALL __ke_print_error(const char* fmt, ...);
1434 +extern void ATI_API_CALL __ke_print_debug(const char* fmt, ...);
1435 #endif // !__KE_NO_VSPRINTF
1436
1437 enum __ke_cap
1438 @@ -238,98 +237,98 @@
1439 __KE_CAP_SYS_ADMIN,
1440 __KE_CAP_IPC_LOCK,
1441 };
1442 -extern int __ke_capable(enum __ke_cap cap);
1443 -extern void __ke_cap_effective_raise(enum __ke_cap cap);
1444 -extern __ke_u32 __ke_get_cap_effective(void);
1445 -extern void __ke_set_cap_effective(__ke_u32 cap);
1446 -extern unsigned long __ke_ram_available(void);
1447 -
1448 -extern int __ke_copy_from_user(void* to, const void* from, __ke_size_t size);
1449 -extern int __ke_copy_to_user(void* to, const void* from, __ke_size_t size);
1450 -extern int __ke_verify_area(int type, const void * addr, unsigned long size);
1451 -
1452 -extern void* __ke_malloc(__ke_size_t size);
1453 -extern void __ke_free_s(void* p, __ke_size_t size);
1454 -extern void* __ke_vmalloc(__ke_size_t size);
1455 -extern void __ke_vfree(void* p);
1456 -extern void* __ke_get_free_page(void);
1457 -extern void* __ke_get_free_pages(int order);
1458 -extern void __ke_free_page(void* pt);
1459 -extern void __ke_free_pages(void* pt, int order);
1460 -extern void __ke_mem_map_reserve(void* pt);
1461 -extern void __ke_mem_map_unreserve(void* pt);
1462 -extern void __ke_virt_reserve(void* pt);
1463 -extern void __ke_virt_unreserve(void* pt);
1464 -extern void* __ke_get_vmptr( struct _agp_memory* memory );
1465 -extern void* __ke_ioremap(unsigned long offset, unsigned long size);
1466 -extern void* __ke_ioremap_nocache(unsigned long offset, unsigned long size);
1467 -extern void __ke_iounmap(void* pt);
1468 -extern int __ke_verify_read_access(void* addr, __ke_size_t size);
1469 -extern int __ke_verify_write_access(void* addr, __ke_size_t size);
1470 -extern struct mm_struct* __ke_init_mm(void);
1471 -extern struct mm_struct* __ke_current_mm(void);
1472 -extern unsigned long __ke_get_vm_phys_addr(struct mm_struct* mm, unsigned long virtual_addr);
1473 -extern unsigned long* __ke_get_vm_phys_addr_list(struct mm_struct* mm, unsigned long virtual_addr, unsigned long pages);
1474 -extern void* __ke_memset(void* s, int c, __ke_size_t count);
1475 -extern void* __ke_memcpy(void* d, const void* s, __ke_size_t count);
1476 -extern __ke_size_t __ke_strlen(const char *s);
1477 -extern char* __ke_strcpy(char* d, const char* s);
1478 -extern char* __ke_strncpy(char* d, const char* s, __ke_size_t count);
1479 -extern int __ke_strcmp(const char *s1, const char *s2);
1480 -extern int __ke_strncmp(const char* str1, const char* str2, __ke_size_t count);
1481 -extern int __ke_sprintf(char* buf, const char* fmt, ...);
1482 -
1483 -/*****************************************************************************/
1484 -
1485 -extern void __ke_set_bit(int nr, volatile void * addr);
1486 -extern void __ke_clear_bit(int nr, volatile void * addr);
1487 -
1488 -/*****************************************************************************/
1489 -
1490 -extern int __ke_flush_cache(void);
1491 -
1492 -/*****************************************************************************/
1493 -
1494 -extern int __ke_config_mtrr(void);
1495 -extern int __ke_mtrr_add_wc(unsigned long base, unsigned long size);
1496 -extern int __ke_mtrr_del(int reg, unsigned long base, unsigned long size);
1497 -
1498 -/*****************************************************************************/
1499 -
1500 -extern int __ke_get_pci_device_info(__ke_pci_dev_t* dev, __ke_pci_device_info_t *pinfo);
1501 -extern int __ke_check_pci(int busnum, int devnum, int funcnum, __ke_u16* vendor, __ke_u16* device, unsigned int* irq);
1502 -extern int __ke_pci_get_irq(__ke_pci_dev_t *dev, unsigned int* irq);
1503 -extern __ke_pci_dev_t* __ke_pci_find_device (unsigned int vendor, unsigned int dev, __ke_pci_dev_t* from);
1504 -extern int __ke_pci_read_config_byte(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u8 *val);
1505 -extern int __ke_pci_read_config_word(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u16 *val);
1506 -extern int __ke_pci_read_config_dword(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u32 *val);
1507 -extern int __ke_pci_write_config_byte(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u8 val);
1508 -extern int __ke_pci_write_config_word(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u16 val);
1509 -extern int __ke_pci_write_config_dword(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u32 val);
1510 -extern int __ke_pci_enable_device(__ke_pci_dev_t* dev);
1511 -
1512 -/*****************************************************************************/
1513 -
1514 -extern void __ke_outb(unsigned char value, unsigned short port);
1515 -extern void __ke_outw(unsigned short value, unsigned short port);
1516 -extern void __ke_out(unsigned int value, unsigned short port);
1517 -extern char __ke_inb(unsigned short port);
1518 -extern short __ke_inw(unsigned short port);
1519 -extern int __ke_in(unsigned short port);
1520 -
1521 -/*****************************************************************************/
1522 -
1523 -extern void __ke_enable_irq(int irq);
1524 -extern void __ke_disable_irq(int irq);
1525 -extern int __ke_request_irq(unsigned int irq, void (*handler)(int, void *, void *), const char *dev_name, void *dev_id);
1526 -extern void __ke_free_irq(unsigned int irq, void *dev_id);
1527 -
1528 -/*****************************************************************************/
1529 -
1530 -extern void* __ke_vma_file_priv(struct vm_area_struct* vma);
1531 -extern unsigned long __ke_vm_start(struct vm_area_struct* vma);
1532 -extern unsigned long __ke_vm_end(struct vm_area_struct* vma);
1533 -extern unsigned long __ke_vm_offset(struct vm_area_struct* vma);
1534 +extern int ATI_API_CALL __ke_capable(enum __ke_cap cap);
1535 +extern void ATI_API_CALL __ke_cap_effective_raise(enum __ke_cap cap);
1536 +extern __ke_u32 ATI_API_CALL __ke_get_cap_effective(void);
1537 +extern void ATI_API_CALL __ke_set_cap_effective(__ke_u32 cap);
1538 +extern unsigned long ATI_API_CALL __ke_ram_available(void);
1539 +
1540 +extern int ATI_API_CALL __ke_copy_from_user(void* to, const void* from, __ke_size_t size);
1541 +extern int ATI_API_CALL __ke_copy_to_user(void* to, const void* from, __ke_size_t size);
1542 +extern int ATI_API_CALL __ke_verify_area(int type, const void * addr, unsigned long size);
1543 +
1544 +extern void* ATI_API_CALL __ke_malloc(__ke_size_t size);
1545 +extern void ATI_API_CALL __ke_free_s(void* p, __ke_size_t size);
1546 +extern void* ATI_API_CALL __ke_vmalloc(__ke_size_t size);
1547 +extern void ATI_API_CALL __ke_vfree(void* p);
1548 +extern void* ATI_API_CALL __ke_get_free_page(void);
1549 +extern void* ATI_API_CALL __ke_get_free_pages(int order);
1550 +extern void ATI_API_CALL __ke_free_page(void* pt);
1551 +extern void ATI_API_CALL __ke_free_pages(void* pt, int order);
1552 +extern void ATI_API_CALL __ke_mem_map_reserve(void* pt);
1553 +extern void ATI_API_CALL __ke_mem_map_unreserve(void* pt);
1554 +extern void ATI_API_CALL __ke_virt_reserve(void* pt);
1555 +extern void ATI_API_CALL __ke_virt_unreserve(void* pt);
1556 +extern void* ATI_API_CALL __ke_get_vmptr( struct _agp_memory* memory );
1557 +extern void* ATI_API_CALL __ke_ioremap(unsigned long offset, unsigned long size);
1558 +extern void* ATI_API_CALL __ke_ioremap_nocache(unsigned long offset, unsigned long size);
1559 +extern void ATI_API_CALL __ke_iounmap(void* pt);
1560 +extern int ATI_API_CALL __ke_verify_read_access(void* addr, __ke_size_t size);
1561 +extern int ATI_API_CALL __ke_verify_write_access(void* addr, __ke_size_t size);
1562 +extern struct mm_struct* ATI_API_CALL __ke_init_mm(void);
1563 +extern struct mm_struct* ATI_API_CALL __ke_current_mm(void);
1564 +extern unsigned long ATI_API_CALL __ke_get_vm_phys_addr(struct mm_struct* mm, unsigned long virtual_addr);
1565 +extern unsigned long* ATI_API_CALL __ke_get_vm_phys_addr_list(struct mm_struct* mm, unsigned long virtual_addr, unsigned long pages);
1566 +extern void* ATI_API_CALL __ke_memset(void* s, int c, __ke_size_t count);
1567 +extern void* ATI_API_CALL __ke_memcpy(void* d, const void* s, __ke_size_t count);
1568 +extern __ke_size_t ATI_API_CALL __ke_strlen(const char *s);
1569 +extern char* ATI_API_CALL __ke_strcpy(char* d, const char* s);
1570 +extern char* ATI_API_CALL __ke_strncpy(char* d, const char* s, __ke_size_t count);
1571 +extern int ATI_API_CALL __ke_strcmp(const char *s1, const char *s2);
1572 +extern int ATI_API_CALL __ke_strncmp(const char* str1, const char* str2, __ke_size_t count);
1573 +extern int ATI_API_CALL __ke_sprintf(char* buf, const char* fmt, ...);
1574 +
1575 +/*****************************************************************************/
1576 +
1577 +extern void ATI_API_CALL __ke_set_bit(int nr, volatile void * addr);
1578 +extern void ATI_API_CALL __ke_clear_bit(int nr, volatile void * addr);
1579 +
1580 +/*****************************************************************************/
1581 +
1582 +extern int ATI_API_CALL __ke_flush_cache(void);
1583 +
1584 +/*****************************************************************************/
1585 +
1586 +extern int ATI_API_CALL __ke_config_mtrr(void);
1587 +extern int ATI_API_CALL __ke_mtrr_add_wc(unsigned long base, unsigned long size);
1588 +extern int ATI_API_CALL __ke_mtrr_del(int reg, unsigned long base, unsigned long size);
1589 +
1590 +/*****************************************************************************/
1591 +
1592 +extern int ATI_API_CALL __ke_get_pci_device_info(__ke_pci_dev_t* dev, __ke_pci_device_info_t *pinfo);
1593 +extern int ATI_API_CALL __ke_check_pci(int busnum, int devnum, int funcnum, __ke_u16* vendor, __ke_u16* device, unsigned int* irq);
1594 +extern int ATI_API_CALL __ke_pci_get_irq(__ke_pci_dev_t *dev, unsigned int* irq);
1595 +extern __ke_pci_dev_t* ATI_API_CALL __ke_pci_find_device (unsigned int vendor, unsigned int dev, __ke_pci_dev_t* from);
1596 +extern int ATI_API_CALL __ke_pci_read_config_byte(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u8 *val);
1597 +extern int ATI_API_CALL __ke_pci_read_config_word(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u16 *val);
1598 +extern int ATI_API_CALL __ke_pci_read_config_dword(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u32 *val);
1599 +extern int ATI_API_CALL __ke_pci_write_config_byte(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u8 val);
1600 +extern int ATI_API_CALL __ke_pci_write_config_word(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u16 val);
1601 +extern int ATI_API_CALL __ke_pci_write_config_dword(__ke_pci_dev_t* dev, __ke_u8 where, __ke_u32 val);
1602 +extern int ATI_API_CALL __ke_pci_enable_device(__ke_pci_dev_t* dev);
1603 +
1604 +/*****************************************************************************/
1605 +
1606 +extern void ATI_API_CALL __ke_outb(unsigned char value, unsigned short port);
1607 +extern void ATI_API_CALL __ke_outw(unsigned short value, unsigned short port);
1608 +extern void ATI_API_CALL __ke_out(unsigned int value, unsigned short port);
1609 +extern char ATI_API_CALL __ke_inb(unsigned short port);
1610 +extern short ATI_API_CALL __ke_inw(unsigned short port);
1611 +extern int ATI_API_CALL __ke_in(unsigned short port);
1612 +
1613 +/*****************************************************************************/
1614 +
1615 +extern void ATI_API_CALL __ke_enable_irq(int irq);
1616 +extern void ATI_API_CALL __ke_disable_irq(int irq);
1617 +extern int ATI_API_CALL __ke_request_irq(unsigned int irq, void (*ATI_API_CALL handler)(int, void *, void *), const char *dev_name, void *dev_id);
1618 +extern void ATI_API_CALL __ke_free_irq(unsigned int irq, void *dev_id);
1619 +
1620 +/*****************************************************************************/
1621 +
1622 +extern void* ATI_API_CALL __ke_vma_file_priv(struct vm_area_struct* vma);
1623 +extern unsigned long ATI_API_CALL __ke_vm_start(struct vm_area_struct* vma);
1624 +extern unsigned long ATI_API_CALL __ke_vm_end(struct vm_area_struct* vma);
1625 +extern unsigned long ATI_API_CALL __ke_vm_offset(struct vm_area_struct* vma);
1626 enum __ke_vm_maptype
1627 {
1628 __KE_ADPT,
1629 @@ -339,13 +338,13 @@
1630 __KE_AGP_BQS,
1631 __KE_AGP,
1632 };
1633 -extern char* __ke_vm_flags_str(struct vm_area_struct* vma, char* buf);
1634 -extern char* __ke_vm_page_prot_str(struct vm_area_struct* vma, char* buf);
1635 -extern char* __ke_vm_phys_addr_str(struct vm_area_struct* vma,
1636 +extern char* ATI_API_CALL __ke_vm_flags_str(struct vm_area_struct* vma, char* buf);
1637 +extern char* ATI_API_CALL __ke_vm_page_prot_str(struct vm_area_struct* vma, char* buf);
1638 +extern char* ATI_API_CALL __ke_vm_phys_addr_str(struct vm_area_struct* vma,
1639 char* buf,
1640 unsigned long linear_address,
1641 unsigned long* phys_address);
1642 -extern int __ke_vm_map(struct file* filp,
1643 +extern int ATI_API_CALL __ke_vm_map(struct file* filp,
1644 struct vm_area_struct* vma,
1645 enum __ke_vm_maptype type,
1646 int readonly);
1647 @@ -376,30 +375,30 @@
1648
1649 extern int __ke_agp_try_unsupported;
1650
1651 -int __ke_agp_available(int use_internal);
1652 -void __ke_agp_uninit(void);
1653 +int ATI_API_CALL __ke_agp_available(int use_internal);
1654 +void ATI_API_CALL __ke_agp_uninit(void);
1655 #ifdef FGL
1656 -struct _agp_memory* __ke_agp_allocate_memory_phys_list(
1657 +struct _agp_memory* ATI_API_CALL __ke_agp_allocate_memory_phys_list(
1658 __ke_size_t pages, unsigned long type, unsigned long * phys_addr);
1659 #endif
1660 -void __ke_agp_free_memory(struct _agp_memory* handle);
1661 -struct _agp_memory* __ke_agp_allocate_memory(__ke_size_t pages,
1662 +void ATI_API_CALL __ke_agp_free_memory(struct _agp_memory* handle);
1663 +struct _agp_memory* ATI_API_CALL __ke_agp_allocate_memory(__ke_size_t pages,
1664 unsigned long type);
1665 -int __ke_agp_bind_memory(struct _agp_memory* handle, __ke_off_t start);
1666 -int __ke_agp_unbind_memory(struct _agp_memory* handle);
1667 -int __ke_agp_enable(unsigned long mode);
1668 -int __ke_read_agp_caps_registers(__ke_pci_dev_t* dev, unsigned int *caps);
1669 -int __ke_agp_acquire(void);
1670 -void __ke_agp_release(void);
1671 -void __ke_agp_copy_info(__ke_agp_kern_info_t* info);
1672 -unsigned long __ke_agp_memory_handle(struct _agp_memory* handle);
1673 -unsigned long __ke_agp_memory_page_count(struct _agp_memory* handle);
1674 -
1675 -int __ke_is_athlon(void);
1676 -int __ke_has_PSE(void);
1677 -int __ke_amd_adv_spec_cache_feature(void);
1678 -void __ke_smp_call_function( void (*func)(void *info) );
1679 -int __ke_smp_processor_id(void);
1680 +int ATI_API_CALL __ke_agp_bind_memory(struct _agp_memory* handle, __ke_off_t start);
1681 +int ATI_API_CALL __ke_agp_unbind_memory(struct _agp_memory* handle);
1682 +int ATI_API_CALL __ke_agp_enable(unsigned long mode);
1683 +int ATI_API_CALL __ke_read_agp_caps_registers(__ke_pci_dev_t* dev, unsigned int *caps);
1684 +int ATI_API_CALL __ke_agp_acquire(void);
1685 +void ATI_API_CALL __ke_agp_release(void);
1686 +void ATI_API_CALL __ke_agp_copy_info(__ke_agp_kern_info_t* info);
1687 +unsigned long ATI_API_CALL __ke_agp_memory_handle(struct _agp_memory* handle);
1688 +unsigned long ATI_API_CALL __ke_agp_memory_page_count(struct _agp_memory* handle);
1689 +
1690 +int ATI_API_CALL __ke_is_athlon(void);
1691 +int ATI_API_CALL __ke_has_PSE(void);
1692 +int ATI_API_CALL __ke_amd_adv_spec_cache_feature(void);
1693 +void ATI_API_CALL __ke_smp_call_function( void (*func)(void *info) );
1694 +int ATI_API_CALL __ke_smp_processor_id(void);
1695
1696 /*****************************************************************************/
1697
1698

Properties

Name Value
svn:executable *