Contents of /trunk/kernel26-xen/patches-2.6.25-r1/1015-2.6.25-xen-fixup-xen.patch
Revision 609
Fri May 23 17:35:37 2008 UTC (16 years, 5 months ago) by niro
File size: 203398 byte(s)
-using opensuse xen patchset, updated kernel configs
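
For orientation before the numbered file body: the hunks against drivers/xen/xenbus/xenbus_client.c and xenbus_probe.c below swap mainline's printf-style xenbus_watch_pathfmt() for the Xen tree's two-component xenbus_watch_path2(). A minimal sketch of how a frontend driver would register a watch on its peer's state node with the patched API follows; this is hypothetical driver code, not part of the patch, and the function and callback names are made up.

    #include <linux/kernel.h>
    #include <xen/xenbus.h>

    /* Hypothetical callback: fired whenever the watched node changes.
     * vec[XS_WATCH_PATH] carries the path that triggered the watch. */
    static void otherend_state_changed(struct xenbus_watch *watch,
                                       const char **vec, unsigned int len)
    {
            pr_debug("peer state node %s changed\n", vec[XS_WATCH_PATH]);
    }

    /* Sketch: watch <otherend>/state, mirroring what watch_otherend()
     * does in the patched xenbus_probe.c. 'dev' and 'watch' would live
     * in the driver's per-device state. */
    static int watch_peer_state(struct xenbus_device *dev,
                                struct xenbus_watch *watch)
    {
            return xenbus_watch_path2(dev, dev->otherend, "state",
                                      watch, otherend_state_changed);
    }

Per the hunk in xenbus_client.c, xenbus_watch_path2() kfree()s the joined path itself on failure and reports the error through xenbus_dev_fatal(), so a caller like the sketch above only needs to propagate the return value.
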
1 | Subject: Fix Xen build wrt. Xen files coming from mainline. |
2 | From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 517:d71965a78c20) |
3 | Patch-mainline: obsolete |
4 | |
5 | Acked-by: jbeulich@novell.com |
6 | |
7 | Index: head-2008-04-15/drivers/xen/Makefile |
8 | =================================================================== |
9 | --- head-2008-04-15.orig/drivers/xen/Makefile 2008-04-15 09:41:09.000000000 +0200 |
10 | +++ head-2008-04-15/drivers/xen/Makefile 2008-04-15 09:59:33.000000000 +0200 |
11 | @@ -1,2 +1,23 @@ |
12 | -obj-y += grant-table.o |
13 | +obj-y += core/ |
14 | +obj-y += console/ |
15 | +obj-y += evtchn/ |
16 | obj-y += xenbus/ |
17 | +obj-y += char/ |
18 | + |
19 | +obj-y += util.o |
20 | +obj-$(CONFIG_XEN_BALLOON) += balloon/ |
21 | +obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/ |
22 | +obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/ |
23 | +obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/ |
24 | +obj-$(CONFIG_XEN_TPMDEV_BACKEND) += tpmback/ |
25 | +obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += blkfront/ |
26 | +obj-$(CONFIG_XEN_NETDEV_FRONTEND) += netfront/ |
27 | +obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback/ |
28 | +obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront/ |
29 | +obj-$(CONFIG_XEN_FRAMEBUFFER) += fbfront/ |
30 | +obj-$(CONFIG_XEN_KEYBOARD) += fbfront/ |
31 | +obj-$(CONFIG_XEN_PRIVCMD) += privcmd/ |
32 | +obj-$(CONFIG_XEN_GRANT_DEV) += gntdev/ |
33 | +obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL) += sfc_netutil/ |
34 | +obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND) += sfc_netfront/ |
35 | +obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND) += sfc_netback/ |
36 | Index: head-2008-04-15/drivers/xen/xenbus/Makefile |
37 | =================================================================== |
38 | --- head-2008-04-15.orig/drivers/xen/xenbus/Makefile 2008-04-15 09:41:09.000000000 +0200 |
39 | +++ head-2008-04-15/drivers/xen/xenbus/Makefile 2008-04-15 09:59:33.000000000 +0200 |
40 | @@ -1,7 +1,9 @@ |
41 | -obj-y += xenbus.o |
42 | +obj-y += xenbus_client.o xenbus_comms.o xenbus_xs.o xenbus_probe.o |
43 | +obj-$(CONFIG_XEN_BACKEND) += xenbus_be.o |
44 | |
45 | -xenbus-objs = |
46 | -xenbus-objs += xenbus_client.o |
47 | -xenbus-objs += xenbus_comms.o |
48 | -xenbus-objs += xenbus_xs.o |
49 | -xenbus-objs += xenbus_probe.o |
50 | +xenbus_be-objs = |
51 | +xenbus_be-objs += xenbus_backend_client.o |
52 | + |
53 | +xenbus-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o |
54 | +obj-y += $(xenbus-y) $(xenbus-m) |
55 | +obj-$(CONFIG_XEN_XENBUS_DEV) += xenbus_dev.o |
56 | Index: head-2008-04-15/drivers/xen/xenbus/xenbus_client.c |
57 | =================================================================== |
58 | --- head-2008-04-15.orig/drivers/xen/xenbus/xenbus_client.c 2008-04-15 09:41:09.000000000 +0200 |
59 | +++ head-2008-04-15/drivers/xen/xenbus/xenbus_client.c 2008-04-15 09:59:33.000000000 +0200 |
60 | @@ -4,23 +4,23 @@ |
61 | * frontend or the backend of that driver. |
62 | * |
63 | * Copyright (C) 2005 XenSource Ltd |
64 | - * |
65 | + * |
66 | * This program is free software; you can redistribute it and/or |
67 | * modify it under the terms of the GNU General Public License version 2 |
68 | * as published by the Free Software Foundation; or, when distributed |
69 | * separately from the Linux kernel or incorporated into other |
70 | * software packages, subject to the following license: |
71 | - * |
72 | + * |
73 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
74 | * of this source file (the "Software"), to deal in the Software without |
75 | * restriction, including without limitation the rights to use, copy, modify, |
76 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, |
77 | * and to permit persons to whom the Software is furnished to do so, subject to |
78 | * the following conditions: |
79 | - * |
80 | + * |
81 | * The above copyright notice and this permission notice shall be included in |
82 | * all copies or substantial portions of the Software. |
83 | - * |
84 | + * |
85 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
86 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
87 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
88 | @@ -30,14 +30,18 @@ |
89 | * IN THE SOFTWARE. |
90 | */ |
91 | |
92 | -#include <linux/types.h> |
93 | -#include <linux/vmalloc.h> |
94 | -#include <asm/xen/hypervisor.h> |
95 | -#include <xen/interface/xen.h> |
96 | -#include <xen/interface/event_channel.h> |
97 | -#include <xen/events.h> |
98 | -#include <xen/grant_table.h> |
99 | +#include <linux/slab.h> |
100 | +#include <xen/evtchn.h> |
101 | +#include <xen/gnttab.h> |
102 | #include <xen/xenbus.h> |
103 | +#include <xen/driver_util.h> |
104 | + |
105 | +#ifdef HAVE_XEN_PLATFORM_COMPAT_H |
106 | +#include <xen/platform-compat.h> |
107 | +#endif |
108 | + |
109 | +#define DPRINTK(fmt, args...) \ |
110 | + pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args) |
111 | |
112 | const char *xenbus_strstate(enum xenbus_state state) |
113 | { |
114 | @@ -54,20 +58,6 @@ const char *xenbus_strstate(enum xenbus_ |
115 | } |
116 | EXPORT_SYMBOL_GPL(xenbus_strstate); |
117 | |
118 | -/** |
119 | - * xenbus_watch_path - register a watch |
120 | - * @dev: xenbus device |
121 | - * @path: path to watch |
122 | - * @watch: watch to register |
123 | - * @callback: callback to register |
124 | - * |
125 | - * Register a @watch on the given path, using the given xenbus_watch structure |
126 | - * for storage, and the given @callback function as the callback. Return 0 on |
127 | - * success, or -errno on error. On success, the given @path will be saved as |
128 | - * @watch->node, and remains the caller's to free. On error, @watch->node will |
129 | - * be NULL, the device will switch to %XenbusStateClosing, and the error will |
130 | - * be saved in the store. |
131 | - */ |
132 | int xenbus_watch_path(struct xenbus_device *dev, const char *path, |
133 | struct xenbus_watch *watch, |
134 | void (*callback)(struct xenbus_watch *, |
135 | @@ -91,58 +81,26 @@ int xenbus_watch_path(struct xenbus_devi |
136 | EXPORT_SYMBOL_GPL(xenbus_watch_path); |
137 | |
138 | |
139 | -/** |
140 | - * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path |
141 | - * @dev: xenbus device |
142 | - * @watch: watch to register |
143 | - * @callback: callback to register |
144 | - * @pathfmt: format of path to watch |
145 | - * |
146 | - * Register a watch on the given @path, using the given xenbus_watch |
147 | - * structure for storage, and the given @callback function as the callback. |
148 | - * Return 0 on success, or -errno on error. On success, the watched path |
149 | - * (@path/@path2) will be saved as @watch->node, and becomes the caller's to |
150 | - * kfree(). On error, watch->node will be NULL, so the caller has nothing to |
151 | - * free, the device will switch to %XenbusStateClosing, and the error will be |
152 | - * saved in the store. |
153 | - */ |
154 | -int xenbus_watch_pathfmt(struct xenbus_device *dev, |
155 | - struct xenbus_watch *watch, |
156 | - void (*callback)(struct xenbus_watch *, |
157 | - const char **, unsigned int), |
158 | - const char *pathfmt, ...) |
159 | +int xenbus_watch_path2(struct xenbus_device *dev, const char *path, |
160 | + const char *path2, struct xenbus_watch *watch, |
161 | + void (*callback)(struct xenbus_watch *, |
162 | + const char **, unsigned int)) |
163 | { |
164 | int err; |
165 | - va_list ap; |
166 | - char *path; |
167 | - |
168 | - va_start(ap, pathfmt); |
169 | - path = kvasprintf(GFP_KERNEL, pathfmt, ap); |
170 | - va_end(ap); |
171 | - |
172 | - if (!path) { |
173 | + char *state = kasprintf(GFP_KERNEL|__GFP_HIGH, "%s/%s", path, path2); |
174 | + if (!state) { |
175 | xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); |
176 | return -ENOMEM; |
177 | } |
178 | - err = xenbus_watch_path(dev, path, watch, callback); |
179 | + err = xenbus_watch_path(dev, state, watch, callback); |
180 | |
181 | if (err) |
182 | - kfree(path); |
183 | + kfree(state); |
184 | return err; |
185 | } |
186 | -EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt); |
187 | +EXPORT_SYMBOL_GPL(xenbus_watch_path2); |
188 | |
189 | |
190 | -/** |
191 | - * xenbus_switch_state |
192 | - * @dev: xenbus device |
193 | - * @xbt: transaction handle |
194 | - * @state: new state |
195 | - * |
196 | - * Advertise in the store a change of the given driver to the given new_state. |
197 | - * Return 0 on success, or -errno on error. On error, the device will switch |
198 | - * to XenbusStateClosing, and the error will be saved in the store. |
199 | - */ |
200 | int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) |
201 | { |
202 | /* We check whether the state is currently set to the given value, and |
203 | @@ -201,13 +159,12 @@ static char *error_path(struct xenbus_de |
204 | } |
205 | |
206 | |
207 | -static void xenbus_va_dev_error(struct xenbus_device *dev, int err, |
208 | - const char *fmt, va_list ap) |
209 | +void _dev_error(struct xenbus_device *dev, int err, const char *fmt, |
210 | + va_list ap) |
211 | { |
212 | int ret; |
213 | unsigned int len; |
214 | - char *printf_buffer = NULL; |
215 | - char *path_buffer = NULL; |
216 | + char *printf_buffer = NULL, *path_buffer = NULL; |
217 | |
218 | #define PRINTF_BUFFER_SIZE 4096 |
219 | printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL); |
220 | @@ -224,74 +181,51 @@ static void xenbus_va_dev_error(struct x |
221 | path_buffer = error_path(dev); |
222 | |
223 | if (path_buffer == NULL) { |
224 | - dev_err(&dev->dev, "failed to write error node for %s (%s)\n", |
225 | + printk("xenbus: failed to write error node for %s (%s)\n", |
226 | dev->nodename, printf_buffer); |
227 | goto fail; |
228 | } |
229 | |
230 | if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) { |
231 | - dev_err(&dev->dev, "failed to write error node for %s (%s)\n", |
232 | + printk("xenbus: failed to write error node for %s (%s)\n", |
233 | dev->nodename, printf_buffer); |
234 | goto fail; |
235 | } |
236 | |
237 | fail: |
238 | - kfree(printf_buffer); |
239 | - kfree(path_buffer); |
240 | + if (printf_buffer) |
241 | + kfree(printf_buffer); |
242 | + if (path_buffer) |
243 | + kfree(path_buffer); |
244 | } |
245 | |
246 | |
247 | -/** |
248 | - * xenbus_dev_error |
249 | - * @dev: xenbus device |
250 | - * @err: error to report |
251 | - * @fmt: error message format |
252 | - * |
253 | - * Report the given negative errno into the store, along with the given |
254 | - * formatted message. |
255 | - */ |
256 | -void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...) |
257 | +void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, |
258 | + ...) |
259 | { |
260 | va_list ap; |
261 | |
262 | va_start(ap, fmt); |
263 | - xenbus_va_dev_error(dev, err, fmt, ap); |
264 | + _dev_error(dev, err, fmt, ap); |
265 | va_end(ap); |
266 | } |
267 | EXPORT_SYMBOL_GPL(xenbus_dev_error); |
268 | |
269 | -/** |
270 | - * xenbus_dev_fatal |
271 | - * @dev: xenbus device |
272 | - * @err: error to report |
273 | - * @fmt: error message format |
274 | - * |
275 | - * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by |
276 | - * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly |
277 | - * closedown of this driver and its peer. |
278 | - */ |
279 | |
280 | -void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...) |
281 | +void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, |
282 | + ...) |
283 | { |
284 | va_list ap; |
285 | |
286 | va_start(ap, fmt); |
287 | - xenbus_va_dev_error(dev, err, fmt, ap); |
288 | + _dev_error(dev, err, fmt, ap); |
289 | va_end(ap); |
290 | |
291 | xenbus_switch_state(dev, XenbusStateClosing); |
292 | } |
293 | EXPORT_SYMBOL_GPL(xenbus_dev_fatal); |
294 | |
295 | -/** |
296 | - * xenbus_grant_ring |
297 | - * @dev: xenbus device |
298 | - * @ring_mfn: mfn of ring to grant |
299 | - |
300 | - * Grant access to the given @ring_mfn to the peer of the given device. Return |
301 | - * 0 on success, or -errno on error. On error, the device will switch to |
302 | - * XenbusStateClosing, and the error will be saved in the store. |
303 | - */ |
304 | + |
305 | int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn) |
306 | { |
307 | int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0); |
308 | @@ -302,18 +236,12 @@ int xenbus_grant_ring(struct xenbus_devi |
309 | EXPORT_SYMBOL_GPL(xenbus_grant_ring); |
310 | |
311 | |
312 | -/** |
313 | - * Allocate an event channel for the given xenbus_device, assigning the newly |
314 | - * created local port to *port. Return 0 on success, or -errno on error. On |
315 | - * error, the device will switch to XenbusStateClosing, and the error will be |
316 | - * saved in the store. |
317 | - */ |
318 | int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port) |
319 | { |
320 | struct evtchn_alloc_unbound alloc_unbound; |
321 | int err; |
322 | |
323 | - alloc_unbound.dom = DOMID_SELF; |
324 | + alloc_unbound.dom = DOMID_SELF; |
325 | alloc_unbound.remote_dom = dev->otherend_id; |
326 | |
327 | err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, |
328 | @@ -328,36 +256,6 @@ int xenbus_alloc_evtchn(struct xenbus_de |
329 | EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn); |
330 | |
331 | |
332 | -/** |
333 | - * Bind to an existing interdomain event channel in another domain. Returns 0 |
334 | - * on success and stores the local port in *port. On error, returns -errno, |
335 | - * switches the device to XenbusStateClosing, and saves the error in XenStore. |
336 | - */ |
337 | -int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port) |
338 | -{ |
339 | - struct evtchn_bind_interdomain bind_interdomain; |
340 | - int err; |
341 | - |
342 | - bind_interdomain.remote_dom = dev->otherend_id; |
343 | - bind_interdomain.remote_port = remote_port; |
344 | - |
345 | - err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, |
346 | - &bind_interdomain); |
347 | - if (err) |
348 | - xenbus_dev_fatal(dev, err, |
349 | - "binding to event channel %d from domain %d", |
350 | - remote_port, dev->otherend_id); |
351 | - else |
352 | - *port = bind_interdomain.local_port; |
353 | - |
354 | - return err; |
355 | -} |
356 | -EXPORT_SYMBOL_GPL(xenbus_bind_evtchn); |
357 | - |
358 | - |
359 | -/** |
360 | - * Free an existing event channel. Returns 0 on success or -errno on error. |
361 | - */ |
362 | int xenbus_free_evtchn(struct xenbus_device *dev, int port) |
363 | { |
364 | struct evtchn_close close; |
365 | @@ -374,189 +272,6 @@ int xenbus_free_evtchn(struct xenbus_dev |
366 | EXPORT_SYMBOL_GPL(xenbus_free_evtchn); |
367 | |
368 | |
369 | -/** |
370 | - * xenbus_map_ring_valloc |
371 | - * @dev: xenbus device |
372 | - * @gnt_ref: grant reference |
373 | - * @vaddr: pointer to address to be filled out by mapping |
374 | - * |
375 | - * Based on Rusty Russell's skeleton driver's map_page. |
376 | - * Map a page of memory into this domain from another domain's grant table. |
377 | - * xenbus_map_ring_valloc allocates a page of virtual address space, maps the |
378 | - * page to that address, and sets *vaddr to that address. |
379 | - * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) |
380 | - * or -ENOMEM on error. If an error is returned, device will switch to |
381 | - * XenbusStateClosing and the error message will be saved in XenStore. |
382 | - */ |
383 | -int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr) |
384 | -{ |
385 | - struct gnttab_map_grant_ref op = { |
386 | - .flags = GNTMAP_host_map, |
387 | - .ref = gnt_ref, |
388 | - .dom = dev->otherend_id, |
389 | - }; |
390 | - struct vm_struct *area; |
391 | - |
392 | - *vaddr = NULL; |
393 | - |
394 | - area = alloc_vm_area(PAGE_SIZE); |
395 | - if (!area) |
396 | - return -ENOMEM; |
397 | - |
398 | - op.host_addr = (unsigned long)area->addr; |
399 | - |
400 | - if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) |
401 | - BUG(); |
402 | - |
403 | - if (op.status != GNTST_okay) { |
404 | - free_vm_area(area); |
405 | - xenbus_dev_fatal(dev, op.status, |
406 | - "mapping in shared page %d from domain %d", |
407 | - gnt_ref, dev->otherend_id); |
408 | - return op.status; |
409 | - } |
410 | - |
411 | - /* Stuff the handle in an unused field */ |
412 | - area->phys_addr = (unsigned long)op.handle; |
413 | - |
414 | - *vaddr = area->addr; |
415 | - return 0; |
416 | -} |
417 | -EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); |
418 | - |
419 | - |
420 | -/** |
421 | - * xenbus_map_ring |
422 | - * @dev: xenbus device |
423 | - * @gnt_ref: grant reference |
424 | - * @handle: pointer to grant handle to be filled |
425 | - * @vaddr: address to be mapped to |
426 | - * |
427 | - * Map a page of memory into this domain from another domain's grant table. |
428 | - * xenbus_map_ring does not allocate the virtual address space (you must do |
429 | - * this yourself!). It only maps in the page to the specified address. |
430 | - * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) |
431 | - * or -ENOMEM on error. If an error is returned, device will switch to |
432 | - * XenbusStateClosing and the error message will be saved in XenStore. |
433 | - */ |
434 | -int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, |
435 | - grant_handle_t *handle, void *vaddr) |
436 | -{ |
437 | - struct gnttab_map_grant_ref op = { |
438 | - .host_addr = (unsigned long)vaddr, |
439 | - .flags = GNTMAP_host_map, |
440 | - .ref = gnt_ref, |
441 | - .dom = dev->otherend_id, |
442 | - }; |
443 | - |
444 | - if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) |
445 | - BUG(); |
446 | - |
447 | - if (op.status != GNTST_okay) { |
448 | - xenbus_dev_fatal(dev, op.status, |
449 | - "mapping in shared page %d from domain %d", |
450 | - gnt_ref, dev->otherend_id); |
451 | - } else |
452 | - *handle = op.handle; |
453 | - |
454 | - return op.status; |
455 | -} |
456 | -EXPORT_SYMBOL_GPL(xenbus_map_ring); |
457 | - |
458 | - |
459 | -/** |
460 | - * xenbus_unmap_ring_vfree |
461 | - * @dev: xenbus device |
462 | - * @vaddr: addr to unmap |
463 | - * |
464 | - * Based on Rusty Russell's skeleton driver's unmap_page. |
465 | - * Unmap a page of memory in this domain that was imported from another domain. |
466 | - * Use xenbus_unmap_ring_vfree if you mapped in your memory with |
467 | - * xenbus_map_ring_valloc (it will free the virtual address space). |
468 | - * Returns 0 on success and returns GNTST_* on error |
469 | - * (see xen/include/interface/grant_table.h). |
470 | - */ |
471 | -int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr) |
472 | -{ |
473 | - struct vm_struct *area; |
474 | - struct gnttab_unmap_grant_ref op = { |
475 | - .host_addr = (unsigned long)vaddr, |
476 | - }; |
477 | - |
478 | - /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr) |
479 | - * method so that we don't have to muck with vmalloc internals here. |
480 | - * We could force the user to hang on to their struct vm_struct from |
481 | - * xenbus_map_ring_valloc, but these 6 lines considerably simplify |
482 | - * this API. |
483 | - */ |
484 | - read_lock(&vmlist_lock); |
485 | - for (area = vmlist; area != NULL; area = area->next) { |
486 | - if (area->addr == vaddr) |
487 | - break; |
488 | - } |
489 | - read_unlock(&vmlist_lock); |
490 | - |
491 | - if (!area) { |
492 | - xenbus_dev_error(dev, -ENOENT, |
493 | - "can't find mapped virtual address %p", vaddr); |
494 | - return GNTST_bad_virt_addr; |
495 | - } |
496 | - |
497 | - op.handle = (grant_handle_t)area->phys_addr; |
498 | - |
499 | - if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) |
500 | - BUG(); |
501 | - |
502 | - if (op.status == GNTST_okay) |
503 | - free_vm_area(area); |
504 | - else |
505 | - xenbus_dev_error(dev, op.status, |
506 | - "unmapping page at handle %d error %d", |
507 | - (int16_t)area->phys_addr, op.status); |
508 | - |
509 | - return op.status; |
510 | -} |
511 | -EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); |
512 | - |
513 | - |
514 | -/** |
515 | - * xenbus_unmap_ring |
516 | - * @dev: xenbus device |
517 | - * @handle: grant handle |
518 | - * @vaddr: addr to unmap |
519 | - * |
520 | - * Unmap a page of memory in this domain that was imported from another domain. |
521 | - * Returns 0 on success and returns GNTST_* on error |
522 | - * (see xen/include/interface/grant_table.h). |
523 | - */ |
524 | -int xenbus_unmap_ring(struct xenbus_device *dev, |
525 | - grant_handle_t handle, void *vaddr) |
526 | -{ |
527 | - struct gnttab_unmap_grant_ref op = { |
528 | - .host_addr = (unsigned long)vaddr, |
529 | - .handle = handle, |
530 | - }; |
531 | - |
532 | - if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) |
533 | - BUG(); |
534 | - |
535 | - if (op.status != GNTST_okay) |
536 | - xenbus_dev_error(dev, op.status, |
537 | - "unmapping page at handle %d error %d", |
538 | - handle, op.status); |
539 | - |
540 | - return op.status; |
541 | -} |
542 | -EXPORT_SYMBOL_GPL(xenbus_unmap_ring); |
543 | - |
544 | - |
545 | -/** |
546 | - * xenbus_read_driver_state |
547 | - * @path: path for driver |
548 | - * |
549 | - * Return the state of the driver rooted at the given store path, or |
550 | - * XenbusStateUnknown if no state can be read. |
551 | - */ |
552 | enum xenbus_state xenbus_read_driver_state(const char *path) |
553 | { |
554 | enum xenbus_state result; |
555 | Index: head-2008-04-15/drivers/xen/xenbus/xenbus_comms.c |
556 | =================================================================== |
557 | --- head-2008-04-15.orig/drivers/xen/xenbus/xenbus_comms.c 2008-04-15 09:41:09.000000000 +0200 |
558 | +++ head-2008-04-15/drivers/xen/xenbus/xenbus_comms.c 2008-04-15 09:59:33.000000000 +0200 |
559 | @@ -4,23 +4,23 @@ |
560 | * Low level code to talks to Xen Store: ringbuffer and event channel. |
561 | * |
562 | * Copyright (C) 2005 Rusty Russell, IBM Corporation |
563 | - * |
564 | + * |
565 | * This program is free software; you can redistribute it and/or |
566 | * modify it under the terms of the GNU General Public License version 2 |
567 | * as published by the Free Software Foundation; or, when distributed |
568 | * separately from the Linux kernel or incorporated into other |
569 | * software packages, subject to the following license: |
570 | - * |
571 | + * |
572 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
573 | * of this source file (the "Software"), to deal in the Software without |
574 | * restriction, including without limitation the rights to use, copy, modify, |
575 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, |
576 | * and to permit persons to whom the Software is furnished to do so, subject to |
577 | * the following conditions: |
578 | - * |
579 | + * |
580 | * The above copyright notice and this permission notice shall be included in |
581 | * all copies or substantial portions of the Software. |
582 | - * |
583 | + * |
584 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
585 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
586 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
587 | @@ -34,19 +34,28 @@ |
588 | #include <linux/interrupt.h> |
589 | #include <linux/sched.h> |
590 | #include <linux/err.h> |
591 | +#include <linux/ptrace.h> |
592 | +#include <linux/workqueue.h> |
593 | +#include <xen/evtchn.h> |
594 | #include <xen/xenbus.h> |
595 | -#include <asm/xen/hypervisor.h> |
596 | -#include <xen/events.h> |
597 | -#include <xen/page.h> |
598 | + |
599 | +#include <asm/hypervisor.h> |
600 | + |
601 | #include "xenbus_comms.h" |
602 | |
603 | +#ifdef HAVE_XEN_PLATFORM_COMPAT_H |
604 | +#include <xen/platform-compat.h> |
605 | +#endif |
606 | + |
607 | static int xenbus_irq; |
608 | |
609 | -static DECLARE_WORK(probe_work, xenbus_probe); |
610 | +extern void xenbus_probe(void *); |
611 | +extern int xenstored_ready; |
612 | +static DECLARE_WORK(probe_work, xenbus_probe, NULL); |
613 | |
614 | static DECLARE_WAIT_QUEUE_HEAD(xb_waitq); |
615 | |
616 | -static irqreturn_t wake_waiting(int irq, void *unused) |
617 | +static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs) |
618 | { |
619 | if (unlikely(xenstored_ready == 0)) { |
620 | xenstored_ready = 1; |
621 | @@ -82,13 +91,6 @@ static const void *get_input_chunk(XENST |
622 | return buf + MASK_XENSTORE_IDX(cons); |
623 | } |
624 | |
625 | -/** |
626 | - * xb_write - low level write |
627 | - * @data: buffer to send |
628 | - * @len: length of buffer |
629 | - * |
630 | - * Returns 0 on success, error otherwise. |
631 | - */ |
632 | int xb_write(const void *data, unsigned len) |
633 | { |
634 | struct xenstore_domain_interface *intf = xen_store_interface; |
635 | @@ -197,9 +199,7 @@ int xb_read(void *data, unsigned len) |
636 | return 0; |
637 | } |
638 | |
639 | -/** |
640 | - * xb_init_comms - Set up interrupt handler off store event channel. |
641 | - */ |
642 | +/* Set up interrupt handler off store event channel. */ |
643 | int xb_init_comms(void) |
644 | { |
645 | struct xenstore_domain_interface *intf = xen_store_interface; |
646 | @@ -219,7 +219,7 @@ int xb_init_comms(void) |
647 | if (xenbus_irq) |
648 | unbind_from_irqhandler(xenbus_irq, &xb_waitq); |
649 | |
650 | - err = bind_evtchn_to_irqhandler( |
651 | + err = bind_caller_port_to_irqhandler( |
652 | xen_store_evtchn, wake_waiting, |
653 | 0, "xenbus", &xb_waitq); |
654 | if (err <= 0) { |
655 | Index: head-2008-04-15/drivers/xen/xenbus/xenbus_comms.h |
656 | =================================================================== |
657 | --- head-2008-04-15.orig/drivers/xen/xenbus/xenbus_comms.h 2008-04-15 09:41:09.000000000 +0200 |
658 | +++ head-2008-04-15/drivers/xen/xenbus/xenbus_comms.h 2008-04-15 09:59:33.000000000 +0200 |
659 | @@ -1,6 +1,6 @@ |
660 | /* |
661 | * Private include for xenbus communications. |
662 | - * |
663 | + * |
664 | * Copyright (C) 2005 Rusty Russell, IBM Corporation |
665 | * |
666 | * This program is free software; you can redistribute it and/or |
667 | @@ -8,17 +8,17 @@ |
668 | * as published by the Free Software Foundation; or, when distributed |
669 | * separately from the Linux kernel or incorporated into other |
670 | * software packages, subject to the following license: |
671 | - * |
672 | + * |
673 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
674 | * of this source file (the "Software"), to deal in the Software without |
675 | * restriction, including without limitation the rights to use, copy, modify, |
676 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, |
677 | * and to permit persons to whom the Software is furnished to do so, subject to |
678 | * the following conditions: |
679 | - * |
680 | + * |
681 | * The above copyright notice and this permission notice shall be included in |
682 | * all copies or substantial portions of the Software. |
683 | - * |
684 | + * |
685 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
686 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
687 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
688 | Index: head-2008-04-15/drivers/xen/xenbus/xenbus_probe.c |
689 | =================================================================== |
690 | --- head-2008-04-15.orig/drivers/xen/xenbus/xenbus_probe.c 2008-04-15 09:41:09.000000000 +0200 |
691 | +++ head-2008-04-15/drivers/xen/xenbus/xenbus_probe.c 2008-04-15 09:59:33.000000000 +0200 |
692 | @@ -4,23 +4,24 @@ |
693 | * Copyright (C) 2005 Rusty Russell, IBM Corporation |
694 | * Copyright (C) 2005 Mike Wray, Hewlett-Packard |
695 | * Copyright (C) 2005, 2006 XenSource Ltd |
696 | - * |
697 | + * Copyright (C) 2007 Solarflare Communications, Inc. |
698 | + * |
699 | * This program is free software; you can redistribute it and/or |
700 | * modify it under the terms of the GNU General Public License version 2 |
701 | * as published by the Free Software Foundation; or, when distributed |
702 | * separately from the Linux kernel or incorporated into other |
703 | * software packages, subject to the following license: |
704 | - * |
705 | + * |
706 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
707 | * of this source file (the "Software"), to deal in the Software without |
708 | * restriction, including without limitation the rights to use, copy, modify, |
709 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, |
710 | * and to permit persons to whom the Software is furnished to do so, subject to |
711 | * the following conditions: |
712 | - * |
713 | + * |
714 | * The above copyright notice and this permission notice shall be included in |
715 | * all copies or substantial portions of the Software. |
716 | - * |
717 | + * |
718 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
719 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
720 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
721 | @@ -32,7 +33,7 @@ |
722 | |
723 | #define DPRINTK(fmt, args...) \ |
724 | pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ |
725 | - __func__, __LINE__, ##args) |
726 | + __FUNCTION__, __LINE__, ##args) |
727 | |
728 | #include <linux/kernel.h> |
729 | #include <linux/err.h> |
730 | @@ -41,24 +42,35 @@ |
731 | #include <linux/fcntl.h> |
732 | #include <linux/mm.h> |
733 | #include <linux/notifier.h> |
734 | -#include <linux/kthread.h> |
735 | #include <linux/mutex.h> |
736 | -#include <linux/io.h> |
737 | +#include <linux/module.h> |
738 | |
739 | +#include <asm/io.h> |
740 | #include <asm/page.h> |
741 | +#include <asm/maddr.h> |
742 | #include <asm/pgtable.h> |
743 | -#include <asm/xen/hypervisor.h> |
744 | +#include <asm/hypervisor.h> |
745 | #include <xen/xenbus.h> |
746 | -#include <xen/events.h> |
747 | -#include <xen/page.h> |
748 | +#include <xen/xen_proc.h> |
749 | +#include <xen/evtchn.h> |
750 | +#include <xen/features.h> |
751 | +#ifdef MODULE |
752 | +#include <xen/hvm.h> |
753 | +#endif |
754 | |
755 | #include "xenbus_comms.h" |
756 | #include "xenbus_probe.h" |
757 | |
758 | +#ifdef HAVE_XEN_PLATFORM_COMPAT_H |
759 | +#include <xen/platform-compat.h> |
760 | +#endif |
761 | + |
762 | int xen_store_evtchn; |
763 | struct xenstore_domain_interface *xen_store_interface; |
764 | static unsigned long xen_store_mfn; |
765 | |
766 | +extern struct mutex xenwatch_mutex; |
767 | + |
768 | static BLOCKING_NOTIFIER_HEAD(xenstore_chain); |
769 | |
770 | static void wait_for_devices(struct xenbus_driver *xendrv); |
771 | @@ -156,6 +168,30 @@ static int read_backend_details(struct x |
772 | return read_otherend_details(xendev, "backend-id", "backend"); |
773 | } |
774 | |
775 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) |
776 | +static int xenbus_uevent_frontend(struct device *dev, char **envp, |
777 | + int num_envp, char *buffer, int buffer_size) |
778 | +{ |
779 | + struct xenbus_device *xdev; |
780 | + int length = 0, i = 0; |
781 | + |
782 | + if (dev == NULL) |
783 | + return -ENODEV; |
784 | + xdev = to_xenbus_device(dev); |
785 | + if (xdev == NULL) |
786 | + return -ENODEV; |
787 | + |
788 | + /* stuff we want to pass to /sbin/hotplug */ |
789 | + add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, |
790 | + "XENBUS_TYPE=%s", xdev->devicetype); |
791 | + add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, |
792 | + "XENBUS_PATH=%s", xdev->nodename); |
793 | + add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, |
794 | + "MODALIAS=xen:%s", xdev->devicetype); |
795 | + |
796 | + return 0; |
797 | +} |
798 | +#endif |
799 | |
800 | /* Bus type for frontend drivers. */ |
801 | static struct xen_bus_type xenbus_frontend = { |
802 | @@ -163,12 +199,19 @@ static struct xen_bus_type xenbus_fronte |
803 | .levels = 2, /* device/type/<id> */ |
804 | .get_bus_id = frontend_bus_id, |
805 | .probe = xenbus_probe_frontend, |
806 | + .error = -ENODEV, |
807 | .bus = { |
808 | .name = "xen", |
809 | .match = xenbus_match, |
810 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) |
811 | .probe = xenbus_dev_probe, |
812 | .remove = xenbus_dev_remove, |
813 | .shutdown = xenbus_dev_shutdown, |
814 | + .uevent = xenbus_uevent_frontend, |
815 | +#endif |
816 | + }, |
817 | + .dev = { |
818 | + .bus_id = "xen", |
819 | }, |
820 | }; |
821 | |
822 | @@ -185,17 +228,16 @@ static void otherend_changed(struct xenb |
823 | if (!dev->otherend || |
824 | strncmp(dev->otherend, vec[XS_WATCH_PATH], |
825 | strlen(dev->otherend))) { |
826 | - dev_dbg(&dev->dev, "Ignoring watch at %s\n", |
827 | - vec[XS_WATCH_PATH]); |
828 | + DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]); |
829 | return; |
830 | } |
831 | |
832 | state = xenbus_read_driver_state(dev->otherend); |
833 | |
834 | - dev_dbg(&dev->dev, "state is %d, (%s), %s, %s\n", |
835 | - state, xenbus_strstate(state), dev->otherend_watch.node, |
836 | - vec[XS_WATCH_PATH]); |
837 | + DPRINTK("state is %d (%s), %s, %s", state, xenbus_strstate(state), |
838 | + dev->otherend_watch.node, vec[XS_WATCH_PATH]); |
839 | |
840 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) |
841 | /* |
842 | * Ignore xenbus transitions during shutdown. This prevents us doing |
843 | * work that can fail e.g., when the rootfs is gone. |
844 | @@ -209,6 +251,7 @@ static void otherend_changed(struct xenb |
845 | xenbus_frontend_closed(dev); |
846 | return; |
847 | } |
848 | +#endif |
849 | |
850 | if (drv->otherend_changed) |
851 | drv->otherend_changed(dev, state); |
852 | @@ -228,8 +271,8 @@ static int talk_to_otherend(struct xenbu |
853 | |
854 | static int watch_otherend(struct xenbus_device *dev) |
855 | { |
856 | - return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed, |
857 | - "%s/%s", dev->otherend, "state"); |
858 | + return xenbus_watch_path2(dev, dev->otherend, "state", |
859 | + &dev->otherend_watch, otherend_changed); |
860 | } |
861 | |
862 | |
863 | @@ -255,8 +298,9 @@ int xenbus_dev_probe(struct device *_dev |
864 | |
865 | err = talk_to_otherend(dev); |
866 | if (err) { |
867 | - dev_warn(&dev->dev, "talk_to_otherend on %s failed.\n", |
868 | - dev->nodename); |
869 | + printk(KERN_WARNING |
870 | + "xenbus_probe: talk_to_otherend on %s failed.\n", |
871 | + dev->nodename); |
872 | return err; |
873 | } |
874 | |
875 | @@ -266,7 +310,8 @@ int xenbus_dev_probe(struct device *_dev |
876 | |
877 | err = watch_otherend(dev); |
878 | if (err) { |
879 | - dev_warn(&dev->dev, "watch_otherend on %s failed.\n", |
880 | + printk(KERN_WARNING |
881 | + "xenbus_probe: watch_otherend on %s failed.\n", |
882 | dev->nodename); |
883 | return err; |
884 | } |
885 | @@ -304,41 +349,50 @@ static void xenbus_dev_shutdown(struct d |
886 | |
887 | get_device(&dev->dev); |
888 | if (dev->state != XenbusStateConnected) { |
889 | - printk(KERN_INFO "%s: %s: %s != Connected, skipping\n", __func__, |
890 | + printk("%s: %s: %s != Connected, skipping\n", __FUNCTION__, |
891 | dev->nodename, xenbus_strstate(dev->state)); |
892 | goto out; |
893 | } |
894 | xenbus_switch_state(dev, XenbusStateClosing); |
895 | timeout = wait_for_completion_timeout(&dev->down, timeout); |
896 | if (!timeout) |
897 | - printk(KERN_INFO "%s: %s timeout closing device\n", |
898 | - __func__, dev->nodename); |
899 | + printk("%s: %s timeout closing device\n", __FUNCTION__, dev->nodename); |
900 | out: |
901 | put_device(&dev->dev); |
902 | } |
903 | |
904 | int xenbus_register_driver_common(struct xenbus_driver *drv, |
905 | - struct xen_bus_type *bus, |
906 | - struct module *owner, |
907 | - const char *mod_name) |
908 | + struct xen_bus_type *bus) |
909 | { |
910 | + int ret; |
911 | + |
912 | + if (bus->error) |
913 | + return bus->error; |
914 | + |
915 | drv->driver.name = drv->name; |
916 | drv->driver.bus = &bus->bus; |
917 | - drv->driver.owner = owner; |
918 | - drv->driver.mod_name = mod_name; |
919 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) |
920 | + drv->driver.owner = drv->owner; |
921 | +#endif |
922 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) |
923 | + drv->driver.probe = xenbus_dev_probe; |
924 | + drv->driver.remove = xenbus_dev_remove; |
925 | + drv->driver.shutdown = xenbus_dev_shutdown; |
926 | +#endif |
927 | |
928 | - return driver_register(&drv->driver); |
929 | + mutex_lock(&xenwatch_mutex); |
930 | + ret = driver_register(&drv->driver); |
931 | + mutex_unlock(&xenwatch_mutex); |
932 | + return ret; |
933 | } |
934 | |
935 | -int __xenbus_register_frontend(struct xenbus_driver *drv, |
936 | - struct module *owner, const char *mod_name) |
937 | +int xenbus_register_frontend(struct xenbus_driver *drv) |
938 | { |
939 | int ret; |
940 | |
941 | drv->read_otherend_details = read_backend_details; |
942 | |
943 | - ret = xenbus_register_driver_common(drv, &xenbus_frontend, |
944 | - owner, mod_name); |
945 | + ret = xenbus_register_driver_common(drv, &xenbus_frontend); |
946 | if (ret) |
947 | return ret; |
948 | |
949 | @@ -347,7 +401,7 @@ int __xenbus_register_frontend(struct xe |
950 | |
951 | return 0; |
952 | } |
953 | -EXPORT_SYMBOL_GPL(__xenbus_register_frontend); |
954 | +EXPORT_SYMBOL_GPL(xenbus_register_frontend); |
955 | |
956 | void xenbus_unregister_driver(struct xenbus_driver *drv) |
957 | { |
958 | @@ -425,14 +479,20 @@ static void xenbus_dev_release(struct de |
959 | } |
960 | |
961 | static ssize_t xendev_show_nodename(struct device *dev, |
962 | - struct device_attribute *attr, char *buf) |
963 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) |
964 | + struct device_attribute *attr, |
965 | +#endif |
966 | + char *buf) |
967 | { |
968 | return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); |
969 | } |
970 | DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); |
971 | |
972 | static ssize_t xendev_show_devtype(struct device *dev, |
973 | - struct device_attribute *attr, char *buf) |
974 | +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) |
975 | + struct device_attribute *attr, |
976 | +#endif |
977 | + char *buf) |
978 | { |
979 | return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); |
980 | } |
981 | @@ -450,6 +510,9 @@ int xenbus_probe_node(struct xen_bus_typ |
982 | |
983 | enum xenbus_state state = xenbus_read_driver_state(nodename); |
984 | |
985 | + if (bus->error) |
986 | + return bus->error; |
987 | + |
988 | if (state != XenbusStateInitialising) { |
989 | /* Device is not new, so ignore it. This can happen if a |
990 | device is going away after switching to Closed. */ |
991 | @@ -474,6 +537,7 @@ int xenbus_probe_node(struct xen_bus_typ |
992 | xendev->devicetype = tmpstring; |
993 | init_completion(&xendev->down); |
994 | |
995 | + xendev->dev.parent = &bus->dev; |
996 | xendev->dev.bus = &bus->bus; |
997 | xendev->dev.release = xenbus_dev_release; |
998 | |
999 | @@ -488,16 +552,15 @@ int xenbus_probe_node(struct xen_bus_typ |
1000 | |
1001 | err = device_create_file(&xendev->dev, &dev_attr_nodename); |
1002 | if (err) |
1003 | - goto fail_unregister; |
1004 | - |
1005 | + goto unregister; |
1006 | err = device_create_file(&xendev->dev, &dev_attr_devtype); |
1007 | if (err) |
1008 | - goto fail_remove_file; |
1009 | + goto unregister; |
1010 | |
1011 | return 0; |
1012 | -fail_remove_file: |
1013 | +unregister: |
1014 | device_remove_file(&xendev->dev, &dev_attr_nodename); |
1015 | -fail_unregister: |
1016 | + device_remove_file(&xendev->dev, &dev_attr_devtype); |
1017 | device_unregister(&xendev->dev); |
1018 | fail: |
1019 | kfree(xendev); |
1020 | @@ -510,8 +573,7 @@ static int xenbus_probe_frontend(const c |
1021 | char *nodename; |
1022 | int err; |
1023 | |
1024 | - nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", |
1025 | - xenbus_frontend.root, type, name); |
1026 | + nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name); |
1027 | if (!nodename) |
1028 | return -ENOMEM; |
1029 | |
1030 | @@ -548,6 +610,9 @@ int xenbus_probe_devices(struct xen_bus_ |
1031 | char **dir; |
1032 | unsigned int i, dir_n; |
1033 | |
1034 | + if (bus->error) |
1035 | + return bus->error; |
1036 | + |
1037 | dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n); |
1038 | if (IS_ERR(dir)) |
1039 | return PTR_ERR(dir); |
1040 | @@ -584,15 +649,15 @@ static int strsep_len(const char *str, c |
1041 | return (len == 0) ? i : -ERANGE; |
1042 | } |
1043 | |
1044 | -void xenbus_dev_changed(const char *node, struct xen_bus_type *bus) |
1045 | +void dev_changed(const char *node, struct xen_bus_type *bus) |
1046 | { |
1047 | int exists, rootlen; |
1048 | struct xenbus_device *dev; |
1049 | char type[BUS_ID_SIZE]; |
1050 | const char *p, *root; |
1051 | |
1052 | - if (char_count(node, '/') < 2) |
1053 | - return; |
1054 | + if (bus->error || char_count(node, '/') < 2) |
1055 | + return; |
1056 | |
1057 | exists = xenbus_exists(XBT_NIL, node, ""); |
1058 | if (!exists) { |
1059 | @@ -626,7 +691,7 @@ static void frontend_changed(struct xenb |
1060 | { |
1061 | DPRINTK(""); |
1062 | |
1063 | - xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); |
1064 | + dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); |
1065 | } |
1066 | |
1067 | /* We watch for devices appearing and vanishing. */ |
1068 | @@ -702,9 +767,9 @@ static int resume_dev(struct device *dev |
1069 | |
1070 | if (drv->resume) { |
1071 | err = drv->resume(xdev); |
1072 | - if (err) { |
1073 | + if (err) { |
1074 | printk(KERN_WARNING |
1075 | - "xenbus: resume %s failed: %i\n", |
1076 | + "xenbus: resume %s failed: %i\n", |
1077 | dev->bus_id, err); |
1078 | return err; |
1079 | } |
1080 | @@ -725,7 +790,8 @@ void xenbus_suspend(void) |
1081 | { |
1082 | DPRINTK(""); |
1083 | |
1084 | - bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev); |
1085 | + if (!xenbus_frontend.error) |
1086 | + bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev); |
1087 | xenbus_backend_suspend(suspend_dev); |
1088 | xs_suspend(); |
1089 | } |
1090 | @@ -735,7 +801,8 @@ void xenbus_resume(void) |
1091 | { |
1092 | xb_init_comms(); |
1093 | xs_resume(); |
1094 | - bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev); |
1095 | + if (!xenbus_frontend.error) |
1096 | + bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev); |
1097 | xenbus_backend_resume(resume_dev); |
1098 | } |
1099 | EXPORT_SYMBOL_GPL(xenbus_resume); |
1100 | @@ -743,7 +810,8 @@ EXPORT_SYMBOL_GPL(xenbus_resume); |
1101 | void xenbus_suspend_cancel(void) |
1102 | { |
1103 | xs_suspend_cancel(); |
1104 | - bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev); |
1105 | + if (!xenbus_frontend.error) |
1106 | + bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev); |
1107 | xenbus_backend_resume(suspend_cancel_dev); |
1108 | } |
1109 | EXPORT_SYMBOL_GPL(xenbus_suspend_cancel); |
1110 | @@ -771,7 +839,8 @@ void unregister_xenstore_notifier(struct |
1111 | } |
1112 | EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); |
1113 | |
1114 | -void xenbus_probe(struct work_struct *unused) |
1115 | + |
1116 | +void xenbus_probe(void *unused) |
1117 | { |
1118 | BUG_ON((xenstored_ready <= 0)); |
1119 | |
1120 | @@ -784,68 +853,177 @@ void xenbus_probe(struct work_struct *un |
1121 | blocking_notifier_call_chain(&xenstore_chain, 0, NULL); |
1122 | } |
1123 | |
1124 | -static int __init xenbus_probe_init(void) |
1125 | + |
1126 | +#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) |
1127 | +static struct file_operations xsd_kva_fops; |
1128 | +static struct proc_dir_entry *xsd_kva_intf; |
1129 | +static struct proc_dir_entry *xsd_port_intf; |
1130 | + |
1131 | +static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) |
1132 | +{ |
1133 | + size_t size = vma->vm_end - vma->vm_start; |
1134 | + |
1135 | + if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) |
1136 | + return -EINVAL; |
1137 | + |
1138 | + if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn), |
1139 | + size, vma->vm_page_prot)) |
1140 | + return -EAGAIN; |
1141 | + |
1142 | + return 0; |
1143 | +} |
1144 | + |
1145 | +static int xsd_kva_read(char *page, char **start, off_t off, |
1146 | + int count, int *eof, void *data) |
1147 | +{ |
1148 | + int len; |
1149 | + |
1150 | + len = sprintf(page, "0x%p", xen_store_interface); |
1151 | + *eof = 1; |
1152 | + return len; |
1153 | +} |
1154 | + |
1155 | +static int xsd_port_read(char *page, char **start, off_t off, |
1156 | + int count, int *eof, void *data) |
1157 | +{ |
1158 | + int len; |
1159 | + |
1160 | + len = sprintf(page, "%d", xen_store_evtchn); |
1161 | + *eof = 1; |
1162 | + return len; |
1163 | +} |
1164 | +#endif |
1165 | + |
1166 | +static int xenbus_probe_init(void) |
1167 | { |
1168 | int err = 0; |
1169 | + unsigned long page = 0; |
1170 | |
1171 | DPRINTK(""); |
1172 | |
1173 | - err = -ENODEV; |
1174 | if (!is_running_on_xen()) |
1175 | - goto out_error; |
1176 | + return -ENODEV; |
1177 | |
1178 | /* Register ourselves with the kernel bus subsystem */ |
1179 | - err = bus_register(&xenbus_frontend.bus); |
1180 | - if (err) |
1181 | - goto out_error; |
1182 | - |
1183 | - err = xenbus_backend_bus_register(); |
1184 | - if (err) |
1185 | - goto out_unreg_front; |
1186 | + xenbus_frontend.error = bus_register(&xenbus_frontend.bus); |
1187 | + if (xenbus_frontend.error) |
1188 | + printk(KERN_WARNING |
1189 | + "XENBUS: Error registering frontend bus: %i\n", |
1190 | + xenbus_frontend.error); |
1191 | + xenbus_backend_bus_register(); |
1192 | |
1193 | /* |
1194 | * Domain0 doesn't have a store_evtchn or store_mfn yet. |
1195 | */ |
1196 | if (is_initial_xendomain()) { |
1197 | - /* dom0 not yet supported */ |
1198 | + struct evtchn_alloc_unbound alloc_unbound; |
1199 | + |
1200 | + /* Allocate page. */ |
1201 | + page = get_zeroed_page(GFP_KERNEL); |
1202 | + if (!page) |
1203 | + return -ENOMEM; |
1204 | + |
1205 | + xen_store_mfn = xen_start_info->store_mfn = |
1206 | + pfn_to_mfn(virt_to_phys((void *)page) >> |
1207 | + PAGE_SHIFT); |
1208 | + |
1209 | + /* Next allocate a local port which xenstored can bind to */ |
1210 | + alloc_unbound.dom = DOMID_SELF; |
1211 | + alloc_unbound.remote_dom = 0; |
1212 | + |
1213 | + err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, |
1214 | + &alloc_unbound); |
1215 | + if (err == -ENOSYS) |
1216 | + goto err; |
1217 | + BUG_ON(err); |
1218 | + xen_store_evtchn = xen_start_info->store_evtchn = |
1219 | + alloc_unbound.port; |
1220 | + |
1221 | +#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) |
1222 | + /* And finally publish the above info in /proc/xen */ |
1223 | + xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600); |
1224 | + if (xsd_kva_intf) { |
1225 | + memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops, |
1226 | + sizeof(xsd_kva_fops)); |
1227 | + xsd_kva_fops.mmap = xsd_kva_mmap; |
1228 | + xsd_kva_intf->proc_fops = &xsd_kva_fops; |
1229 | + xsd_kva_intf->read_proc = xsd_kva_read; |
1230 | + } |
1231 | + xsd_port_intf = create_xen_proc_entry("xsd_port", 0400); |
1232 | + if (xsd_port_intf) |
1233 | + xsd_port_intf->read_proc = xsd_port_read; |
1234 | +#endif |
1235 | + xen_store_interface = mfn_to_virt(xen_store_mfn); |
1236 | } else { |
1237 | xenstored_ready = 1; |
1238 | +#ifdef CONFIG_XEN |
1239 | xen_store_evtchn = xen_start_info->store_evtchn; |
1240 | xen_store_mfn = xen_start_info->store_mfn; |
1241 | + xen_store_interface = mfn_to_virt(xen_store_mfn); |
1242 | +#else |
1243 | + xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN); |
1244 | + xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN); |
1245 | + xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, |
1246 | + PAGE_SIZE); |
1247 | +#endif |
1248 | } |
1249 | - xen_store_interface = mfn_to_virt(xen_store_mfn); |
1250 | + |
1251 | + |
1252 | + xenbus_dev_init(); |
1253 | |
1254 | /* Initialize the interface to xenstore. */ |
1255 | err = xs_init(); |
1256 | if (err) { |
1257 | printk(KERN_WARNING |
1258 | "XENBUS: Error initializing xenstore comms: %i\n", err); |
1259 | - goto out_unreg_back; |
1260 | + goto err; |
1261 | + } |
1262 | + |
1263 | + /* Register ourselves with the kernel device subsystem */ |
1264 | + if (!xenbus_frontend.error) { |
1265 | + xenbus_frontend.error = device_register(&xenbus_frontend.dev); |
1266 | + if (xenbus_frontend.error) { |
1267 | + bus_unregister(&xenbus_frontend.bus); |
1268 | + printk(KERN_WARNING |
1269 | + "XENBUS: Error registering frontend device: %i\n", |
1270 | + xenbus_frontend.error); |
1271 | + } |
1272 | } |
1273 | + xenbus_backend_device_register(); |
1274 | |
1275 | if (!is_initial_xendomain()) |
1276 | xenbus_probe(NULL); |
1277 | |
1278 | return 0; |
1279 | |
1280 | - out_unreg_back: |
1281 | - xenbus_backend_bus_unregister(); |
1282 | + err: |
1283 | + if (page) |
1284 | + free_page(page); |
1285 | |
1286 | - out_unreg_front: |
1287 | - bus_unregister(&xenbus_frontend.bus); |
1288 | + /* |
1289 | + * Do not unregister the xenbus front/backend buses here. The buses |
1290 | + * must exist because front/backend drivers will use them when they are |
1291 | + * registered. |
1292 | + */ |
1293 | |
1294 | - out_error: |
1295 | return err; |
1296 | } |
1297 | |
1298 | +#ifdef CONFIG_XEN |
1299 | postcore_initcall(xenbus_probe_init); |
1300 | - |
1301 | -MODULE_LICENSE("GPL"); |
1302 | +MODULE_LICENSE("Dual BSD/GPL"); |
1303 | +#else |
1304 | +int xenbus_init(void) |
1305 | +{ |
1306 | + return xenbus_probe_init(); |
1307 | +} |
1308 | +#endif |
1309 | |
1310 | static int is_disconnected_device(struct device *dev, void *data) |
1311 | { |
1312 | struct xenbus_device *xendev = to_xenbus_device(dev); |
1313 | struct device_driver *drv = data; |
1314 | + struct xenbus_driver *xendrv; |
1315 | |
1316 | /* |
1317 | * A device with no driver will never connect. We care only about |
1318 | @@ -858,11 +1036,15 @@ static int is_disconnected_device(struct |
1319 | if (drv && (dev->driver != drv)) |
1320 | return 0; |
1321 | |
1322 | - return (xendev->state != XenbusStateConnected); |
1323 | + xendrv = to_xenbus_driver(dev->driver); |
1324 | + return (xendev->state < XenbusStateConnected || |
1325 | + (xendrv->is_ready && !xendrv->is_ready(xendev))); |
1326 | } |
1327 | |
1328 | static int exists_disconnected_device(struct device_driver *drv) |
1329 | { |
1330 | + if (xenbus_frontend.error) |
1331 | + return xenbus_frontend.error; |
1332 | return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, |
1333 | is_disconnected_device); |
1334 | } |
1335 | @@ -871,6 +1053,7 @@ static int print_device_status(struct de |
1336 | { |
1337 | struct xenbus_device *xendev = to_xenbus_device(dev); |
1338 | struct device_driver *drv = data; |
1339 | + struct xenbus_driver *xendrv; |
1340 | |
1341 | /* Is this operation limited to a particular driver? */ |
1342 | if (drv && (dev->driver != drv)) |
1343 | @@ -880,12 +1063,23 @@ static int print_device_status(struct de |
1344 | /* Information only: is this too noisy? */ |
1345 | printk(KERN_INFO "XENBUS: Device with no driver: %s\n", |
1346 | xendev->nodename); |
1347 | - } else if (xendev->state != XenbusStateConnected) { |
1348 | + return 0; |
1349 | + } |
1350 | + |
1351 | + if (xendev->state < XenbusStateConnected) { |
1352 | + enum xenbus_state rstate = XenbusStateUnknown; |
1353 | + if (xendev->otherend) |
1354 | + rstate = xenbus_read_driver_state(xendev->otherend); |
1355 | printk(KERN_WARNING "XENBUS: Timeout connecting " |
1356 | - "to device: %s (state %d)\n", |
1357 | - xendev->nodename, xendev->state); |
1358 | + "to device: %s (local state %d, remote state %d)\n", |
1359 | + xendev->nodename, xendev->state, rstate); |
1360 | } |
1361 | |
1362 | + xendrv = to_xenbus_driver(dev->driver); |
1363 | + if (xendrv->is_ready && !xendrv->is_ready(xendev)) |
1364 | + printk(KERN_WARNING "XENBUS: Device not ready: %s\n", |
1365 | + xendev->nodename); |
1366 | + |
1367 | return 0; |
1368 | } |
1369 | |
1370 | @@ -893,7 +1087,7 @@ static int print_device_status(struct de |
1371 | static int ready_to_wait_for_devices; |
1372 | |
1373 | /* |
1374 | - * On a 10 second timeout, wait for all devices currently configured. We need |
1375 | + * On a 5-minute timeout, wait for all devices currently configured. We need |
1376 | * to do this to guarantee that the filesystems and / or network devices |
1377 | * needed for boot are available, before we can allow the boot to proceed. |
1378 | * |
1379 | @@ -908,18 +1102,30 @@ static int ready_to_wait_for_devices; |
1380 | */ |
1381 | static void wait_for_devices(struct xenbus_driver *xendrv) |
1382 | { |
1383 | - unsigned long timeout = jiffies + 10*HZ; |
1384 | + unsigned long start = jiffies; |
1385 | struct device_driver *drv = xendrv ? &xendrv->driver : NULL; |
1386 | + unsigned int seconds_waited = 0; |
1387 | |
1388 | if (!ready_to_wait_for_devices || !is_running_on_xen()) |
1389 | return; |
1390 | |
1391 | while (exists_disconnected_device(drv)) { |
1392 | - if (time_after(jiffies, timeout)) |
1393 | - break; |
1394 | + if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { |
1395 | + if (!seconds_waited) |
1396 | + printk(KERN_WARNING "XENBUS: Waiting for " |
1397 | + "devices to initialise: "); |
1398 | + seconds_waited += 5; |
1399 | + printk("%us...", 300 - seconds_waited); |
1400 | + if (seconds_waited == 300) |
1401 | + break; |
1402 | + } |
1403 | + |
1404 | schedule_timeout_interruptible(HZ/10); |
1405 | } |
1406 | |
1407 | + if (seconds_waited) |
1408 | + printk("\n"); |
1409 | + |
1410 | bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, |
1411 | print_device_status); |
1412 | } |
1413 | @@ -927,10 +1133,18 @@ static void wait_for_devices(struct xenb |
1414 | #ifndef MODULE |
1415 | static int __init boot_wait_for_devices(void) |
1416 | { |
1417 | - ready_to_wait_for_devices = 1; |
1418 | - wait_for_devices(NULL); |
1419 | + if (!xenbus_frontend.error) { |
1420 | + ready_to_wait_for_devices = 1; |
1421 | + wait_for_devices(NULL); |
1422 | + } |
1423 | return 0; |
1424 | } |
1425 | |
1426 | late_initcall(boot_wait_for_devices); |
1427 | #endif |
1428 | + |
1429 | +int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)) |
1430 | +{ |
1431 | + return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn); |
1432 | +} |
1433 | +EXPORT_SYMBOL_GPL(xenbus_for_each_frontend); |
1434 | Index: head-2008-04-15/drivers/xen/xenbus/xenbus_probe.h |
1435 | =================================================================== |
1436 | --- head-2008-04-15.orig/drivers/xen/xenbus/xenbus_probe.h 2008-04-15 09:41:09.000000000 +0200 |
1437 | +++ head-2008-04-15/drivers/xen/xenbus/xenbus_probe.h 2008-04-15 09:59:33.000000000 +0200 |
1438 | @@ -5,23 +5,23 @@ |
1439 | * |
1440 | * Copyright (C) 2005 Rusty Russell, IBM Corporation |
1441 | * Copyright (C) 2005 XenSource Ltd. |
1442 | - * |
1443 | + * |
1444 | * This program is free software; you can redistribute it and/or |
1445 | * modify it under the terms of the GNU General Public License version 2 |
1446 | * as published by the Free Software Foundation; or, when distributed |
1447 | * separately from the Linux kernel or incorporated into other |
1448 | * software packages, subject to the following license: |
1449 | - * |
1450 | + * |
1451 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
1452 | * of this source file (the "Software"), to deal in the Software without |
1453 | * restriction, including without limitation the rights to use, copy, modify, |
1454 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, |
1455 | * and to permit persons to whom the Software is furnished to do so, subject to |
1456 | * the following conditions: |
1457 | - * |
1458 | + * |
1459 | * The above copyright notice and this permission notice shall be included in |
1460 | * all copies or substantial portions of the Software. |
1461 | - * |
1462 | + * |
1463 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
1464 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
1465 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
1466 | @@ -34,41 +34,42 @@ |
1467 | #ifndef _XENBUS_PROBE_H |
1468 | #define _XENBUS_PROBE_H |
1469 | |
1470 | -#ifdef CONFIG_XEN_BACKEND |
1471 | +#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) |
1472 | extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); |
1473 | extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); |
1474 | extern void xenbus_backend_probe_and_watch(void); |
1475 | -extern int xenbus_backend_bus_register(void); |
1476 | -extern void xenbus_backend_bus_unregister(void); |
1477 | +extern void xenbus_backend_bus_register(void); |
1478 | +extern void xenbus_backend_device_register(void); |
1479 | #else |
1480 | static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {} |
1481 | static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {} |
1482 | static inline void xenbus_backend_probe_and_watch(void) {} |
1483 | -static inline int xenbus_backend_bus_register(void) { return 0; } |
1484 | -static inline void xenbus_backend_bus_unregister(void) {} |
1485 | +static inline void xenbus_backend_bus_register(void) {} |
1486 | +static inline void xenbus_backend_device_register(void) {} |
1487 | #endif |
1488 | |
1489 | struct xen_bus_type |
1490 | { |
1491 | char *root; |
1492 | + int error; |
1493 | unsigned int levels; |
1494 | int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename); |
1495 | int (*probe)(const char *type, const char *dir); |
1496 | struct bus_type bus; |
1497 | + struct device dev; |
1498 | }; |
1499 | |
1500 | extern int xenbus_match(struct device *_dev, struct device_driver *_drv); |
1501 | extern int xenbus_dev_probe(struct device *_dev); |
1502 | extern int xenbus_dev_remove(struct device *_dev); |
1503 | extern int xenbus_register_driver_common(struct xenbus_driver *drv, |
1504 | - struct xen_bus_type *bus, |
1505 | - struct module *owner, |
1506 | - const char *mod_name); |
1507 | + struct xen_bus_type *bus); |
1508 | extern int xenbus_probe_node(struct xen_bus_type *bus, |
1509 | const char *type, |
1510 | const char *nodename); |
1511 | extern int xenbus_probe_devices(struct xen_bus_type *bus); |
1512 | |
1513 | -extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus); |
1514 | +extern void dev_changed(const char *node, struct xen_bus_type *bus); |
1515 | |
1516 | #endif |
1517 | + |
1518 | Index: head-2008-04-15/drivers/xen/xenbus/xenbus_xs.c |
1519 | =================================================================== |
1520 | --- head-2008-04-15.orig/drivers/xen/xenbus/xenbus_xs.c 2008-04-15 09:41:09.000000000 +0200 |
1521 | +++ head-2008-04-15/drivers/xen/xenbus/xenbus_xs.c 2008-04-15 09:59:33.000000000 +0200 |
1522 | @@ -5,23 +5,23 @@ |
1523 | * and we use xenbus_comms for communication. |
1524 | * |
1525 | * Copyright (C) 2005 Rusty Russell, IBM Corporation |
1526 | - * |
1527 | + * |
1528 | * This program is free software; you can redistribute it and/or |
1529 | * modify it under the terms of the GNU General Public License version 2 |
1530 | * as published by the Free Software Foundation; or, when distributed |
1531 | * separately from the Linux kernel or incorporated into other |
1532 | * software packages, subject to the following license: |
1533 | - * |
1534 | + * |
1535 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
1536 | * of this source file (the "Software"), to deal in the Software without |
1537 | * restriction, including without limitation the rights to use, copy, modify, |
1538 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, |
1539 | * and to permit persons to whom the Software is furnished to do so, subject to |
1540 | * the following conditions: |
1541 | - * |
1542 | + * |
1543 | * The above copyright notice and this permission notice shall be included in |
1544 | * all copies or substantial portions of the Software. |
1545 | - * |
1546 | + * |
1547 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
1548 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
1549 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
1550 | @@ -47,6 +47,14 @@ |
1551 | #include <xen/xenbus.h> |
1552 | #include "xenbus_comms.h" |
1553 | |
1554 | +#ifdef HAVE_XEN_PLATFORM_COMPAT_H |
1555 | +#include <xen/platform-compat.h> |
1556 | +#endif |
1557 | + |
1558 | +#ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */ |
1559 | +#define PF_NOFREEZE 0 |
1560 | +#endif |
1561 | + |
1562 | struct xs_stored_msg { |
1563 | struct list_head list; |
1564 | |
1565 | @@ -108,7 +116,7 @@ static DEFINE_SPINLOCK(watch_events_lock |
1566 | * carrying out work. |
1567 | */ |
1568 | static pid_t xenwatch_pid; |
1569 | -static DEFINE_MUTEX(xenwatch_mutex); |
1570 | +/* static */ DEFINE_MUTEX(xenwatch_mutex); |
1571 | static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq); |
1572 | |
1573 | static int get_error(const char *errorstring) |
1574 | @@ -177,7 +185,7 @@ void *xenbus_dev_request_and_reply(struc |
1575 | |
1576 | mutex_unlock(&xs_state.request_mutex); |
1577 | |
1578 | - if ((msg->type == XS_TRANSACTION_END) || |
1579 | + if ((req_msg.type == XS_TRANSACTION_END) || |
1580 | ((req_msg.type == XS_TRANSACTION_START) && |
1581 | (msg->type == XS_ERROR))) |
1582 | up_read(&xs_state.transaction_mutex); |
1583 | @@ -213,7 +221,7 @@ static void *xs_talkv(struct xenbus_tran |
1584 | } |
1585 | |
1586 | for (i = 0; i < num_vecs; i++) { |
1587 | - err = xb_write(iovec[i].iov_base, iovec[i].iov_len); |
1588 | + err = xb_write(iovec[i].iov_base, iovec[i].iov_len); |
1589 | if (err) { |
1590 | mutex_unlock(&xs_state.request_mutex); |
1591 | return ERR_PTR(err); |
1592 | @@ -283,9 +291,9 @@ static char *join(const char *dir, const |
1593 | char *buffer; |
1594 | |
1595 | if (strlen(name) == 0) |
1596 | - buffer = kasprintf(GFP_KERNEL, "%s", dir); |
1597 | + buffer = kasprintf(GFP_KERNEL|__GFP_HIGH, "%s", dir); |
1598 | else |
1599 | - buffer = kasprintf(GFP_KERNEL, "%s/%s", dir, name); |
1600 | + buffer = kasprintf(GFP_KERNEL|__GFP_HIGH, "%s/%s", dir, name); |
1601 | return (!buffer) ? ERR_PTR(-ENOMEM) : buffer; |
1602 | } |
1603 | |
1604 | @@ -297,7 +305,7 @@ static char **split(char *strings, unsig |
1605 | *num = count_strings(strings, len); |
1606 | |
1607 | /* Transfer to one big alloc for easy freeing. */ |
1608 | - ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL); |
1609 | + ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL|__GFP_HIGH); |
1610 | if (!ret) { |
1611 | kfree(strings); |
1612 | return ERR_PTR(-ENOMEM); |
1613 | @@ -498,7 +506,7 @@ int xenbus_printf(struct xenbus_transact |
1614 | #define PRINTF_BUFFER_SIZE 4096 |
1615 | char *printf_buffer; |
1616 | |
1617 | - printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL); |
1618 | + printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL|__GFP_HIGH); |
1619 | if (printf_buffer == NULL) |
1620 | return -ENOMEM; |
1621 | |
1622 | @@ -621,6 +629,8 @@ void unregister_xenbus_watch(struct xenb |
1623 | char token[sizeof(watch) * 2 + 1]; |
1624 | int err; |
1625 | |
1626 | + BUG_ON(watch->flags & XBWF_new_thread); |
1627 | + |
1628 | sprintf(token, "%lX", (long)watch); |
1629 | |
1630 | down_read(&xs_state.watch_mutex); |
1631 | @@ -638,11 +648,6 @@ void unregister_xenbus_watch(struct xenb |
1632 | |
1633 | up_read(&xs_state.watch_mutex); |
1634 | |
1635 | - /* Make sure there are no callbacks running currently (unless |
1636 | - its us) */ |
1637 | - if (current->pid != xenwatch_pid) |
1638 | - mutex_lock(&xenwatch_mutex); |
1639 | - |
1640 | /* Cancel pending watch events. */ |
1641 | spin_lock(&watch_events_lock); |
1642 | list_for_each_entry_safe(msg, tmp, &watch_events, list) { |
1643 | @@ -654,8 +659,11 @@ void unregister_xenbus_watch(struct xenb |
1644 | } |
1645 | spin_unlock(&watch_events_lock); |
1646 | |
1647 | - if (current->pid != xenwatch_pid) |
1648 | + /* Flush any currently-executing callback, unless we are it. :-) */ |
1649 | + if (current->pid != xenwatch_pid) { |
1650 | + mutex_lock(&xenwatch_mutex); |
1651 | mutex_unlock(&xenwatch_mutex); |
1652 | + } |
1653 | } |
1654 | EXPORT_SYMBOL_GPL(unregister_xenbus_watch); |
1655 | |
1656 | @@ -693,11 +701,30 @@ void xs_suspend_cancel(void) |
1657 | up_write(&xs_state.transaction_mutex); |
1658 | } |
1659 | |
1660 | +static int xenwatch_handle_callback(void *data) |
1661 | +{ |
1662 | + struct xs_stored_msg *msg = data; |
1663 | + |
1664 | + msg->u.watch.handle->callback(msg->u.watch.handle, |
1665 | + (const char **)msg->u.watch.vec, |
1666 | + msg->u.watch.vec_size); |
1667 | + |
1668 | + kfree(msg->u.watch.vec); |
1669 | + kfree(msg); |
1670 | + |
1671 | + /* Kill this kthread if we were spawned just for this callback. */ |
1672 | + if (current->pid != xenwatch_pid) |
1673 | + do_exit(0); |
1674 | + |
1675 | + return 0; |
1676 | +} |
1677 | + |
1678 | static int xenwatch_thread(void *unused) |
1679 | { |
1680 | struct list_head *ent; |
1681 | struct xs_stored_msg *msg; |
1682 | |
1683 | + current->flags |= PF_NOFREEZE; |
1684 | for (;;) { |
1685 | wait_event_interruptible(watch_events_waitq, |
1686 | !list_empty(&watch_events)); |
1687 | @@ -713,17 +740,29 @@ static int xenwatch_thread(void *unused) |
1688 | list_del(ent); |
1689 | spin_unlock(&watch_events_lock); |
1690 | |
1691 | - if (ent != &watch_events) { |
1692 | - msg = list_entry(ent, struct xs_stored_msg, list); |
1693 | - msg->u.watch.handle->callback( |
1694 | - msg->u.watch.handle, |
1695 | - (const char **)msg->u.watch.vec, |
1696 | - msg->u.watch.vec_size); |
1697 | - kfree(msg->u.watch.vec); |
1698 | - kfree(msg); |
1699 | + if (ent == &watch_events) { |
1700 | + mutex_unlock(&xenwatch_mutex); |
1701 | + continue; |
1702 | } |
1703 | |
1704 | - mutex_unlock(&xenwatch_mutex); |
1705 | + msg = list_entry(ent, struct xs_stored_msg, list); |
1706 | + |
1707 | + /* |
1708 | + * Unlock the mutex before running an XBWF_new_thread |
1709 | + * handler. kthread_run can block which can deadlock |
1710 | + * against unregister_xenbus_watch() if we need to |
1711 | + * unregister other watches in order to make |
1712 | + * progress. This can occur on resume before the swap |
1713 | + * device is attached. |
1714 | + */ |
1715 | + if (msg->u.watch.handle->flags & XBWF_new_thread) { |
1716 | + mutex_unlock(&xenwatch_mutex); |
1717 | + kthread_run(xenwatch_handle_callback, |
1718 | + msg, "xenwatch_cb"); |
1719 | + } else { |
1720 | + xenwatch_handle_callback(msg); |
1721 | + mutex_unlock(&xenwatch_mutex); |
1722 | + } |
1723 | } |
1724 | |
1725 | return 0; |
1726 | @@ -751,7 +790,7 @@ static int process_msg(void) |
1727 | } |
1728 | |
1729 | |
1730 | - msg = kmalloc(sizeof(*msg), GFP_KERNEL); |
1731 | + msg = kmalloc(sizeof(*msg), GFP_KERNEL|__GFP_HIGH); |
1732 | if (msg == NULL) { |
1733 | err = -ENOMEM; |
1734 | goto out; |
1735 | @@ -763,7 +802,7 @@ static int process_msg(void) |
1736 | goto out; |
1737 | } |
1738 | |
1739 | - body = kmalloc(msg->hdr.len + 1, GFP_KERNEL); |
1740 | + body = kmalloc(msg->hdr.len + 1, GFP_KERNEL|__GFP_HIGH); |
1741 | if (body == NULL) { |
1742 | kfree(msg); |
1743 | err = -ENOMEM; |
1744 | @@ -817,6 +856,7 @@ static int xenbus_thread(void *unused) |
1745 | { |
1746 | int err; |
1747 | |
1748 | + current->flags |= PF_NOFREEZE; |
1749 | for (;;) { |
1750 | err = process_msg(); |
1751 | if (err) |
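The xenwatch changes above only affect watches that set XBWF_new_thread: their callbacks now run in a dedicated "xenwatch_cb" kthread spawned by xenwatch_handle_callback(), and the new BUG_ON() means such a watch must never be passed to unregister_xenbus_watch(). A hedged sketch of registering a watch this way, assuming the xenbus_watch fields (node, callback, flags) and register_xenbus_watch() provided by <xen/xenbus.h> in this tree; the node path and callback body are illustrative:

    #include <xen/xenbus.h>

    static void slow_watch_cb(struct xenbus_watch *watch,
                              const char **vec, unsigned int len)
    {
            /* Long-running work; may itself register further watches. */
    }

    static struct xenbus_watch slow_watch = {
            .node     = "device",           /* illustrative xenstore path */
            .callback = slow_watch_cb,
            .flags    = XBWF_new_thread,    /* run callback in its own kthread */
    };

    /* Typically called from driver init once xenstore is up:
     *      err = register_xenbus_watch(&slow_watch);
     */
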
1752 | Index: head-2008-04-15/include/xen/interface/elfnote.h |
1753 | =================================================================== |
1754 | --- head-2008-04-15.orig/include/xen/interface/elfnote.h 2008-04-15 09:41:09.000000000 +0200 |
1755 | +++ head-2008-04-15/include/xen/interface/elfnote.h 2008-04-15 09:59:33.000000000 +0200 |
1756 | @@ -3,6 +3,24 @@ |
1757 | * |
1758 | * Definitions used for the Xen ELF notes. |
1759 | * |
1760 | + * Permission is hereby granted, free of charge, to any person obtaining a copy |
1761 | + * of this software and associated documentation files (the "Software"), to |
1762 | + * deal in the Software without restriction, including without limitation the |
1763 | + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
1764 | + * sell copies of the Software, and to permit persons to whom the Software is |
1765 | + * furnished to do so, subject to the following conditions: |
1766 | + * |
1767 | + * The above copyright notice and this permission notice shall be included in |
1768 | + * all copies or substantial portions of the Software. |
1769 | + * |
1770 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
1771 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
1772 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
1773 | + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
1774 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
1775 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
1776 | + * DEALINGS IN THE SOFTWARE. |
1777 | + * |
1778 | * Copyright (c) 2006, Ian Campbell, XenSource Ltd. |
1779 | */ |
1780 | |
1781 | @@ -10,7 +28,7 @@ |
1782 | #define __XEN_PUBLIC_ELFNOTE_H__ |
1783 | |
1784 | /* |
1785 | - * The notes should live in a SHT_NOTE segment and have "Xen" in the |
1786 | + * The notes should live in a PT_NOTE segment and have "Xen" in the |
1787 | * name field. |
1788 | * |
1789 | * Numeric types are either 4 or 8 bytes depending on the content of |
1790 | @@ -22,8 +40,6 @@ |
1791 | |
1792 | /* |
1793 | * NAME=VALUE pair (string). |
1794 | - * |
1795 | - * LEGACY: FEATURES and PAE |
1796 | */ |
1797 | #define XEN_ELFNOTE_INFO 0 |
1798 | |
1799 | @@ -90,7 +106,12 @@ |
1800 | #define XEN_ELFNOTE_LOADER 8 |
1801 | |
1802 | /* |
1803 | - * The kernel supports PAE (x86/32 only, string = "yes" or "no"). |
1804 | + * The kernel supports PAE (x86/32 only, string = "yes", "no" or |
1805 | + * "bimodal"). |
1806 | + * |
1807 | + * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting |
1808 | + * may be given as "yes,bimodal" which will cause older Xen to treat |
1809 | + * this kernel as PAE. |
1810 | * |
1811 | * LEGACY: PAE (n.b. The legacy interface included a provision to |
1812 | * indicate 'extended-cr3' support allowing L3 page tables to be |
1813 | @@ -120,6 +141,85 @@ |
1814 | */ |
1815 | #define XEN_ELFNOTE_BSD_SYMTAB 11 |
1816 | |
1817 | +/* |
1818 | + * The lowest address the hypervisor hole can begin at (numeric). |
1819 | + * |
1820 | + * This must not be set higher than HYPERVISOR_VIRT_START. Its presence |
1821 | + * also indicates to the hypervisor that the kernel can deal with the |
1822 | + * hole starting at a higher address. |
1823 | + */ |
1824 | +#define XEN_ELFNOTE_HV_START_LOW 12 |
1825 | + |
1826 | +/* |
1827 | + * List of maddr_t-sized mask/value pairs describing how to recognize |
1828 | + * (non-present) L1 page table entries carrying valid MFNs (numeric). |
1829 | + */ |
1830 | +#define XEN_ELFNOTE_L1_MFN_VALID 13 |
1831 | + |
1832 | +/* |
1833 | + * Whether or not the guest supports cooperative suspend cancellation. |
1834 | + */ |
1835 | +#define XEN_ELFNOTE_SUSPEND_CANCEL 14 |
1836 | + |
1837 | +/* |
1838 | + * The number of the highest elfnote defined. |
1839 | + */ |
1840 | +#define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUSPEND_CANCEL |
1841 | + |
1842 | +/* |
1843 | + * System information exported through crash notes. |
1844 | + * |
1845 | + * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO |
1846 | + * note in case of a system crash. This note will contain various |
1847 | + * information about the system, see xen/include/xen/elfcore.h. |
1848 | + */ |
1849 | +#define XEN_ELFNOTE_CRASH_INFO 0x1000001 |
1850 | + |
1851 | +/* |
1852 | + * System registers exported through crash notes. |
1853 | + * |
1854 | + * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS |
1855 | + * note per cpu in case of a system crash. This note is architecture |
1856 | + * specific and will contain registers not saved in the "CORE" note. |
1857 | + * See xen/include/xen/elfcore.h for more information. |
1858 | + */ |
1859 | +#define XEN_ELFNOTE_CRASH_REGS 0x1000002 |
1860 | + |
1861 | + |
1862 | +/* |
1863 | + * xen dump-core none note. |
1864 | + * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE |
1865 | + * in its dump file to indicate that the file is xen dump-core |
1866 | + * file. This note doesn't have any other information. |
1867 | + * See tools/libxc/xc_core.h for more information. |
1868 | + */ |
1869 | +#define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000 |
1870 | + |
1871 | +/* |
1872 | + * xen dump-core header note. |
1873 | + * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER |
1874 | + * in its dump file. |
1875 | + * See tools/libxc/xc_core.h for more information. |
1876 | + */ |
1877 | +#define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001 |
1878 | + |
1879 | +/* |
1880 | + * xen dump-core xen version note. |
1881 | + * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION |
1882 | + * in its dump file. It contains the xen version obtained via the |
1883 | + * XENVER hypercall. |
1884 | + * See tools/libxc/xc_core.h for more information. |
1885 | + */ |
1886 | +#define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002 |
1887 | + |
1888 | +/* |
1889 | + * xen dump-core format version note. |
1890 | + * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION |
1891 | + * in its dump file. It contains a format version identifier. |
1892 | + * See tools/libxc/xc_core.h for more information. |
1893 | + */ |
1894 | +#define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003 |
1895 | + |
1896 | #endif /* __XEN_PUBLIC_ELFNOTE_H__ */ |
1897 | |
1898 | /* |
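Guest kernels advertise these notes from a PT_NOTE segment whose name field is "Xen". A minimal sketch of emitting the new XEN_ELFNOTE_SUSPEND_CANCEL note from C, assuming the generic ELFNOTE32() helper in <linux/elfnote.h>; many kernels emit the equivalent note from their head assembly instead:

    #include <linux/elfnote.h>
    #include <xen/interface/elfnote.h>

    /* Advertise cooperative suspend-cancel support (numeric value 1). */
    ELFNOTE32("Xen", XEN_ELFNOTE_SUSPEND_CANCEL, 1);
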
1899 | Index: head-2008-04-15/include/xen/interface/event_channel.h |
1900 | =================================================================== |
1901 | --- head-2008-04-15.orig/include/xen/interface/event_channel.h 2008-04-15 09:41:09.000000000 +0200 |
1902 | +++ head-2008-04-15/include/xen/interface/event_channel.h 2008-04-15 09:59:33.000000000 +0200 |
1903 | @@ -1,7 +1,25 @@ |
1904 | /****************************************************************************** |
1905 | * event_channel.h |
1906 | - * |
1907 | + * |
1908 | * Event channels between domains. |
1909 | + * |
1910 | + * Permission is hereby granted, free of charge, to any person obtaining a copy |
1911 | + * of this software and associated documentation files (the "Software"), to |
1912 | + * deal in the Software without restriction, including without limitation the |
1913 | + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
1914 | + * sell copies of the Software, and to permit persons to whom the Software is |
1915 | + * furnished to do so, subject to the following conditions: |
1916 | + * |
1917 | + * The above copyright notice and this permission notice shall be included in |
1918 | + * all copies or substantial portions of the Software. |
1919 | + * |
1920 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
1921 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
1922 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
1923 | + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
1924 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
1925 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
1926 | + * DEALINGS IN THE SOFTWARE. |
1927 | * |
1928 | * Copyright (c) 2003-2004, K A Fraser. |
1929 | */ |
1930 | @@ -9,8 +27,15 @@ |
1931 | #ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__ |
1932 | #define __XEN_PUBLIC_EVENT_CHANNEL_H__ |
1933 | |
1934 | +/* |
1935 | + * Prototype for this hypercall is: |
1936 | + * int event_channel_op(int cmd, void *args) |
1937 | + * @cmd == EVTCHNOP_??? (event-channel operation). |
1938 | + * @args == Operation-specific extra arguments (NULL if none). |
1939 | + */ |
1940 | + |
1941 | typedef uint32_t evtchn_port_t; |
1942 | -DEFINE_GUEST_HANDLE(evtchn_port_t); |
1943 | +DEFINE_XEN_GUEST_HANDLE(evtchn_port_t); |
1944 | |
1945 | /* |
1946 | * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as |
1947 | @@ -20,13 +45,14 @@ DEFINE_GUEST_HANDLE(evtchn_port_t); |
1948 | * 1. If the caller is unprivileged then <dom> must be DOMID_SELF. |
1949 | * 2. <rdom> may be DOMID_SELF, allowing loopback connections. |
1950 | */ |
1951 | -#define EVTCHNOP_alloc_unbound 6 |
1952 | +#define EVTCHNOP_alloc_unbound 6 |
1953 | struct evtchn_alloc_unbound { |
1954 | - /* IN parameters */ |
1955 | - domid_t dom, remote_dom; |
1956 | - /* OUT parameters */ |
1957 | - evtchn_port_t port; |
1958 | + /* IN parameters */ |
1959 | + domid_t dom, remote_dom; |
1960 | + /* OUT parameters */ |
1961 | + evtchn_port_t port; |
1962 | }; |
1963 | +typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t; |
1964 | |
1965 | /* |
1966 | * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between |
1967 | @@ -39,29 +65,35 @@ struct evtchn_alloc_unbound { |
1968 | */ |
1969 | #define EVTCHNOP_bind_interdomain 0 |
1970 | struct evtchn_bind_interdomain { |
1971 | - /* IN parameters. */ |
1972 | - domid_t remote_dom; |
1973 | - evtchn_port_t remote_port; |
1974 | - /* OUT parameters. */ |
1975 | - evtchn_port_t local_port; |
1976 | + /* IN parameters. */ |
1977 | + domid_t remote_dom; |
1978 | + evtchn_port_t remote_port; |
1979 | + /* OUT parameters. */ |
1980 | + evtchn_port_t local_port; |
1981 | }; |
1982 | +typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t; |
1983 | |
1984 | /* |
1985 | * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified |
1986 | * vcpu. |
1987 | * NOTES: |
1988 | - * 1. A virtual IRQ may be bound to at most one event channel per vcpu. |
1989 | - * 2. The allocated event channel is bound to the specified vcpu. The binding |
1990 | - * may not be changed. |
1991 | + * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list |
1992 | + * in xen.h for the classification of each VIRQ. |
1993 | + * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be |
1994 | + * re-bound via EVTCHNOP_bind_vcpu. |
1995 | + * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu. |
1996 | + * The allocated event channel is bound to the specified vcpu and the |
1997 | + * binding cannot be changed. |
1998 | */ |
1999 | -#define EVTCHNOP_bind_virq 1 |
2000 | +#define EVTCHNOP_bind_virq 1 |
2001 | struct evtchn_bind_virq { |
2002 | - /* IN parameters. */ |
2003 | - uint32_t virq; |
2004 | - uint32_t vcpu; |
2005 | - /* OUT parameters. */ |
2006 | - evtchn_port_t port; |
2007 | + /* IN parameters. */ |
2008 | + uint32_t virq; |
2009 | + uint32_t vcpu; |
2010 | + /* OUT parameters. */ |
2011 | + evtchn_port_t port; |
2012 | }; |
2013 | +typedef struct evtchn_bind_virq evtchn_bind_virq_t; |
2014 | |
2015 | /* |
2016 | * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>. |
2017 | @@ -69,15 +101,16 @@ struct evtchn_bind_virq { |
2018 | * 1. A physical IRQ may be bound to at most one event channel per domain. |
2019 | * 2. Only a sufficiently-privileged domain may bind to a physical IRQ. |
2020 | */ |
2021 | -#define EVTCHNOP_bind_pirq 2 |
2022 | +#define EVTCHNOP_bind_pirq 2 |
2023 | struct evtchn_bind_pirq { |
2024 | - /* IN parameters. */ |
2025 | - uint32_t pirq; |
2026 | + /* IN parameters. */ |
2027 | + uint32_t pirq; |
2028 | #define BIND_PIRQ__WILL_SHARE 1 |
2029 | - uint32_t flags; /* BIND_PIRQ__* */ |
2030 | - /* OUT parameters. */ |
2031 | - evtchn_port_t port; |
2032 | + uint32_t flags; /* BIND_PIRQ__* */ |
2033 | + /* OUT parameters. */ |
2034 | + evtchn_port_t port; |
2035 | }; |
2036 | +typedef struct evtchn_bind_pirq evtchn_bind_pirq_t; |
2037 | |
2038 | /* |
2039 | * EVTCHNOP_bind_ipi: Bind a local event channel to receive events. |
2040 | @@ -85,33 +118,36 @@ struct evtchn_bind_pirq { |
2041 | * 1. The allocated event channel is bound to the specified vcpu. The binding |
2042 | * may not be changed. |
2043 | */ |
2044 | -#define EVTCHNOP_bind_ipi 7 |
2045 | +#define EVTCHNOP_bind_ipi 7 |
2046 | struct evtchn_bind_ipi { |
2047 | - uint32_t vcpu; |
2048 | - /* OUT parameters. */ |
2049 | - evtchn_port_t port; |
2050 | + uint32_t vcpu; |
2051 | + /* OUT parameters. */ |
2052 | + evtchn_port_t port; |
2053 | }; |
2054 | +typedef struct evtchn_bind_ipi evtchn_bind_ipi_t; |
2055 | |
2056 | /* |
2057 | * EVTCHNOP_close: Close a local event channel <port>. If the channel is |
2058 | * interdomain then the remote end is placed in the unbound state |
2059 | * (EVTCHNSTAT_unbound), awaiting a new connection. |
2060 | */ |
2061 | -#define EVTCHNOP_close 3 |
2062 | +#define EVTCHNOP_close 3 |
2063 | struct evtchn_close { |
2064 | - /* IN parameters. */ |
2065 | - evtchn_port_t port; |
2066 | + /* IN parameters. */ |
2067 | + evtchn_port_t port; |
2068 | }; |
2069 | +typedef struct evtchn_close evtchn_close_t; |
2070 | |
2071 | /* |
2072 | * EVTCHNOP_send: Send an event to the remote end of the channel whose local |
2073 | * endpoint is <port>. |
2074 | */ |
2075 | -#define EVTCHNOP_send 4 |
2076 | +#define EVTCHNOP_send 4 |
2077 | struct evtchn_send { |
2078 | - /* IN parameters. */ |
2079 | - evtchn_port_t port; |
2080 | + /* IN parameters. */ |
2081 | + evtchn_port_t port; |
2082 | }; |
2083 | +typedef struct evtchn_send evtchn_send_t; |
2084 | |
2085 | /* |
2086 | * EVTCHNOP_status: Get the current status of the communication channel which |
2087 | @@ -121,75 +157,108 @@ struct evtchn_send { |
2088 | * 2. Only a sufficiently-privileged domain may obtain the status of an event |
2089 | * channel for which <dom> is not DOMID_SELF. |
2090 | */ |
2091 | -#define EVTCHNOP_status 5 |
2092 | +#define EVTCHNOP_status 5 |
2093 | struct evtchn_status { |
2094 | - /* IN parameters */ |
2095 | - domid_t dom; |
2096 | - evtchn_port_t port; |
2097 | - /* OUT parameters */ |
2098 | -#define EVTCHNSTAT_closed 0 /* Channel is not in use. */ |
2099 | -#define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/ |
2100 | -#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */ |
2101 | -#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */ |
2102 | -#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */ |
2103 | -#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */ |
2104 | - uint32_t status; |
2105 | - uint32_t vcpu; /* VCPU to which this channel is bound. */ |
2106 | - union { |
2107 | - struct { |
2108 | - domid_t dom; |
2109 | - } unbound; /* EVTCHNSTAT_unbound */ |
2110 | - struct { |
2111 | - domid_t dom; |
2112 | - evtchn_port_t port; |
2113 | - } interdomain; /* EVTCHNSTAT_interdomain */ |
2114 | - uint32_t pirq; /* EVTCHNSTAT_pirq */ |
2115 | - uint32_t virq; /* EVTCHNSTAT_virq */ |
2116 | - } u; |
2117 | + /* IN parameters */ |
2118 | + domid_t dom; |
2119 | + evtchn_port_t port; |
2120 | + /* OUT parameters */ |
2121 | +#define EVTCHNSTAT_closed 0 /* Channel is not in use. */ |
2122 | +#define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/ |
2123 | +#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */ |
2124 | +#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */ |
2125 | +#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */ |
2126 | +#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */ |
2127 | + uint32_t status; |
2128 | + uint32_t vcpu; /* VCPU to which this channel is bound. */ |
2129 | + union { |
2130 | + struct { |
2131 | + domid_t dom; |
2132 | + } unbound; /* EVTCHNSTAT_unbound */ |
2133 | + struct { |
2134 | + domid_t dom; |
2135 | + evtchn_port_t port; |
2136 | + } interdomain; /* EVTCHNSTAT_interdomain */ |
2137 | + uint32_t pirq; /* EVTCHNSTAT_pirq */ |
2138 | + uint32_t virq; /* EVTCHNSTAT_virq */ |
2139 | + } u; |
2140 | }; |
2141 | +typedef struct evtchn_status evtchn_status_t; |
2142 | |
2143 | /* |
2144 | * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an |
2145 | * event is pending. |
2146 | * NOTES: |
2147 | - * 1. IPI- and VIRQ-bound channels always notify the vcpu that initialised |
2148 | - * the binding. This binding cannot be changed. |
2149 | - * 2. All other channels notify vcpu0 by default. This default is set when |
2150 | + * 1. IPI-bound channels always notify the vcpu specified at bind time. |
2151 | + * This binding cannot be changed. |
2152 | + * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time. |
2153 | + * This binding cannot be changed. |
2154 | + * 3. All other channels notify vcpu0 by default. This default is set when |
2155 | * the channel is allocated (a port that is freed and subsequently reused |
2156 | * has its binding reset to vcpu0). |
2157 | */ |
2158 | -#define EVTCHNOP_bind_vcpu 8 |
2159 | +#define EVTCHNOP_bind_vcpu 8 |
2160 | struct evtchn_bind_vcpu { |
2161 | - /* IN parameters. */ |
2162 | - evtchn_port_t port; |
2163 | - uint32_t vcpu; |
2164 | + /* IN parameters. */ |
2165 | + evtchn_port_t port; |
2166 | + uint32_t vcpu; |
2167 | }; |
2168 | +typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t; |
2169 | |
2170 | /* |
2171 | * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver |
2172 | * a notification to the appropriate VCPU if an event is pending. |
2173 | */ |
2174 | -#define EVTCHNOP_unmask 9 |
2175 | +#define EVTCHNOP_unmask 9 |
2176 | struct evtchn_unmask { |
2177 | - /* IN parameters. */ |
2178 | - evtchn_port_t port; |
2179 | + /* IN parameters. */ |
2180 | + evtchn_port_t port; |
2181 | +}; |
2182 | +typedef struct evtchn_unmask evtchn_unmask_t; |
2183 | + |
2184 | +/* |
2185 | + * EVTCHNOP_reset: Close all event channels associated with specified domain. |
2186 | + * NOTES: |
2187 | + * 1. <dom> may be specified as DOMID_SELF. |
2188 | + * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF. |
2189 | + */ |
2190 | +#define EVTCHNOP_reset 10 |
2191 | +struct evtchn_reset { |
2192 | + /* IN parameters. */ |
2193 | + domid_t dom; |
2194 | }; |
2195 | +typedef struct evtchn_reset evtchn_reset_t; |
2196 | |
2197 | +/* |
2198 | + * Argument to event_channel_op_compat() hypercall. Superseded by new |
2199 | + * event_channel_op() hypercall since 0x00030202. |
2200 | + */ |
2201 | struct evtchn_op { |
2202 | - uint32_t cmd; /* EVTCHNOP_* */ |
2203 | - union { |
2204 | - struct evtchn_alloc_unbound alloc_unbound; |
2205 | - struct evtchn_bind_interdomain bind_interdomain; |
2206 | - struct evtchn_bind_virq bind_virq; |
2207 | - struct evtchn_bind_pirq bind_pirq; |
2208 | - struct evtchn_bind_ipi bind_ipi; |
2209 | - struct evtchn_close close; |
2210 | - struct evtchn_send send; |
2211 | - struct evtchn_status status; |
2212 | - struct evtchn_bind_vcpu bind_vcpu; |
2213 | - struct evtchn_unmask unmask; |
2214 | - } u; |
2215 | + uint32_t cmd; /* EVTCHNOP_* */ |
2216 | + union { |
2217 | + struct evtchn_alloc_unbound alloc_unbound; |
2218 | + struct evtchn_bind_interdomain bind_interdomain; |
2219 | + struct evtchn_bind_virq bind_virq; |
2220 | + struct evtchn_bind_pirq bind_pirq; |
2221 | + struct evtchn_bind_ipi bind_ipi; |
2222 | + struct evtchn_close close; |
2223 | + struct evtchn_send send; |
2224 | + struct evtchn_status status; |
2225 | + struct evtchn_bind_vcpu bind_vcpu; |
2226 | + struct evtchn_unmask unmask; |
2227 | + } u; |
2228 | }; |
2229 | -DEFINE_GUEST_HANDLE_STRUCT(evtchn_op); |
2230 | +typedef struct evtchn_op evtchn_op_t; |
2231 | +DEFINE_XEN_GUEST_HANDLE(evtchn_op_t); |
2232 | |
2233 | #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */ |
2234 | + |
2235 | +/* |
2236 | + * Local variables: |
2237 | + * mode: C |
2238 | + * c-set-style: "BSD" |
2239 | + * c-basic-offset: 4 |
2240 | + * tab-width: 4 |
2241 | + * indent-tabs-mode: nil |
2242 | + * End: |
2243 | + */ |
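The hypercall prototype documented above pairs an EVTCHNOP_* command with a pointer to the matching argument structure. A hedged sketch of allocating an unbound port via EVTCHNOP_alloc_unbound, assuming the usual HYPERVISOR_event_channel_op() guest wrapper:

    #include <xen/interface/xen.h>            /* DOMID_SELF, domid_t */
    #include <xen/interface/event_channel.h>

    static int alloc_unbound_port(domid_t remote_dom, evtchn_port_t *port)
    {
            struct evtchn_alloc_unbound op = {
                    .dom        = DOMID_SELF,   /* allocate in our own domain */
                    .remote_dom = remote_dom,   /* peer allowed to bind to it */
            };
            int err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);

            if (!err)
                    *port = op.port;            /* OUT parameter filled in by Xen */
            return err;
    }
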
2244 | Index: head-2008-04-15/include/xen/interface/features.h |
2245 | =================================================================== |
2246 | --- head-2008-04-15.orig/include/xen/interface/features.h 2008-04-15 09:41:09.000000000 +0200 |
2247 | +++ head-2008-04-15/include/xen/interface/features.h 2008-04-15 09:59:33.000000000 +0200 |
2248 | @@ -1,7 +1,25 @@ |
2249 | /****************************************************************************** |
2250 | * features.h |
2251 | - * |
2252 | + * |
2253 | * Feature flags, reported by XENVER_get_features. |
2254 | + * |
2255 | + * Permission is hereby granted, free of charge, to any person obtaining a copy |
2256 | + * of this software and associated documentation files (the "Software"), to |
2257 | + * deal in the Software without restriction, including without limitation the |
2258 | + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
2259 | + * sell copies of the Software, and to permit persons to whom the Software is |
2260 | + * furnished to do so, subject to the following conditions: |
2261 | + * |
2262 | + * The above copyright notice and this permission notice shall be included in |
2263 | + * all copies or substantial portions of the Software. |
2264 | + * |
2265 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
2266 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
2267 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
2268 | + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
2269 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
2270 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
2271 | + * DEALINGS IN THE SOFTWARE. |
2272 | * |
2273 | * Copyright (c) 2006, Keir Fraser <keir@xensource.com> |
2274 | */ |
2275 | @@ -38,6 +56,19 @@ |
2276 | */ |
2277 | #define XENFEAT_pae_pgdir_above_4gb 4 |
2278 | |
2279 | +/* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */ |
2280 | +#define XENFEAT_mmu_pt_update_preserve_ad 5 |
2281 | + |
2282 | #define XENFEAT_NR_SUBMAPS 1 |
2283 | |
2284 | #endif /* __XEN_PUBLIC_FEATURES_H__ */ |
2285 | + |
2286 | +/* |
2287 | + * Local variables: |
2288 | + * mode: C |
2289 | + * c-set-style: "BSD" |
2290 | + * c-basic-offset: 4 |
2291 | + * tab-width: 4 |
2292 | + * indent-tabs-mode: nil |
2293 | + * End: |
2294 | + */ |
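XENVER_get_features reports the flags above as 32-bit submaps (currently only submap 0, per XENFEAT_NR_SUBMAPS). A hedged sketch of probing the new XENFEAT_mmu_pt_update_preserve_ad bit, assuming struct xen_feature_info and XENVER_get_features from <xen/interface/version.h> plus the HYPERVISOR_xen_version() wrapper:

    #include <xen/interface/version.h>
    #include <xen/interface/features.h>

    static int pt_update_preserves_ad(void)
    {
            struct xen_feature_info fi = { .submap_idx = 0 };   /* flags 0..31 */

            if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
                    return 0;
            return !!(fi.submap & (1U << XENFEAT_mmu_pt_update_preserve_ad));
    }
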
2295 | Index: head-2008-04-15/include/xen/interface/grant_table.h |
2296 | =================================================================== |
2297 | --- head-2008-04-15.orig/include/xen/interface/grant_table.h 2008-04-15 09:41:09.000000000 +0200 |
2298 | +++ head-2008-04-15/include/xen/interface/grant_table.h 2008-04-15 09:59:33.000000000 +0200 |
2299 | @@ -1,9 +1,9 @@ |
2300 | /****************************************************************************** |
2301 | * grant_table.h |
2302 | - * |
2303 | + * |
2304 | * Interface for granting foreign access to page frames, and receiving |
2305 | * page-ownership transfers. |
2306 | - * |
2307 | + * |
2308 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
2309 | * of this software and associated documentation files (the "Software"), to |
2310 | * deal in the Software without restriction, including without limitation the |
2311 | @@ -36,10 +36,10 @@ |
2312 | /* Some rough guidelines on accessing and updating grant-table entries |
2313 | * in a concurrency-safe manner. For more information, Linux contains a |
2314 | * reference implementation for guest OSes (arch/xen/kernel/grant_table.c). |
2315 | - * |
2316 | + * |
2317 | * NB. WMB is a no-op on current-generation x86 processors. However, a |
2318 | * compiler barrier will still be required. |
2319 | - * |
2320 | + * |
2321 | * Introducing a valid entry into the grant table: |
2322 | * 1. Write ent->domid. |
2323 | * 2. Write ent->frame: |
2324 | @@ -48,7 +48,7 @@ |
2325 | * frame, or zero if none. |
2326 | * 3. Write memory barrier (WMB). |
2327 | * 4. Write ent->flags, inc. valid type. |
2328 | - * |
2329 | + * |
2330 | * Invalidating an unused GTF_permit_access entry: |
2331 | * 1. flags = ent->flags. |
2332 | * 2. Observe that !(flags & (GTF_reading|GTF_writing)). |
2333 | @@ -60,7 +60,7 @@ |
2334 | * This cannot be done directly. Request assistance from the domain controller |
2335 | * which can set a timeout on the use of a grant entry and take necessary |
2336 | * action. (NB. This is not yet implemented!). |
2337 | - * |
2338 | + * |
2339 | * Invalidating an unused GTF_accept_transfer entry: |
2340 | * 1. flags = ent->flags. |
2341 | * 2. Observe that !(flags & GTF_transfer_committed). [*] |
2342 | @@ -78,7 +78,7 @@ |
2343 | * |
2344 | * Changing a GTF_permit_access from writable to read-only: |
2345 | * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing. |
2346 | - * |
2347 | + * |
2348 | * Changing a GTF_permit_access from read-only to writable: |
2349 | * Use SMP-safe bit-setting instruction. |
2350 | */ |
2351 | @@ -100,6 +100,7 @@ struct grant_entry { |
2352 | */ |
2353 | uint32_t frame; |
2354 | }; |
2355 | +typedef struct grant_entry grant_entry_t; |
2356 | |
2357 | /* |
2358 | * Type of grant entry. |
2359 | @@ -118,6 +119,7 @@ struct grant_entry { |
2360 | * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST] |
2361 | * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN] |
2362 | * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN] |
2363 | + * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST] |
2364 | */ |
2365 | #define _GTF_readonly (2) |
2366 | #define GTF_readonly (1U<<_GTF_readonly) |
2367 | @@ -125,6 +127,12 @@ struct grant_entry { |
2368 | #define GTF_reading (1U<<_GTF_reading) |
2369 | #define _GTF_writing (4) |
2370 | #define GTF_writing (1U<<_GTF_writing) |
2371 | +#define _GTF_PWT (5) |
2372 | +#define GTF_PWT (1U<<_GTF_PWT) |
2373 | +#define _GTF_PCD (6) |
2374 | +#define GTF_PCD (1U<<_GTF_PCD) |
2375 | +#define _GTF_PAT (7) |
2376 | +#define GTF_PAT (1U<<_GTF_PAT) |
2377 | |
2378 | /* |
2379 | * Subflags for GTF_accept_transfer: |
2380 | @@ -167,7 +175,7 @@ typedef uint32_t grant_handle_t; |
2381 | * 2. If GNTMAP_host_map is specified then a mapping will be added at |
2382 | * either a host virtual address in the current address space, or at |
2383 | * a PTE at the specified machine address. The type of mapping to |
2384 | - * perform is selected through the GNTMAP_contains_pte flag, and the |
2385 | + * perform is selected through the GNTMAP_contains_pte flag, and the |
2386 | * address is specified in <host_addr>. |
2387 | * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a |
2388 | * host mapping is destroyed by other means then it is *NOT* guaranteed |
2389 | @@ -185,6 +193,8 @@ struct gnttab_map_grant_ref { |
2390 | grant_handle_t handle; |
2391 | uint64_t dev_bus_addr; |
2392 | }; |
2393 | +typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t; |
2394 | +DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t); |
2395 | |
2396 | /* |
2397 | * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings |
2398 | @@ -206,6 +216,8 @@ struct gnttab_unmap_grant_ref { |
2399 | /* OUT parameters. */ |
2400 | int16_t status; /* GNTST_* */ |
2401 | }; |
2402 | +typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t; |
2403 | +DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t); |
2404 | |
2405 | /* |
2406 | * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least |
2407 | @@ -223,8 +235,10 @@ struct gnttab_setup_table { |
2408 | uint32_t nr_frames; |
2409 | /* OUT parameters. */ |
2410 | int16_t status; /* GNTST_* */ |
2411 | - ulong *frame_list; |
2412 | + XEN_GUEST_HANDLE(ulong) frame_list; |
2413 | }; |
2414 | +typedef struct gnttab_setup_table gnttab_setup_table_t; |
2415 | +DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t); |
2416 | |
2417 | /* |
2418 | * GNTTABOP_dump_table: Dump the contents of the grant table to the |
2419 | @@ -237,24 +251,28 @@ struct gnttab_dump_table { |
2420 | /* OUT parameters. */ |
2421 | int16_t status; /* GNTST_* */ |
2422 | }; |
2423 | +typedef struct gnttab_dump_table gnttab_dump_table_t; |
2424 | +DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t); |
2425 | |
2426 | /* |
2427 | * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The |
2428 | * foreign domain has previously registered its interest in the transfer via |
2429 | * <domid, ref>. |
2430 | - * |
2431 | + * |
2432 | * Note that, even if the transfer fails, the specified page no longer belongs |
2433 | * to the calling domain *unless* the error is GNTST_bad_page. |
2434 | */ |
2435 | #define GNTTABOP_transfer 4 |
2436 | struct gnttab_transfer { |
2437 | /* IN parameters. */ |
2438 | - unsigned long mfn; |
2439 | + xen_pfn_t mfn; |
2440 | domid_t domid; |
2441 | grant_ref_t ref; |
2442 | /* OUT parameters. */ |
2443 | int16_t status; |
2444 | }; |
2445 | +typedef struct gnttab_transfer gnttab_transfer_t; |
2446 | +DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t); |
2447 | |
2448 | |
2449 | /* |
2450 | @@ -281,21 +299,22 @@ struct gnttab_transfer { |
2451 | #define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref) |
2452 | |
2453 | #define GNTTABOP_copy 5 |
2454 | -struct gnttab_copy { |
2455 | - /* IN parameters. */ |
2456 | - struct { |
2457 | - union { |
2458 | - grant_ref_t ref; |
2459 | - unsigned long gmfn; |
2460 | - } u; |
2461 | - domid_t domid; |
2462 | - uint16_t offset; |
2463 | - } source, dest; |
2464 | - uint16_t len; |
2465 | - uint16_t flags; /* GNTCOPY_* */ |
2466 | - /* OUT parameters. */ |
2467 | - int16_t status; |
2468 | -}; |
2469 | +typedef struct gnttab_copy { |
2470 | + /* IN parameters. */ |
2471 | + struct { |
2472 | + union { |
2473 | + grant_ref_t ref; |
2474 | + xen_pfn_t gmfn; |
2475 | + } u; |
2476 | + domid_t domid; |
2477 | + uint16_t offset; |
2478 | + } source, dest; |
2479 | + uint16_t len; |
2480 | + uint16_t flags; /* GNTCOPY_* */ |
2481 | + /* OUT parameters. */ |
2482 | + int16_t status; |
2483 | +} gnttab_copy_t; |
2484 | +DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t); |
2485 | |
2486 | /* |
2487 | * GNTTABOP_query_size: Query the current and maximum sizes of the shared |
2488 | @@ -313,6 +332,31 @@ struct gnttab_query_size { |
2489 | uint32_t max_nr_frames; |
2490 | int16_t status; /* GNTST_* */ |
2491 | }; |
2492 | +typedef struct gnttab_query_size gnttab_query_size_t; |
2493 | +DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t); |
2494 | + |
2495 | +/* |
2496 | + * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings |
2497 | + * tracked by <handle> but atomically replace the page table entry with one |
2498 | + * pointing to the machine address under <new_addr>. <new_addr> will be |
2499 | + * redirected to the null entry. |
2500 | + * NOTES: |
2501 | + * 1. The call may fail in an undefined manner if either mapping is not |
2502 | + * tracked by <handle>. |
2503 | + * 2. After executing a batch of unmaps, it is guaranteed that no stale |
2504 | + * mappings will remain in the device or host TLBs. |
2505 | + */ |
2506 | +#define GNTTABOP_unmap_and_replace 7 |
2507 | +struct gnttab_unmap_and_replace { |
2508 | + /* IN parameters. */ |
2509 | + uint64_t host_addr; |
2510 | + uint64_t new_addr; |
2511 | + grant_handle_t handle; |
2512 | + /* OUT parameters. */ |
2513 | + int16_t status; /* GNTST_* */ |
2514 | +}; |
2515 | +typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t; |
2516 | +DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t); |
2517 | |
2518 | |
2519 | /* |
2520 | @@ -356,7 +400,8 @@ struct gnttab_query_size { |
2521 | #define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */ |
2522 | #define GNTST_permission_denied (-8) /* Not enough privilege for operation. */ |
2523 | #define GNTST_bad_page (-9) /* Specified page was invalid for op. */ |
2524 | -#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary */ |
2525 | +#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */ |
2526 | +#define GNTST_address_too_big (-11) /* transfer page address too large. */ |
2527 | |
2528 | #define GNTTABOP_error_msgs { \ |
2529 | "okay", \ |
2530 | @@ -369,7 +414,18 @@ struct gnttab_query_size { |
2531 | "no spare translation slot in the I/O MMU", \ |
2532 | "permission denied", \ |
2533 | "bad page", \ |
2534 | - "copy arguments cross page boundary" \ |
2535 | + "copy arguments cross page boundary", \ |
2536 | + "page address size too large" \ |
2537 | } |
2538 | |
2539 | #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */ |
2540 | + |
2541 | +/* |
2542 | + * Local variables: |
2543 | + * mode: C |
2544 | + * c-set-style: "BSD" |
2545 | + * c-basic-offset: 4 |
2546 | + * tab-width: 4 |
2547 | + * indent-tabs-mode: nil |
2548 | + * End: |
2549 | + */ |
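The "Introducing a valid entry into the grant table" steps in the header comment translate directly into code. A minimal sketch against the grant_entry_t layout above (GTF_permit_access and GTF_readonly come from this same header; wmb() is the guest's write barrier):

    static void make_entry_valid(grant_entry_t *ent, domid_t domid,
                                 uint32_t frame, int readonly)
    {
            ent->domid = domid;                             /* step 1 */
            ent->frame = frame;                             /* step 2 */
            wmb();                                          /* step 3: write barrier */
            ent->flags = GTF_permit_access |
                         (readonly ? GTF_readonly : 0);     /* step 4: valid type last */
    }
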
2550 | Index: head-2008-04-15/include/xen/interface/io/blkif.h |
2551 | =================================================================== |
2552 | --- head-2008-04-15.orig/include/xen/interface/io/blkif.h 2008-04-15 09:41:09.000000000 +0200 |
2553 | +++ head-2008-04-15/include/xen/interface/io/blkif.h 2008-04-15 09:59:33.000000000 +0200 |
2554 | @@ -1,7 +1,25 @@ |
2555 | /****************************************************************************** |
2556 | * blkif.h |
2557 | - * |
2558 | + * |
2559 | * Unified block-device I/O interface for Xen guest OSes. |
2560 | + * |
2561 | + * Permission is hereby granted, free of charge, to any person obtaining a copy |
2562 | + * of this software and associated documentation files (the "Software"), to |
2563 | + * deal in the Software without restriction, including without limitation the |
2564 | + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
2565 | + * sell copies of the Software, and to permit persons to whom the Software is |
2566 | + * furnished to do so, subject to the following conditions: |
2567 | + * |
2568 | + * The above copyright notice and this permission notice shall be included in |
2569 | + * all copies or substantial portions of the Software. |
2570 | + * |
2571 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
2572 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
2573 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
2574 | + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
2575 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
2576 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
2577 | + * DEALINGS IN THE SOFTWARE. |
2578 | * |
2579 | * Copyright (c) 2003-2004, Keir Fraser |
2580 | */ |
2581 | @@ -17,15 +35,17 @@ |
2582 | * notification can be made conditional on req_event (i.e., the generic |
2583 | * hold-off mechanism provided by the ring macros). Backends must set |
2584 | * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()). |
2585 | - * |
2586 | + * |
2587 | * Back->front notifications: When enqueuing a new response, sending a |
2588 | * notification can be made conditional on rsp_event (i.e., the generic |
2589 | * hold-off mechanism provided by the ring macros). Frontends must set |
2590 | * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()). |
2591 | */ |
2592 | |
2593 | -typedef uint16_t blkif_vdev_t; |
2594 | -typedef uint64_t blkif_sector_t; |
2595 | +#ifndef blkif_vdev_t |
2596 | +#define blkif_vdev_t uint16_t |
2597 | +#endif |
2598 | +#define blkif_sector_t uint64_t |
2599 | |
2600 | /* |
2601 | * REQUEST CODES. |
2602 | @@ -34,7 +54,7 @@ typedef uint64_t blkif_sector_t; |
2603 | #define BLKIF_OP_WRITE 1 |
2604 | /* |
2605 | * Recognised only if "feature-barrier" is present in backend xenbus info. |
2606 | - * The "feature_barrier" node contains a boolean indicating whether barrier |
2607 | + * The "feature-barrier" node contains a boolean indicating whether barrier |
2608 | * requests are likely to succeed or fail. Either way, a barrier request |
2609 | * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by |
2610 | * the underlying block-device hardware. The boolean simply indicates whether |
2611 | @@ -43,33 +63,50 @@ typedef uint64_t blkif_sector_t; |
2612 | * create the "feature-barrier" node! |
2613 | */ |
2614 | #define BLKIF_OP_WRITE_BARRIER 2 |
2615 | +/* |
2616 | + * Recognised if "feature-flush-cache" is present in backend xenbus |
2617 | + * info. A flush will ask the underlying storage hardware to flush its |
2618 | + * non-volatile caches as appropriate. The "feature-flush-cache" node |
2619 | + * contains a boolean indicating whether flush requests are likely to |
2620 | + * succeed or fail. Either way, a flush request may fail at any time |
2621 | + * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying |
2622 | + * block-device hardware. The boolean simply indicates whether or not it |
2623 | + * is worthwhile for the frontend to attempt flushes. If a backend does |
2624 | + * not recognise BLKIF_OP_FLUSH_DISKCACHE, it should *not* create the |
2625 | + * "feature-flush-cache" node! |
2626 | + */ |
2627 | +#define BLKIF_OP_FLUSH_DISKCACHE 3 |
2628 | |
2629 | /* |
2630 | * Maximum scatter/gather segments per request. |
2631 | - * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE. |
2632 | + * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE. |
2633 | * NB. This could be 12 if the ring indexes weren't stored in the same page. |
2634 | */ |
2635 | #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 |
2636 | |
2637 | +struct blkif_request_segment { |
2638 | + grant_ref_t gref; /* reference to I/O buffer frame */ |
2639 | + /* @first_sect: first sector in frame to transfer (inclusive). */ |
2640 | + /* @last_sect: last sector in frame to transfer (inclusive). */ |
2641 | + uint8_t first_sect, last_sect; |
2642 | +}; |
2643 | + |
2644 | struct blkif_request { |
2645 | - uint8_t operation; /* BLKIF_OP_??? */ |
2646 | - uint8_t nr_segments; /* number of segments */ |
2647 | - blkif_vdev_t handle; /* only for read/write requests */ |
2648 | - uint64_t id; /* private guest value, echoed in resp */ |
2649 | - blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ |
2650 | - struct blkif_request_segment { |
2651 | - grant_ref_t gref; /* reference to I/O buffer frame */ |
2652 | - /* @first_sect: first sector in frame to transfer (inclusive). */ |
2653 | - /* @last_sect: last sector in frame to transfer (inclusive). */ |
2654 | - uint8_t first_sect, last_sect; |
2655 | - } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
2656 | + uint8_t operation; /* BLKIF_OP_??? */ |
2657 | + uint8_t nr_segments; /* number of segments */ |
2658 | + blkif_vdev_t handle; /* only for read/write requests */ |
2659 | + uint64_t id; /* private guest value, echoed in resp */ |
2660 | + blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ |
2661 | + struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
2662 | }; |
2663 | +typedef struct blkif_request blkif_request_t; |
2664 | |
2665 | struct blkif_response { |
2666 | - uint64_t id; /* copied from request */ |
2667 | - uint8_t operation; /* copied from request */ |
2668 | - int16_t status; /* BLKIF_RSP_??? */ |
2669 | + uint64_t id; /* copied from request */ |
2670 | + uint8_t operation; /* copied from request */ |
2671 | + int16_t status; /* BLKIF_RSP_??? */ |
2672 | }; |
2673 | +typedef struct blkif_response blkif_response_t; |
2674 | |
2675 | /* |
2676 | * STATUS RETURN CODES. |
2677 | @@ -92,3 +129,13 @@ DEFINE_RING_TYPES(blkif, struct blkif_re |
2678 | #define VDISK_READONLY 0x4 |
2679 | |
2680 | #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */ |
2681 | + |
2682 | +/* |
2683 | + * Local variables: |
2684 | + * mode: C |
2685 | + * c-set-style: "BSD" |
2686 | + * c-basic-offset: 4 |
2687 | + * tab-width: 4 |
2688 | + * indent-tabs-mode: nil |
2689 | + * End: |
2690 | + */ |
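With blkif_request_segment now a named struct outside blkif_request, a frontend still fills requests exactly as before. A hedged sketch of a single-page read request; queuing it on the shared ring and notifying the backend via the RING_*/event-channel machinery is omitted:

    static void fill_read_request(blkif_request_t *req, blkif_vdev_t handle,
                                  uint64_t id, blkif_sector_t sector,
                                  grant_ref_t gref)
    {
            req->operation     = BLKIF_OP_READ;
            req->nr_segments   = 1;
            req->handle        = handle;
            req->id            = id;            /* echoed back in the response */
            req->sector_number = sector;
            req->seg[0].gref       = gref;      /* page granted to the backend */
            req->seg[0].first_sect = 0;
            req->seg[0].last_sect  = 7;         /* 8 x 512-byte sectors = one 4KiB page */
    }
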
2691 | Index: head-2008-04-15/include/xen/interface/io/console.h |
2692 | =================================================================== |
2693 | --- head-2008-04-15.orig/include/xen/interface/io/console.h 2008-04-15 09:41:09.000000000 +0200 |
2694 | +++ head-2008-04-15/include/xen/interface/io/console.h 2008-04-15 09:59:33.000000000 +0200 |
2695 | @@ -1,7 +1,25 @@ |
2696 | /****************************************************************************** |
2697 | * console.h |
2698 | - * |
2699 | + * |
2700 | * Console I/O interface for Xen guest OSes. |
2701 | + * |
2702 | + * Permission is hereby granted, free of charge, to any person obtaining a copy |
2703 | + * of this software and associated documentation files (the "Software"), to |
2704 | + * deal in the Software without restriction, including without limitation the |
2705 | + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
2706 | + * sell copies of the Software, and to permit persons to whom the Software is |
2707 | + * furnished to do so, subject to the following conditions: |
2708 | + * |
2709 | + * The above copyright notice and this permission notice shall be included in |
2710 | + * all copies or substantial portions of the Software. |
2711 | + * |
2712 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
2713 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
2714 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
2715 | + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
2716 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
2717 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
2718 | + * DEALINGS IN THE SOFTWARE. |
2719 | * |
2720 | * Copyright (c) 2005, Keir Fraser |
2721 | */ |
2722 | @@ -21,3 +39,13 @@ struct xencons_interface { |
2723 | }; |
2724 | |
2725 | #endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */ |
2726 | + |
2727 | +/* |
2728 | + * Local variables: |
2729 | + * mode: C |
2730 | + * c-set-style: "BSD" |
2731 | + * c-basic-offset: 4 |
2732 | + * tab-width: 4 |
2733 | + * indent-tabs-mode: nil |
2734 | + * End: |
2735 | + */ |
2736 | Index: head-2008-04-15/include/xen/interface/io/netif.h |
2737 | =================================================================== |
2738 | --- head-2008-04-15.orig/include/xen/interface/io/netif.h 2008-04-15 09:41:09.000000000 +0200 |
2739 | +++ head-2008-04-15/include/xen/interface/io/netif.h 2008-04-15 09:59:33.000000000 +0200 |
2740 | @@ -1,7 +1,25 @@ |
2741 | /****************************************************************************** |
2742 | * netif.h |
2743 | - * |
2744 | + * |
2745 | * Unified network-device I/O interface for Xen guest OSes. |
2746 | + * |
2747 | + * Permission is hereby granted, free of charge, to any person obtaining a copy |
2748 | + * of this software and associated documentation files (the "Software"), to |
2749 | + * deal in the Software without restriction, including without limitation the |
2750 | + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
2751 | + * sell copies of the Software, and to permit persons to whom the Software is |
2752 | + * furnished to do so, subject to the following conditions: |
2753 | + * |
2754 | + * The above copyright notice and this permission notice shall be included in |
2755 | + * all copies or substantial portions of the Software. |
2756 | + * |
2757 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
2758 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
2759 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
2760 | + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
2761 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
2762 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
2763 | + * DEALINGS IN THE SOFTWARE. |
2764 | * |
2765 | * Copyright (c) 2003-2004, Keir Fraser |
2766 | */ |
2767 | @@ -47,18 +65,21 @@ |
2768 | #define _NETTXF_extra_info (3) |
2769 | #define NETTXF_extra_info (1U<<_NETTXF_extra_info) |
2770 | |
2771 | -struct xen_netif_tx_request { |
2772 | +struct netif_tx_request { |
2773 | grant_ref_t gref; /* Reference to buffer page */ |
2774 | uint16_t offset; /* Offset within buffer page */ |
2775 | uint16_t flags; /* NETTXF_* */ |
2776 | uint16_t id; /* Echoed in response message. */ |
2777 | uint16_t size; /* Packet size in bytes. */ |
2778 | }; |
2779 | +typedef struct netif_tx_request netif_tx_request_t; |
2780 | |
2781 | /* Types of netif_extra_info descriptors. */ |
2782 | -#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ |
2783 | -#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ |
2784 | -#define XEN_NETIF_EXTRA_TYPE_MAX (2) |
2785 | +#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ |
2786 | +#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ |
2787 | +#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */ |
2788 | +#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */ |
2789 | +#define XEN_NETIF_EXTRA_TYPE_MAX (4) |
2790 | |
2791 | /* netif_extra_info flags. */ |
2792 | #define _XEN_NETIF_EXTRA_FLAG_MORE (0) |
2793 | @@ -71,49 +92,68 @@ struct xen_netif_tx_request { |
2794 | * This structure needs to fit within both netif_tx_request and |
2795 | * netif_rx_response for compatibility. |
2796 | */ |
2797 | -struct xen_netif_extra_info { |
2798 | - uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */ |
2799 | - uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */ |
2800 | - |
2801 | - union { |
2802 | - struct { |
2803 | - /* |
2804 | - * Maximum payload size of each segment. For |
2805 | - * example, for TCP this is just the path MSS. |
2806 | - */ |
2807 | - uint16_t size; |
2808 | - |
2809 | - /* |
2810 | - * GSO type. This determines the protocol of |
2811 | - * the packet and any extra features required |
2812 | - * to segment the packet properly. |
2813 | - */ |
2814 | - uint8_t type; /* XEN_NETIF_GSO_TYPE_* */ |
2815 | - |
2816 | - /* Future expansion. */ |
2817 | - uint8_t pad; |
2818 | - |
2819 | - /* |
2820 | - * GSO features. This specifies any extra GSO |
2821 | - * features required to process this packet, |
2822 | - * such as ECN support for TCPv4. |
2823 | - */ |
2824 | - uint16_t features; /* XEN_NETIF_GSO_FEAT_* */ |
2825 | - } gso; |
2826 | +struct netif_extra_info { |
2827 | + uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */ |
2828 | + uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */ |
2829 | + |
2830 | + union { |
2831 | + /* |
2832 | + * XEN_NETIF_EXTRA_TYPE_GSO: |
2833 | + */ |
2834 | + struct { |
2835 | + /* |
2836 | + * Maximum payload size of each segment. For example, for TCP this |
2837 | + * is just the path MSS. |
2838 | + */ |
2839 | + uint16_t size; |
2840 | + |
2841 | + /* |
2842 | + * GSO type. This determines the protocol of the packet and any |
2843 | + * extra features required to segment the packet properly. |
2844 | + */ |
2845 | + uint8_t type; /* XEN_NETIF_GSO_TYPE_* */ |
2846 | + |
2847 | + /* Future expansion. */ |
2848 | + uint8_t pad; |
2849 | + |
2850 | + /* |
2851 | + * GSO features. This specifies any extra GSO features required |
2852 | + * to process this packet, such as ECN support for TCPv4. |
2853 | + */ |
2854 | + uint16_t features; /* XEN_NETIF_GSO_FEAT_* */ |
2855 | + } gso; |
2856 | + |
2857 | + /* |
2858 | + * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}: |
2859 | + * Backend advertises availability via 'feature-multicast-control' |
2860 | + * xenbus node containing value '1'. |
2861 | + * Frontend requests this feature by advertising |
2862 | + * 'request-multicast-control' xenbus node containing value '1'. |
2863 | + * If multicast control is requested then multicast flooding is |
2864 | + * disabled and the frontend must explicitly register its interest |
2865 | + * in multicast groups using dummy transmit requests containing |
2866 | + * MCAST_{ADD,DEL} extra-info fragments. |
2867 | + */ |
2868 | + struct { |
2869 | + uint8_t addr[6]; /* Address to add/remove. */ |
2870 | + } mcast; |
2871 | |
2872 | - uint16_t pad[3]; |
2873 | - } u; |
2874 | + uint16_t pad[3]; |
2875 | + } u; |
2876 | }; |
2877 | +typedef struct netif_extra_info netif_extra_info_t; |
2878 | |
2879 | -struct xen_netif_tx_response { |
2880 | - uint16_t id; |
2881 | - int16_t status; /* NETIF_RSP_* */ |
2882 | +struct netif_tx_response { |
2883 | + uint16_t id; |
2884 | + int16_t status; /* NETIF_RSP_* */ |
2885 | }; |
2886 | +typedef struct netif_tx_response netif_tx_response_t; |
2887 | |
2888 | -struct xen_netif_rx_request { |
2889 | - uint16_t id; /* Echoed in response message. */ |
2890 | - grant_ref_t gref; /* Reference to incoming granted frame */ |
2891 | +struct netif_rx_request { |
2892 | + uint16_t id; /* Echoed in response message. */ |
2893 | + grant_ref_t gref; /* Reference to incoming granted frame */ |
2894 | }; |
2895 | +typedef struct netif_rx_request netif_rx_request_t; |
2896 | |
2897 | /* Packet data has been validated against protocol checksum. */ |
2898 | #define _NETRXF_data_validated (0) |
2899 | @@ -131,23 +171,20 @@ struct xen_netif_rx_request { |
2900 | #define _NETRXF_extra_info (3) |
2901 | #define NETRXF_extra_info (1U<<_NETRXF_extra_info) |
2902 | |
2903 | -struct xen_netif_rx_response { |
2904 | +struct netif_rx_response { |
2905 | uint16_t id; |
2906 | uint16_t offset; /* Offset in page of start of received packet */ |
2907 | uint16_t flags; /* NETRXF_* */ |
2908 | int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */ |
2909 | }; |
2910 | +typedef struct netif_rx_response netif_rx_response_t; |
2911 | |
2912 | /* |
2913 | * Generate netif ring structures and types. |
2914 | */ |
2915 | |
2916 | -DEFINE_RING_TYPES(xen_netif_tx, |
2917 | - struct xen_netif_tx_request, |
2918 | - struct xen_netif_tx_response); |
2919 | -DEFINE_RING_TYPES(xen_netif_rx, |
2920 | - struct xen_netif_rx_request, |
2921 | - struct xen_netif_rx_response); |
2922 | +DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response); |
2923 | +DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response); |
2924 | |
2925 | #define NETIF_RSP_DROPPED -2 |
2926 | #define NETIF_RSP_ERROR -1 |
2927 | @@ -156,3 +193,13 @@ DEFINE_RING_TYPES(xen_netif_rx, |
2928 | #define NETIF_RSP_NULL 1 |
2929 | |
2930 | #endif |
2931 | + |
2932 | +/* |
2933 | + * Local variables: |
2934 | + * mode: C |
2935 | + * c-set-style: "BSD" |
2936 | + * c-basic-offset: 4 |
2937 | + * tab-width: 4 |
2938 | + * indent-tabs-mode: nil |
2939 | + * End: |
2940 | + */ |
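For illustration, once 'request-multicast-control' has been negotiated as described in the comment above, a frontend would register interest in a group by queueing a dummy transmit request followed by an MCAST_ADD extra-info fragment. The sketch below uses only types and macros introduced by this patch; the ring variable, slot index, request id handling and locking are assumptions made for the example, not part of the patch.

/*
 * Sketch only: queue a dummy tx request carrying an MCAST_ADD fragment.
 * 'tx' and 'i' are assumed to come from the frontend's normal transmit
 * path; id/gref bookkeeping is omitted.
 */
#include <linux/string.h>
#include <xen/interface/io/netif.h>

static void queue_mcast_add(netif_tx_front_ring_t *tx, RING_IDX i,
                            const uint8_t mac[6])
{
    netif_tx_request_t *req = RING_GET_REQUEST(tx, i);
    netif_extra_info_t *ext =
        (netif_extra_info_t *)RING_GET_REQUEST(tx, i + 1);

    /* Dummy transmit request: no payload, just flags an extra-info slot. */
    memset(req, 0, sizeof(*req));
    req->flags = NETTXF_extra_info;

    /* MCAST_ADD fragment naming the group to join. */
    memset(ext, 0, sizeof(*ext));
    ext->type = XEN_NETIF_EXTRA_TYPE_MCAST_ADD;
    memcpy(ext->u.mcast.addr, mac, 6);

    /* Two ring slots consumed; caller pushes and notifies as usual. */
    tx->req_prod_pvt = i + 2;
}

Removing a group registration would follow the same pattern with XEN_NETIF_EXTRA_TYPE_MCAST_DEL.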
2941 | Index: head-2008-04-15/include/xen/interface/io/ring.h |
2942 | =================================================================== |
2943 | --- head-2008-04-15.orig/include/xen/interface/io/ring.h 2008-04-15 09:41:09.000000000 +0200 |
2944 | +++ head-2008-04-15/include/xen/interface/io/ring.h 2008-04-15 09:59:33.000000000 +0200 |
2945 | @@ -1,18 +1,44 @@ |
2946 | /****************************************************************************** |
2947 | * ring.h |
2948 | - * |
2949 | + * |
2950 | * Shared producer-consumer ring macros. |
2951 | * |
2952 | + * Permission is hereby granted, free of charge, to any person obtaining a copy |
2953 | + * of this software and associated documentation files (the "Software"), to |
2954 | + * deal in the Software without restriction, including without limitation the |
2955 | + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
2956 | + * sell copies of the Software, and to permit persons to whom the Software is |
2957 | + * furnished to do so, subject to the following conditions: |
2958 | + * |
2959 | + * The above copyright notice and this permission notice shall be included in |
2960 | + * all copies or substantial portions of the Software. |
2961 | + * |
2962 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
2963 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
2964 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
2965 | + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
2966 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
2967 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
2968 | + * DEALINGS IN THE SOFTWARE. |
2969 | + * |
2970 | * Tim Deegan and Andrew Warfield November 2004. |
2971 | */ |
2972 | |
2973 | #ifndef __XEN_PUBLIC_IO_RING_H__ |
2974 | #define __XEN_PUBLIC_IO_RING_H__ |
2975 | |
2976 | +#include "../xen-compat.h" |
2977 | + |
2978 | +#if __XEN_INTERFACE_VERSION__ < 0x00030208 |
2979 | +#define xen_mb() mb() |
2980 | +#define xen_rmb() rmb() |
2981 | +#define xen_wmb() wmb() |
2982 | +#endif |
2983 | + |
2984 | typedef unsigned int RING_IDX; |
2985 | |
2986 | /* Round a 32-bit unsigned constant down to the nearest power of two. */ |
2987 | -#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1)) |
2988 | +#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1)) |
2989 | #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x)) |
2990 | #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x)) |
2991 | #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x)) |
2992 | @@ -21,198 +47,209 @@ typedef unsigned int RING_IDX; |
2993 | /* |
2994 | * Calculate size of a shared ring, given the total available space for the |
2995 | * ring and indexes (_sz), and the name tag of the request/response structure. |
2996 | - * A ring contains as many entries as will fit, rounded down to the nearest |
2997 | + * A ring contains as many entries as will fit, rounded down to the nearest |
2998 | * power of two (so we can mask with (size-1) to loop around). |
2999 | */ |
3000 | #define __RING_SIZE(_s, _sz) \ |
3001 | - (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) |
3002 | + (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) |
3003 | |
3004 | /* |
3005 | * Macros to make the correct C datatypes for a new kind of ring. |
3006 | - * |
3007 | + * |
3008 | * To make a new ring datatype, you need to have two message structures, |
3009 | - * let's say struct request, and struct response already defined. |
3010 | + * let's say request_t, and response_t already defined. |
3011 | * |
3012 | * In a header where you want the ring datatype declared, you then do: |
3013 | * |
3014 | - * DEFINE_RING_TYPES(mytag, struct request, struct response); |
3015 | + * DEFINE_RING_TYPES(mytag, request_t, response_t); |
3016 | * |
3017 | * These expand out to give you a set of types, as you can see below. |
3018 | * The most important of these are: |
3019 | - * |
3020 | - * struct mytag_sring - The shared ring. |
3021 | - * struct mytag_front_ring - The 'front' half of the ring. |
3022 | - * struct mytag_back_ring - The 'back' half of the ring. |
3023 | + * |
3024 | + * mytag_sring_t - The shared ring. |
3025 | + * mytag_front_ring_t - The 'front' half of the ring. |
3026 | + * mytag_back_ring_t - The 'back' half of the ring. |
3027 | * |
3028 | * To initialize a ring in your code you need to know the location and size |
3029 | * of the shared memory area (PAGE_SIZE, for instance). To initialise |
3030 | * the front half: |
3031 | * |
3032 | - * struct mytag_front_ring front_ring; |
3033 | - * SHARED_RING_INIT((struct mytag_sring *)shared_page); |
3034 | - * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page, |
3035 | - * PAGE_SIZE); |
3036 | + * mytag_front_ring_t front_ring; |
3037 | + * SHARED_RING_INIT((mytag_sring_t *)shared_page); |
3038 | + * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); |
3039 | * |
3040 | * Initializing the back follows similarly (note that only the front |
3041 | * initializes the shared ring): |
3042 | * |
3043 | - * struct mytag_back_ring back_ring; |
3044 | - * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page, |
3045 | - * PAGE_SIZE); |
3046 | + * mytag_back_ring_t back_ring; |
3047 | + * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); |
3048 | */ |
3049 | |
3050 | -#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ |
3051 | - \ |
3052 | -/* Shared ring entry */ \ |
3053 | -union __name##_sring_entry { \ |
3054 | - __req_t req; \ |
3055 | - __rsp_t rsp; \ |
3056 | -}; \ |
3057 | - \ |
3058 | -/* Shared ring page */ \ |
3059 | -struct __name##_sring { \ |
3060 | - RING_IDX req_prod, req_event; \ |
3061 | - RING_IDX rsp_prod, rsp_event; \ |
3062 | - uint8_t pad[48]; \ |
3063 | - union __name##_sring_entry ring[1]; /* variable-length */ \ |
3064 | -}; \ |
3065 | - \ |
3066 | -/* "Front" end's private variables */ \ |
3067 | -struct __name##_front_ring { \ |
3068 | - RING_IDX req_prod_pvt; \ |
3069 | - RING_IDX rsp_cons; \ |
3070 | - unsigned int nr_ents; \ |
3071 | - struct __name##_sring *sring; \ |
3072 | -}; \ |
3073 | - \ |
3074 | -/* "Back" end's private variables */ \ |
3075 | -struct __name##_back_ring { \ |
3076 | - RING_IDX rsp_prod_pvt; \ |
3077 | - RING_IDX req_cons; \ |
3078 | - unsigned int nr_ents; \ |
3079 | - struct __name##_sring *sring; \ |
3080 | -}; |
3081 | +#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ |
3082 | + \ |
3083 | +/* Shared ring entry */ \ |
3084 | +union __name##_sring_entry { \ |
3085 | + __req_t req; \ |
3086 | + __rsp_t rsp; \ |
3087 | +}; \ |
3088 | + \ |
3089 | +/* Shared ring page */ \ |
3090 | +struct __name##_sring { \ |
3091 | + RING_IDX req_prod, req_event; \ |
3092 | + RING_IDX rsp_prod, rsp_event; \ |
3093 | + uint8_t pad[48]; \ |
3094 | + union __name##_sring_entry ring[1]; /* variable-length */ \ |
3095 | +}; \ |
3096 | + \ |
3097 | +/* "Front" end's private variables */ \ |
3098 | +struct __name##_front_ring { \ |
3099 | + RING_IDX req_prod_pvt; \ |
3100 | + RING_IDX rsp_cons; \ |
3101 | + unsigned int nr_ents; \ |
3102 | + struct __name##_sring *sring; \ |
3103 | +}; \ |
3104 | + \ |
3105 | +/* "Back" end's private variables */ \ |
3106 | +struct __name##_back_ring { \ |
3107 | + RING_IDX rsp_prod_pvt; \ |
3108 | + RING_IDX req_cons; \ |
3109 | + unsigned int nr_ents; \ |
3110 | + struct __name##_sring *sring; \ |
3111 | +}; \ |
3112 | + \ |
3113 | +/* Syntactic sugar */ \ |
3114 | +typedef struct __name##_sring __name##_sring_t; \ |
3115 | +typedef struct __name##_front_ring __name##_front_ring_t; \ |
3116 | +typedef struct __name##_back_ring __name##_back_ring_t |
3117 | |
3118 | /* |
3119 | * Macros for manipulating rings. |
3120 | - * |
3121 | - * FRONT_RING_whatever works on the "front end" of a ring: here |
3122 | + * |
3123 | + * FRONT_RING_whatever works on the "front end" of a ring: here |
3124 | * requests are pushed on to the ring and responses taken off it. |
3125 | - * |
3126 | - * BACK_RING_whatever works on the "back end" of a ring: here |
3127 | + * |
3128 | + * BACK_RING_whatever works on the "back end" of a ring: here |
3129 | * requests are taken off the ring and responses put on. |
3130 | - * |
3131 | - * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. |
3132 | - * This is OK in 1-for-1 request-response situations where the |
3133 | + * |
3134 | + * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. |
3135 | + * This is OK in 1-for-1 request-response situations where the |
3136 | * requestor (front end) never has more than RING_SIZE()-1 |
3137 | * outstanding requests. |
3138 | */ |
3139 | |
3140 | /* Initialising empty rings */ |
3141 | -#define SHARED_RING_INIT(_s) do { \ |
3142 | - (_s)->req_prod = (_s)->rsp_prod = 0; \ |
3143 | - (_s)->req_event = (_s)->rsp_event = 1; \ |
3144 | - memset((_s)->pad, 0, sizeof((_s)->pad)); \ |
3145 | +#define SHARED_RING_INIT(_s) do { \ |
3146 | + (_s)->req_prod = (_s)->rsp_prod = 0; \ |
3147 | + (_s)->req_event = (_s)->rsp_event = 1; \ |
3148 | + (void)memset((_s)->pad, 0, sizeof((_s)->pad)); \ |
3149 | } while(0) |
3150 | |
3151 | -#define FRONT_RING_INIT(_r, _s, __size) do { \ |
3152 | - (_r)->req_prod_pvt = 0; \ |
3153 | - (_r)->rsp_cons = 0; \ |
3154 | - (_r)->nr_ents = __RING_SIZE(_s, __size); \ |
3155 | - (_r)->sring = (_s); \ |
3156 | +#define FRONT_RING_INIT(_r, _s, __size) do { \ |
3157 | + (_r)->req_prod_pvt = 0; \ |
3158 | + (_r)->rsp_cons = 0; \ |
3159 | + (_r)->nr_ents = __RING_SIZE(_s, __size); \ |
3160 | + (_r)->sring = (_s); \ |
3161 | } while (0) |
3162 | |
3163 | -#define BACK_RING_INIT(_r, _s, __size) do { \ |
3164 | - (_r)->rsp_prod_pvt = 0; \ |
3165 | - (_r)->req_cons = 0; \ |
3166 | - (_r)->nr_ents = __RING_SIZE(_s, __size); \ |
3167 | - (_r)->sring = (_s); \ |
3168 | +#define BACK_RING_INIT(_r, _s, __size) do { \ |
3169 | + (_r)->rsp_prod_pvt = 0; \ |
3170 | + (_r)->req_cons = 0; \ |
3171 | + (_r)->nr_ents = __RING_SIZE(_s, __size); \ |
3172 | + (_r)->sring = (_s); \ |
3173 | } while (0) |
3174 | |
3175 | /* Initialize to existing shared indexes -- for recovery */ |
3176 | -#define FRONT_RING_ATTACH(_r, _s, __size) do { \ |
3177 | - (_r)->sring = (_s); \ |
3178 | - (_r)->req_prod_pvt = (_s)->req_prod; \ |
3179 | - (_r)->rsp_cons = (_s)->rsp_prod; \ |
3180 | - (_r)->nr_ents = __RING_SIZE(_s, __size); \ |
3181 | +#define FRONT_RING_ATTACH(_r, _s, __size) do { \ |
3182 | + (_r)->sring = (_s); \ |
3183 | + (_r)->req_prod_pvt = (_s)->req_prod; \ |
3184 | + (_r)->rsp_cons = (_s)->rsp_prod; \ |
3185 | + (_r)->nr_ents = __RING_SIZE(_s, __size); \ |
3186 | } while (0) |
3187 | |
3188 | -#define BACK_RING_ATTACH(_r, _s, __size) do { \ |
3189 | - (_r)->sring = (_s); \ |
3190 | - (_r)->rsp_prod_pvt = (_s)->rsp_prod; \ |
3191 | - (_r)->req_cons = (_s)->req_prod; \ |
3192 | - (_r)->nr_ents = __RING_SIZE(_s, __size); \ |
3193 | +#define BACK_RING_ATTACH(_r, _s, __size) do { \ |
3194 | + (_r)->sring = (_s); \ |
3195 | + (_r)->rsp_prod_pvt = (_s)->rsp_prod; \ |
3196 | + (_r)->req_cons = (_s)->req_prod; \ |
3197 | + (_r)->nr_ents = __RING_SIZE(_s, __size); \ |
3198 | } while (0) |
3199 | |
3200 | /* How big is this ring? */ |
3201 | -#define RING_SIZE(_r) \ |
3202 | +#define RING_SIZE(_r) \ |
3203 | ((_r)->nr_ents) |
3204 | |
3205 | /* Number of free requests (for use on front side only). */ |
3206 | -#define RING_FREE_REQUESTS(_r) \ |
3207 | +#define RING_FREE_REQUESTS(_r) \ |
3208 | (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons)) |
3209 | |
3210 | /* Test if there is an empty slot available on the front ring. |
3211 | * (This is only meaningful from the front. ) |
3212 | */ |
3213 | -#define RING_FULL(_r) \ |
3214 | +#define RING_FULL(_r) \ |
3215 | (RING_FREE_REQUESTS(_r) == 0) |
3216 | |
3217 | /* Test if there are outstanding messages to be processed on a ring. */ |
3218 | -#define RING_HAS_UNCONSUMED_RESPONSES(_r) \ |
3219 | +#define RING_HAS_UNCONSUMED_RESPONSES(_r) \ |
3220 | ((_r)->sring->rsp_prod - (_r)->rsp_cons) |
3221 | |
3222 | -#define RING_HAS_UNCONSUMED_REQUESTS(_r) \ |
3223 | - ({ \ |
3224 | - unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ |
3225 | - unsigned int rsp = RING_SIZE(_r) - \ |
3226 | - ((_r)->req_cons - (_r)->rsp_prod_pvt); \ |
3227 | - req < rsp ? req : rsp; \ |
3228 | - }) |
3229 | +#ifdef __GNUC__ |
3230 | +#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \ |
3231 | + unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ |
3232 | + unsigned int rsp = RING_SIZE(_r) - \ |
3233 | + ((_r)->req_cons - (_r)->rsp_prod_pvt); \ |
3234 | + req < rsp ? req : rsp; \ |
3235 | +}) |
3236 | +#else |
3237 | +/* Same as above, but without the nice GCC ({ ... }) syntax. */ |
3238 | +#define RING_HAS_UNCONSUMED_REQUESTS(_r) \ |
3239 | + ((((_r)->sring->req_prod - (_r)->req_cons) < \ |
3240 | + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \ |
3241 | + ((_r)->sring->req_prod - (_r)->req_cons) : \ |
3242 | + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) |
3243 | +#endif |
3244 | |
3245 | /* Direct access to individual ring elements, by index. */ |
3246 | -#define RING_GET_REQUEST(_r, _idx) \ |
3247 | +#define RING_GET_REQUEST(_r, _idx) \ |
3248 | (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) |
3249 | |
3250 | -#define RING_GET_RESPONSE(_r, _idx) \ |
3251 | +#define RING_GET_RESPONSE(_r, _idx) \ |
3252 | (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) |
3253 | |
3254 | /* Loop termination condition: Would the specified index overflow the ring? */ |
3255 | -#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ |
3256 | +#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ |
3257 | (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) |
3258 | |
3259 | -#define RING_PUSH_REQUESTS(_r) do { \ |
3260 | - wmb(); /* back sees requests /before/ updated producer index */ \ |
3261 | - (_r)->sring->req_prod = (_r)->req_prod_pvt; \ |
3262 | +#define RING_PUSH_REQUESTS(_r) do { \ |
3263 | + xen_wmb(); /* back sees requests /before/ updated producer index */ \ |
3264 | + (_r)->sring->req_prod = (_r)->req_prod_pvt; \ |
3265 | } while (0) |
3266 | |
3267 | -#define RING_PUSH_RESPONSES(_r) do { \ |
3268 | - wmb(); /* front sees responses /before/ updated producer index */ \ |
3269 | - (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ |
3270 | +#define RING_PUSH_RESPONSES(_r) do { \ |
3271 | + xen_wmb(); /* front sees resps /before/ updated producer index */ \ |
3272 | + (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ |
3273 | } while (0) |
3274 | |
3275 | /* |
3276 | * Notification hold-off (req_event and rsp_event): |
3277 | - * |
3278 | + * |
3279 | * When queueing requests or responses on a shared ring, it may not always be |
3280 | * necessary to notify the remote end. For example, if requests are in flight |
3281 | * in a backend, the front may be able to queue further requests without |
3282 | * notifying the back (if the back checks for new requests when it queues |
3283 | * responses). |
3284 | - * |
3285 | + * |
3286 | * When enqueuing requests or responses: |
3287 | - * |
3288 | + * |
3289 | * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument |
3290 | * is a boolean return value. True indicates that the receiver requires an |
3291 | * asynchronous notification. |
3292 | - * |
3293 | + * |
3294 | * After dequeuing requests or responses (before sleeping the connection): |
3295 | - * |
3296 | + * |
3297 | * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). |
3298 | * The second argument is a boolean return value. True indicates that there |
3299 | * are pending messages on the ring (i.e., the connection should not be put |
3300 | * to sleep). |
3301 | - * |
3302 | + * |
3303 | * These macros will set the req_event/rsp_event field to trigger a |
3304 | * notification on the very next message that is enqueued. If you want to |
3305 | * create batches of work (i.e., only receive a notification after several |
3306 | @@ -221,40 +258,50 @@ struct __name##_back_ring { \ |
3307 | * field appropriately. |
3308 | */ |
3309 | |
3310 | -#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ |
3311 | - RING_IDX __old = (_r)->sring->req_prod; \ |
3312 | - RING_IDX __new = (_r)->req_prod_pvt; \ |
3313 | - wmb(); /* back sees requests /before/ updated producer index */ \ |
3314 | - (_r)->sring->req_prod = __new; \ |
3315 | - mb(); /* back sees new requests /before/ we check req_event */ \ |
3316 | - (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ |
3317 | - (RING_IDX)(__new - __old)); \ |
3318 | +#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ |
3319 | + RING_IDX __old = (_r)->sring->req_prod; \ |
3320 | + RING_IDX __new = (_r)->req_prod_pvt; \ |
3321 | + xen_wmb(); /* back sees requests /before/ updated producer index */ \ |
3322 | + (_r)->sring->req_prod = __new; \ |
3323 | + xen_mb(); /* back sees new requests /before/ we check req_event */ \ |
3324 | + (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ |
3325 | + (RING_IDX)(__new - __old)); \ |
3326 | } while (0) |
3327 | |
3328 | -#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ |
3329 | - RING_IDX __old = (_r)->sring->rsp_prod; \ |
3330 | - RING_IDX __new = (_r)->rsp_prod_pvt; \ |
3331 | - wmb(); /* front sees responses /before/ updated producer index */ \ |
3332 | - (_r)->sring->rsp_prod = __new; \ |
3333 | - mb(); /* front sees new responses /before/ we check rsp_event */ \ |
3334 | - (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ |
3335 | - (RING_IDX)(__new - __old)); \ |
3336 | +#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ |
3337 | + RING_IDX __old = (_r)->sring->rsp_prod; \ |
3338 | + RING_IDX __new = (_r)->rsp_prod_pvt; \ |
3339 | + xen_wmb(); /* front sees resps /before/ updated producer index */ \ |
3340 | + (_r)->sring->rsp_prod = __new; \ |
3341 | + xen_mb(); /* front sees new resps /before/ we check rsp_event */ \ |
3342 | + (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ |
3343 | + (RING_IDX)(__new - __old)); \ |
3344 | } while (0) |
3345 | |
3346 | -#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ |
3347 | - (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ |
3348 | - if (_work_to_do) break; \ |
3349 | - (_r)->sring->req_event = (_r)->req_cons + 1; \ |
3350 | - mb(); \ |
3351 | - (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ |
3352 | +#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ |
3353 | + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ |
3354 | + if (_work_to_do) break; \ |
3355 | + (_r)->sring->req_event = (_r)->req_cons + 1; \ |
3356 | + xen_mb(); \ |
3357 | + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ |
3358 | } while (0) |
3359 | |
3360 | -#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ |
3361 | - (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ |
3362 | - if (_work_to_do) break; \ |
3363 | - (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ |
3364 | - mb(); \ |
3365 | - (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ |
3366 | +#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ |
3367 | + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ |
3368 | + if (_work_to_do) break; \ |
3369 | + (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ |
3370 | + xen_mb(); \ |
3371 | + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ |
3372 | } while (0) |
3373 | |
3374 | #endif /* __XEN_PUBLIC_IO_RING_H__ */ |
3375 | + |
3376 | +/* |
3377 | + * Local variables: |
3378 | + * mode: C |
3379 | + * c-set-style: "BSD" |
3380 | + * c-basic-offset: 4 |
3381 | + * tab-width: 4 |
3382 | + * indent-tabs-mode: nil |
3383 | + * End: |
3384 | + */ |
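As a minimal usage sketch of the ring macros above: the request/response layouts, the 'mytag' name, PAGE_SIZE and notify_remote_via_irq() are assumptions for illustration only, and in a real driver the shared ring is initialised once at connect time rather than per call.

/*
 * Front end: define a ring type, share a page, queue one request and
 * notify the peer only when the hold-off check says it is needed.
 */
#include <xen/interface/io/ring.h>

struct mytag_request  { uint16_t id; uint16_t op; };
struct mytag_response { uint16_t id; int16_t  status; };

DEFINE_RING_TYPES(mytag, struct mytag_request, struct mytag_response);

static void mytag_send(void *shared_page, int irq)
{
    mytag_front_ring_t front;
    struct mytag_request *req;
    int notify;

    SHARED_RING_INIT((mytag_sring_t *)shared_page);
    FRONT_RING_INIT(&front, (mytag_sring_t *)shared_page, PAGE_SIZE);

    req = RING_GET_REQUEST(&front, front.req_prod_pvt);
    req->id = 0;
    req->op = 1;
    front.req_prod_pvt++;

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front, notify);
    if (notify)
        notify_remote_via_irq(irq);   /* assumed notification primitive */
}

/* Back end: drain requests, re-checking once before going idle. */
static void mytag_consume(mytag_back_ring_t *back)
{
    int more;

    do {
        while (RING_HAS_UNCONSUMED_REQUESTS(back)) {
            struct mytag_request *req =
                RING_GET_REQUEST(back, back->req_cons++);
            /* ... handle *req and queue a response here ... */
            (void)req;
        }
        RING_FINAL_CHECK_FOR_REQUESTS(back, more);
    } while (more);
}

The final-check loop is what implements the notification hold-off described in the comments: the backend re-arms req_event before sleeping, so a request that races with the check still generates an event.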
3385 | Index: head-2008-04-15/include/xen/interface/io/xenbus.h |
3386 | =================================================================== |
3387 | --- head-2008-04-15.orig/include/xen/interface/io/xenbus.h 2008-04-15 09:41:09.000000000 +0200 |
3388 | +++ head-2008-04-15/include/xen/interface/io/xenbus.h 2008-04-15 09:59:33.000000000 +0200 |
3389 | @@ -3,42 +3,78 @@ |
3390 | * |
3391 | * Xenbus protocol details. |
3392 | * |
3393 | + * Permission is hereby granted, free of charge, to any person obtaining a copy |
3394 | + * of this software and associated documentation files (the "Software"), to |
3395 | + * deal in the Software without restriction, including without limitation the |
3396 | + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
3397 | + * sell copies of the Software, and to permit persons to whom the Software is |
3398 | + * furnished to do so, subject to the following conditions: |
3399 | + * |
3400 | + * The above copyright notice and this permission notice shall be included in |
3401 | + * all copies or substantial portions of the Software. |
3402 | + * |
3403 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
3404 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
3405 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
3406 | + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
3407 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
3408 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
3409 | + * DEALINGS IN THE SOFTWARE. |
3410 | + * |
3411 | * Copyright (C) 2005 XenSource Ltd. |
3412 | */ |
3413 | |
3414 | #ifndef _XEN_PUBLIC_IO_XENBUS_H |
3415 | #define _XEN_PUBLIC_IO_XENBUS_H |
3416 | |
3417 | -/* The state of either end of the Xenbus, i.e. the current communication |
3418 | - status of initialisation across the bus. States here imply nothing about |
3419 | - the state of the connection between the driver and the kernel's device |
3420 | - layers. */ |
3421 | -enum xenbus_state |
3422 | -{ |
3423 | - XenbusStateUnknown = 0, |
3424 | - XenbusStateInitialising = 1, |
3425 | - XenbusStateInitWait = 2, /* Finished early |
3426 | - initialisation, but waiting |
3427 | - for information from the peer |
3428 | - or hotplug scripts. */ |
3429 | - XenbusStateInitialised = 3, /* Initialised and waiting for a |
3430 | - connection from the peer. */ |
3431 | - XenbusStateConnected = 4, |
3432 | - XenbusStateClosing = 5, /* The device is being closed |
3433 | - due to an error or an unplug |
3434 | - event. */ |
3435 | - XenbusStateClosed = 6 |
3436 | +/* |
3437 | + * The state of either end of the Xenbus, i.e. the current communication |
3438 | + * status of initialisation across the bus. States here imply nothing about |
3439 | + * the state of the connection between the driver and the kernel's device |
3440 | + * layers. |
3441 | + */ |
3442 | +enum xenbus_state { |
3443 | + XenbusStateUnknown = 0, |
3444 | + |
3445 | + XenbusStateInitialising = 1, |
3446 | + |
3447 | + /* |
3448 | + * InitWait: Finished early initialisation but waiting for information |
3449 | + * from the peer or hotplug scripts. |
3450 | + */ |
3451 | + XenbusStateInitWait = 2, |
3452 | + |
3453 | + /* |
3454 | + * Initialised: Waiting for a connection from the peer. |
3455 | + */ |
3456 | + XenbusStateInitialised = 3, |
3457 | + |
3458 | + XenbusStateConnected = 4, |
3459 | + |
3460 | + /* |
3461 | + * Closing: The device is being closed due to an error or an unplug event. |
3462 | + */ |
3463 | + XenbusStateClosing = 5, |
3464 | + |
3465 | + XenbusStateClosed = 6, |
3466 | + |
3467 | + /* |
3468 | + * Reconfiguring: The device is being reconfigured. |
3469 | + */ |
3470 | + XenbusStateReconfiguring = 7, |
3471 | |
3472 | + XenbusStateReconfigured = 8 |
3473 | }; |
3474 | +typedef enum xenbus_state XenbusState; |
3475 | |
3476 | #endif /* _XEN_PUBLIC_IO_XENBUS_H */ |
3477 | |
3478 | /* |
3479 | * Local variables: |
3480 | - * c-file-style: "linux" |
3481 | - * indent-tabs-mode: t |
3482 | - * c-indent-level: 8 |
3483 | - * c-basic-offset: 8 |
3484 | - * tab-width: 8 |
3485 | + * mode: C |
3486 | + * c-set-style: "BSD" |
3487 | + * c-basic-offset: 4 |
3488 | + * tab-width: 4 |
3489 | + * indent-tabs-mode: nil |
3490 | * End: |
3491 | */ |
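A typical consumer of the xenbus_state enum is a driver's "other end changed" callback. The sketch below follows the xenbus_driver convention used elsewhere in this patch set; the device handling and the commented-out xenbus_switch_state() call are illustrative assumptions, not part of this header.

/*
 * Sketch: react to the peer's xenbus state from a frontend's point of view.
 */
#include <xen/xenbus.h>                 /* struct xenbus_device (assumed) */
#include <xen/interface/io/xenbus.h>

static void example_otherend_changed(struct xenbus_device *dev,
                                     enum xenbus_state backend_state)
{
    switch (backend_state) {
    case XenbusStateInitWait:
    case XenbusStateInitialised:
        /* Backend is ready for our setup; publish ring details, then
         * e.g. xenbus_switch_state(dev, XenbusStateConnected); */
        break;
    case XenbusStateConnected:
        /* Both ends are up: start I/O. */
        break;
    case XenbusStateClosing:
    case XenbusStateClosed:
        /* Tear down and mirror the state so the toolstack can clean up. */
        break;
    default:
        /* Unknown/Reconfiguring states: usually ignored by frontends. */
        break;
    }
}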
3492 | Index: head-2008-04-15/include/xen/interface/io/xs_wire.h |
3493 | =================================================================== |
3494 | --- head-2008-04-15.orig/include/xen/interface/io/xs_wire.h 2008-04-15 09:41:09.000000000 +0200 |
3495 | +++ head-2008-04-15/include/xen/interface/io/xs_wire.h 2008-04-15 09:59:33.000000000 +0200 |
3496 | @@ -1,6 +1,25 @@ |
3497 | /* |
3498 | * Details of the "wire" protocol between Xen Store Daemon and client |
3499 | * library or guest kernel. |
3500 | + * |
3501 | + * Permission is hereby granted, free of charge, to any person obtaining a copy |
3502 | + * of this software and associated documentation files (the "Software"), to |
3503 | + * deal in the Software without restriction, including without limitation the |
3504 | + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
3505 | + * sell copies of the Software, and to permit persons to whom the Software is |
3506 | + * furnished to do so, subject to the following conditions: |
3507 | + * |
3508 | + * The above copyright notice and this permission notice shall be included in |
3509 | + * all copies or substantial portions of the Software. |
3510 | + * |
3511 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
3512 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
3513 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
3514 | + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
3515 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
3516 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
3517 | + * DEALINGS IN THE SOFTWARE. |
3518 | + * |
3519 | * Copyright (C) 2005 Rusty Russell IBM Corporation |
3520 | */ |
3521 | |
3522 | @@ -26,7 +45,9 @@ enum xsd_sockmsg_type |
3523 | XS_SET_PERMS, |
3524 | XS_WATCH_EVENT, |
3525 | XS_ERROR, |
3526 | - XS_IS_DOMAIN_INTRODUCED |
3527 | + XS_IS_DOMAIN_INTRODUCED, |
3528 | + XS_RESUME, |
3529 | + XS_SET_TARGET |
3530 | }; |
3531 | |
3532 | #define XS_WRITE_NONE "NONE" |
3533 | @@ -40,7 +61,11 @@ struct xsd_errors |
3534 | const char *errstring; |
3535 | }; |
3536 | #define XSD_ERROR(x) { x, #x } |
3537 | -static struct xsd_errors xsd_errors[] __attribute__((unused)) = { |
3538 | +static struct xsd_errors xsd_errors[] |
3539 | +#if defined(__GNUC__) |
3540 | +__attribute__((unused)) |
3541 | +#endif |
3542 | + = { |
3543 | XSD_ERROR(EINVAL), |
3544 | XSD_ERROR(EACCES), |
3545 | XSD_ERROR(EEXIST), |
3546 | @@ -84,4 +109,21 @@ struct xenstore_domain_interface { |
3547 | XENSTORE_RING_IDX rsp_cons, rsp_prod; |
3548 | }; |
3549 | |
3550 | +/* Violating this is very bad. See docs/misc/xenstore.txt. */ |
3551 | +#define XENSTORE_PAYLOAD_MAX 4096 |
3552 | + |
3553 | +/* Violating these just gets you an error back */ |
3554 | +#define XENSTORE_ABS_PATH_MAX 3072 |
3555 | +#define XENSTORE_REL_PATH_MAX 2048 |
3556 | + |
3557 | #endif /* _XS_WIRE_H */ |
3558 | + |
3559 | +/* |
3560 | + * Local variables: |
3561 | + * mode: C |
3562 | + * c-set-style: "BSD" |
3563 | + * c-basic-offset: 4 |
3564 | + * tab-width: 4 |
3565 | + * indent-tabs-mode: nil |
3566 | + * End: |
3567 | + */ |
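The new limits above are enforced by the store daemon, so callers may want to reject oversized requests up front. The helper name, the error convention and the approximate XS_WRITE payload accounting (path, NUL separator, data) in the sketch below are assumptions for illustration.

/*
 * Sketch: sanity-check a store write against the wire-protocol limits
 * before handing it to the xenbus layer.
 */
#include <linux/errno.h>
#include <linux/string.h>
#include <xen/interface/io/xs_wire.h>

static int example_check_store_write(const char *path, const char *value)
{
    if (strlen(path) > XENSTORE_ABS_PATH_MAX)
        return -EINVAL;                 /* daemon would reject it anyway */
    if (strlen(path) + 1 + strlen(value) > XENSTORE_PAYLOAD_MAX)
        return -EINVAL;
    return 0;
}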
3568 | Index: head-2008-04-15/include/xen/interface/memory.h |
3569 | =================================================================== |
3570 | --- head-2008-04-15.orig/include/xen/interface/memory.h 2008-04-15 09:41:09.000000000 +0200 |
3571 | +++ head-2008-04-15/include/xen/interface/memory.h 2008-04-15 09:59:33.000000000 +0200 |
3572 | @@ -1,7 +1,25 @@ |
3573 | /****************************************************************************** |
3574 | * memory.h |
3575 | - * |
3576 | + * |
3577 | * Memory reservation and information. |
3578 | + * |
3579 | + * Permission is hereby granted, free of charge, to any person obtaining a copy |
3580 | + * of this software and associated documentation files (the "Software"), to |
3581 | + * deal in the Software without restriction, including without limitation the |
3582 | + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
3583 | + * sell copies of the Software, and to permit persons to whom the Software is |
3584 | + * furnished to do so, subject to the following conditions: |
3585 | + * |
3586 | + * The above copyright notice and this permission notice shall be included in |
3587 | + * all copies or substantial portions of the Software. |
3588 | + * |
3589 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
3590 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
3591 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
3592 | + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
3593 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
3594 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
3595 | + * DEALINGS IN THE SOFTWARE. |
3596 | * |
3597 | * Copyright (c) 2005, Keir Fraser <keir@xensource.com> |
3598 | */ |
3599 | @@ -10,8 +28,8 @@ |
3600 | #define __XEN_PUBLIC_MEMORY_H__ |
3601 | |
3602 | /* |
3603 | - * Increase or decrease the specified domain's memory reservation. Returns a |
3604 | - * -ve errcode on failure, or the # extents successfully allocated or freed. |
3605 | + * Increase or decrease the specified domain's memory reservation. Returns the |
3606 | + * number of extents successfully allocated or freed. |
3607 | * arg == addr of struct xen_memory_reservation. |
3608 | */ |
3609 | #define XENMEM_increase_reservation 0 |
3610 | @@ -29,15 +47,15 @@ struct xen_memory_reservation { |
3611 | * OUT: GMFN bases of extents that were allocated |
3612 | * (NB. This command also updates the mach_to_phys translation table) |
3613 | */ |
3614 | - GUEST_HANDLE(ulong) extent_start; |
3615 | + XEN_GUEST_HANDLE(xen_pfn_t) extent_start; |
3616 | |
3617 | /* Number of extents, and size/alignment of each (2^extent_order pages). */ |
3618 | - unsigned long nr_extents; |
3619 | + xen_ulong_t nr_extents; |
3620 | unsigned int extent_order; |
3621 | |
3622 | /* |
3623 | - * Maximum # bits addressable by the user of the allocated region (e.g., |
3624 | - * I/O devices often have a 32-bit limitation even in 64-bit systems). If |
3625 | + * Maximum # bits addressable by the user of the allocated region (e.g., |
3626 | + * I/O devices often have a 32-bit limitation even in 64-bit systems). If |
3627 | * zero then the user has no addressing restriction. |
3628 | * This field is not used by XENMEM_decrease_reservation. |
3629 | */ |
3630 | @@ -48,9 +66,52 @@ struct xen_memory_reservation { |
3631 | * Unprivileged domains can specify only DOMID_SELF. |
3632 | */ |
3633 | domid_t domid; |
3634 | +}; |
3635 | +typedef struct xen_memory_reservation xen_memory_reservation_t; |
3636 | +DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t); |
3637 | |
3638 | +/* |
3639 | + * An atomic exchange of memory pages. If return code is zero then |
3640 | + * @out.extent_list provides GMFNs of the newly-allocated memory. |
3641 | + * Returns zero on complete success, otherwise a negative error code. |
3642 | + * On complete success then always @nr_exchanged == @in.nr_extents. |
3643 | + * On partial success @nr_exchanged indicates how much work was done. |
3644 | + */ |
3645 | +#define XENMEM_exchange 11 |
3646 | +struct xen_memory_exchange { |
3647 | + /* |
3648 | + * [IN] Details of memory extents to be exchanged (GMFN bases). |
3649 | + * Note that @in.address_bits is ignored and unused. |
3650 | + */ |
3651 | + struct xen_memory_reservation in; |
3652 | + |
3653 | + /* |
3654 | + * [IN/OUT] Details of new memory extents. |
3655 | + * We require that: |
3656 | + * 1. @in.domid == @out.domid |
3657 | + * 2. @in.nr_extents << @in.extent_order == |
3658 | + * @out.nr_extents << @out.extent_order |
3659 | + * 3. @in.extent_start and @out.extent_start lists must not overlap |
3660 | + * 4. @out.extent_start lists GPFN bases to be populated |
3661 | + * 5. @out.extent_start is overwritten with allocated GMFN bases |
3662 | + */ |
3663 | + struct xen_memory_reservation out; |
3664 | + |
3665 | + /* |
3666 | + * [OUT] Number of input extents that were successfully exchanged: |
3667 | + * 1. The first @nr_exchanged input extents were successfully |
3668 | + * deallocated. |
3669 | + * 2. The corresponding first entries in the output extent list correctly |
3670 | + * indicate the GMFNs that were successfully exchanged. |
3671 | + * 3. All other input and output extents are untouched. |
3672 | + * 4. If not all input extents are exchanged then the return code of this |
3673 | + * command will be non-zero. |
3674 | + * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER! |
3675 | + */ |
3676 | + xen_ulong_t nr_exchanged; |
3677 | }; |
3678 | -DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation); |
3679 | +typedef struct xen_memory_exchange xen_memory_exchange_t; |
3680 | +DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t); |
3681 | |
3682 | /* |
3683 | * Returns the maximum machine frame number of mapped RAM in this system. |
3684 | @@ -68,6 +129,11 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_memory_re |
3685 | #define XENMEM_maximum_reservation 4 |
3686 | |
3687 | /* |
3688 | + * Returns the maximum GPFN in use by the guest, or -ve errcode on failure. |
3689 | + */ |
3690 | +#define XENMEM_maximum_gpfn 14 |
3691 | + |
3692 | +/* |
3693 | * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys |
3694 | * mapping table. Architectures which do not have a m2p table do not implement |
3695 | * this command. |
3696 | @@ -86,7 +152,7 @@ struct xen_machphys_mfn_list { |
3697 | * any large discontiguities in the machine address space, 2MB gaps in |
3698 | * the machphys table will be represented by an MFN base of zero. |
3699 | */ |
3700 | - GUEST_HANDLE(ulong) extent_start; |
3701 | + XEN_GUEST_HANDLE(xen_pfn_t) extent_start; |
3702 | |
3703 | /* |
3704 | * Number of extents written to the above array. This will be smaller |
3705 | @@ -94,7 +160,22 @@ struct xen_machphys_mfn_list { |
3706 | */ |
3707 | unsigned int nr_extents; |
3708 | }; |
3709 | -DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list); |
3710 | +typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t; |
3711 | +DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t); |
3712 | + |
3713 | +/* |
3714 | + * Returns the location in virtual address space of the machine_to_phys |
3715 | + * mapping table. Architectures which do not have a m2p table, or which do not |
3716 | + * map it by default into guest address space, do not implement this command. |
3717 | + * arg == addr of xen_machphys_mapping_t. |
3718 | + */ |
3719 | +#define XENMEM_machphys_mapping 12 |
3720 | +struct xen_machphys_mapping { |
3721 | + xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */ |
3722 | + xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */ |
3723 | +}; |
3724 | +typedef struct xen_machphys_mapping xen_machphys_mapping_t; |
3725 | +DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t); |
3726 | |
3727 | /* |
3728 | * Sets the GPFN at which a particular page appears in the specified guest's |
3729 | @@ -112,12 +193,13 @@ struct xen_add_to_physmap { |
3730 | unsigned int space; |
3731 | |
3732 | /* Index into source mapping space. */ |
3733 | - unsigned long idx; |
3734 | + xen_ulong_t idx; |
3735 | |
3736 | /* GPFN where the source mapping page should appear. */ |
3737 | - unsigned long gpfn; |
3738 | + xen_pfn_t gpfn; |
3739 | }; |
3740 | -DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap); |
3741 | +typedef struct xen_add_to_physmap xen_add_to_physmap_t; |
3742 | +DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t); |
3743 | |
3744 | /* |
3745 | * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error |
3746 | @@ -129,17 +211,71 @@ struct xen_translate_gpfn_list { |
3747 | domid_t domid; |
3748 | |
3749 | /* Length of list. */ |
3750 | - unsigned long nr_gpfns; |
3751 | + xen_ulong_t nr_gpfns; |
3752 | |
3753 | /* List of GPFNs to translate. */ |
3754 | - GUEST_HANDLE(ulong) gpfn_list; |
3755 | + XEN_GUEST_HANDLE(xen_pfn_t) gpfn_list; |
3756 | |
3757 | /* |
3758 | * Output list to contain MFN translations. May be the same as the input |
3759 | * list (in which case each input GPFN is overwritten with the output MFN). |
3760 | */ |
3761 | - GUEST_HANDLE(ulong) mfn_list; |
3762 | + XEN_GUEST_HANDLE(xen_pfn_t) mfn_list; |
3763 | }; |
3764 | -DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list); |
3765 | +typedef struct xen_translate_gpfn_list xen_translate_gpfn_list_t; |
3766 | +DEFINE_XEN_GUEST_HANDLE(xen_translate_gpfn_list_t); |
3767 | + |
3768 | +/* |
3769 | + * Returns the pseudo-physical memory map as it was when the domain |
3770 | + * was started (specified by XENMEM_set_memory_map). |
3771 | + * arg == addr of xen_memory_map_t. |
3772 | + */ |
3773 | +#define XENMEM_memory_map 9 |
3774 | +struct xen_memory_map { |
3775 | + /* |
3776 | + * On call the number of entries which can be stored in buffer. On |
3777 | + * return the number of entries which have been stored in |
3778 | + * buffer. |
3779 | + */ |
3780 | + unsigned int nr_entries; |
3781 | + |
3782 | + /* |
3783 | + * Entries in the buffer are in the same format as returned by the |
3784 | + * BIOS INT 0x15 EAX=0xE820 call. |
3785 | + */ |
3786 | + XEN_GUEST_HANDLE(void) buffer; |
3787 | +}; |
3788 | +typedef struct xen_memory_map xen_memory_map_t; |
3789 | +DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t); |
3790 | + |
3791 | +/* |
3792 | + * Returns the real physical memory map. Passes the same structure as |
3793 | + * XENMEM_memory_map. |
3794 | + * arg == addr of xen_memory_map_t. |
3795 | + */ |
3796 | +#define XENMEM_machine_memory_map 10 |
3797 | + |
3798 | +/* |
3799 | + * Set the pseudo-physical memory map of a domain, as returned by |
3800 | + * XENMEM_memory_map. |
3801 | + * arg == addr of xen_foreign_memory_map_t. |
3802 | + */ |
3803 | +#define XENMEM_set_memory_map 13 |
3804 | +struct xen_foreign_memory_map { |
3805 | + domid_t domid; |
3806 | + struct xen_memory_map map; |
3807 | +}; |
3808 | +typedef struct xen_foreign_memory_map xen_foreign_memory_map_t; |
3809 | +DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t); |
3810 | |
3811 | #endif /* __XEN_PUBLIC_MEMORY_H__ */ |
3812 | + |
3813 | +/* |
3814 | + * Local variables: |
3815 | + * mode: C |
3816 | + * c-set-style: "BSD" |
3817 | + * c-basic-offset: 4 |
3818 | + * tab-width: 4 |
3819 | + * indent-tabs-mode: nil |
3820 | + * End: |
3821 | + */ |
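As an example of how the reworked xen_memory_reservation is filled in, the sketch below hands a list of frames back to Xen in the style of the balloon driver. HYPERVISOR_memory_op(), set_xen_guest_handle() and the companion XENMEM_decrease_reservation command are the usual facilities in this tree; the frame-list handling and hypercall-wrapper header are simplified assumptions.

/*
 * Sketch: release nr frames listed in frame_list back to the hypervisor.
 */
#include <xen/interface/memory.h>       /* pulls in xen.h for DOMID_SELF */

static long example_return_frames(xen_pfn_t *frame_list, unsigned long nr)
{
    struct xen_memory_reservation reservation = {
        .nr_extents   = nr,
        .extent_order = 0,              /* 4kB extents */
        .domid        = DOMID_SELF,
    };

    set_xen_guest_handle(reservation.extent_start, frame_list);
    return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

A successful call returns the number of extents actually freed, which the caller must compare against nr, since partial success is possible.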
3822 | Index: head-2008-04-15/include/xen/interface/physdev.h |
3823 | =================================================================== |
3824 | --- head-2008-04-15.orig/include/xen/interface/physdev.h 2008-04-15 09:41:09.000000000 +0200 |
3825 | +++ head-2008-04-15/include/xen/interface/physdev.h 2008-04-15 09:59:33.000000000 +0200 |
3826 | @@ -24,7 +24,7 @@ |
3827 | /* |
3828 | * Prototype for this hypercall is: |
3829 | * int physdev_op(int cmd, void *args) |
3830 | - * @cmd == PHYSDEVOP_??? (physdev operation). |
3831 | + * @cmd == PHYSDEVOP_??? (physdev operation). |
3832 | * @args == Operation-specific extra arguments (NULL if none). |
3833 | */ |
3834 | |
3835 | @@ -32,114 +32,142 @@ |
3836 | * Notify end-of-interrupt (EOI) for the specified IRQ. |
3837 | * @arg == pointer to physdev_eoi structure. |
3838 | */ |
3839 | -#define PHYSDEVOP_eoi 12 |
3840 | +#define PHYSDEVOP_eoi 12 |
3841 | struct physdev_eoi { |
3842 | - /* IN */ |
3843 | - uint32_t irq; |
3844 | + /* IN */ |
3845 | + uint32_t irq; |
3846 | }; |
3847 | +typedef struct physdev_eoi physdev_eoi_t; |
3848 | +DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t); |
3849 | |
3850 | /* |
3851 | * Query the status of an IRQ line. |
3852 | * @arg == pointer to physdev_irq_status_query structure. |
3853 | */ |
3854 | -#define PHYSDEVOP_irq_status_query 5 |
3855 | +#define PHYSDEVOP_irq_status_query 5 |
3856 | struct physdev_irq_status_query { |
3857 | - /* IN */ |
3858 | - uint32_t irq; |
3859 | - /* OUT */ |
3860 | - uint32_t flags; /* XENIRQSTAT_* */ |
3861 | + /* IN */ |
3862 | + uint32_t irq; |
3863 | + /* OUT */ |
3864 | + uint32_t flags; /* XENIRQSTAT_* */ |
3865 | }; |
3866 | +typedef struct physdev_irq_status_query physdev_irq_status_query_t; |
3867 | +DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t); |
3868 | |
3869 | /* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */ |
3870 | -#define _XENIRQSTAT_needs_eoi (0) |
3871 | -#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi) |
3872 | +#define _XENIRQSTAT_needs_eoi (0) |
3873 | +#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi) |
3874 | |
3875 | /* IRQ shared by multiple guests? */ |
3876 | -#define _XENIRQSTAT_shared (1) |
3877 | -#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared) |
3878 | +#define _XENIRQSTAT_shared (1) |
3879 | +#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared) |
3880 | |
3881 | /* |
3882 | * Set the current VCPU's I/O privilege level. |
3883 | * @arg == pointer to physdev_set_iopl structure. |
3884 | */ |
3885 | -#define PHYSDEVOP_set_iopl 6 |
3886 | +#define PHYSDEVOP_set_iopl 6 |
3887 | struct physdev_set_iopl { |
3888 | - /* IN */ |
3889 | - uint32_t iopl; |
3890 | + /* IN */ |
3891 | + uint32_t iopl; |
3892 | }; |
3893 | +typedef struct physdev_set_iopl physdev_set_iopl_t; |
3894 | +DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t); |
3895 | |
3896 | /* |
3897 | * Set the current VCPU's I/O-port permissions bitmap. |
3898 | * @arg == pointer to physdev_set_iobitmap structure. |
3899 | */ |
3900 | -#define PHYSDEVOP_set_iobitmap 7 |
3901 | +#define PHYSDEVOP_set_iobitmap 7 |
3902 | struct physdev_set_iobitmap { |
3903 | - /* IN */ |
3904 | - uint8_t * bitmap; |
3905 | - uint32_t nr_ports; |
3906 | + /* IN */ |
3907 | +#if __XEN_INTERFACE_VERSION__ >= 0x00030205 |
3908 | + XEN_GUEST_HANDLE(uint8) bitmap; |
3909 | +#else |
3910 | + uint8_t *bitmap; |
3911 | +#endif |
3912 | + uint32_t nr_ports; |
3913 | }; |
3914 | +typedef struct physdev_set_iobitmap physdev_set_iobitmap_t; |
3915 | +DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t); |
3916 | |
3917 | /* |
3918 | * Read or write an IO-APIC register. |
3919 | * @arg == pointer to physdev_apic structure. |
3920 | */ |
3921 | -#define PHYSDEVOP_apic_read 8 |
3922 | -#define PHYSDEVOP_apic_write 9 |
3923 | +#define PHYSDEVOP_apic_read 8 |
3924 | +#define PHYSDEVOP_apic_write 9 |
3925 | struct physdev_apic { |
3926 | - /* IN */ |
3927 | - unsigned long apic_physbase; |
3928 | - uint32_t reg; |
3929 | - /* IN or OUT */ |
3930 | - uint32_t value; |
3931 | + /* IN */ |
3932 | + unsigned long apic_physbase; |
3933 | + uint32_t reg; |
3934 | + /* IN or OUT */ |
3935 | + uint32_t value; |
3936 | }; |
3937 | +typedef struct physdev_apic physdev_apic_t; |
3938 | +DEFINE_XEN_GUEST_HANDLE(physdev_apic_t); |
3939 | |
3940 | /* |
3941 | * Allocate or free a physical upcall vector for the specified IRQ line. |
3942 | * @arg == pointer to physdev_irq structure. |
3943 | */ |
3944 | -#define PHYSDEVOP_alloc_irq_vector 10 |
3945 | -#define PHYSDEVOP_free_irq_vector 11 |
3946 | +#define PHYSDEVOP_alloc_irq_vector 10 |
3947 | +#define PHYSDEVOP_free_irq_vector 11 |
3948 | struct physdev_irq { |
3949 | - /* IN */ |
3950 | - uint32_t irq; |
3951 | - /* IN or OUT */ |
3952 | - uint32_t vector; |
3953 | + /* IN */ |
3954 | + uint32_t irq; |
3955 | + /* IN or OUT */ |
3956 | + uint32_t vector; |
3957 | }; |
3958 | +typedef struct physdev_irq physdev_irq_t; |
3959 | +DEFINE_XEN_GUEST_HANDLE(physdev_irq_t); |
3960 | |
3961 | /* |
3962 | * Argument to physdev_op_compat() hypercall. Superseded by new physdev_op() |
3963 | * hypercall since 0x00030202. |
3964 | */ |
3965 | struct physdev_op { |
3966 | - uint32_t cmd; |
3967 | - union { |
3968 | - struct physdev_irq_status_query irq_status_query; |
3969 | - struct physdev_set_iopl set_iopl; |
3970 | - struct physdev_set_iobitmap set_iobitmap; |
3971 | - struct physdev_apic apic_op; |
3972 | - struct physdev_irq irq_op; |
3973 | - } u; |
3974 | + uint32_t cmd; |
3975 | + union { |
3976 | + struct physdev_irq_status_query irq_status_query; |
3977 | + struct physdev_set_iopl set_iopl; |
3978 | + struct physdev_set_iobitmap set_iobitmap; |
3979 | + struct physdev_apic apic_op; |
3980 | + struct physdev_irq irq_op; |
3981 | + } u; |
3982 | }; |
3983 | +typedef struct physdev_op physdev_op_t; |
3984 | +DEFINE_XEN_GUEST_HANDLE(physdev_op_t); |
3985 | |
3986 | /* |
3987 | * Notify that some PIRQ-bound event channels have been unmasked. |
3988 | * ** This command is obsolete since interface version 0x00030202 and is ** |
3989 | - * ** unsupported by newer versions of Xen. ** |
3990 | + * ** unsupported by newer versions of Xen. ** |
3991 | */ |
3992 | -#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4 |
3993 | +#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4 |
3994 | |
3995 | /* |
3996 | * These all-capitals physdev operation names are superceded by the new names |
3997 | * (defined above) since interface version 0x00030202. |
3998 | */ |
3999 | -#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query |
4000 | -#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl |
4001 | -#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap |
4002 | -#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read |
4003 | -#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write |
4004 | -#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector |
4005 | -#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector |
4006 | +#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query |
4007 | +#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl |
4008 | +#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap |
4009 | +#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read |
4010 | +#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write |
4011 | +#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector |
4012 | +#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector |
4013 | #define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi |
4014 | -#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared |
4015 | +#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared |
4016 | |
4017 | #endif /* __XEN_PUBLIC_PHYSDEV_H__ */ |
4018 | + |
4019 | +/* |
4020 | + * Local variables: |
4021 | + * mode: C |
4022 | + * c-set-style: "BSD" |
4023 | + * c-basic-offset: 4 |
4024 | + * tab-width: 4 |
4025 | + * indent-tabs-mode: nil |
4026 | + * End: |
4027 | + */ |
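The query/EOI pair above is typically used together when finishing a PIRQ. The sketch below shows the pattern; HYPERVISOR_physdev_op() is the standard two-argument hypercall wrapper in this tree, and error handling is reduced to the minimum for illustration.

/*
 * Sketch: ask Xen whether an IRQ needs an explicit EOI and issue one.
 */
#include <xen/interface/physdev.h>

static void example_end_irq(uint32_t irq)
{
    struct physdev_irq_status_query query = { .irq = irq };

    if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &query) == 0 &&
        (query.flags & XENIRQSTAT_needs_eoi)) {
        struct physdev_eoi eoi = { .irq = irq };
        (void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
    }
}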
4028 | Index: head-2008-04-15/include/xen/interface/sched.h |
4029 | =================================================================== |
4030 | --- head-2008-04-15.orig/include/xen/interface/sched.h 2008-04-15 09:41:09.000000000 +0200 |
4031 | +++ head-2008-04-15/include/xen/interface/sched.h 2008-04-15 09:59:33.000000000 +0200 |
4032 | @@ -1,7 +1,25 @@ |
4033 | /****************************************************************************** |
4034 | * sched.h |
4035 | - * |
4036 | + * |
4037 | * Scheduler state interactions |
4038 | + * |
4039 | + * Permission is hereby granted, free of charge, to any person obtaining a copy |
4040 | + * of this software and associated documentation files (the "Software"), to |
4041 | + * deal in the Software without restriction, including without limitation the |
4042 | + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
4043 | + * sell copies of the Software, and to permit persons to whom the Software is |
4044 | + * furnished to do so, subject to the following conditions: |
4045 | + * |
4046 | + * The above copyright notice and this permission notice shall be included in |
4047 | + * all copies or substantial portions of the Software. |
4048 | + * |
4049 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
4050 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
4051 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
4052 | + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
4053 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
4054 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
4055 | + * DEALINGS IN THE SOFTWARE. |
4056 | * |
4057 | * Copyright (c) 2005, Keir Fraser <keir@xensource.com> |
4058 | */ |
4059 | @@ -13,17 +31,17 @@ |
4060 | |
4061 | /* |
4062 | * The prototype for this hypercall is: |
4063 | - * long sched_op_new(int cmd, void *arg) |
4064 | + * long sched_op(int cmd, void *arg) |
4065 | * @cmd == SCHEDOP_??? (scheduler operation). |
4066 | * @arg == Operation-specific extra argument(s), as described below. |
4067 | - * |
4068 | - * **NOTE**: |
4069 | - * Versions of Xen prior to 3.0.2 provide only the following legacy version |
4070 | + * |
4071 | + * Versions of Xen prior to 3.0.2 provided only the following legacy version |
4072 | * of this hypercall, supporting only the commands yield, block and shutdown: |
4073 | * long sched_op(int cmd, unsigned long arg) |
4074 | * @cmd == SCHEDOP_??? (scheduler operation). |
4075 | * @arg == 0 (SCHEDOP_yield and SCHEDOP_block) |
4076 | * == SHUTDOWN_* code (SCHEDOP_shutdown) |
4077 | + * This legacy version is available to new guests as sched_op_compat(). |
4078 | */ |
4079 | |
4080 | /* |
4081 | @@ -49,7 +67,8 @@ |
4082 | struct sched_shutdown { |
4083 | unsigned int reason; /* SHUTDOWN_* */ |
4084 | }; |
4085 | -DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown); |
4086 | +typedef struct sched_shutdown sched_shutdown_t; |
4087 | +DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t); |
4088 | |
4089 | /* |
4090 | * Poll a set of event-channel ports. Return when one or more are pending. An |
4091 | @@ -58,11 +77,26 @@ DEFINE_GUEST_HANDLE_STRUCT(sched_shutdow |
4092 | */ |
4093 | #define SCHEDOP_poll 3 |
4094 | struct sched_poll { |
4095 | - GUEST_HANDLE(evtchn_port_t) ports; |
4096 | + XEN_GUEST_HANDLE(evtchn_port_t) ports; |
4097 | unsigned int nr_ports; |
4098 | uint64_t timeout; |
4099 | }; |
4100 | -DEFINE_GUEST_HANDLE_STRUCT(sched_poll); |
4101 | +typedef struct sched_poll sched_poll_t; |
4102 | +DEFINE_XEN_GUEST_HANDLE(sched_poll_t); |
4103 | + |
4104 | +/* |
4105 | + * Declare a shutdown for another domain. The main use of this function is |
4106 | + * in interpreting shutdown requests and reasons for fully-virtualized |
4107 | + * domains. A para-virtualized domain may use SCHEDOP_shutdown directly. |
4108 | + * @arg == pointer to sched_remote_shutdown structure. |
4109 | + */ |
4110 | +#define SCHEDOP_remote_shutdown 4 |
4111 | +struct sched_remote_shutdown { |
4112 | + domid_t domain_id; /* Remote domain ID */ |
4113 | + unsigned int reason; /* SHUTDOWN_xxx reason */ |
4114 | +}; |
4115 | +typedef struct sched_remote_shutdown sched_remote_shutdown_t; |
4116 | +DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t); |
4117 | |
4118 | /* |
4119 | * Reason codes for SCHEDOP_shutdown. These may be interpreted by control |
4120 | @@ -75,3 +109,13 @@ DEFINE_GUEST_HANDLE_STRUCT(sched_poll); |
4121 | #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ |
4122 | |
4123 | #endif /* __XEN_PUBLIC_SCHED_H__ */ |
4124 | + |
4125 | +/* |
4126 | + * Local variables: |
4127 | + * mode: C |
4128 | + * c-set-style: "BSD" |
4129 | + * c-basic-offset: 4 |
4130 | + * tab-width: 4 |
4131 | + * indent-tabs-mode: nil |
4132 | + * End: |
4133 | + */ |
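Editor's note (not part of the patch): the sched.h hunks above rename the hypercall documented here from sched_op_new() to sched_op() and add SCHEDOP_remote_shutdown. As a hedged sketch of how the new sub-op is used, a sufficiently privileged guest could mark another domain as crashed roughly as follows; the HYPERVISOR_sched_op() wrapper is assumed to come from the per-architecture hypercall header (its prototype mirrors the one documented above), and SHUTDOWN_crash is defined earlier in sched.h.

    /* Sketch only: request that Xen treat domain 'domid' as crashed. */
    #include <xen/interface/sched.h>

    static int crash_remote_domain(domid_t domid)
    {
        struct sched_remote_shutdown op = {
            .domain_id = domid,
            .reason    = SHUTDOWN_crash,
        };

        /* 0 on success, negative error code otherwise (assumed wrapper). */
        return HYPERVISOR_sched_op(SCHEDOP_remote_shutdown, &op);
    }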
4134 | Index: head-2008-04-15/include/xen/interface/vcpu.h |
4135 | =================================================================== |
4136 | --- head-2008-04-15.orig/include/xen/interface/vcpu.h 2008-04-15 09:41:09.000000000 +0200 |
4137 | +++ head-2008-04-15/include/xen/interface/vcpu.h 2008-04-15 09:59:33.000000000 +0200 |
4138 | @@ -1,8 +1,8 @@ |
4139 | /****************************************************************************** |
4140 | * vcpu.h |
4141 | - * |
4142 | + * |
4143 | * VCPU initialisation, query, and hotplug. |
4144 | - * |
4145 | + * |
4146 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
4147 | * of this software and associated documentation files (the "Software"), to |
4148 | * deal in the Software without restriction, including without limitation the |
4149 | @@ -29,62 +29,64 @@ |
4150 | |
4151 | /* |
4152 | * Prototype for this hypercall is: |
4153 | - * int vcpu_op(int cmd, int vcpuid, void *extra_args) |
4154 | - * @cmd == VCPUOP_??? (VCPU operation). |
4155 | - * @vcpuid == VCPU to operate on. |
4156 | + * int vcpu_op(int cmd, int vcpuid, void *extra_args) |
4157 | + * @cmd == VCPUOP_??? (VCPU operation). |
4158 | + * @vcpuid == VCPU to operate on. |
4159 | * @extra_args == Operation-specific extra arguments (NULL if none). |
4160 | */ |
4161 | |
4162 | /* |
4163 | - * Initialise a VCPU. Each VCPU can be initialised only once. A |
4164 | + * Initialise a VCPU. Each VCPU can be initialised only once. A |
4165 | * newly-initialised VCPU will not run until it is brought up by VCPUOP_up. |
4166 | - * |
4167 | + * |
4168 | * @extra_arg == pointer to vcpu_guest_context structure containing initial |
4169 | - * state for the VCPU. |
4170 | + * state for the VCPU. |
4171 | */ |
4172 | -#define VCPUOP_initialise 0 |
4173 | +#define VCPUOP_initialise 0 |
4174 | |
4175 | /* |
4176 | * Bring up a VCPU. This makes the VCPU runnable. This operation will fail |
4177 | * if the VCPU has not been initialised (VCPUOP_initialise). |
4178 | */ |
4179 | -#define VCPUOP_up 1 |
4180 | +#define VCPUOP_up 1 |
4181 | |
4182 | /* |
4183 | * Bring down a VCPU (i.e., make it non-runnable). |
4184 | * There are a few caveats that callers should observe: |
4185 | - * 1. This operation may return, and VCPU_is_up may return false, before the |
4186 | - * VCPU stops running (i.e., the command is asynchronous). It is a good |
4187 | - * idea to ensure that the VCPU has entered a non-critical loop before |
4188 | - * bringing it down. Alternatively, this operation is guaranteed |
4189 | - * synchronous if invoked by the VCPU itself. |
4190 | - * 2. After a VCPU is initialised, there is currently no way to drop all its |
4191 | - * references to domain memory. Even a VCPU that is down still holds |
4192 | - * memory references via its pagetable base pointer and GDT. It is good |
4193 | - * practise to move a VCPU onto an 'idle' or default page table, LDT and |
4194 | - * GDT before bringing it down. |
4195 | + * 1. This operation may return, and VCPU_is_up may return false, before the |
4196 | + * VCPU stops running (i.e., the command is asynchronous). It is a good |
4197 | + * idea to ensure that the VCPU has entered a non-critical loop before |
4198 | + * bringing it down. Alternatively, this operation is guaranteed |
4199 | + * synchronous if invoked by the VCPU itself. |
4200 | + * 2. After a VCPU is initialised, there is currently no way to drop all its |
4201 | + * references to domain memory. Even a VCPU that is down still holds |
4202 | + * memory references via its pagetable base pointer and GDT. It is good |
4203 | + * practise to move a VCPU onto an 'idle' or default page table, LDT and |
4204 | + * GDT before bringing it down. |
4205 | */ |
4206 | -#define VCPUOP_down 2 |
4207 | +#define VCPUOP_down 2 |
4208 | |
4209 | /* Returns 1 if the given VCPU is up. */ |
4210 | -#define VCPUOP_is_up 3 |
4211 | +#define VCPUOP_is_up 3 |
4212 | |
4213 | /* |
4214 | * Return information about the state and running time of a VCPU. |
4215 | * @extra_arg == pointer to vcpu_runstate_info structure. |
4216 | */ |
4217 | -#define VCPUOP_get_runstate_info 4 |
4218 | +#define VCPUOP_get_runstate_info 4 |
4219 | struct vcpu_runstate_info { |
4220 | - /* VCPU's current state (RUNSTATE_*). */ |
4221 | - int state; |
4222 | - /* When was current state entered (system time, ns)? */ |
4223 | - uint64_t state_entry_time; |
4224 | - /* |
4225 | - * Time spent in each RUNSTATE_* (ns). The sum of these times is |
4226 | - * guaranteed not to drift from system time. |
4227 | - */ |
4228 | - uint64_t time[4]; |
4229 | + /* VCPU's current state (RUNSTATE_*). */ |
4230 | + int state; |
4231 | + /* When was current state entered (system time, ns)? */ |
4232 | + uint64_t state_entry_time; |
4233 | + /* |
4234 | + * Time spent in each RUNSTATE_* (ns). The sum of these times is |
4235 | + * guaranteed not to drift from system time. |
4236 | + */ |
4237 | + uint64_t time[4]; |
4238 | }; |
4239 | +typedef struct vcpu_runstate_info vcpu_runstate_info_t; |
4240 | +DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t); |
4241 | |
4242 | /* VCPU is currently running on a physical CPU. */ |
4243 | #define RUNSTATE_running 0 |
4244 | @@ -107,62 +109,105 @@ struct vcpu_runstate_info { |
4245 | * Register a shared memory area from which the guest may obtain its own |
4246 | * runstate information without needing to execute a hypercall. |
4247 | * Notes: |
4248 | - * 1. The registered address may be virtual or physical, depending on the |
4249 | - * platform. The virtual address should be registered on x86 systems. |
4250 | - * 2. Only one shared area may be registered per VCPU. The shared area is |
4251 | - * updated by the hypervisor each time the VCPU is scheduled. Thus |
4252 | - * runstate.state will always be RUNSTATE_running and |
4253 | - * runstate.state_entry_time will indicate the system time at which the |
4254 | - * VCPU was last scheduled to run. |
4255 | + * 1. The registered address may be a virtual address, a physical address, |
4256 | + *    or a guest handle, depending on the platform. A virtual address or |
4257 | + *    guest handle should be registered on x86 systems. |
4258 | + * 2. Only one shared area may be registered per VCPU. The shared area is |
4259 | + * updated by the hypervisor each time the VCPU is scheduled. Thus |
4260 | + * runstate.state will always be RUNSTATE_running and |
4261 | + * runstate.state_entry_time will indicate the system time at which the |
4262 | + * VCPU was last scheduled to run. |
4263 | * @extra_arg == pointer to vcpu_register_runstate_memory_area structure. |
4264 | */ |
4265 | #define VCPUOP_register_runstate_memory_area 5 |
4266 | struct vcpu_register_runstate_memory_area { |
4267 | - union { |
4268 | - struct vcpu_runstate_info *v; |
4269 | - uint64_t p; |
4270 | - } addr; |
4271 | + union { |
4272 | + XEN_GUEST_HANDLE(vcpu_runstate_info_t) h; |
4273 | + struct vcpu_runstate_info *v; |
4274 | + uint64_t p; |
4275 | + } addr; |
4276 | }; |
4277 | +typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t; |
4278 | +DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t); |
4279 | |
4280 | /* |
4281 | * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer |
4282 | * which can be set via these commands. Periods smaller than one millisecond |
4283 | * may not be supported. |
4284 | */ |
4285 | -#define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */ |
4286 | -#define VCPUOP_stop_periodic_timer 7 /* arg == NULL */ |
4287 | +#define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */ |
4288 | +#define VCPUOP_stop_periodic_timer 7 /* arg == NULL */ |
4289 | struct vcpu_set_periodic_timer { |
4290 | - uint64_t period_ns; |
4291 | + uint64_t period_ns; |
4292 | }; |
4293 | +typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t; |
4294 | +DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t); |
4295 | |
4296 | /* |
4297 | * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot |
4298 | * timer which can be set via these commands. |
4299 | */ |
4300 | -#define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */ |
4301 | +#define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */ |
4302 | #define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */ |
4303 | struct vcpu_set_singleshot_timer { |
4304 | - uint64_t timeout_abs_ns; |
4305 | - uint32_t flags; /* VCPU_SSHOTTMR_??? */ |
4306 | + uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */ |
4307 | + uint32_t flags; /* VCPU_SSHOTTMR_??? */ |
4308 | }; |
4309 | +typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t; |
4310 | +DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t); |
4311 | |
4312 | /* Flags to VCPUOP_set_singleshot_timer. */ |
4313 | /* Require the timeout to be in the future (return -ETIME if it's passed). */ |
4314 | #define _VCPU_SSHOTTMR_future (0) |
4315 | #define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future) |
4316 | |
4317 | -/* |
4318 | +/* |
4319 | * Register a memory location in the guest address space for the |
4320 | * vcpu_info structure. This allows the guest to place the vcpu_info |
4321 | * structure in a convenient place, such as in a per-cpu data area. |
4322 | * The pointer need not be page aligned, but the structure must not |
4323 | * cross a page boundary. |
4324 | + * |
4325 | + * This may be called only once per vcpu. |
4326 | */ |
4327 | -#define VCPUOP_register_vcpu_info 10 /* arg == struct vcpu_info */ |
4328 | +#define VCPUOP_register_vcpu_info 10 /* arg == vcpu_register_vcpu_info_t */ |
4329 | struct vcpu_register_vcpu_info { |
4330 | uint64_t mfn; /* mfn of page to place vcpu_info */ |
4331 | uint32_t offset; /* offset within page */ |
4332 | uint32_t rsvd; /* unused */ |
4333 | }; |
4334 | +typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t; |
4335 | +DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t); |
4336 | + |
4337 | +/* Send an NMI to the specified VCPU. @extra_arg == NULL. */ |
4338 | +#define VCPUOP_send_nmi 11 |
4339 | + |
4340 | +/* |
4341 | + * Get the physical ID information for a pinned vcpu's underlying physical |
4342 | + * processor. The physical ID information is architecture-specific. |
4343 | + * On x86: id[31:0]=apic_id, id[63:32]=acpi_id, and all values 0xff and |
4344 | + * greater are reserved. |
4345 | + * This command returns -EINVAL if it is not a valid operation for this VCPU. |
4346 | + */ |
4347 | +#define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */ |
4348 | +struct vcpu_get_physid { |
4349 | + uint64_t phys_id; |
4350 | +}; |
4351 | +typedef struct vcpu_get_physid vcpu_get_physid_t; |
4352 | +DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t); |
4353 | +#define xen_vcpu_physid_to_x86_apicid(physid) \ |
4354 | + ((((uint32_t)(physid)) >= 0xff) ? 0xff : ((uint8_t)(physid))) |
4355 | +#define xen_vcpu_physid_to_x86_acpiid(physid) \ |
4356 | + ((((uint32_t)((physid)>>32)) >= 0xff) ? 0xff : ((uint8_t)((physid)>>32))) |
4357 | |
4358 | #endif /* __XEN_PUBLIC_VCPU_H__ */ |
4359 | + |
4360 | +/* |
4361 | + * Local variables: |
4362 | + * mode: C |
4363 | + * c-set-style: "BSD" |
4364 | + * c-basic-offset: 4 |
4365 | + * tab-width: 4 |
4366 | + * indent-tabs-mode: nil |
4367 | + * End: |
4368 | + */ |
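Editor's note (not part of the patch): VCPUOP_get_physid packs the APIC ID into bits 31:0 and the ACPI ID into bits 63:32 of phys_id. A minimal standalone sketch of the unpacking macros added above; the sample value is invented purely for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the hunk above. */
    #define xen_vcpu_physid_to_x86_apicid(physid) \
        ((((uint32_t)(physid)) >= 0xff) ? 0xff : ((uint8_t)(physid)))
    #define xen_vcpu_physid_to_x86_acpiid(physid) \
        ((((uint32_t)((physid)>>32)) >= 0xff) ? 0xff : ((uint8_t)((physid)>>32)))

    int main(void)
    {
        /* id[31:0] = APIC ID, id[63:32] = ACPI ID, as documented above. */
        uint64_t phys_id = ((uint64_t)0x04 << 32) | 0x02;

        printf("apic_id=%u acpi_id=%u\n",              /* prints 2 and 4 */
               (unsigned)xen_vcpu_physid_to_x86_apicid(phys_id),
               (unsigned)xen_vcpu_physid_to_x86_acpiid(phys_id));
        return 0;
    }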
4369 | Index: head-2008-04-15/include/xen/interface/version.h |
4370 | =================================================================== |
4371 | --- head-2008-04-15.orig/include/xen/interface/version.h 2008-04-15 09:41:09.000000000 +0200 |
4372 | +++ head-2008-04-15/include/xen/interface/version.h 2008-04-15 09:59:33.000000000 +0200 |
4373 | @@ -1,7 +1,25 @@ |
4374 | /****************************************************************************** |
4375 | * version.h |
4376 | - * |
4377 | + * |
4378 | * Xen version, type, and compile information. |
4379 | + * |
4380 | + * Permission is hereby granted, free of charge, to any person obtaining a copy |
4381 | + * of this software and associated documentation files (the "Software"), to |
4382 | + * deal in the Software without restriction, including without limitation the |
4383 | + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
4384 | + * sell copies of the Software, and to permit persons to whom the Software is |
4385 | + * furnished to do so, subject to the following conditions: |
4386 | + * |
4387 | + * The above copyright notice and this permission notice shall be included in |
4388 | + * all copies or substantial portions of the Software. |
4389 | + * |
4390 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
4391 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
4392 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
4393 | + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
4394 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
4395 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
4396 | + * DEALINGS IN THE SOFTWARE. |
4397 | * |
4398 | * Copyright (c) 2005, Nguyen Anh Quynh <aquynh@gmail.com> |
4399 | * Copyright (c) 2005, Keir Fraser <keir@xensource.com> |
4400 | @@ -10,17 +28,15 @@ |
4401 | #ifndef __XEN_PUBLIC_VERSION_H__ |
4402 | #define __XEN_PUBLIC_VERSION_H__ |
4403 | |
4404 | -/* NB. All ops return zero on success, except XENVER_version. */ |
4405 | +/* NB. All ops return zero on success, except XENVER_{version,pagesize} */ |
4406 | |
4407 | /* arg == NULL; returns major:minor (16:16). */ |
4408 | #define XENVER_version 0 |
4409 | |
4410 | /* arg == xen_extraversion_t. */ |
4411 | #define XENVER_extraversion 1 |
4412 | -struct xen_extraversion { |
4413 | - char extraversion[16]; |
4414 | -}; |
4415 | -#define XEN_EXTRAVERSION_LEN (sizeof(struct xen_extraversion)) |
4416 | +typedef char xen_extraversion_t[16]; |
4417 | +#define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t)) |
4418 | |
4419 | /* arg == xen_compile_info_t. */ |
4420 | #define XENVER_compile_info 2 |
4421 | @@ -30,31 +46,46 @@ struct xen_compile_info { |
4422 | char compile_domain[32]; |
4423 | char compile_date[32]; |
4424 | }; |
4425 | +typedef struct xen_compile_info xen_compile_info_t; |
4426 | |
4427 | #define XENVER_capabilities 3 |
4428 | -struct xen_capabilities_info { |
4429 | - char info[1024]; |
4430 | -}; |
4431 | -#define XEN_CAPABILITIES_INFO_LEN (sizeof(struct xen_capabilities_info)) |
4432 | +typedef char xen_capabilities_info_t[1024]; |
4433 | +#define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t)) |
4434 | |
4435 | #define XENVER_changeset 4 |
4436 | -struct xen_changeset_info { |
4437 | - char info[64]; |
4438 | -}; |
4439 | -#define XEN_CHANGESET_INFO_LEN (sizeof(struct xen_changeset_info)) |
4440 | +typedef char xen_changeset_info_t[64]; |
4441 | +#define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t)) |
4442 | |
4443 | #define XENVER_platform_parameters 5 |
4444 | struct xen_platform_parameters { |
4445 | unsigned long virt_start; |
4446 | }; |
4447 | +typedef struct xen_platform_parameters xen_platform_parameters_t; |
4448 | |
4449 | #define XENVER_get_features 6 |
4450 | struct xen_feature_info { |
4451 | unsigned int submap_idx; /* IN: which 32-bit submap to return */ |
4452 | uint32_t submap; /* OUT: 32-bit submap */ |
4453 | }; |
4454 | +typedef struct xen_feature_info xen_feature_info_t; |
4455 | |
4456 | /* Declares the features reported by XENVER_get_features. */ |
4457 | #include "features.h" |
4458 | |
4459 | +/* arg == NULL; returns host memory page size. */ |
4460 | +#define XENVER_pagesize 7 |
4461 | + |
4462 | +/* arg == xen_domain_handle_t. */ |
4463 | +#define XENVER_guest_handle 8 |
4464 | + |
4465 | #endif /* __XEN_PUBLIC_VERSION_H__ */ |
4466 | + |
4467 | +/* |
4468 | + * Local variables: |
4469 | + * mode: C |
4470 | + * c-set-style: "BSD" |
4471 | + * c-basic-offset: 4 |
4472 | + * tab-width: 4 |
4473 | + * indent-tabs-mode: nil |
4474 | + * End: |
4475 | + */ |
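Editor's note (not part of the patch): XENVER_version returns the major and minor numbers packed as 16:16, and XENVER_extraversion fills a xen_extraversion_t (now a plain 16-byte char array). A hedged kernel-context sketch; the HYPERVISOR_xen_version() wrapper is assumed to come from the per-architecture hypercall header.

    #include <linux/kernel.h>
    #include <xen/interface/version.h>

    static void report_xen_version(void)
    {
        int ver = HYPERVISOR_xen_version(XENVER_version, NULL);
        xen_extraversion_t extra;

        HYPERVISOR_xen_version(XENVER_extraversion, extra);
        /* The precision keeps the print safe even if 'extra' uses all 16 bytes. */
        printk(KERN_INFO "Running on Xen %d.%d%.*s\n",
               ver >> 16, ver & 0xffff, (int)sizeof(extra), extra);
    }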
4476 | Index: head-2008-04-15/include/xen/interface/xen.h |
4477 | =================================================================== |
4478 | --- head-2008-04-15.orig/include/xen/interface/xen.h 2008-04-15 09:41:09.000000000 +0200 |
4479 | +++ head-2008-04-15/include/xen/interface/xen.h 2008-04-15 09:59:33.000000000 +0200 |
4480 | @@ -1,7 +1,25 @@ |
4481 | /****************************************************************************** |
4482 | * xen.h |
4483 | - * |
4484 | + * |
4485 | * Guest OS interface to Xen. |
4486 | + * |
4487 | + * Permission is hereby granted, free of charge, to any person obtaining a copy |
4488 | + * of this software and associated documentation files (the "Software"), to |
4489 | + * deal in the Software without restriction, including without limitation the |
4490 | + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
4491 | + * sell copies of the Software, and to permit persons to whom the Software is |
4492 | + * furnished to do so, subject to the following conditions: |
4493 | + * |
4494 | + * The above copyright notice and this permission notice shall be included in |
4495 | + * all copies or substantial portions of the Software. |
4496 | + * |
4497 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
4498 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
4499 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
4500 | + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
4501 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
4502 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
4503 | + * DEALINGS IN THE SOFTWARE. |
4504 | * |
4505 | * Copyright (c) 2004, K A Fraser |
4506 | */ |
4507 | @@ -9,28 +27,43 @@ |
4508 | #ifndef __XEN_PUBLIC_XEN_H__ |
4509 | #define __XEN_PUBLIC_XEN_H__ |
4510 | |
4511 | -#include <asm/xen/interface.h> |
4512 | +#include "xen-compat.h" |
4513 | |
4514 | -/* |
4515 | - * XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS). |
4516 | - */ |
4517 | +#if defined(__i386__) || defined(__x86_64__) |
4518 | +#include "arch-x86/xen.h" |
4519 | +#elif defined(__ia64__) |
4520 | +#include "arch-ia64.h" |
4521 | +#elif defined(__powerpc__) |
4522 | +#include "arch-powerpc.h" |
4523 | +#else |
4524 | +#error "Unsupported architecture" |
4525 | +#endif |
4526 | + |
4527 | +#ifndef __ASSEMBLY__ |
4528 | +/* Guest handles for primitive C types. */ |
4529 | +DEFINE_XEN_GUEST_HANDLE(char); |
4530 | +__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char); |
4531 | +DEFINE_XEN_GUEST_HANDLE(int); |
4532 | +__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int); |
4533 | +DEFINE_XEN_GUEST_HANDLE(long); |
4534 | +__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long); |
4535 | +DEFINE_XEN_GUEST_HANDLE(void); |
4536 | + |
4537 | +DEFINE_XEN_GUEST_HANDLE(xen_pfn_t); |
4538 | +#endif |
4539 | |
4540 | /* |
4541 | - * x86_32: EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5. |
4542 | - * EAX = return value |
4543 | - * (argument registers may be clobbered on return) |
4544 | - * x86_64: RAX = vector; RDI, RSI, RDX, R10, R8, R9 = args 1, 2, 3, 4, 5, 6. |
4545 | - * RAX = return value |
4546 | - * (argument registers not clobbered on return; RCX, R11 are) |
4547 | + * HYPERCALLS |
4548 | */ |
4549 | + |
4550 | #define __HYPERVISOR_set_trap_table 0 |
4551 | #define __HYPERVISOR_mmu_update 1 |
4552 | #define __HYPERVISOR_set_gdt 2 |
4553 | #define __HYPERVISOR_stack_switch 3 |
4554 | #define __HYPERVISOR_set_callbacks 4 |
4555 | #define __HYPERVISOR_fpu_taskswitch 5 |
4556 | -#define __HYPERVISOR_sched_op 6 |
4557 | -#define __HYPERVISOR_dom0_op 7 |
4558 | +#define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */ |
4559 | +#define __HYPERVISOR_platform_op 7 |
4560 | #define __HYPERVISOR_set_debugreg 8 |
4561 | #define __HYPERVISOR_get_debugreg 9 |
4562 | #define __HYPERVISOR_update_descriptor 10 |
4563 | @@ -38,10 +71,10 @@ |
4564 | #define __HYPERVISOR_multicall 13 |
4565 | #define __HYPERVISOR_update_va_mapping 14 |
4566 | #define __HYPERVISOR_set_timer_op 15 |
4567 | -#define __HYPERVISOR_event_channel_op_compat 16 |
4568 | +#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */ |
4569 | #define __HYPERVISOR_xen_version 17 |
4570 | #define __HYPERVISOR_console_io 18 |
4571 | -#define __HYPERVISOR_physdev_op_compat 19 |
4572 | +#define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */ |
4573 | #define __HYPERVISOR_grant_table_op 20 |
4574 | #define __HYPERVISOR_vm_assist 21 |
4575 | #define __HYPERVISOR_update_va_mapping_otherdomain 22 |
4576 | @@ -49,35 +82,90 @@ |
4577 | #define __HYPERVISOR_vcpu_op 24 |
4578 | #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */ |
4579 | #define __HYPERVISOR_mmuext_op 26 |
4580 | -#define __HYPERVISOR_acm_op 27 |
4581 | +#define __HYPERVISOR_xsm_op 27 |
4582 | #define __HYPERVISOR_nmi_op 28 |
4583 | -#define __HYPERVISOR_sched_op_new 29 |
4584 | +#define __HYPERVISOR_sched_op 29 |
4585 | #define __HYPERVISOR_callback_op 30 |
4586 | #define __HYPERVISOR_xenoprof_op 31 |
4587 | #define __HYPERVISOR_event_channel_op 32 |
4588 | #define __HYPERVISOR_physdev_op 33 |
4589 | #define __HYPERVISOR_hvm_op 34 |
4590 | +#define __HYPERVISOR_sysctl 35 |
4591 | +#define __HYPERVISOR_domctl 36 |
4592 | +#define __HYPERVISOR_kexec_op 37 |
4593 | + |
4594 | +/* Architecture-specific hypercall definitions. */ |
4595 | +#define __HYPERVISOR_arch_0 48 |
4596 | +#define __HYPERVISOR_arch_1 49 |
4597 | +#define __HYPERVISOR_arch_2 50 |
4598 | +#define __HYPERVISOR_arch_3 51 |
4599 | +#define __HYPERVISOR_arch_4 52 |
4600 | +#define __HYPERVISOR_arch_5 53 |
4601 | +#define __HYPERVISOR_arch_6 54 |
4602 | +#define __HYPERVISOR_arch_7 55 |
4603 | + |
4604 | +/* |
4605 | + * HYPERCALL COMPATIBILITY. |
4606 | + */ |
4607 | + |
4608 | +/* New sched_op hypercall introduced in 0x00030101. */ |
4609 | +#if __XEN_INTERFACE_VERSION__ < 0x00030101 |
4610 | +#undef __HYPERVISOR_sched_op |
4611 | +#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat |
4612 | +#endif |
4613 | |
4614 | -/* |
4615 | +/* New event-channel and physdev hypercalls introduced in 0x00030202. */ |
4616 | +#if __XEN_INTERFACE_VERSION__ < 0x00030202 |
4617 | +#undef __HYPERVISOR_event_channel_op |
4618 | +#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat |
4619 | +#undef __HYPERVISOR_physdev_op |
4620 | +#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat |
4621 | +#endif |
4622 | + |
4623 | +/* New platform_op hypercall introduced in 0x00030204. */ |
4624 | +#if __XEN_INTERFACE_VERSION__ < 0x00030204 |
4625 | +#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op |
4626 | +#endif |
4627 | + |
4628 | +/* |
4629 | * VIRTUAL INTERRUPTS |
4630 | - * |
4631 | + * |
4632 | * Virtual interrupts that a guest OS may receive from Xen. |
4633 | - */ |
4634 | -#define VIRQ_TIMER 0 /* Timebase update, and/or requested timeout. */ |
4635 | -#define VIRQ_DEBUG 1 /* Request guest to dump debug info. */ |
4636 | -#define VIRQ_CONSOLE 2 /* (DOM0) Bytes received on emergency console. */ |
4637 | -#define VIRQ_DOM_EXC 3 /* (DOM0) Exceptional event for some domain. */ |
4638 | -#define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */ |
4639 | -#define NR_VIRQS 8 |
4640 | + * |
4641 | + * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a |
4642 | + * global VIRQ. The former can be bound once per VCPU and cannot be re-bound. |
4643 | + * The latter can be allocated only once per guest: they must initially be |
4644 | + * allocated to VCPU0 but can subsequently be re-bound. |
4645 | + */ |
4646 | +#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */ |
4647 | +#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */ |
4648 | +#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */ |
4649 | +#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */ |
4650 | +#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */ |
4651 | +#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */ |
4652 | +#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */ |
4653 | +#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */ |
4654 | + |
4655 | +/* Architecture-specific VIRQ definitions. */ |
4656 | +#define VIRQ_ARCH_0 16 |
4657 | +#define VIRQ_ARCH_1 17 |
4658 | +#define VIRQ_ARCH_2 18 |
4659 | +#define VIRQ_ARCH_3 19 |
4660 | +#define VIRQ_ARCH_4 20 |
4661 | +#define VIRQ_ARCH_5 21 |
4662 | +#define VIRQ_ARCH_6 22 |
4663 | +#define VIRQ_ARCH_7 23 |
4664 | + |
4665 | +#define NR_VIRQS 24 |
4666 | |
4667 | /* |
4668 | * MMU-UPDATE REQUESTS |
4669 | - * |
4670 | + * |
4671 | * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs. |
4672 | * A foreigndom (FD) can be specified (or DOMID_SELF for none). |
4673 | * Where the FD has some effect, it is described below. |
4674 | * ptr[1:0] specifies the appropriate MMU_* command. |
4675 | - * |
4676 | + * |
4677 | * ptr[1:0] == MMU_NORMAL_PT_UPDATE: |
4678 | * Updates an entry in a page table. If updating an L1 table, and the new |
4679 | * table entry is valid/present, the mapped frame must belong to the FD, if |
4680 | @@ -87,56 +175,61 @@ |
4681 | * FD == DOMID_XEN: Map restricted areas of Xen's heap space. |
4682 | * ptr[:2] -- Machine address of the page-table entry to modify. |
4683 | * val -- Value to write. |
4684 | - * |
4685 | + * |
4686 | * ptr[1:0] == MMU_MACHPHYS_UPDATE: |
4687 | * Updates an entry in the machine->pseudo-physical mapping table. |
4688 | * ptr[:2] -- Machine address within the frame whose mapping to modify. |
4689 | * The frame must belong to the FD, if one is specified. |
4690 | * val -- Value to write into the mapping entry. |
4691 | - */ |
4692 | -#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */ |
4693 | -#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */ |
4694 | + * |
4695 | + * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD: |
4696 | + * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed |
4697 | + * with those in @val. |
4698 | + */ |
4699 | +#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */ |
4700 | +#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */ |
4701 | +#define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */ |
4702 | |
4703 | /* |
4704 | * MMU EXTENDED OPERATIONS |
4705 | - * |
4706 | + * |
4707 | * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures. |
4708 | * A foreigndom (FD) can be specified (or DOMID_SELF for none). |
4709 | * Where the FD has some effect, it is described below. |
4710 | - * |
4711 | + * |
4712 | * cmd: MMUEXT_(UN)PIN_*_TABLE |
4713 | * mfn: Machine frame number to be (un)pinned as a p.t. page. |
4714 | * The frame must belong to the FD, if one is specified. |
4715 | - * |
4716 | + * |
4717 | * cmd: MMUEXT_NEW_BASEPTR |
4718 | * mfn: Machine frame number of new page-table base to install in MMU. |
4719 | - * |
4720 | + * |
4721 | * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only] |
4722 | * mfn: Machine frame number of new page-table base to install in MMU |
4723 | * when in user space. |
4724 | - * |
4725 | + * |
4726 | * cmd: MMUEXT_TLB_FLUSH_LOCAL |
4727 | * No additional arguments. Flushes local TLB. |
4728 | - * |
4729 | + * |
4730 | * cmd: MMUEXT_INVLPG_LOCAL |
4731 | * linear_addr: Linear address to be flushed from the local TLB. |
4732 | - * |
4733 | + * |
4734 | * cmd: MMUEXT_TLB_FLUSH_MULTI |
4735 | * vcpumask: Pointer to bitmap of VCPUs to be flushed. |
4736 | - * |
4737 | + * |
4738 | * cmd: MMUEXT_INVLPG_MULTI |
4739 | * linear_addr: Linear address to be flushed. |
4740 | * vcpumask: Pointer to bitmap of VCPUs to be flushed. |
4741 | - * |
4742 | + * |
4743 | * cmd: MMUEXT_TLB_FLUSH_ALL |
4744 | * No additional arguments. Flushes all VCPUs' TLBs. |
4745 | - * |
4746 | + * |
4747 | * cmd: MMUEXT_INVLPG_ALL |
4748 | * linear_addr: Linear address to be flushed from all VCPUs' TLBs. |
4749 | - * |
4750 | + * |
4751 | * cmd: MMUEXT_FLUSH_CACHE |
4752 | * No additional arguments. Writes back and flushes cache contents. |
4753 | - * |
4754 | + * |
4755 | * cmd: MMUEXT_SET_LDT |
4756 | * linear_addr: Linear address of LDT base (NB. must be page-aligned). |
4757 | * nr_ents: Number of entries in LDT. |
4758 | @@ -159,21 +252,26 @@ |
4759 | |
4760 | #ifndef __ASSEMBLY__ |
4761 | struct mmuext_op { |
4762 | - unsigned int cmd; |
4763 | - union { |
4764 | - /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */ |
4765 | - unsigned long mfn; |
4766 | - /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */ |
4767 | - unsigned long linear_addr; |
4768 | - } arg1; |
4769 | - union { |
4770 | - /* SET_LDT */ |
4771 | - unsigned int nr_ents; |
4772 | - /* TLB_FLUSH_MULTI, INVLPG_MULTI */ |
4773 | - void *vcpumask; |
4774 | - } arg2; |
4775 | + unsigned int cmd; |
4776 | + union { |
4777 | + /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */ |
4778 | + xen_pfn_t mfn; |
4779 | + /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */ |
4780 | + unsigned long linear_addr; |
4781 | + } arg1; |
4782 | + union { |
4783 | + /* SET_LDT */ |
4784 | + unsigned int nr_ents; |
4785 | + /* TLB_FLUSH_MULTI, INVLPG_MULTI */ |
4786 | +#if __XEN_INTERFACE_VERSION__ >= 0x00030205 |
4787 | + XEN_GUEST_HANDLE(void) vcpumask; |
4788 | +#else |
4789 | + void *vcpumask; |
4790 | +#endif |
4791 | + } arg2; |
4792 | }; |
4793 | -DEFINE_GUEST_HANDLE_STRUCT(mmuext_op); |
4794 | +typedef struct mmuext_op mmuext_op_t; |
4795 | +DEFINE_XEN_GUEST_HANDLE(mmuext_op_t); |
4796 | #endif |
4797 | |
4798 | /* These are passed as 'flags' to update_va_mapping. They can be ORed. */ |
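Editor's note (not part of the patch): the mmuext_op hunk above changes the mfn field to xen_pfn_t and, for interface versions >= 0x00030205, turns vcpumask into a guest handle. As a hedged sketch of how such an op is submitted, a local TLB flush might look as follows; the HYPERVISOR_mmuext_op() wrapper signature is assumed from the per-architecture hypercall header, and MMUEXT_TLB_FLUSH_LOCAL / DOMID_SELF are defined elsewhere in this header.

    /* Sketch only: batch of one op; this command needs no arg1/arg2. */
    static void flush_local_tlb_via_xen(void)
    {
        struct mmuext_op op = { .cmd = MMUEXT_TLB_FLUSH_LOCAL };
        int done = 0;

        /* Assumed wrapper: (ops, count, success_count, foreign domain). */
        if (HYPERVISOR_mmuext_op(&op, 1, &done, DOMID_SELF) < 0 || done != 1)
            BUG();
    }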
4799 | @@ -198,11 +296,24 @@ DEFINE_GUEST_HANDLE_STRUCT(mmuext_op); |
4800 | */ |
4801 | #define VMASST_CMD_enable 0 |
4802 | #define VMASST_CMD_disable 1 |
4803 | + |
4804 | +/* x86/32 guests: simulate full 4GB segment limits. */ |
4805 | #define VMASST_TYPE_4gb_segments 0 |
4806 | + |
4807 | +/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */ |
4808 | #define VMASST_TYPE_4gb_segments_notify 1 |
4809 | + |
4810 | +/* |
4811 | + * x86 guests: support writes to bottom-level PTEs. |
4812 | + * NB1. Page-directory entries cannot be written. |
4813 | + * NB2. Guest must continue to remove all writable mappings of PTEs. |
4814 | + */ |
4815 | #define VMASST_TYPE_writable_pagetables 2 |
4816 | + |
4817 | +/* x86/PAE guests: support PDPTs above 4GB. */ |
4818 | #define VMASST_TYPE_pae_extended_cr3 3 |
4819 | -#define MAX_VMASST_TYPE 3 |
4820 | + |
4821 | +#define MAX_VMASST_TYPE 3 |
4822 | |
4823 | #ifndef __ASSEMBLY__ |
4824 | |
4825 | @@ -241,18 +352,19 @@ struct mmu_update { |
4826 | uint64_t ptr; /* Machine address of PTE. */ |
4827 | uint64_t val; /* New contents of PTE. */ |
4828 | }; |
4829 | -DEFINE_GUEST_HANDLE_STRUCT(mmu_update); |
4830 | +typedef struct mmu_update mmu_update_t; |
4831 | +DEFINE_XEN_GUEST_HANDLE(mmu_update_t); |
4832 | |
4833 | /* |
4834 | * Send an array of these to HYPERVISOR_multicall(). |
4835 | * NB. The fields are natural register size for this architecture. |
4836 | */ |
4837 | struct multicall_entry { |
4838 | - unsigned long op; |
4839 | - long result; |
4840 | + unsigned long op, result; |
4841 | unsigned long args[6]; |
4842 | }; |
4843 | -DEFINE_GUEST_HANDLE_STRUCT(multicall_entry); |
4844 | +typedef struct multicall_entry multicall_entry_t; |
4845 | +DEFINE_XEN_GUEST_HANDLE(multicall_entry_t); |
4846 | |
4847 | /* |
4848 | * Event channel endpoints per domain: |
4849 | @@ -261,175 +373,231 @@ DEFINE_GUEST_HANDLE_STRUCT(multicall_ent |
4850 | #define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64) |
4851 | |
4852 | struct vcpu_time_info { |
4853 | - /* |
4854 | - * Updates to the following values are preceded and followed |
4855 | - * by an increment of 'version'. The guest can therefore |
4856 | - * detect updates by looking for changes to 'version'. If the |
4857 | - * least-significant bit of the version number is set then an |
4858 | - * update is in progress and the guest must wait to read a |
4859 | - * consistent set of values. The correct way to interact with |
4860 | - * the version number is similar to Linux's seqlock: see the |
4861 | - * implementations of read_seqbegin/read_seqretry. |
4862 | - */ |
4863 | - uint32_t version; |
4864 | - uint32_t pad0; |
4865 | - uint64_t tsc_timestamp; /* TSC at last update of time vals. */ |
4866 | - uint64_t system_time; /* Time, in nanosecs, since boot. */ |
4867 | - /* |
4868 | - * Current system time: |
4869 | - * system_time + ((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul |
4870 | - * CPU frequency (Hz): |
4871 | - * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift |
4872 | - */ |
4873 | - uint32_t tsc_to_system_mul; |
4874 | - int8_t tsc_shift; |
4875 | - int8_t pad1[3]; |
4876 | + /* |
4877 | + * Updates to the following values are preceded and followed by an |
4878 | + * increment of 'version'. The guest can therefore detect updates by |
4879 | + * looking for changes to 'version'. If the least-significant bit of |
4880 | + * the version number is set then an update is in progress and the guest |
4881 | + * must wait to read a consistent set of values. |
4882 | + * The correct way to interact with the version number is similar to |
4883 | + * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry. |
4884 | + */ |
4885 | + uint32_t version; |
4886 | + uint32_t pad0; |
4887 | + uint64_t tsc_timestamp; /* TSC at last update of time vals. */ |
4888 | + uint64_t system_time; /* Time, in nanosecs, since boot. */ |
4889 | + /* |
4890 | + * Current system time: |
4891 | + * system_time + |
4892 | + * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32) |
4893 | + * CPU frequency (Hz): |
4894 | + * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift |
4895 | + */ |
4896 | + uint32_t tsc_to_system_mul; |
4897 | + int8_t tsc_shift; |
4898 | + int8_t pad1[3]; |
4899 | }; /* 32 bytes */ |
4900 | +typedef struct vcpu_time_info vcpu_time_info_t; |
4901 | |
4902 | struct vcpu_info { |
4903 | - /* |
4904 | - * 'evtchn_upcall_pending' is written non-zero by Xen to indicate |
4905 | - * a pending notification for a particular VCPU. It is then cleared |
4906 | - * by the guest OS /before/ checking for pending work, thus avoiding |
4907 | - * a set-and-check race. Note that the mask is only accessed by Xen |
4908 | - * on the CPU that is currently hosting the VCPU. This means that the |
4909 | - * pending and mask flags can be updated by the guest without special |
4910 | - * synchronisation (i.e., no need for the x86 LOCK prefix). |
4911 | - * This may seem suboptimal because if the pending flag is set by |
4912 | - * a different CPU then an IPI may be scheduled even when the mask |
4913 | - * is set. However, note: |
4914 | - * 1. The task of 'interrupt holdoff' is covered by the per-event- |
4915 | - * channel mask bits. A 'noisy' event that is continually being |
4916 | - * triggered can be masked at source at this very precise |
4917 | - * granularity. |
4918 | - * 2. The main purpose of the per-VCPU mask is therefore to restrict |
4919 | - * reentrant execution: whether for concurrency control, or to |
4920 | - * prevent unbounded stack usage. Whatever the purpose, we expect |
4921 | - * that the mask will be asserted only for short periods at a time, |
4922 | - * and so the likelihood of a 'spurious' IPI is suitably small. |
4923 | - * The mask is read before making an event upcall to the guest: a |
4924 | - * non-zero mask therefore guarantees that the VCPU will not receive |
4925 | - * an upcall activation. The mask is cleared when the VCPU requests |
4926 | - * to block: this avoids wakeup-waiting races. |
4927 | - */ |
4928 | - uint8_t evtchn_upcall_pending; |
4929 | - uint8_t evtchn_upcall_mask; |
4930 | - unsigned long evtchn_pending_sel; |
4931 | - struct arch_vcpu_info arch; |
4932 | - struct vcpu_time_info time; |
4933 | + /* |
4934 | + * 'evtchn_upcall_pending' is written non-zero by Xen to indicate |
4935 | + * a pending notification for a particular VCPU. It is then cleared |
4936 | + * by the guest OS /before/ checking for pending work, thus avoiding |
4937 | + * a set-and-check race. Note that the mask is only accessed by Xen |
4938 | + * on the CPU that is currently hosting the VCPU. This means that the |
4939 | + * pending and mask flags can be updated by the guest without special |
4940 | + * synchronisation (i.e., no need for the x86 LOCK prefix). |
4941 | + * This may seem suboptimal because if the pending flag is set by |
4942 | + * a different CPU then an IPI may be scheduled even when the mask |
4943 | + * is set. However, note: |
4944 | + * 1. The task of 'interrupt holdoff' is covered by the per-event- |
4945 | + * channel mask bits. A 'noisy' event that is continually being |
4946 | + * triggered can be masked at source at this very precise |
4947 | + * granularity. |
4948 | + * 2. The main purpose of the per-VCPU mask is therefore to restrict |
4949 | + * reentrant execution: whether for concurrency control, or to |
4950 | + * prevent unbounded stack usage. Whatever the purpose, we expect |
4951 | + * that the mask will be asserted only for short periods at a time, |
4952 | + * and so the likelihood of a 'spurious' IPI is suitably small. |
4953 | + * The mask is read before making an event upcall to the guest: a |
4954 | + * non-zero mask therefore guarantees that the VCPU will not receive |
4955 | + * an upcall activation. The mask is cleared when the VCPU requests |
4956 | + * to block: this avoids wakeup-waiting races. |
4957 | + */ |
4958 | + uint8_t evtchn_upcall_pending; |
4959 | + uint8_t evtchn_upcall_mask; |
4960 | + unsigned long evtchn_pending_sel; |
4961 | + struct arch_vcpu_info arch; |
4962 | + struct vcpu_time_info time; |
4963 | }; /* 64 bytes (x86) */ |
4964 | +#ifndef __XEN__ |
4965 | +typedef struct vcpu_info vcpu_info_t; |
4966 | +#endif |
4967 | |
4968 | /* |
4969 | * Xen/kernel shared data -- pointer provided in start_info. |
4970 | - * NB. We expect that this struct is smaller than a page. |
4971 | + * |
4972 | + * This structure is defined to be both smaller than a page, and the |
4973 | + * only data on the shared page, but may vary in actual size even within |
4974 | + * compatible Xen versions; guests should not rely on the size |
4975 | + * of this structure remaining constant. |
4976 | */ |
4977 | struct shared_info { |
4978 | - struct vcpu_info vcpu_info[MAX_VIRT_CPUS]; |
4979 | + struct vcpu_info vcpu_info[MAX_VIRT_CPUS]; |
4980 | |
4981 | - /* |
4982 | - * A domain can create "event channels" on which it can send and receive |
4983 | - * asynchronous event notifications. There are three classes of event that |
4984 | - * are delivered by this mechanism: |
4985 | - * 1. Bi-directional inter- and intra-domain connections. Domains must |
4986 | - * arrange out-of-band to set up a connection (usually by allocating |
4987 | - * an unbound 'listener' port and avertising that via a storage service |
4988 | - * such as xenstore). |
4989 | - * 2. Physical interrupts. A domain with suitable hardware-access |
4990 | - * privileges can bind an event-channel port to a physical interrupt |
4991 | - * source. |
4992 | - * 3. Virtual interrupts ('events'). A domain can bind an event-channel |
4993 | - * port to a virtual interrupt source, such as the virtual-timer |
4994 | - * device or the emergency console. |
4995 | - * |
4996 | - * Event channels are addressed by a "port index". Each channel is |
4997 | - * associated with two bits of information: |
4998 | - * 1. PENDING -- notifies the domain that there is a pending notification |
4999 | - * to be processed. This bit is cleared by the guest. |
5000 | - * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING |
5001 | - * will cause an asynchronous upcall to be scheduled. This bit is only |
5002 | - * updated by the guest. It is read-only within Xen. If a channel |
5003 | - * becomes pending while the channel is masked then the 'edge' is lost |
5004 | - * (i.e., when the channel is unmasked, the guest must manually handle |
5005 | - * pending notifications as no upcall will be scheduled by Xen). |
5006 | - * |
5007 | - * To expedite scanning of pending notifications, any 0->1 pending |
5008 | - * transition on an unmasked channel causes a corresponding bit in a |
5009 | - * per-vcpu selector word to be set. Each bit in the selector covers a |
5010 | - * 'C long' in the PENDING bitfield array. |
5011 | - */ |
5012 | - unsigned long evtchn_pending[sizeof(unsigned long) * 8]; |
5013 | - unsigned long evtchn_mask[sizeof(unsigned long) * 8]; |
5014 | - |
5015 | - /* |
5016 | - * Wallclock time: updated only by control software. Guests should base |
5017 | - * their gettimeofday() syscall on this wallclock-base value. |
5018 | - */ |
5019 | - uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */ |
5020 | - uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */ |
5021 | - uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */ |
5022 | + /* |
5023 | + * A domain can create "event channels" on which it can send and receive |
5024 | + * asynchronous event notifications. There are three classes of event that |
5025 | + * are delivered by this mechanism: |
5026 | + * 1. Bi-directional inter- and intra-domain connections. Domains must |
5027 | + * arrange out-of-band to set up a connection (usually by allocating |
5028 | + * an unbound 'listener' port and avertising that via a storage service |
5029 | + * such as xenstore). |
5030 | + * 2. Physical interrupts. A domain with suitable hardware-access |
5031 | + * privileges can bind an event-channel port to a physical interrupt |
5032 | + * source. |
5033 | + * 3. Virtual interrupts ('events'). A domain can bind an event-channel |
5034 | + * port to a virtual interrupt source, such as the virtual-timer |
5035 | + * device or the emergency console. |
5036 | + * |
5037 | + * Event channels are addressed by a "port index". Each channel is |
5038 | + * associated with two bits of information: |
5039 | + * 1. PENDING -- notifies the domain that there is a pending notification |
5040 | + * to be processed. This bit is cleared by the guest. |
5041 | + * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING |
5042 | + * will cause an asynchronous upcall to be scheduled. This bit is only |
5043 | + * updated by the guest. It is read-only within Xen. If a channel |
5044 | + * becomes pending while the channel is masked then the 'edge' is lost |
5045 | + * (i.e., when the channel is unmasked, the guest must manually handle |
5046 | + * pending notifications as no upcall will be scheduled by Xen). |
5047 | + * |
5048 | + * To expedite scanning of pending notifications, any 0->1 pending |
5049 | + * transition on an unmasked channel causes a corresponding bit in a |
5050 | + * per-vcpu selector word to be set. Each bit in the selector covers a |
5051 | + * 'C long' in the PENDING bitfield array. |
5052 | + */ |
5053 | + unsigned long evtchn_pending[sizeof(unsigned long) * 8]; |
5054 | + unsigned long evtchn_mask[sizeof(unsigned long) * 8]; |
5055 | + |
5056 | + /* |
5057 | + * Wallclock time: updated only by control software. Guests should base |
5058 | + * their gettimeofday() syscall on this wallclock-base value. |
5059 | + */ |
5060 | + uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */ |
5061 | + uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */ |
5062 | + uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */ |
5063 | |
5064 | - struct arch_shared_info arch; |
5065 | + struct arch_shared_info arch; |
5066 | |
5067 | }; |
5068 | +#ifndef __XEN__ |
5069 | +typedef struct shared_info shared_info_t; |
5070 | +#endif |
5071 | |
5072 | /* |
5073 | - * Start-of-day memory layout for the initial domain (DOM0): |
5074 | + * Start-of-day memory layout: |
5075 | * 1. The domain is started within contiguous virtual-memory region. |
5076 | - * 2. The contiguous region begins and ends on an aligned 4MB boundary. |
5077 | - * 3. The region start corresponds to the load address of the OS image. |
5078 | - * If the load address is not 4MB aligned then the address is rounded down. |
5079 | - * 4. This the order of bootstrap elements in the initial virtual region: |
5080 | + * 2. The contiguous region ends on an aligned 4MB boundary. |
5081 | + * 3. This is the order of bootstrap elements in the initial virtual region: |
5082 | * a. relocated kernel image |
5083 | * b. initial ram disk [mod_start, mod_len] |
5084 | * c. list of allocated page frames [mfn_list, nr_pages] |
5085 | * d. start_info_t structure [register ESI (x86)] |
5086 | * e. bootstrap page tables [pt_base, CR3 (x86)] |
5087 | * f. bootstrap stack [register ESP (x86)] |
5088 | - * 5. Bootstrap elements are packed together, but each is 4kB-aligned. |
5089 | - * 6. The initial ram disk may be omitted. |
5090 | - * 7. The list of page frames forms a contiguous 'pseudo-physical' memory |
5091 | + * 4. Bootstrap elements are packed together, but each is 4kB-aligned. |
5092 | + * 5. The initial ram disk may be omitted. |
5093 | + * 6. The list of page frames forms a contiguous 'pseudo-physical' memory |
5094 | * layout for the domain. In particular, the bootstrap virtual-memory |
5095 | * region is a 1:1 mapping to the first section of the pseudo-physical map. |
5096 | - * 8. All bootstrap elements are mapped read-writable for the guest OS. The |
5097 | + * 7. All bootstrap elements are mapped read-writable for the guest OS. The |
5098 | * only exception is the bootstrap page table, which is mapped read-only. |
5099 | - * 9. There is guaranteed to be at least 512kB padding after the final |
5100 | + * 8. There is guaranteed to be at least 512kB padding after the final |
5101 | * bootstrap element. If necessary, the bootstrap virtual region is |
5102 | * extended by an extra 4MB to ensure this. |
5103 | */ |
5104 | |
5105 | #define MAX_GUEST_CMDLINE 1024 |
5106 | struct start_info { |
5107 | - /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */ |
5108 | - char magic[32]; /* "xen-<version>-<platform>". */ |
5109 | - unsigned long nr_pages; /* Total pages allocated to this domain. */ |
5110 | - unsigned long shared_info; /* MACHINE address of shared info struct. */ |
5111 | - uint32_t flags; /* SIF_xxx flags. */ |
5112 | - unsigned long store_mfn; /* MACHINE page number of shared page. */ |
5113 | - uint32_t store_evtchn; /* Event channel for store communication. */ |
5114 | - union { |
5115 | - struct { |
5116 | - unsigned long mfn; /* MACHINE page number of console page. */ |
5117 | - uint32_t evtchn; /* Event channel for console page. */ |
5118 | - } domU; |
5119 | - struct { |
5120 | - uint32_t info_off; /* Offset of console_info struct. */ |
5121 | - uint32_t info_size; /* Size of console_info struct from start.*/ |
5122 | - } dom0; |
5123 | - } console; |
5124 | - /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */ |
5125 | - unsigned long pt_base; /* VIRTUAL address of page directory. */ |
5126 | - unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */ |
5127 | - unsigned long mfn_list; /* VIRTUAL address of page-frame list. */ |
5128 | - unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */ |
5129 | - unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ |
5130 | - int8_t cmd_line[MAX_GUEST_CMDLINE]; |
5131 | + /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */ |
5132 | + char magic[32]; /* "xen-<version>-<platform>". */ |
5133 | + unsigned long nr_pages; /* Total pages allocated to this domain. */ |
5134 | + unsigned long shared_info; /* MACHINE address of shared info struct. */ |
5135 | + uint32_t flags; /* SIF_xxx flags. */ |
5136 | + xen_pfn_t store_mfn; /* MACHINE page number of shared page. */ |
5137 | + uint32_t store_evtchn; /* Event channel for store communication. */ |
5138 | + union { |
5139 | + struct { |
5140 | + xen_pfn_t mfn; /* MACHINE page number of console page. */ |
5141 | + uint32_t evtchn; /* Event channel for console page. */ |
5142 | + } domU; |
5143 | + struct { |
5144 | + uint32_t info_off; /* Offset of console_info struct. */ |
5145 | + uint32_t info_size; /* Size of console_info struct from start.*/ |
5146 | + } dom0; |
5147 | + } console; |
5148 | + /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */ |
5149 | + unsigned long pt_base; /* VIRTUAL address of page directory. */ |
5150 | + unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */ |
5151 | + unsigned long mfn_list; /* VIRTUAL address of page-frame list. */ |
5152 | + unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */ |
5153 | + unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ |
5154 | + int8_t cmd_line[MAX_GUEST_CMDLINE]; |
5155 | }; |
5156 | +typedef struct start_info start_info_t; |
5157 | + |
5158 | +/* New console union for dom0 introduced in 0x00030203. */ |
5159 | +#if __XEN_INTERFACE_VERSION__ < 0x00030203 |
5160 | +#define console_mfn console.domU.mfn |
5161 | +#define console_evtchn console.domU.evtchn |
5162 | +#endif |
5163 | |
5164 | /* These flags are passed in the 'flags' field of start_info_t. */ |
5165 | #define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ |
5166 | #define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ |
5167 | |
5168 | -typedef uint64_t cpumap_t; |
5169 | +typedef struct dom0_vga_console_info { |
5170 | + uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */ |
5171 | +#define XEN_VGATYPE_TEXT_MODE_3 0x03 |
5172 | +#define XEN_VGATYPE_VESA_LFB 0x23 |
5173 | + |
5174 | + union { |
5175 | + struct { |
5176 | + /* Font height, in pixels. */ |
5177 | + uint16_t font_height; |
5178 | + /* Cursor location (column, row). */ |
5179 | + uint16_t cursor_x, cursor_y; |
5180 | + /* Number of rows and columns (dimensions in characters). */ |
5181 | + uint16_t rows, columns; |
5182 | + } text_mode_3; |
5183 | + |
5184 | + struct { |
5185 | + /* Width and height, in pixels. */ |
5186 | + uint16_t width, height; |
5187 | + /* Bytes per scan line. */ |
5188 | + uint16_t bytes_per_line; |
5189 | + /* Bits per pixel. */ |
5190 | + uint16_t bits_per_pixel; |
5191 | + /* LFB physical address, and size (in units of 64kB). */ |
5192 | + uint32_t lfb_base; |
5193 | + uint32_t lfb_size; |
5194 | + /* RGB mask offsets and sizes, as defined by VBE 1.2+ */ |
5195 | + uint8_t red_pos, red_size; |
5196 | + uint8_t green_pos, green_size; |
5197 | + uint8_t blue_pos, blue_size; |
5198 | + uint8_t rsvd_pos, rsvd_size; |
5199 | +#if __XEN_INTERFACE_VERSION__ >= 0x00030206 |
5200 | + /* VESA capabilities (offset 0xa, VESA command 0x4f00). */ |
5201 | + uint32_t gbl_caps; |
5202 | + /* Mode attributes (offset 0x0, VESA command 0x4f01). */ |
5203 | + uint16_t mode_attrs; |
5204 | +#endif |
5205 | + } vesa_lfb; |
5206 | + } u; |
5207 | +} dom0_vga_console_info_t; |
5208 | +#define xen_vga_console_info dom0_vga_console_info |
5209 | +#define xen_vga_console_info_t dom0_vga_console_info_t |
5210 | |
5211 | typedef uint8_t xen_domain_handle_t[16]; |
5212 | |
5213 | @@ -437,6 +605,11 @@ typedef uint8_t xen_domain_handle_t[16]; |
5214 | #define __mk_unsigned_long(x) x ## UL |
5215 | #define mk_unsigned_long(x) __mk_unsigned_long(x) |
5216 | |
5217 | +__DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t); |
5218 | +__DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t); |
5219 | +__DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t); |
5220 | +__DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t); |
5221 | + |
5222 | #else /* __ASSEMBLY__ */ |
5223 | |
5224 | /* In assembly code we cannot use C numeric constant suffixes. */ |
5225 | @@ -444,4 +617,24 @@ typedef uint8_t xen_domain_handle_t[16]; |
5226 | |
5227 | #endif /* !__ASSEMBLY__ */ |
5228 | |
5229 | +/* Default definitions for macros used by domctl/sysctl. */ |
5230 | +#if defined(__XEN__) || defined(__XEN_TOOLS__) |
5231 | +#ifndef uint64_aligned_t |
5232 | +#define uint64_aligned_t uint64_t |
5233 | +#endif |
5234 | +#ifndef XEN_GUEST_HANDLE_64 |
5235 | +#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name) |
5236 | +#endif |
5237 | +#endif |
5238 | + |
5239 | #endif /* __XEN_PUBLIC_XEN_H__ */ |
5240 | + |
5241 | +/* |
5242 | + * Local variables: |
5243 | + * mode: C |
5244 | + * c-set-style: "BSD" |
5245 | + * c-basic-offset: 4 |
5246 | + * tab-width: 4 |
5247 | + * indent-tabs-mode: nil |
5248 | + * End: |
5249 | + */ |
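Editor's note (not part of the patch): the vcpu_time_info comment above gives the extrapolation formula system_time + ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32). A standalone sketch of that arithmetic; it uses a trimmed local copy of the fields, treats a negative tsc_shift as a right shift, and omits the version/seqlock check and the 128-bit multiply a production implementation uses to avoid overflow.

    #include <stdint.h>

    struct time_snapshot {              /* subset of struct vcpu_time_info */
        uint64_t tsc_timestamp;         /* TSC at last update of time vals. */
        uint64_t system_time;           /* Time, in nanosecs, since boot.   */
        uint32_t tsc_to_system_mul;
        int8_t   tsc_shift;
    };

    static uint64_t xen_system_time_ns(const struct time_snapshot *t, uint64_t tsc)
    {
        uint64_t delta = tsc - t->tsc_timestamp;

        if (t->tsc_shift >= 0)
            delta <<= t->tsc_shift;
        else
            delta >>= -t->tsc_shift;

        return t->system_time + ((delta * (uint64_t)t->tsc_to_system_mul) >> 32);
    }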
5250 | Index: head-2008-04-15/include/xen/xenbus.h |
5251 | =================================================================== |
5252 | --- head-2008-04-15.orig/include/xen/xenbus.h 2008-04-15 09:41:09.000000000 +0200 |
5253 | +++ head-2008-04-15/include/xen/xenbus.h 2008-04-15 09:59:33.000000000 +0200 |
5254 | @@ -5,23 +5,23 @@ |
5255 | * |
5256 | * Copyright (C) 2005 Rusty Russell, IBM Corporation |
5257 | * Copyright (C) 2005 XenSource Ltd. |
5258 | - * |
5259 | + * |
5260 | * This program is free software; you can redistribute it and/or |
5261 | * modify it under the terms of the GNU General Public License version 2 |
5262 | * as published by the Free Software Foundation; or, when distributed |
5263 | * separately from the Linux kernel or incorporated into other |
5264 | * software packages, subject to the following license: |
5265 | - * |
5266 | + * |
5267 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
5268 | * of this source file (the "Software"), to deal in the Software without |
5269 | * restriction, including without limitation the rights to use, copy, modify, |
5270 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, |
5271 | * and to permit persons to whom the Software is furnished to do so, subject to |
5272 | * the following conditions: |
5273 | - * |
5274 | + * |
5275 | * The above copyright notice and this permission notice shall be included in |
5276 | * all copies or substantial portions of the Software. |
5277 | - * |
5278 | + * |
5279 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
5280 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
5281 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
5282 | @@ -39,6 +39,7 @@ |
5283 | #include <linux/mutex.h> |
5284 | #include <linux/completion.h> |
5285 | #include <linux/init.h> |
5286 | +#include <linux/err.h> |
5287 | #include <xen/interface/xen.h> |
5288 | #include <xen/interface/grant_table.h> |
5289 | #include <xen/interface/io/xenbus.h> |
5290 | @@ -55,8 +56,17 @@ struct xenbus_watch |
5291 | /* Callback (executed in a process context with no locks held). */ |
5292 | void (*callback)(struct xenbus_watch *, |
5293 | const char **vec, unsigned int len); |
5294 | + |
5295 | + /* See XBWF_ definitions below. */ |
5296 | + unsigned long flags; |
5297 | }; |
5298 | |
5299 | +/* |
5300 | + * Execute callback in its own kthread. Useful if the callback is long |
5301 | + * running or heavily serialised, to avoid taking out the main xenwatch thread |
5302 | + * for a long period of time (or even unwittingly causing a deadlock). |
5303 | + */ |
5304 | +#define XBWF_new_thread 1 |
5305 | |
5306 | /* A xenbus device. */ |
5307 | struct xenbus_device { |
5308 | @@ -97,6 +107,7 @@ struct xenbus_driver { |
5309 | int (*uevent)(struct xenbus_device *, char **, int, char *, int); |
5310 | struct device_driver driver; |
5311 | int (*read_otherend_details)(struct xenbus_device *dev); |
5312 | + int (*is_ready)(struct xenbus_device *dev); |
5313 | }; |
5314 | |
5315 | static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv) |
5316 | @@ -104,27 +115,8 @@ static inline struct xenbus_driver *to_x |
5317 | return container_of(drv, struct xenbus_driver, driver); |
5318 | } |
5319 | |
5320 | -int __must_check __xenbus_register_frontend(struct xenbus_driver *drv, |
5321 | - struct module *owner, |
5322 | - const char *mod_name); |
5323 | - |
5324 | -static inline int __must_check |
5325 | -xenbus_register_frontend(struct xenbus_driver *drv) |
5326 | -{ |
5327 | - WARN_ON(drv->owner != THIS_MODULE); |
5328 | - return __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME); |
5329 | -} |
5330 | - |
5331 | -int __must_check __xenbus_register_backend(struct xenbus_driver *drv, |
5332 | - struct module *owner, |
5333 | - const char *mod_name); |
5334 | -static inline int __must_check |
5335 | -xenbus_register_backend(struct xenbus_driver *drv) |
5336 | -{ |
5337 | - WARN_ON(drv->owner != THIS_MODULE); |
5338 | - return __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME); |
5339 | -} |
5340 | - |
5341 | +int xenbus_register_frontend(struct xenbus_driver *drv); |
5342 | +int xenbus_register_backend(struct xenbus_driver *drv); |
5343 | void xenbus_unregister_driver(struct xenbus_driver *drv); |
5344 | |
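/*
 * Minimal sketch of the simplified registration API above: a skeletal
 * frontend driver registered with xenbus_register_frontend().  All of the
 * example_* names are hypothetical; <linux/module.h> is assumed.
 */
static struct xenbus_device_id example_ids[] = {
        { "example" },
        { "" }
};

static int example_probe(struct xenbus_device *dev,
                         const struct xenbus_device_id *id)
{
        /* Allocate per-device state, publish features, start negotiation. */
        return 0;
}

static struct xenbus_driver example_front = {
        .name  = "example",
        .owner = THIS_MODULE,
        .ids   = example_ids,
        .probe = example_probe,
};

static int __init example_init(void)
{
        return xenbus_register_frontend(&example_front);
}
module_init(example_init);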
5345 | struct xenbus_transaction |
5346 | @@ -135,8 +127,6 @@ struct xenbus_transaction |
5347 | /* Nil transaction ID. */ |
5348 | #define XBT_NIL ((struct xenbus_transaction) { 0 }) |
5349 | |
5350 | -int __init xenbus_dev_init(void); |
5351 | - |
5352 | char **xenbus_directory(struct xenbus_transaction t, |
5353 | const char *dir, const char *node, unsigned int *num); |
5354 | void *xenbus_read(struct xenbus_transaction t, |
5355 | @@ -166,7 +156,6 @@ int xenbus_printf(struct xenbus_transact |
5356 | int xenbus_gather(struct xenbus_transaction t, const char *dir, ...); |
5357 | |
5358 | /* notifer routines for when the xenstore comes up */ |
5359 | -extern int xenstored_ready; |
5360 | int register_xenstore_notifier(struct notifier_block *nb); |
5361 | void unregister_xenstore_notifier(struct notifier_block *nb); |
5362 | |
5363 | @@ -179,12 +168,9 @@ void xs_suspend_cancel(void); |
5364 | /* Used by xenbus_dev to borrow kernel's store connection. */ |
5365 | void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg); |
5366 | |
5367 | -struct work_struct; |
5368 | - |
5369 | /* Prepare for domain suspend: then resume or cancel the suspend. */ |
5370 | void xenbus_suspend(void); |
5371 | void xenbus_resume(void); |
5372 | -void xenbus_probe(struct work_struct *); |
5373 | void xenbus_suspend_cancel(void); |
5374 | |
5375 | #define XENBUS_IS_ERR_READ(str) ({ \ |
5376 | @@ -197,38 +183,125 @@ void xenbus_suspend_cancel(void); |
5377 | |
5378 | #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE) |
5379 | |
5380 | + |
5381 | +/** |
5382 | + * Register a watch on the given path, using the given xenbus_watch structure |
5383 | + * for storage, and the given callback function as the callback. Return 0 on |
5384 | + * success, or -errno on error. On success, the given path will be saved as |
5385 | + * watch->node, and remains the caller's to free. On error, watch->node will |
5386 | + * be NULL, the device will switch to XenbusStateClosing, and the error will |
5387 | + * be saved in the store. |
5388 | + */ |
5389 | int xenbus_watch_path(struct xenbus_device *dev, const char *path, |
5390 | struct xenbus_watch *watch, |
5391 | void (*callback)(struct xenbus_watch *, |
5392 | const char **, unsigned int)); |
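/*
 * Sketch of xenbus_watch_path() in use (hypothetical names): watch the other
 * end's "state" node and dispatch to a stub callback.  On success the path
 * string becomes watch->node and must be kfree()d by the caller once the
 * watch has been unregistered.
 */
static void example_otherend_changed(struct xenbus_watch *watch,
                                     const char **vec, unsigned int len)
{
        /* Runs from the xenwatch thread unless XBWF_new_thread is set. */
}

static int example_watch_otherend(struct xenbus_device *dev,
                                  struct xenbus_watch *watch)
{
        int err;
        char *path = kasprintf(GFP_KERNEL, "%s/state", dev->otherend);

        if (!path)
                return -ENOMEM;
        err = xenbus_watch_path(dev, path, watch, example_otherend_changed);
        if (err)
                kfree(path);    /* on success the string lives on as watch->node */
        return err;
}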
5393 | -int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch, |
5394 | - void (*callback)(struct xenbus_watch *, |
5395 | - const char **, unsigned int), |
5396 | - const char *pathfmt, ...) |
5397 | - __attribute__ ((format (printf, 4, 5))); |
5398 | |
5399 | + |
5400 | +/** |
5401 | + * Register a watch on the given path/path2, using the given xenbus_watch |
5402 | + * structure for storage, and the given callback function as the callback. |
5403 | + * Return 0 on success, or -errno on error. On success, the watched path |
5404 | + * (path/path2) will be saved as watch->node, and becomes the caller's to |
5405 | + * kfree(). On error, watch->node will be NULL, so the caller has nothing to |
5406 | + * free, the device will switch to XenbusStateClosing, and the error will be |
5407 | + * saved in the store. |
5408 | + */ |
5409 | +int xenbus_watch_path2(struct xenbus_device *dev, const char *path, |
5410 | + const char *path2, struct xenbus_watch *watch, |
5411 | + void (*callback)(struct xenbus_watch *, |
5412 | + const char **, unsigned int)); |
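/*
 * The same watch set up with xenbus_watch_path2(), which joins the two
 * components ("<otherend>/state") itself; the combined string ends up in
 * watch->node and is the caller's to kfree() after unregistering.  The
 * callback is the stub from the sketch above.
 */
static int example_watch_otherend2(struct xenbus_device *dev,
                                   struct xenbus_watch *watch)
{
        return xenbus_watch_path2(dev, dev->otherend, "state",
                                  watch, example_otherend_changed);
}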
5413 | + |
5414 | + |
5415 | +/** |
5416 | + * Advertise in the store a change of the given driver to the given new_state. |
5417 | + * Return 0 on success, or -errno on error. On error, the device will switch |
5418 | + * to XenbusStateClosing, and the error will be saved in the store. |
5419 | + */ |
5420 | int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state); |
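/*
 * Sketch: a (hypothetical) frontend advertising that it has finished
 * connecting.  On failure xenbus_switch_state() itself moves the device to
 * Closing and records the error in the store, so the caller only needs to
 * propagate the return value.
 */
static int example_announce_connected(struct xenbus_device *dev)
{
        return xenbus_switch_state(dev, XenbusStateConnected);
}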
5421 | + |
5422 | + |
5423 | +/** |
5424 | + * Grant access to the given ring_mfn to the peer of the given device. Return |
5425 | + * 0 on success, or -errno on error. On error, the device will switch to |
5426 | + * XenbusStateClosing, and the error will be saved in the store. |
5427 | + */ |
5428 | int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn); |
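/*
 * Sketch of sharing a ring page (hypothetical names).  Assumptions: the
 * frontends in this tree treat a non-negative return value from
 * xenbus_grant_ring() as the grant reference to advertise in the store, and
 * virt_to_mfn() is the usual Xen helper for turning a kernel virtual address
 * into a machine frame number.
 */
static int example_grant_ring_page(struct xenbus_device *dev,
                                   void *ring_page, int *ring_ref)
{
        int err = xenbus_grant_ring(dev, virt_to_mfn(ring_page));

        if (err < 0)
                return err;     /* already reported via the store */
        *ring_ref = err;
        return 0;
}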
5429 | -int xenbus_map_ring_valloc(struct xenbus_device *dev, |
5430 | - int gnt_ref, void **vaddr); |
5431 | + |
5432 | + |
5433 | +/** |
5434 | + * Map a page of memory into this domain from another domain's grant table. |
5435 | + * xenbus_map_ring_valloc allocates a page of virtual address space, maps |
5436 | + * the page to that address, and returns the struct vm_struct describing the |
5437 | + * mapping, or an ERR_PTR() value on error. xenbus_map_ring does not allocate |
5438 | + * the virtual address space (you must do this yourself!); it only maps the |
5439 | + * page to the specified address, returning 0 on success, or GNTST_* (see |
5440 | + * xen/include/interface/grant_table.h) or -ENOMEM on error. On error, the |
5441 | + * device will switch to XenbusStateClosing and the error is saved in XenStore. |
5442 | + */ |
5443 | +struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, |
5444 | + int gnt_ref); |
5445 | int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, |
5446 | grant_handle_t *handle, void *vaddr); |
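/*
 * Sketch of the backend side mapping a frontend's ring page (hypothetical
 * names), assuming the ERR_PTR() convention implied by the struct vm_struct
 * return type; <linux/err.h>, included above, supplies IS_ERR().
 */
static void *example_map_ring(struct xenbus_device *dev, int ring_ref,
                              struct vm_struct **area_out)
{
        struct vm_struct *area = xenbus_map_ring_valloc(dev, ring_ref);

        if (IS_ERR(area))
                return NULL;    /* the error has been saved in the store */
        *area_out = area;
        return area->addr;      /* the shared ring is now mapped here */
}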
5447 | |
5448 | -int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr); |
5449 | + |
5450 | +/** |
5451 | + * Unmap a page of memory in this domain that was imported from another domain. |
5452 | + * Use xenbus_unmap_ring_vfree if you mapped in your memory with |
5453 | + * xenbus_map_ring_valloc (it will free the virtual address space). |
5454 | + * Returns 0 on success and returns GNTST_* on error |
5455 | + * (see xen/include/interface/grant_table.h). |
5456 | + */ |
5457 | +int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *); |
5458 | int xenbus_unmap_ring(struct xenbus_device *dev, |
5459 | grant_handle_t handle, void *vaddr); |
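/*
 * The matching teardown: unmap the page and release the virtual address
 * range obtained from xenbus_map_ring_valloc().
 */
static void example_unmap_ring(struct xenbus_device *dev,
                               struct vm_struct *area)
{
        if (xenbus_unmap_ring_vfree(dev, area))
                printk(KERN_WARNING "example: failed to unmap ring page\n");
}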
5460 | |
5461 | + |
5462 | +/** |
5463 | + * Allocate an event channel for the given xenbus_device, assigning the newly |
5464 | + * created local port to *port. Return 0 on success, or -errno on error. On |
5465 | + * error, the device will switch to XenbusStateClosing, and the error will be |
5466 | + * saved in the store. |
5467 | + */ |
5468 | int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port); |
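/*
 * Sketch (hypothetical names): allocate an event channel and advertise the
 * local port to the other end under the conventional "event-channel" key.
 */
static int example_setup_evtchn(struct xenbus_device *dev, int *port)
{
        int err = xenbus_alloc_evtchn(dev, port);

        if (err)
                return err;
        err = xenbus_printf(XBT_NIL, dev->nodename,
                            "event-channel", "%u", *port);
        if (err)
                xenbus_free_evtchn(dev, *port);
        return err;
}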
5469 | -int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port); |
5470 | + |
5471 | + |
5472 | +/** |
5473 | + * Free an existing event channel. Returns 0 on success or -errno on error. |
5474 | + */ |
5475 | int xenbus_free_evtchn(struct xenbus_device *dev, int port); |
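/*
 * Counterpart used on disconnect (port as obtained in the sketch above;
 * a negative value is taken here to mean "not allocated").
 */
static void example_teardown_evtchn(struct xenbus_device *dev, int *port)
{
        if (*port >= 0)
                xenbus_free_evtchn(dev, *port);
        *port = -1;
}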
5476 | |
5477 | + |
5478 | +/** |
5479 | + * Return the state of the driver rooted at the given store path, or |
5480 | + * XenbusStateUnknown if no state can be read. |
5481 | + */ |
5482 | enum xenbus_state xenbus_read_driver_state(const char *path); |
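/*
 * Sketch: query the peer's state, e.g. to decide whether a shared ring can
 * safely be torn down.  dev->otherend is the peer's store directory.
 */
static int example_peer_is_connected(struct xenbus_device *dev)
{
        return xenbus_read_driver_state(dev->otherend) == XenbusStateConnected;
}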
5483 | |
5484 | -void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...); |
5485 | -void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...); |
5486 | + |
5487 | +/** |
5488 | + * Report the given negative errno into the store, along with the given |
5489 | + * formatted message. |
5490 | + */ |
5491 | +void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, |
5492 | + ...); |
5493 | + |
5494 | + |
5495 | +/** |
5496 | + * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by |
5497 | + * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly |
5498 | + * closedown of this driver and its peer. |
5499 | + */ |
5500 | +void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, |
5501 | + ...); |
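/*
 * Sketch of the two reporting helpers (hypothetical store key):
 * xenbus_dev_error() records a non-fatal problem in the store, while
 * xenbus_dev_fatal() additionally starts an orderly shutdown by moving the
 * device towards Closing.
 */
static int example_publish_feature(struct xenbus_device *dev)
{
        int err = xenbus_printf(XBT_NIL, dev->nodename, "feature-foo", "%d", 1);

        if (err) {
                xenbus_dev_fatal(dev, err, "writing %s/feature-foo",
                                 dev->nodename);
                return err;
        }
        return 0;
}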
5502 | + |
5503 | +int xenbus_dev_init(void); |
5504 | |
5505 | const char *xenbus_strstate(enum xenbus_state state); |
5506 | int xenbus_dev_is_online(struct xenbus_device *dev); |
5507 | int xenbus_frontend_closed(struct xenbus_device *dev); |
5508 | |
5509 | +int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *)); |
5510 | +int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)); |
5511 | + |
5512 | #endif /* _XEN_XENBUS_H */ |