author     Daniel P. Berrange <berrange@redhat.com>    2013-11-21 16:34:33 +0000
committer  Daniel P. Berrange <berrange@redhat.com>    2013-11-21 16:34:33 +0000
commit     3a6a9f5b3b49577e0ce8fc0c5b1abd04d2b04217 (patch)
tree       b6fc8ec98781bbfcac60805a2ab0bcddb2b74407
parent     4b6944d3c0ff43683fc310da061fac6e05c57b66 (diff)
download   libvirt-python-v7-3a6a9f5b3b49577e0ce8fc0c5b1abd04d2b04217.tar.gz
           libvirt-python-v7-3a6a9f5b3b49577e0ce8fc0c5b1abd04d2b04217.tar.xz
           libvirt-python-v7-3a6a9f5b3b49577e0ce8fc0c5b1abd04d2b04217.zip
Move python example programs into python/examples/ subdirectory
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
-rw-r--r--  examples/Makefile.am          21
-rw-r--r--  examples/README               33
-rw-r--r--  examples/consolecallback.py   88
-rwxr-xr-x  examples/dominfo.py           80
-rwxr-xr-x  examples/domrestore.py        36
-rwxr-xr-x  examples/domsave.py           40
-rwxr-xr-x  examples/domstart.py          50
-rwxr-xr-x  examples/esxlist.py          155
-rw-r--r--  examples/event-test.py       591
-rwxr-xr-x  examples/topology.py          45
10 files changed, 1139 insertions, 0 deletions
diff --git a/examples/Makefile.am b/examples/Makefile.am
new file mode 100644
index 0000000..7823c20
--- /dev/null
+++ b/examples/Makefile.am
@@ -0,0 +1,21 @@
+## Copyright (C) 2005-2013 Red Hat, Inc.
+##
+## This library is free software; you can redistribute it and/or
+## modify it under the terms of the GNU Lesser General Public
+## License as published by the Free Software Foundation; either
+## version 2.1 of the License, or (at your option) any later version.
+##
+## This library is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+## Lesser General Public License for more details.
+##
+## You should have received a copy of the GNU Lesser General Public
+## License along with this library. If not, see
+## <http://www.gnu.org/licenses/>.
+
+EXTRA_DIST= \
+ README \
+ consolecallback.py \
+ topology.py \
+ dominfo.py domrestore.py domsave.py domstart.py esxlist.py
diff --git a/examples/README b/examples/README
new file mode 100644
index 0000000..f4db76c
--- /dev/null
+++ b/examples/README
@@ -0,0 +1,33 @@
+Some simple examples of how to use the Python API for libvirt
+
+The examples are:
+
+dominfo.py - print information about a running domU based on the results of
+ virDomainGetInfo and virDomainGetXMLDesc
+domstart.py - create a domU from an XML description if the domU isn't
+ running yet
+domsave.py - save all running domU's into a directory
+domrestore.py - restore domU's from their saved files in a directory
+esxlist.py - list active domains of a VMware ESX host and print some info;
+ also demonstrates how to use the libvirt.openAuth() method
+
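+The scripts are run directly with the Python interpreter; for instance (the
+domain, file and host names below are just placeholders):
+
+  python dominfo.py mydomain
+  python domstart.py mydomain.xml
+  python esxlist.py esx1.example.com
+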
+The XML files in this directory are examples of the XML format that libvirt
+expects, and will have to be adapted for your setup. They are only needed
+for domstart.py
+
+
+Some additional notes for the esxlist.py example:
+
+You may see remote errors complaining about missing certificates:
+
+ Cannot access CA certificate '/usr/local/etc/pki/CA/cacert.pem': No such file
+ or directory
+
+This is expected: libvirt tries to find network and storage drivers for ESX,
+but those are not implemented yet (November 2009). While searching for these
+drivers, libvirt may try to start a local libvirtd instance, which fails because
+of the missing certificates. It will warn about that:
+
+ Failed to find the network: Is the daemon running?
+
+This is also expected and can be ignored.
diff --git a/examples/consolecallback.py b/examples/consolecallback.py
new file mode 100644
index 0000000..d8e33a9
--- /dev/null
+++ b/examples/consolecallback.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+# consolecallback - provide a persistent console that survives guest reboots
+
+import sys, os, logging, libvirt, tty, termios, atexit
+
+def reset_term():
+ termios.tcsetattr(0, termios.TCSADRAIN, attrs)
+
+def error_handler(unused, error):
+ # The console stream errors on VM shutdown; we don't care
+ if (error[0] == libvirt.VIR_ERR_RPC and
+ error[1] == libvirt.VIR_FROM_STREAMS):
+ return
+ logging.warn(error)
+
+class Console(object):
+ def __init__(self, uri, uuid):
+ self.uri = uri
+ self.uuid = uuid
+ self.connection = libvirt.open(uri)
+ self.domain = self.connection.lookupByUUIDString(uuid)
+ self.state = self.domain.state(0)
+ self.connection.domainEventRegister(lifecycle_callback, self)
+ self.stream = None
+ self.run_console = True
+ logging.info("%s initial state %d, reason %d",
+ self.uuid, self.state[0], self.state[1])
+
+def check_console(console):
+ if (console.state[0] == libvirt.VIR_DOMAIN_RUNNING or
+ console.state[0] == libvirt.VIR_DOMAIN_PAUSED):
+ if console.stream is None:
+ console.stream = console.connection.newStream(libvirt.VIR_STREAM_NONBLOCK)
+ console.domain.openConsole(None, console.stream, 0)
+ console.stream.eventAddCallback(libvirt.VIR_STREAM_EVENT_READABLE, stream_callback, console)
+ else:
+ if console.stream:
+ console.stream.eventRemoveCallback()
+ console.stream = None
+
+ return console.run_console
+
+def stdin_callback(watch, fd, events, console):
+ readbuf = os.read(fd, 1024)
+ if readbuf.startswith("\x1d"):  # 0x1d is the ^] escape character
+ console.run_console = False
+ return
+ if console.stream:
+ console.stream.send(readbuf)
+
+def stream_callback(stream, events, console):
+ try:
+ received_data = console.stream.recv(1024)
+ except:
+ return
+ os.write(0, received_data)
+
+def lifecycle_callback (connection, domain, event, detail, console):
+ console.state = console.domain.state(0)
+ logging.info("%s transitioned to state %d, reason %d",
+ console.uuid, console.state[0], console.state[1])
+
+# main
+if len(sys.argv) != 3:
+ print "Usage:", sys.argv[0], "URI UUID"
+ print "for example:", sys.argv[0], "'qemu:///system' '32ad945f-7e78-c33a-e96d-39f25e025d81'"
+ sys.exit(1)
+
+uri = sys.argv[1]
+uuid = sys.argv[2]
+
+print "Escape character is ^]"
+logging.basicConfig(filename='msg.log', level=logging.DEBUG)
+logging.info("URI: %s", uri)
+logging.info("UUID: %s", uuid)
+
+libvirt.virEventRegisterDefaultImpl()
+libvirt.registerErrorHandler(error_handler, None)
+
+atexit.register(reset_term)
+attrs = termios.tcgetattr(0)
+tty.setraw(0)
+
+console = Console(uri, uuid)
+console.stdin_watch = libvirt.virEventAddHandle(0, libvirt.VIR_EVENT_HANDLE_READABLE, stdin_callback, console)
+
+while check_console(console):
+ libvirt.virEventRunDefaultImpl()
diff --git a/examples/dominfo.py b/examples/dominfo.py
new file mode 100755
index 0000000..bfa3ca3
--- /dev/null
+++ b/examples/dominfo.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+# dominfo - print some information about a domain
+
+import libvirt
+import sys
+import os
+import libxml2
+import pdb
+
+def usage():
+ print 'Usage: %s DOMAIN' % sys.argv[0]
+ print ' Print information about the domain DOMAIN'
+
+def print_section(title):
+ print "\n%s" % title
+ print "=" * 60
+
+def print_entry(key, value):
+ print "%-10s %-10s" % (key, value)
+
+def print_xml(key, ctx, path):
+ res = ctx.xpathEval(path)
+ if res is None or len(res) == 0:
+ value="Unknown"
+ else:
+ value = res[0].content
+ print_entry(key, value)
+ return value
+
+if len(sys.argv) != 2:
+ usage()
+ sys.exit(2)
+
+name = sys.argv[1]
+
+# Connect to libvirt
+conn = libvirt.openReadOnly(None)
+if conn is None:
+ print 'Failed to open connection to the hypervisor'
+ sys.exit(1)
+
+try:
+ dom = conn.lookupByName(name)
+ # Annoyingly, libvirt prints its own error message here
+except libvirt.libvirtError:
+ print "Domain %s is not running" % name
+ sys.exit(0)
+
+info = dom.info()
+print_section("Domain info")
+print_entry("State:", info[0])
+print_entry("MaxMem:", info[1])
+print_entry("UsedMem:", info[2])
+print_entry("VCPUs:", info[3])
+
+# Read some info from the XML desc
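+# As a rough illustration (values are guest-specific), the <os> element
+# queried below might look like:
+#   <os>
+#     <type>hvm</type>
+#     <kernel>/boot/vmlinuz</kernel>
+#     <initrd>/boot/initrd.img</initrd>
+#     <cmdline>console=ttyS0</cmdline>
+#   </os>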
+xmldesc = dom.XMLDesc(0)
+doc = libxml2.parseDoc(xmldesc)
+ctx = doc.xpathNewContext()
+print_section("Kernel")
+print_xml("Type:", ctx, "/domain/os/type")
+print_xml("Kernel:", ctx, "/domain/os/kernel")
+print_xml("initrd:", ctx, "/domain/os/initrd")
+print_xml("cmdline:", ctx, "/domain/os/cmdline")
+
+print_section("Devices")
+devs = ctx.xpathEval("/domain/devices/*")
+for d in devs:
+ ctx.setContextNode(d)
+ #pdb.set_trace()
+ type = print_xml("Type:", ctx, "@type")
+ if type == "file":
+ print_xml("Source:", ctx, "source/@file")
+ print_xml("Target:", ctx, "target/@dev")
+ elif type == "block":
+ print_xml("Source:", ctx, "source/@dev")
+ print_xml("Target:", ctx, "target/@dev")
+ elif type == "bridge":
+ print_xml("Source:", ctx, "source/@bridge")
+ print_xml("MAC Addr:", ctx, "mac/@address")
diff --git a/examples/domrestore.py b/examples/domrestore.py
new file mode 100755
index 0000000..fffc90f
--- /dev/null
+++ b/examples/domrestore.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# domrestore - restore domU's from their saved images in a directory
+
+import libvirt
+import sys
+import os
+import libxml2
+import pdb
+
+def usage():
+ print 'Usage: %s DIR' % sys.argv[0]
+ print ' Restore all the domains contained in DIR'
+ print ' It is assumed that all files in DIR are'
+ print ' images of domU\'s previously created with save'
+
+if len(sys.argv) != 2:
+ usage()
+ sys.exit(2)
+
+dir = sys.argv[1]
+imgs = os.listdir(dir)
+
+conn = libvirt.open(None)
+if conn is None:
+ print 'Failed to open connection to the hypervisor'
+ sys.exit(1)
+
+for img in imgs:
+ file = os.path.join(dir, img)
+ print "Restoring %s ... " % img,
+ sys.stdout.flush()
+ ret = conn.restore(file)
+ if ret == 0:
+ print "done"
+ else:
+ print "error %d" % ret
diff --git a/examples/domsave.py b/examples/domsave.py
new file mode 100755
index 0000000..bac4536
--- /dev/null
+++ b/examples/domsave.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+# domsave - save all currently running domU's into a directory
+
+import libvirt
+import sys
+import os
+import libxml2
+import pdb
+
+def usage():
+ print 'Usage: %s DIR' % sys.argv[0]
+ print ' Save all currently running domU\'s into DIR'
+ print ' DIR must exist and be writable by this process'
+
+if len(sys.argv) != 2:
+ usage()
+ sys.exit(2)
+
+dir = sys.argv[1]
+
+conn = libvirt.open(None)
+if conn is None:
+ print 'Failed to open connection to the hypervisor'
+ sys.exit(1)
+
+doms = conn.listDomainsID()
+for id in doms:
+ if id == 0:
+ continue
+ dom = conn.lookupByID(id)
+ print "Saving %s[%d] ... " % (dom.name(), id),
+ sys.stdout.flush()
+ path = os.path.join(dir, dom.name())
+ ret = dom.save(path)
+ if ret == 0:
+ print "done"
+ else:
+ print "error %d" % ret
+
+#pdb.set_trace()
diff --git a/examples/domstart.py b/examples/domstart.py
new file mode 100755
index 0000000..b14fad1
--- /dev/null
+++ b/examples/domstart.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+# domstart - make sure a given domU is running, if not start it
+
+import libvirt
+import sys
+import os
+import libxml2
+import pdb
+
+# Parse the XML description of a domU from FNAME
+# and return a tuple (name, xmldesc) where name
+# is the name of the domain and xmldesc is the content of FNAME
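+#
+# As a rough illustration (names are placeholders), FNAME would contain
+# libvirt domain XML along the lines of:
+#   <domain type='qemu'>
+#     <name>demo</name>
+#     ...
+#   </domain>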
+def read_domain(fname):
+ fp = open(fname, "r")
+ xmldesc = fp.read()
+ fp.close()
+
+ doc = libxml2.parseDoc(xmldesc)
+ name = doc.xpathNewContext().xpathEval("/domain/name")[0].content
+ return (name, xmldesc)
+
+def usage():
+ print 'Usage: %s domain.xml' % sys.argv[0]
+ print ' Check that the domain described by DOMAIN.XML is running'
+ print ' If the domain is not running, create it'
+ print ' DOMAIN.XML must be a XML description of the domain'
+ print ' in libvirt\'s XML format'
+
+if len(sys.argv) != 2:
+ usage()
+ sys.exit(2)
+
+(name, xmldesc) = read_domain(sys.argv[1])
+
+conn = libvirt.open(None)
+if conn is None:
+ print 'Failed to open connection to the hypervisor'
+ sys.exit(1)
+
+try:
+ dom = conn.lookupByName(name)
+except libvirt.libvirtError:
+ print "Starting domain %s ... " % name,
+ sys.stdout.flush()
+ dom = conn.createLinux(xmldesc, 0)
+ if dom is None:
+ print "failed"
+ sys.exit(1)
+ else:
+ print "done"
diff --git a/examples/esxlist.py b/examples/esxlist.py
new file mode 100755
index 0000000..c55424f
--- /dev/null
+++ b/examples/esxlist.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+# esxlist - list active domains of an ESX host and print some info.
+# also demonstrates how to use the libvirt.openAuth() method
+
+import libvirt
+import sys
+import os
+import libxml2
+import getpass
+
+
+def usage():
+ print "Usage: %s HOSTNAME" % sys.argv[0]
+ print " List active domains of HOSTNAME and print some info"
+
+
+# This is the callback method passed to libvirt.openAuth() (see below).
+#
+# The credentials argument is a list of credentials that libvirt (actually
+# the ESX driver) would like to request. An element of this list is itself a
+# list containing 5 items (4 inputs, 1 output):
+# - the credential type, e.g. libvirt.VIR_CRED_AUTHNAME
+# - a prompt to be displayed to the user
+# - a challenge, the ESX driver sets this to the hostname to allow automatic
+# distinction between requests for ESX and vCenter credentials
+# - a default result for the request
+# - a place to store the actual result for the request
+#
+# The user_data argument is the user data item of the auth argument (see below)
+# passed to libvirt.openAuth().
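+#
+# As a purely illustrative sketch (the hostname and default value below are
+# made up), a credentials list handed to this callback for an ESX host might
+# look like:
+#   [[libvirt.VIR_CRED_AUTHNAME, "Enter username for esx1.example.com",
+#     "esx1.example.com", "root", None],
+#    [libvirt.VIR_CRED_NOECHOPROMPT, "Enter root's password for esx1.example.com",
+#     "esx1.example.com", None, None]]
+# The callback fills in the fifth item of each entry and returns 0 on success.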
+def request_credentials(credentials, user_data):
+ for credential in credentials:
+ if credential[0] == libvirt.VIR_CRED_AUTHNAME:
+ # prompt the user to input an authname, displaying the provided message
+ credential[4] = raw_input(credential[1] + ": ")
+
+ # if the user just hits enter, raw_input() returns an empty string;
+ # in this case return the default result through the last item of
+ # the list
+ if len(credential[4]) == 0:
+ credential[4] = credential[3]
+ elif credential[0] == libvirt.VIR_CRED_NOECHOPROMPT:
+ # use the getpass module to prompt the user to input a password.
+ # display the provided message and return the result through the
+ # last item of the list
+ credential[4] = getpass.getpass(credential[1] + ": ")
+ else:
+ return -1
+
+ return 0
+
+
+def print_section(title):
+ print "\n%s" % title
+ print "=" * 60
+
+
+def print_entry(key, value):
+ print "%-10s %-10s" % (key, value)
+
+
+def print_xml(key, ctx, path):
+ res = ctx.xpathEval(path)
+
+ if res is None or len(res) == 0:
+ value = "Unknown"
+ else:
+ value = res[0].content
+
+ print_entry(key, value)
+
+ return value
+
+
+if len(sys.argv) != 2:
+ usage()
+ sys.exit(2)
+
+
+hostname = sys.argv[1]
+
+# Connect to libvirt
+uri = "esx://%s/?no_verify=1" % hostname
+
+# The auth argument is a list that contains 3 items:
+# - a list of supported credential types
+# - a callable that takes 2 arguments
+# - user data that will be passed to the callable as second argument
+#
+# In this example the supported credential types are VIR_CRED_AUTHNAME and
+# VIR_CRED_NOECHOPROMPT, the callable is the function request_credentials
+# (see above), and the user data is None.
+#
+# libvirt (actually the ESX driver) will call the callable to request
+# credentials in order to log into the ESX host. The callable would also be
+# called if the connection URI referenced a vCenter, to request credentials
+# for logging into the vCenter.
+auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT],
+ request_credentials, None]
+conn = libvirt.openAuth(uri, auth, 0)
+
+if conn is None:
+ print "Failed to open connection to %s" % hostname
+ sys.exit(1)
+
+state_names = { libvirt.VIR_DOMAIN_RUNNING : "running",
+ libvirt.VIR_DOMAIN_BLOCKED : "idle",
+ libvirt.VIR_DOMAIN_PAUSED : "paused",
+ libvirt.VIR_DOMAIN_SHUTDOWN : "in shutdown",
+ libvirt.VIR_DOMAIN_SHUTOFF : "shut off",
+ libvirt.VIR_DOMAIN_CRASHED : "crashed",
+ libvirt.VIR_DOMAIN_NOSTATE : "no state" }
+
+for id in conn.listDomainsID():
+ domain = conn.lookupByID(id)
+ info = domain.info()
+
+ print_section("Domain " + domain.name())
+ print_entry("ID:", id)
+ print_entry("UUID:", domain.UUIDString())
+ print_entry("State:", state_names[info[0]])
+ print_entry("MaxMem:", info[1])
+ print_entry("UsedMem:", info[2])
+ print_entry("VCPUs:", info[3])
+
+ # Read some info from the XML desc
+ print_section("Devices of " + domain.name())
+
+ xmldesc = domain.XMLDesc(0)
+ doc = libxml2.parseDoc(xmldesc)
+ ctx = doc.xpathNewContext()
+ devs = ctx.xpathEval("/domain/devices/*")
+ first = True
+
+ for d in devs:
+ ctx.setContextNode(d)
+
+ if not first:
+ print "------------------------------------------------------------"
+ else:
+ first = False
+
+ print_entry("Device", d.name)
+
+ type = print_xml("Type:", ctx, "@type")
+
+ if type == "file":
+ print_xml("Source:", ctx, "source/@file")
+ print_xml("Target:", ctx, "target/@dev")
+ elif type == "block":
+ print_xml("Source:", ctx, "source/@dev")
+ print_xml("Target:", ctx, "target/@dev")
+ elif type == "bridge":
+ print_xml("Source:", ctx, "source/@bridge")
+ print_xml("MAC Addr:", ctx, "mac/@address")
diff --git a/examples/event-test.py b/examples/event-test.py
new file mode 100644
index 0000000..84f5259
--- /dev/null
+++ b/examples/event-test.py
@@ -0,0 +1,591 @@
+#!/usr/bin/python -u
+#
+#
+#
+#################################################################################
+# Start off by implementing a general purpose event loop for anyone's use
+#################################################################################
+
+import sys
+import getopt
+import os
+import libvirt
+import select
+import errno
+import time
+import threading
+
+# For the sake of demonstration, this example program includes
+# an implementation of a pure python event loop. Most applications
+# would be better off just using the default libvirt event loop
+# APIs, instead of implementing this in python. The exception is
+# where an application wants to integrate with an existing 3rd
+# party event loop impl
+#
+# Change this to 'False' to make the demo use the native
+# libvirt event loop impl
+use_pure_python_event_loop = True
+
+do_debug = False
+def debug(msg):
+ global do_debug
+ if do_debug:
+ print msg
+
+#
+# This general purpose event loop will support waiting for file handle
+# I/O and error events, as well as scheduling repeatable timers with
+# a fixed interval.
+#
+# It is a pure python implementation based around the poll() API
+#
+class virEventLoopPure:
+ # This class contains the data we need to track for a
+ # single file handle
+ class virEventLoopPureHandle:
+ def __init__(self, handle, fd, events, cb, opaque):
+ self.handle = handle
+ self.fd = fd
+ self.events = events
+ self.cb = cb
+ self.opaque = opaque
+
+ def get_id(self):
+ return self.handle
+
+ def get_fd(self):
+ return self.fd
+
+ def get_events(self):
+ return self.events
+
+ def set_events(self, events):
+ self.events = events
+
+ def dispatch(self, events):
+ self.cb(self.handle,
+ self.fd,
+ events,
+ self.opaque)
+
+ # This class contains the data we need to track for a
+ # single periodic timer
+ class virEventLoopPureTimer:
+ def __init__(self, timer, interval, cb, opaque):
+ self.timer = timer
+ self.interval = interval
+ self.cb = cb
+ self.opaque = opaque
+ self.lastfired = 0
+
+ def get_id(self):
+ return self.timer
+
+ def get_interval(self):
+ return self.interval
+
+ def set_interval(self, interval):
+ self.interval = interval
+
+ def get_last_fired(self):
+ return self.lastfired
+
+ def set_last_fired(self, now):
+ self.lastfired = now
+
+ def dispatch(self):
+ self.cb(self.timer,
+ self.opaque)
+
+
+ def __init__(self):
+ self.poll = select.poll()
+ self.pipetrick = os.pipe()
+ self.pendingWakeup = False
+ self.runningPoll = False
+ self.nextHandleID = 1
+ self.nextTimerID = 1
+ self.handles = []
+ self.timers = []
+ self.quit = False
+
+ # The event loop can be used from multiple threads at once.
+ # Specifically while the main thread is sleeping in poll()
+ # waiting for events to occur, another thread may come along
+ # and add/update/remove a file handle, or timer. When this
+ # happens we need to interrupt the poll() sleep in the other
+ # thread, so that it'll see the file handle / timer changes.
+ #
+ # Using OS level signals for this is very unreliable and
+ # hard to implement correctly. Thus we use the real classic
+ # "self pipe" trick. An anonymous pipe, with one end registered
+ # with the event loop for input events. When we need to force
+ # the main thread out of a poll() sleep, we simply write a
+ # single byte of data to the other end of the pipe.
+ debug("Self pipe watch %d write %d" %(self.pipetrick[0], self.pipetrick[1]))
+ self.poll.register(self.pipetrick[0], select.POLLIN)
+
+
+ # Calculate when the next timeout is due to occur, returning
+ # the absolute timestamp for the next timeout, or 0 if there is
+ # no timeout due
+ def next_timeout(self):
+ next = 0
+ for t in self.timers:
+ last = t.get_last_fired()
+ interval = t.get_interval()
+ if interval < 0:
+ continue
+ if next == 0 or (last + interval) < next:
+ next = last + interval
+
+ return next
+
+ # Lookup a virEventLoopPureHandle object based on file descriptor
+ def get_handle_by_fd(self, fd):
+ for h in self.handles:
+ if h.get_fd() == fd:
+ return h
+ return None
+
+ # Lookup a virEventLoopPureHandle object based on its event loop ID
+ def get_handle_by_id(self, handleID):
+ for h in self.handles:
+ if h.get_id() == handleID:
+ return h
+ return None
+
+
+ # This is the heart of the event loop, performing one single
+ # iteration. It asks when the next timeout is due, and then
+ # calculates the maximum amount of time it is able to sleep
+ # for in poll() pending file handle events.
+ #
+ # It then goes into the poll() sleep.
+ #
+ # When poll() returns, there will be zero or more file handle
+ # events which need to be dispatched to registered callbacks.
+ # It may also be time to fire some periodic timers.
+ #
+ # Due to the coarse granularity of scheduler timeslices, if
+ # we ask for a sleep of 500ms in order to satisfy a timer, we
+ # may return up to 1 scheduler timeslice early. So even though
+ # our sleep timeout was reached, the registered timer may not
+ # technically be at its expiry point. This leads to us going
+ # back around the loop with a crazy 5ms sleep. So when checking
+ # if timeouts are due, we allow a margin of 20ms, to avoid
+ # these pointless repeated tiny sleeps.
+ def run_once(self):
+ sleep = -1
+ self.runningPoll = True
+ try:
+ next = self.next_timeout()
+ debug("Next timeout due at %d" % next)
+ if next > 0:
+ now = int(time.time() * 1000)
+ if now >= next:
+ sleep = 0
+ else:
+ sleep = (next - now) / 1000.0
+
+ debug("Poll with a sleep of %d" % sleep)
+ events = self.poll.poll(sleep)
+
+ # Dispatch any file handle events that occurred
+ for (fd, revents) in events:
+ # See if the event was from the self-pipe
+ # telling us to wake up. If so, discard
+ # the data and just continue
+ if fd == self.pipetrick[0]:
+ self.pendingWakeup = False
+ data = os.read(fd, 1)
+ continue
+
+ h = self.get_handle_by_fd(fd)
+ if h:
+ debug("Dispatch fd %d handle %d events %d" % (fd, h.get_id(), revents))
+ h.dispatch(self.events_from_poll(revents))
+
+ now = int(time.time() * 1000)
+ for t in self.timers:
+ interval = t.get_interval()
+ if interval < 0:
+ continue
+
+ want = t.get_last_fired() + interval
+ # Deduct 20ms, since scheduler timeslice
+ # means we could be ever so slightly early
+ if now >= (want-20):
+ debug("Dispatch timer %d now %s want %s" % (t.get_id(), str(now), str(want)))
+ t.set_last_fired(now)
+ t.dispatch()
+
+ except (os.error, select.error), e:
+ if e.args[0] != errno.EINTR:
+ raise
+ finally:
+ self.runningPoll = False
+
+
+ # Actually run the event loop forever
+ def run_loop(self):
+ self.quit = False
+ while not self.quit:
+ self.run_once()
+
+ def interrupt(self):
+ if self.runningPoll and not self.pendingWakeup:
+ self.pendingWakeup = True
+ os.write(self.pipetrick[1], 'c')
+
+
+ # Registers a new file handle 'fd', monitoring for 'events' (libvirt
+ # event constants), firing the callback cb() when an event occurs.
+ # Returns a unique integer identifier for this handle, which should be
+ # used to later update/remove it
+ def add_handle(self, fd, events, cb, opaque):
+ handleID = self.nextHandleID + 1
+ self.nextHandleID = self.nextHandleID + 1
+
+ h = self.virEventLoopPureHandle(handleID, fd, events, cb, opaque)
+ self.handles.append(h)
+
+ self.poll.register(fd, self.events_to_poll(events))
+ self.interrupt()
+
+ debug("Add handle %d fd %d events %d" % (handleID, fd, events))
+
+ return handleID
+
+ # Registers a new timer with periodic expiry at 'interval' ms,
+ # firing cb() each time the timer expires. If 'interval' is -1,
+ # then the timer is registered, but not enabled
+ # Returns a unique integer identifier for this handle, which should be
+ # used to later update/remove it
+ def add_timer(self, interval, cb, opaque):
+ timerID = self.nextTimerID + 1
+ self.nextTimerID = self.nextTimerID + 1
+
+ h = self.virEventLoopPureTimer(timerID, interval, cb, opaque)
+ self.timers.append(h)
+ self.interrupt()
+
+ debug("Add timer %d interval %d" % (timerID, interval))
+
+ return timerID
+
+ # Change the set of events to be monitored on the file handle
+ def update_handle(self, handleID, events):
+ h = self.get_handle_by_id(handleID)
+ if h:
+ h.set_events(events)
+ self.poll.unregister(h.get_fd())
+ self.poll.register(h.get_fd(), self.events_to_poll(events))
+ self.interrupt()
+
+ debug("Update handle %d fd %d events %d" % (handleID, h.get_fd(), events))
+
+ # Change the periodic frequency of the timer
+ def update_timer(self, timerID, interval):
+ for h in self.timers:
+ if h.get_id() == timerID:
+ h.set_interval(interval)
+ self.interrupt()
+
+ debug("Update timer %d interval %d" % (timerID, interval))
+ break
+
+ # Stop monitoring for events on the file handle
+ def remove_handle(self, handleID):
+ handles = []
+ for h in self.handles:
+ if h.get_id() == handleID:
+ self.poll.unregister(h.get_fd())
+ debug("Remove handle %d fd %d" % (handleID, h.get_fd()))
+ else:
+ handles.append(h)
+ self.handles = handles
+ self.interrupt()
+
+ # Stop firing the periodic timer
+ def remove_timer(self, timerID):
+ timers = []
+ for h in self.timers:
+ if h.get_id() != timerID:
+ timers.append(h)
+ debug("Remove timer %d" % timerID)
+ self.timers = timers
+ self.interrupt()
+
+ # Convert from libvirt event constants to poll() event constants
+ def events_to_poll(self, events):
+ ret = 0
+ if events & libvirt.VIR_EVENT_HANDLE_READABLE:
+ ret |= select.POLLIN
+ if events & libvirt.VIR_EVENT_HANDLE_WRITABLE:
+ ret |= select.POLLOUT
+ if events & libvirt.VIR_EVENT_HANDLE_ERROR:
+ ret |= select.POLLERR
+ if events & libvirt.VIR_EVENT_HANDLE_HANGUP:
+ ret |= select.POLLHUP
+ return ret
+
+ # Convert from poll() event constants to libvirt event constants
+ def events_from_poll(self, events):
+ ret = 0
+ if events & select.POLLIN:
+ ret |= libvirt.VIR_EVENT_HANDLE_READABLE
+ if events & select.POLLOUT:
+ ret |= libvirt.VIR_EVENT_HANDLE_WRITABLE
+ if events & select.POLLNVAL:
+ ret |= libvirt.VIR_EVENT_HANDLE_ERROR
+ if events & select.POLLERR:
+ ret |= libvirt.VIR_EVENT_HANDLE_ERROR
+ if events & select.POLLHUP:
+ ret |= libvirt.VIR_EVENT_HANDLE_HANGUP
+ return ret
+
+
+###########################################################################
+# Now glue an instance of the general event loop into libvirt's event loop
+###########################################################################
+
+# This single global instance of the event loop will be used for
+# monitoring libvirt events
+eventLoop = virEventLoopPure()
+
+# This keeps track of what thread is running the event loop,
+# (if it is run in a background thread)
+eventLoopThread = None
+
+
+# These next set of 6 methods are the glue between the official
+# libvirt events API, and our particular impl of the event loop
+#
+# There is no reason why the 'virEventLoopPure' has to be used.
+# An application could easily make these 6 glue methods hook into
+# another event loop such as GLib's, or something like the Python
+# Twisted event framework.
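+#
+# As a purely hypothetical sketch, a GLib-based application might implement
+# virEventAddHandleImpl() on top of glib.io_add_watch() and
+# virEventAddTimerImpl() on top of glib.timeout_add(), translating the
+# libvirt event masks to GLib IO conditions.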
+
+def virEventAddHandleImpl(fd, events, cb, opaque):
+ global eventLoop
+ return eventLoop.add_handle(fd, events, cb, opaque)
+
+def virEventUpdateHandleImpl(handleID, events):
+ global eventLoop
+ return eventLoop.update_handle(handleID, events)
+
+def virEventRemoveHandleImpl(handleID):
+ global eventLoop
+ return eventLoop.remove_handle(handleID)
+
+def virEventAddTimerImpl(interval, cb, opaque):
+ global eventLoop
+ return eventLoop.add_timer(interval, cb, opaque)
+
+def virEventUpdateTimerImpl(timerID, interval):
+ global eventLoop
+ return eventLoop.update_timer(timerID, interval)
+
+def virEventRemoveTimerImpl(timerID):
+ global eventLoop
+ return eventLoop.remove_timer(timerID)
+
+# This tells libvirt what event loop implementation it
+# should use
+def virEventLoopPureRegister():
+ libvirt.virEventRegisterImpl(virEventAddHandleImpl,
+ virEventUpdateHandleImpl,
+ virEventRemoveHandleImpl,
+ virEventAddTimerImpl,
+ virEventUpdateTimerImpl,
+ virEventRemoveTimerImpl)
+
+# Directly run the event loop in the current thread
+def virEventLoopPureRun():
+ global eventLoop
+ eventLoop.run_loop()
+
+def virEventLoopNativeRun():
+ while True:
+ libvirt.virEventRunDefaultImpl()
+
+# Spawn a background thread to run the event loop
+def virEventLoopPureStart():
+ global eventLoopThread
+ virEventLoopPureRegister()
+ eventLoopThread = threading.Thread(target=virEventLoopPureRun, name="libvirtEventLoop")
+ eventLoopThread.setDaemon(True)
+ eventLoopThread.start()
+
+def virEventLoopNativeStart():
+ global eventLoopThread
+ libvirt.virEventRegisterDefaultImpl()
+ eventLoopThread = threading.Thread(target=virEventLoopNativeRun, name="libvirtEventLoop")
+ eventLoopThread.setDaemon(True)
+ eventLoopThread.start()
+
+
+##########################################################################
+# Everything that now follows is a simple demo of domain lifecycle events
+##########################################################################
+def eventToString(event):
+ eventStrings = ( "Defined",
+ "Undefined",
+ "Started",
+ "Suspended",
+ "Resumed",
+ "Stopped",
+ "Shutdown",
+ "PMSuspended",
+ "Crashed" )
+ return eventStrings[event]
+
+def detailToString(event, detail):
+ eventStrings = (
+ ( "Added", "Updated" ),
+ ( "Removed", ),
+ ( "Booted", "Migrated", "Restored", "Snapshot", "Wakeup" ),
+ ( "Paused", "Migrated", "IOError", "Watchdog", "Restored", "Snapshot", "API error" ),
+ ( "Unpaused", "Migrated", "Snapshot" ),
+ ( "Shutdown", "Destroyed", "Crashed", "Migrated", "Saved", "Failed", "Snapshot"),
+ ( "Finished", ),
+ ( "Memory", "Disk" ),
+ ( "Panicked", )
+ )
+ return eventStrings[event][detail]
+
+def myDomainEventCallback1 (conn, dom, event, detail, opaque):
+ print "myDomainEventCallback1 EVENT: Domain %s(%s) %s %s" % (dom.name(), dom.ID(),
+ eventToString(event),
+ detailToString(event, detail))
+
+def myDomainEventCallback2 (conn, dom, event, detail, opaque):
+ print "myDomainEventCallback2 EVENT: Domain %s(%s) %s %s" % (dom.name(), dom.ID(),
+ eventToString(event),
+ detailToString(event, detail))
+
+def myDomainEventRebootCallback(conn, dom, opaque):
+ print "myDomainEventRebootCallback: Domain %s(%s)" % (dom.name(), dom.ID())
+
+def myDomainEventRTCChangeCallback(conn, dom, utcoffset, opaque):
+ print "myDomainEventRTCChangeCallback: Domain %s(%s) %d" % (dom.name(), dom.ID(), utcoffset)
+
+def myDomainEventWatchdogCallback(conn, dom, action, opaque):
+ print "myDomainEventWatchdogCallback: Domain %s(%s) %d" % (dom.name(), dom.ID(), action)
+
+def myDomainEventIOErrorCallback(conn, dom, srcpath, devalias, action, opaque):
+ print "myDomainEventIOErrorCallback: Domain %s(%s) %s %s %d" % (dom.name(), dom.ID(), srcpath, devalias, action)
+
+def myDomainEventGraphicsCallback(conn, dom, phase, localAddr, remoteAddr, authScheme, subject, opaque):
+ print "myDomainEventGraphicsCallback: Domain %s(%s) %d %s" % (dom.name(), dom.ID(), phase, authScheme)
+
+def myDomainEventDiskChangeCallback(conn, dom, oldSrcPath, newSrcPath, devAlias, reason, opaque):
+ print "myDomainEventDiskChangeCallback: Domain %s(%s) disk change oldSrcPath: %s newSrcPath: %s devAlias: %s reason: %s" % (
+ dom.name(), dom.ID(), oldSrcPath, newSrcPath, devAlias, reason)
+def myDomainEventTrayChangeCallback(conn, dom, devAlias, reason, opaque):
+ print "myDomainEventTrayChangeCallback: Domain %s(%s) tray change devAlias: %s reason: %s" % (
+ dom.name(), dom.ID(), devAlias, reason)
+def myDomainEventPMWakeupCallback(conn, dom, reason, opaque):
+ print "myDomainEventPMWakeupCallback: Domain %s(%s) system pmwakeup" % (
+ dom.name(), dom.ID())
+def myDomainEventPMSuspendCallback(conn, dom, reason, opaque):
+ print "myDomainEventPMSuspendCallback: Domain %s(%s) system pmsuspend" % (
+ dom.name(), dom.ID())
+def myDomainEventBalloonChangeCallback(conn, dom, actual, opaque):
+ print "myDomainEventBalloonChangeCallback: Domain %s(%s) %d" % (dom.name(), dom.ID(), actual)
+def myDomainEventPMSuspendDiskCallback(conn, dom, reason, opaque):
+ print "myDomainEventPMSuspendDiskCallback: Domain %s(%s) system pmsuspend_disk" % (
+ dom.name(), dom.ID())
+def myDomainEventDeviceRemovedCallback(conn, dom, dev, opaque):
+ print "myDomainEventDeviceRemovedCallback: Domain %s(%s) device removed: %s" % (
+ dom.name(), dom.ID(), dev)
+
+run = True
+
+def myConnectionCloseCallback(conn, reason, opaque):
+ global run  # needed so the assignment below stops the main loop
+ reasonStrings = (
+ "Error", "End-of-file", "Keepalive", "Client",
+ )
+ print "myConnectionCloseCallback: %s: %s" % (conn.getURI(), reasonStrings[reason])
+ run = False
+
+def usage(out=sys.stderr):
+ print >>out, "usage: "+os.path.basename(sys.argv[0])+" [-hdl] [uri]"
+ print >>out, " uri will default to qemu:///system"
+ print >>out, " --help, -h Print this help message"
+ print >>out, " --debug, -d Print debug output"
+ print >>out, " --loop, -l Toggle event-loop-implementation"
+
+def main():
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "hdl", ["help", "debug", "loop"])
+ except getopt.GetoptError, err:
+ # print help information and exit:
+ print str(err) # will print something like "option -a not recognized"
+ usage()
+ sys.exit(2)
+ for o, a in opts:
+ if o in ("-h", "--help"):
+ usage(sys.stdout)
+ sys.exit()
+ if o in ("-d", "--debug"):
+ global do_debug
+ do_debug = True
+ if o in ("-l", "--loop"):
+ global use_pure_python_event_loop
+ use_pure_python_event_loop ^= True
+
+ if len(args) >= 1:
+ uri = args[0]
+ else:
+ uri = "qemu:///system"
+
+ print "Using uri: " + uri
+
+ # Run a background thread with the event loop
+ if use_pure_python_event_loop:
+ virEventLoopPureStart()
+ else:
+ virEventLoopNativeStart()
+
+ vc = libvirt.openReadOnly(uri)
+
+ # Close connection on exit (to test cleanup paths)
+ old_exitfunc = getattr(sys, 'exitfunc', None)
+ def exit():
+ print "Closing " + str(vc)
+ vc.close()
+ if (old_exitfunc): old_exitfunc()
+ sys.exitfunc = exit
+
+ vc.registerCloseCallback(myConnectionCloseCallback, None)
+
+ # Add 2 callbacks to prove this works with more than just one
+ vc.domainEventRegister(myDomainEventCallback1, None)
+ vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, myDomainEventCallback2, None)
+ vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_REBOOT, myDomainEventRebootCallback, None)
+ vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_RTC_CHANGE, myDomainEventRTCChangeCallback, None)
+ vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_IO_ERROR, myDomainEventIOErrorCallback, None)
+ vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_WATCHDOG, myDomainEventWatchdogCallback, None)
+ vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_GRAPHICS, myDomainEventGraphicsCallback, None)
+ vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_DISK_CHANGE, myDomainEventDiskChangeCallback, None)
+ vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_TRAY_CHANGE, myDomainEventTrayChangeCallback, None)
+ vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_PMWAKEUP, myDomainEventPMWakeupCallback, None)
+ vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_PMSUSPEND, myDomainEventPMSuspendCallback, None)
+ vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_BALLOON_CHANGE, myDomainEventBalloonChangeCallback, None)
+ vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_PMSUSPEND_DISK, myDomainEventPMSuspendDiskCallback, None)
+ vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_DEVICE_REMOVED, myDomainEventDeviceRemovedCallback, None)
+
+ vc.setKeepAlive(5, 3)
+
+ # The rest of your app would go here normally, but for the sake
+ # of the demo we'll just go to sleep. The other option is to
+ # run the event loop in your main thread if your app is
+ # totally event based.
+ while run:
+ time.sleep(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/topology.py b/examples/topology.py
new file mode 100755
index 0000000..62effe3
--- /dev/null
+++ b/examples/topology.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# Parse topology information from the capabilities XML and use
+# it to calculate the host topology
+#
+# Authors:
+# Amador Pahim <apahim@redhat.com>
+# Peter Krempa <pkrempa@redhat.com>
+
+import libvirt
+import sys
+from xml.dom import minidom
+
+try:
+ conn = libvirt.openReadOnly(None)
+except libvirt.libvirtError:
+ print 'Failed to connect to the hypervisor'
+ sys.exit(1)
+
+try:
+ capsXML = conn.getCapabilities()
+except libvirt.libvirtError:
+ print 'Failed to request capabilities'
+ sys.exit(1)
+
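+# As a rough illustration (ids and counts vary by host), the capabilities XML
+# parsed below describes the host topology roughly like this:
+#   <host>
+#     <topology>
+#       <cells num='2'>
+#         <cell id='0'>
+#           <cpus num='4'>
+#             <cpu id='0' socket_id='0' core_id='0' siblings='0'/>
+#             ...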
+caps = minidom.parseString(capsXML)
+host = caps.getElementsByTagName('host')[0]
+cells = host.getElementsByTagName('cells')[0]
+total_cpus = cells.getElementsByTagName('cpu').length
+
+socketIds = []
+siblingsIds = []
+
+socketIds = [ proc.getAttribute('socket_id')
+ for proc in cells.getElementsByTagName('cpu')
+ if proc.getAttribute('socket_id') not in socketIds ]
+
+siblingsIds = [ proc.getAttribute('siblings')
+ for proc in cells.getElementsByTagName('cpu')
+ if proc.getAttribute('siblings') not in siblingsIds ]
+
+print "Host topology"
+print "NUMA nodes:", cells.getAttribute('num')
+print " Sockets:", len(set(socketIds))
+print " Cores:", len(set(siblingsIds))
+print " Threads:", total_cpus