| author | Vishvananda Ishaya <vishvananda@gmail.com> | 2011-04-05 12:55:57 -0700 |
|---|---|---|
| committer | Vishvananda Ishaya <vishvananda@gmail.com> | 2011-04-05 12:55:57 -0700 |
| commit | b66535602eae6b2f91cc5573798cd837e63f8ecc (patch) | |
| tree | ecec28580523826f8315c226af32e4880478e7cf /plugins | |
| parent | e0ba72946011b67a218e3c619b3105529bb43e53 (diff) | |
| parent | 94ccd2f4a1c42a8574fe65972650428130ae850d (diff) | |
| download | nova-b66535602eae6b2f91cc5573798cd837e63f8ecc.tar.gz nova-b66535602eae6b2f91cc5573798cd837e63f8ecc.tar.xz nova-b66535602eae6b2f91cc5573798cd837e63f8ecc.zip | |
merged trunk
Diffstat (limited to 'plugins')
4 files changed, 557 insertions, 142 deletions
diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py
index d60816ce7..48122e6d6 100755
--- a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py
+++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py
@@ -30,13 +30,14 @@ import simplejson as json

 def main(dom_id, command, only_this_vif=None):
-    xsls = execute("/usr/bin/xenstore-ls /local/domain/%s/vm-data/networking" \
-            % dom_id, True)
+    xsls = execute('/usr/bin/xenstore-ls',
+                   '/local/domain/%s/vm-data/networking' % dom_id,
+                   return_stdout=True)
     macs = [line.split("=")[0].strip() for line in xsls.splitlines()]

     for mac in macs:
-        xsr = "/usr/bin/xenstore-read /local/domain/%s/vm-data/networking/%s"
-        xsread = execute(xsr % (dom_id, mac), True)
+        xsread = execute('/usr/bin/xenstore-read',
+                         '/local/domain/%s/vm-data/networking/%s' %
+                         (dom_id, mac), return_stdout=True)
         data = json.loads(xsread)
         for ip in data['ips']:
             if data["label"] == "public":
@@ -51,9 +52,10 @@ def main(dom_id, command, only_this_vif=None):
             apply_iptables_rules(command, params)


-def execute(command, return_stdout=False):
+def execute(*command, **kwargs):
+    return_stdout = kwargs.pop('return_stdout', False)
     devnull = open(os.devnull, 'w')
-    proc = subprocess.Popen(command, shell=True, close_fds=True,
+    command = map(str, command)
+    proc = subprocess.Popen(command, close_fds=True,
                             stdout=subprocess.PIPE, stderr=devnull)
     devnull.close()
     if return_stdout:
@@ -67,45 +69,68 @@ def execute(command, return_stdout=False):


 def apply_iptables_rules(command, params):
-    iptables = lambda rule: execute("/sbin/iptables %s" % rule)
+    iptables = lambda *rule: execute('/sbin/iptables', *rule)

-    iptables("-D FORWARD -m physdev --physdev-in %(VIF)s -s %(IP)s \
-             -j ACCEPT" % params)
+    iptables('-D', 'FORWARD', '-m', 'physdev',
+             '--physdev-in', params['VIF'],
+             '-s', params['IP'],
+             '-j', 'ACCEPT')
     if command == 'online':
-        iptables("-A FORWARD -m physdev --physdev-in %(VIF)s -s %(IP)s \
-                 -j ACCEPT" % params)
+        iptables('-A', 'FORWARD', '-m', 'physdev',
+                 '--physdev-in', params['VIF'],
+                 '-s', params['IP'],
+                 '-j', 'ACCEPT')


 def apply_arptables_rules(command, params):
-    arptables = lambda rule: execute("/sbin/arptables %s" % rule)
-
-    arptables("-D FORWARD --opcode Request --in-interface %(VIF)s \
-              --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
-    arptables("-D FORWARD --opcode Reply --in-interface %(VIF)s \
-              --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
+    arptables = lambda *rule: execute('/sbin/arptables', *rule)
+
+    arptables('-D', 'FORWARD', '--opcode', 'Request',
+              '--in-interface', params['VIF'],
+              '--source-ip', params['IP'],
+              '--source-mac', params['MAC'],
+              '-j', 'ACCEPT')
+    arptables('-D', 'FORWARD', '--opcode', 'Reply',
+              '--in-interface', params['VIF'],
+              '--source-ip', params['IP'],
+              '--source-mac', params['MAC'],
+              '-j', 'ACCEPT')
    if command == 'online':
-        arptables("-A FORWARD --opcode Request --in-interface %(VIF)s \
-                  --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
-        arptables("-A FORWARD --opcode Reply --in-interface %(VIF)s \
-                  --source-ip %(IP)s --source-mac %(MAC)s -j ACCEPT" % params)
+        arptables('-A', 'FORWARD', '--opcode', 'Request',
+                  '--in-interface', params['VIF'],
+                  '--source-ip', params['IP'],
+                  '--source-mac', params['MAC'],
+                  '-j', 'ACCEPT')
+        arptables('-A', 'FORWARD', '--opcode', 'Reply',
+                  '--in-interface', params['VIF'],
+                  '--source-ip', params['IP'],
+                  '--source-mac', params['MAC'],
+                  '-j', 'ACCEPT')


 def apply_ebtables_rules(command, params):
-    ebtables = lambda rule: execute("/sbin/ebtables %s" % rule)
-
-    ebtables("-D FORWARD -p 0806 -o %(VIF)s --arp-ip-dst %(IP)s -j ACCEPT" %
-             params)
-    ebtables("-D FORWARD -p 0800 -o %(VIF)s --ip-dst %(IP)s -j ACCEPT" %
-             params)
+    ebtables = lambda *rule: execute('/sbin/ebtables', *rule)
+
+    ebtables('-D', 'FORWARD', '-p', '0806', '-o', params['VIF'],
+             '--arp-ip-dst', params['IP'],
+             '-j', 'ACCEPT')
+    ebtables('-D', 'FORWARD', '-p', '0800', '-o', params['VIF'],
+             '--ip-dst', params['IP'],
+             '-j', 'ACCEPT')
     if command == 'online':
-        ebtables("-A FORWARD -p 0806 -o %(VIF)s --arp-ip-dst %(IP)s \
-                 -j ACCEPT" % params)
-        ebtables("-A FORWARD -p 0800 -o %(VIF)s --ip-dst %(IP)s \
-                 -j ACCEPT" % params)
-
-    ebtables("-D FORWARD -s ! %(MAC)s -i %(VIF)s -j DROP" % params)
+        ebtables('-A', 'FORWARD', '-p', '0806',
+                 '-o', params['VIF'],
+                 '--arp-ip-dst', params['IP'],
+                 '-j', 'ACCEPT')
+        ebtables('-A', 'FORWARD', '-p', '0800',
+                 '-o', params['VIF'],
+                 '--ip-dst', params['IP'],
+                 '-j', 'ACCEPT')
+
+    ebtables('-D', 'FORWARD', '-s', '!', params['MAC'],
+             '-i', params['VIF'], '-j', 'DROP')
     if command == 'online':
-        ebtables("-I FORWARD 1 -s ! %(MAC)s -i %(VIF)s -j DROP" % params)
+        ebtables('-I', 'FORWARD', '1', '-s', '!', params['MAC'],
+                 '-i', params['VIF'], '-j', 'DROP')


 if __name__ == "__main__":
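The heart of this change is dropping `shell=True` in favor of argv-style execution, so MAC and IP values read from xenstore are never interpreted by a shell. A minimal sketch of the difference, using `/bin/echo` as a stand-in for the firewall tools (not part of the commit):

```python
import subprocess

# Each argv element reaches the program verbatim; a hostile value read
# from xenstore is just a strange-looking literal argument.
evil = '00:11:22:33:44:55; rm -rf /'
subprocess.call(['/bin/echo', evil])  # prints the string, nothing more

# With the old pattern the same value is spliced into a shell line, and
# everything after ';' would run as a second command:
#   subprocess.call("/bin/echo %s" % evil, shell=True)   # DON'T
```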
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
index 94eaabe73..5496a6bd5 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
@@ -22,6 +22,8 @@
 # XenAPI plugin for reading/writing information to xenstore
 #
+import base64
+import commands
 try:
     import json
 except ImportError:
@@ -66,7 +68,7 @@ def key_init(self, arg_dict):
     try:
         resp = _wait_for_agent(self, request_id, arg_dict)
     except TimeoutError, e:
-        raise PluginError("%s" % e)
+        raise PluginError(e)
     return resp
@@ -87,7 +89,7 @@ def password(self, arg_dict):
     try:
         resp = _wait_for_agent(self, request_id, arg_dict)
     except TimeoutError, e:
-        raise PluginError("%s" % e)
+        raise PluginError(e)
     return resp
@@ -102,6 +104,75 @@ def resetnetwork(self, arg_dict):
     xenstore.write_record(self, arg_dict)


+@jsonify
+def inject_file(self, arg_dict):
+    """Expects a file path and the contents of the file to be written. Both
+    should be base64-encoded in order to eliminate errors as they are passed
+    through the stack. Writes that information to xenstore for the agent,
+    which will decode the file and intended path, and create it on the
+    instance. The original agent munged both of these into a single entry;
+    the new agent keeps them separate. We will need to test for the new
+    agent, and write the xenstore records to match the agent version. We
+    will also need to test to determine if the file injection method on the
+    agent has been disabled, and raise a NotImplemented error if that is
+    the case.
+    """
+    b64_path = arg_dict["b64_path"]
+    b64_file = arg_dict["b64_file"]
+    request_id = arg_dict["id"]
+    if _agent_has_method(self, "file_inject"):
+        # New version of the agent. Agent should receive a 'value'
+        # key whose value is a dictionary containing 'b64_path' and
+        # 'b64_file'. See old version below.
+        arg_dict["value"] = json.dumps({"name": "file_inject",
+                                        "value": {"b64_path": b64_path,
+                                                  "b64_file": b64_file}})
+    elif _agent_has_method(self, "injectfile"):
+        # Old agent requires file path and file contents to be
+        # combined into one base64 value.
+        raw_path = base64.b64decode(b64_path)
+        raw_file = base64.b64decode(b64_file)
+        new_b64 = base64.b64encode("%s,%s" % (raw_path, raw_file))
+        arg_dict["value"] = json.dumps({"name": "injectfile",
+                                        "value": new_b64})
+    else:
+        # Either the methods don't exist in the agent, or they
+        # have been disabled.
+        raise NotImplementedError(_("NOT IMPLEMENTED: Agent does not"
+                                    " support file injection."))
+    arg_dict["path"] = "data/host/%s" % request_id
+    xenstore.write_record(self, arg_dict)
+    try:
+        resp = _wait_for_agent(self, request_id, arg_dict)
+    except TimeoutError, e:
+        raise PluginError(e)
+    return resp
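For reference, the two xenstore record formats built above, sketched with made-up values; note why the re-encode in the old-agent branch has to wrap the already-formatted string:

```python
import base64
import json

# Illustrative values only.
b64_path = base64.b64encode("/etc/hostname")
b64_file = base64.b64encode("myhost\n")

# New agent: path and contents stay separate under 'value'.
new_style = json.dumps({"name": "file_inject",
                        "value": {"b64_path": b64_path,
                                  "b64_file": b64_file}})

# Old agent: decode both, join with a comma, re-encode as one blob.
old_style = json.dumps({"name": "injectfile",
                        "value": base64.b64encode("%s,%s" % (
                            base64.b64decode(b64_path),
                            base64.b64decode(b64_file)))})

# Note: base64.b64encode("%s,%s") % (path, file) would encode the
# literal '%s,%s' and then fail with a TypeError, since the encoded
# result contains no format specifiers. The %-formatting must happen
# before encoding, as above.
```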
+
+
+def _agent_has_method(self, method):
+    """Check that the agent has a particular method by checking its
+    features. Cache the features so we don't have to query the agent
+    every time we need to check.
+    """
+    try:
+        self._agent_methods
+    except AttributeError:
+        self._agent_methods = []
+    if not self._agent_methods:
+        # Haven't been defined
+        tmp_id = commands.getoutput("uuidgen")
+        dct = {}
+        dct["value"] = json.dumps({"name": "features", "value": ""})
+        dct["path"] = "data/host/%s" % tmp_id
+        xenstore.write_record(self, dct)
+        try:
+            resp = _wait_for_agent(self, tmp_id, dct)
+        except TimeoutError, e:
+            raise PluginError(e)
+        response = json.loads(resp)
+        # The agent returns a comma-separated list of methods.
+        self._agent_methods = response.split(",")
+    return method in self._agent_methods
+
+
 def _wait_for_agent(self, request_id, arg_dict):
     """Periodically checks xenstore for a response from the agent.
     The request is always written to 'data/host/{id}', and
@@ -119,9 +190,8 @@ def _wait_for_agent(self, request_id, arg_dict):
             # First, delete the request record
             arg_dict["path"] = "data/host/%s" % request_id
             xenstore.delete_record(self, arg_dict)
-            raise TimeoutError(
-                "TIMEOUT: No response from agent within %s seconds." %
-                AGENT_TIMEOUT)
+            raise TimeoutError(_("TIMEOUT: No response from agent within"
+                                 " %s seconds.") % AGENT_TIMEOUT)
         ret = xenstore.read_record(self, arg_dict)
         # Note: the response for None will be a string that includes
         # double quotes.
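A stripped-down sketch of the polling contract `_wait_for_agent` implements, assuming (per the comment above) that an unanswered request reads back as the quoted string "None"; the helper name and poll interval are illustrative:

```python
import time

AGENT_TIMEOUT = 30        # seconds; stands in for the plugin's constant
POLL_INTERVAL = 0.5       # assumed granularity between xenstore reads

def wait_for_agent(read_record, path):
    # Poll a xenstore record until the agent overwrites it with a
    # response, giving up after AGENT_TIMEOUT seconds.
    deadline = time.time() + AGENT_TIMEOUT
    while time.time() < deadline:
        ret = read_record(path)
        if ret != '"None"':
            return ret
        time.sleep(POLL_INTERVAL)
    raise Exception("TIMEOUT: No response from agent within %s seconds"
                    % AGENT_TIMEOUT)
```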
@@ -136,4 +206,5 @@ if __name__ == "__main__":
     XenAPIPlugin.dispatch(
             {"key_init": key_init,
              "password": password,
-             "resetnetwork": resetnetwork})
+             "resetnetwork": resetnetwork,
+             "inject_file": inject_file})
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index 61b947c25..0a45f3873 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -21,17 +21,14 @@
 # XenAPI plugin for managing glance images
 #
-import base64
-import errno
-import hmac
 import httplib
 import os
 import os.path
 import pickle
-import sha
+import shlex
+import shutil
 import subprocess
-import time
-import urlparse
+import tempfile

 import XenAPIPlugin
@@ -41,30 +38,6 @@ configure_logging('glance')

 CHUNK_SIZE = 8192
 KERNEL_DIR = '/boot/guest'
-FILE_SR_PATH = '/var/run/sr-mount'
-
-
-def remove_kernel_ramdisk(session, args):
-    """Removes kernel and/or ramdisk from dom0's file system"""
-    kernel_file = exists(args, 'kernel-file')
-    ramdisk_file = exists(args, 'ramdisk-file')
-    if kernel_file:
-        os.remove(kernel_file)
-    if ramdisk_file:
-        os.remove(ramdisk_file)
-    return "ok"
-
-
-def copy_kernel_vdi(session, args):
-    vdi = exists(args, 'vdi-ref')
-    size = exists(args, 'image-size')
-    #Use the uuid as a filename
-    vdi_uuid = session.xenapi.VDI.get_uuid(vdi)
-    copy_args = {'vdi_uuid': vdi_uuid, 'vdi_size': int(size)}
-    filename = with_vdi_in_dom0(session, vdi, False,
-                                lambda dev:
-                                _copy_kernel_vdi('/dev/%s' % dev, copy_args))
-    return filename


 def _copy_kernel_vdi(dest, copy_args):
@@ -89,93 +62,321 @@ def _copy_kernel_vdi(dest, copy_args):
     return filename


-def put_vdis(session, args):
+def _download_tarball(sr_path, staging_path, image_id, glance_host,
+                      glance_port):
+    """Download the tarball image from Glance and extract it into the
+    staging area.
+    """
+    conn = httplib.HTTPConnection(glance_host, glance_port)
+    conn.request('GET', '/images/%s' % image_id)
+    resp = conn.getresponse()
+    if resp.status == httplib.NOT_FOUND:
+        raise Exception("Image '%s' not found in Glance" % image_id)
+    elif resp.status != httplib.OK:
+        raise Exception("Unexpected response from Glance %i" % resp.status)
+
+    tar_cmd = "tar -zx --directory=%(staging_path)s" % locals()
+    tar_proc = _make_subprocess(tar_cmd, stderr=True, stdin=True)
+
+    chunk = resp.read(CHUNK_SIZE)
+    while chunk:
+        tar_proc.stdin.write(chunk)
+        chunk = resp.read(CHUNK_SIZE)
+
+    _finish_subprocess(tar_proc, tar_cmd)
+    conn.close()
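`_download_tarball` streams the HTTP body straight into `tar`, so the image never lands on disk as a tarball. The same pattern in isolation (hypothetical helper, stdlib only):

```python
import httplib
import subprocess

CHUNK_SIZE = 8192

def stream_to_tar(host, port, path, dest_dir):
    # Pipe an HTTP response body directly into `tar -zx` via its stdin.
    conn = httplib.HTTPConnection(host, port)
    conn.request('GET', path)
    resp = conn.getresponse()
    tar = subprocess.Popen(['tar', '-zx', '--directory=%s' % dest_dir],
                           stdin=subprocess.PIPE)
    chunk = resp.read(CHUNK_SIZE)
    while chunk:
        tar.stdin.write(chunk)
        chunk = resp.read(CHUNK_SIZE)
    tar.stdin.close()
    if tar.wait() != 0:
        raise Exception("tar exited non-zero")
    conn.close()
```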
+
+
+def _fixup_vhds(sr_path, staging_path, uuid_stack):
+    """Fixup the downloaded VHDs before we move them into the SR.
+
+    We cannot extract VHDs directly into the SR since they don't yet have
+    UUIDs, aren't properly associated with each other, and would be subject
+    to a race-condition of one file being present and the other not being
+    downloaded yet.
+
+    To avoid these problems, we use a staging area to fixup the VHDs before
+    moving them into the SR. The steps involved are:
+
+        1. Extracting tarball into staging area
+
+        2. Renaming VHDs to use UUIDs ('snap.vhd' -> 'ffff-aaaa-...vhd')
+
+        3. Linking the two VHDs together
+
+        4. Pseudo-atomically moving the images into the SR. (It's not
+           really atomic because it takes place as two os.rename
+           operations; however, the chances of an SR.scan occurring between
+           the two rename() invocations is so small that we can safely
+           ignore it)
+    """
+    def rename_with_uuid(orig_path):
+        """Rename VHD using UUID so that it will be recognized by SR on a
+        subsequent scan.
+
+        Since Python2.4 doesn't have the `uuid` module, we pass a stack of
+        pre-computed UUIDs from the compute worker.
+        """
+        orig_dirname = os.path.dirname(orig_path)
+        uuid = uuid_stack.pop()
+        new_path = os.path.join(orig_dirname, "%s.vhd" % uuid)
+        os.rename(orig_path, new_path)
+        return new_path, uuid
+
+    def link_vhds(child_path, parent_path):
+        """Use vhd-util to associate the snapshot VHD with its base_copy.
+
+        This needs to be done before we move both VHDs into the SR to
+        prevent the base_copy from being DOA (deleted-on-arrival).
+        """
+        modify_cmd = ("vhd-util modify -n %(child_path)s -p %(parent_path)s"
+                      % locals())
+        modify_proc = _make_subprocess(modify_cmd, stderr=True)
+        _finish_subprocess(modify_proc, modify_cmd)
+
+    def move_into_sr(orig_path):
+        """Move a file into the SR"""
+        filename = os.path.basename(orig_path)
+        new_path = os.path.join(sr_path, filename)
+        os.rename(orig_path, new_path)
+        return new_path
+
+    def assert_vhd_not_hidden(path):
+        """Sanity check on the image: if a snap.vhd isn't present, then
+        the image.vhd better not be marked 'hidden' or it will be deleted
+        when moved into the SR.
+        """
+        query_cmd = "vhd-util query -n %(path)s -f" % locals()
+        query_proc = _make_subprocess(query_cmd, stdout=True, stderr=True)
+        out, err = _finish_subprocess(query_proc, query_cmd)
+
+        for line in out.splitlines():
+            if line.startswith('hidden'):
+                value = line.split(':')[1].strip()
+                if value == "1":
+                    raise Exception(
+                        "VHD %(path)s is marked as hidden without child" %
+                        locals())
+
+    orig_base_copy_path = os.path.join(staging_path, 'image.vhd')
+    if not os.path.exists(orig_base_copy_path):
+        raise Exception("Invalid image: image.vhd not present")
+
+    base_copy_path, base_copy_uuid = rename_with_uuid(orig_base_copy_path)
+
+    vdi_uuid = base_copy_uuid
+    orig_snap_path = os.path.join(staging_path, 'snap.vhd')
+    if os.path.exists(orig_snap_path):
+        snap_path, snap_uuid = rename_with_uuid(orig_snap_path)
+        vdi_uuid = snap_uuid
+        # NOTE(sirp): this step is necessary so that an SR scan won't
+        # delete the base_copy out from under us (since it would be
+        # orphaned)
+        link_vhds(snap_path, base_copy_path)
+        move_into_sr(snap_path)
+    else:
+        assert_vhd_not_hidden(base_copy_path)
+
+    move_into_sr(base_copy_path)
+    return vdi_uuid
+
+
+def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids):
+    """Hard-link VHDs into the staging area with the appropriate filename
+    ('snap.vhd' or 'image.vhd').
+    """
+    for name, uuid in vdi_uuids.items():
+        source = os.path.join(sr_path, "%s.vhd" % uuid)
+        link_name = os.path.join(staging_path, "%s.vhd" % name)
+        os.link(source, link_name)
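A note on `uuid_stack`: dom0 runs Python 2.4, which lacks the `uuid` module, so the UUIDs are minted on the compute worker and shipped across. A plausible worker-side helper (hypothetical name, not part of this commit):

```python
import uuid

def make_uuid_stack(size=2):
    # One UUID per VHD the plugin may need to rename (image.vhd, plus
    # snap.vhd when a snapshot is present); the plugin pops from this.
    return [str(uuid.uuid4()) for i in range(size)]
```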
+
+
+def _upload_tarball(staging_path, image_id, glance_host, glance_port,
+                    os_type):
+    """Create a tarball of the image and then stream that into Glance
+    using chunked-transfer-encoded HTTP.
+    """
+    conn = httplib.HTTPConnection(glance_host, glance_port)
+    # NOTE(sirp): httplib under python2.4 won't accept a file-like object
+    # to request
+    conn.putrequest('PUT', '/images/%s' % image_id)
+
+    # NOTE(sirp): There is some confusion around OVF. Here's a summary of
+    # where we currently stand:
+    #   1. OVF as a container format is misnamed. We really should be
+    #      using OVA since that is the name for the container format; OVF
+    #      is the standard applied to the manifest file contained within.
+    #   2. We're currently uploading a vanilla tarball. In order to be
+    #      OVF/OVA compliant, we'll need to embed a minimal OVF manifest
+    #      as the first file.
+    headers = {
+        'content-type': 'application/octet-stream',
+        'transfer-encoding': 'chunked',
+        'x-image-meta-is-public': 'True',
+        'x-image-meta-status': 'queued',
+        'x-image-meta-disk-format': 'vhd',
+        'x-image-meta-container-format': 'ovf',
+        'x-image-meta-property-os-type': os_type}
+
+    for header, value in headers.iteritems():
+        conn.putheader(header, value)
+    conn.endheaders()
+
+    tar_cmd = "tar -zc --directory=%(staging_path)s ." % locals()
+    tar_proc = _make_subprocess(tar_cmd, stdout=True, stderr=True)
+
+    chunk = tar_proc.stdout.read(CHUNK_SIZE)
+    while chunk:
+        conn.send("%x\r\n%s\r\n" % (len(chunk), chunk))
+        chunk = tar_proc.stdout.read(CHUNK_SIZE)
+    conn.send("0\r\n\r\n")
+
+    _finish_subprocess(tar_proc, tar_cmd)
+
+    resp = conn.getresponse()
+    if resp.status != httplib.OK:
+        raise Exception("Unexpected response from Glance %i" % resp.status)
+    conn.close()
+
+
+def _make_staging_area(sr_path):
+    """The staging area is a place where we can temporarily store and
+    manipulate VHDs. The use of the staging area is different for upload
+    and download:
+
+    Download
+    ========
+
+    When we download the tarball, the VHDs contained within will have
+    names like "snap.vhd" and "image.vhd". We need to assign UUIDs to them
+    before moving them into the SR. However, since 'image.vhd' may be a
+    base_copy, we need to link it to 'snap.vhd' (using vhd-util modify)
+    before moving both into the SR (otherwise the SR.scan will cause
+    'image.vhd' to be deleted). The staging area gives us a place to
+    perform these operations before they are moved to the SR, scanned, and
+    then registered with XenServer.
+
+    Upload
+    ======
+
+    On upload, we want to rename the VHDs to reflect what they are:
+    'snap.vhd' in the case of the snapshot VHD, and 'image.vhd' in the
+    case of the base_copy. The staging area provides a directory in which
+    we can create hard-links to rename the VHDs without affecting what's
+    in the SR.
+
+    NOTE
+    ====
+
+    The staging area is created as a subdirectory within the SR in order
+    to guarantee that it resides within the same filesystem and therefore
+    permit hard-linking and cheap file moves.
+    """
+    staging_path = tempfile.mkdtemp(dir=sr_path)
+    return staging_path
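The upload side hand-rolls HTTP/1.1 chunked transfer encoding because `httplib` on Python 2.4 cannot stream a body of unknown length itself. The framing in isolation (assumed helper name):

```python
import httplib

def send_chunked(conn, fileobj, chunk_size=8192):
    # Each chunk is framed as '<hex length>\r\n<data>\r\n'; a
    # zero-length chunk terminates the body.
    chunk = fileobj.read(chunk_size)
    while chunk:
        conn.send("%x\r\n%s\r\n" % (len(chunk), chunk))
        chunk = fileobj.read(chunk_size)
    conn.send("0\r\n\r\n")
```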
+ """ + shutil.rmtree(staging_path) + + +def _make_subprocess(cmdline, stdout=False, stderr=False, stdin=False): + """Make a subprocess according to the given command-line string + """ + kwargs = {} + kwargs['stdout'] = stdout and subprocess.PIPE or None + kwargs['stderr'] = stderr and subprocess.PIPE or None + kwargs['stdin'] = stdin and subprocess.PIPE or None + args = shlex.split(cmdline) + proc = subprocess.Popen(args, **kwargs) + return proc + + +def _finish_subprocess(proc, cmdline): + """Ensure that the process returned a zero exit code indicating success + """ + out, err = proc.communicate() + ret = proc.returncode + if ret != 0: + raise Exception("'%(cmdline)s' returned non-zero exit code: " + "retcode=%(ret)i, stderr='%(err)s'" % locals()) + return out, err + + +def download_vhd(session, args): + """Download an image from Glance, unbundle it, and then deposit the VHDs + into the storage repository + """ params = pickle.loads(exists(args, 'params')) - vdi_uuids = params["vdi_uuids"] image_id = params["image_id"] glance_host = params["glance_host"] glance_port = params["glance_port"] + uuid_stack = params["uuid_stack"] + sr_path = params["sr_path"] - sr_path = get_sr_path(session) - #FIXME(sirp): writing to a temp file until Glance supports chunked-PUTs - tmp_file = "%s.tar.gz" % os.path.join('/tmp', str(image_id)) - tar_cmd = ['tar', '-zcf', tmp_file, '--directory=%s' % sr_path] - paths = ["%s.vhd" % vdi_uuid for vdi_uuid in vdi_uuids] - tar_cmd.extend(paths) - logging.debug("Bundling image with cmd: %s", tar_cmd) - subprocess.call(tar_cmd) - logging.debug("Writing to test file %s", tmp_file) - put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port) - # FIXME(sirp): return anything useful here? - return "" - - -def put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port): - size = os.path.getsize(tmp_file) - basename = os.path.basename(tmp_file) - - bundle = open(tmp_file, 'r') + staging_path = _make_staging_area(sr_path) try: - headers = { - 'x-image-meta-store': 'file', - 'x-image-meta-is_public': 'True', - 'x-image-meta-type': 'raw', - 'x-image-meta-size': size, - 'content-length': size, - 'content-type': 'application/octet-stream', - } - conn = httplib.HTTPConnection(glance_host, glance_port) - #NOTE(sirp): httplib under python2.4 won't accept a file-like object - # to request - conn.putrequest('PUT', '/images/%s' % image_id) - - for header, value in headers.iteritems(): - conn.putheader(header, value) - conn.endheaders() - - chunk = bundle.read(CHUNK_SIZE) - while chunk: - conn.send(chunk) - chunk = bundle.read(CHUNK_SIZE) - - res = conn.getresponse() - #FIXME(sirp): should this be 201 Created? - if res.status != httplib.OK: - raise Exception("Unexpected response from Glance %i" % res.status) + _download_tarball(sr_path, staging_path, image_id, glance_host, + glance_port) + vdi_uuid = _fixup_vhds(sr_path, staging_path, uuid_stack) + return vdi_uuid finally: - bundle.close() + _cleanup_staging_area(staging_path) + + +def upload_vhd(session, args): + """Bundle the VHDs comprising an image and then stream them into Glance. 
+ """ + params = pickle.loads(exists(args, 'params')) + vdi_uuids = params["vdi_uuids"] + image_id = params["image_id"] + glance_host = params["glance_host"] + glance_port = params["glance_port"] + sr_path = params["sr_path"] + os_type = params["os_type"] + staging_path = _make_staging_area(sr_path) + try: + _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids) + _upload_tarball(staging_path, image_id, glance_host, glance_port, + os_type) + finally: + _cleanup_staging_area(staging_path) -def get_sr_path(session): - sr_ref = find_sr(session) + return "" # Nothing useful to return on an upload - if sr_ref is None: - raise Exception('Cannot find SR to read VDI from') - sr_rec = session.xenapi.SR.get_record(sr_ref) - sr_uuid = sr_rec["uuid"] - sr_path = os.path.join(FILE_SR_PATH, sr_uuid) - return sr_path +def copy_kernel_vdi(session, args): + vdi = exists(args, 'vdi-ref') + size = exists(args, 'image-size') + #Use the uuid as a filename + vdi_uuid = session.xenapi.VDI.get_uuid(vdi) + copy_args = {'vdi_uuid': vdi_uuid, 'vdi_size': int(size)} + filename = with_vdi_in_dom0(session, vdi, False, + lambda dev: + _copy_kernel_vdi('/dev/%s' % dev, copy_args)) + return filename -#TODO(sirp): both objectstore and glance need this, should this be refactored -#into common lib -def find_sr(session): - host = get_this_host(session) - srs = session.xenapi.SR.get_all() - for sr in srs: - sr_rec = session.xenapi.SR.get_record(sr) - if not ('i18n-key' in sr_rec['other_config'] and - sr_rec['other_config']['i18n-key'] == 'local-storage'): - continue - for pbd in sr_rec['PBDs']: - pbd_rec = session.xenapi.PBD.get_record(pbd) - if pbd_rec['host'] == host: - return sr - return None +def remove_kernel_ramdisk(session, args): + """Removes kernel and/or ramdisk from dom0's file system""" + kernel_file = exists(args, 'kernel-file') + ramdisk_file = exists(args, 'ramdisk-file') + if kernel_file: + os.remove(kernel_file) + if ramdisk_file: + os.remove(ramdisk_file) + return "ok" if __name__ == '__main__': - XenAPIPlugin.dispatch({'put_vdis': put_vdis, + XenAPIPlugin.dispatch({'upload_vhd': upload_vhd, + 'download_vhd': download_vhd, 'copy_kernel_vdi': copy_kernel_vdi, 'remove_kernel_ramdisk': remove_kernel_ramdisk}) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration new file mode 100644 index 000000000..75c653408 --- /dev/null +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -0,0 +1,118 @@ +#!/usr/bin/env python + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +XenAPI Plugin for transfering data between host nodes +""" + +import os +import os.path +import pickle +import shlex +import shutil +import subprocess + +import XenAPIPlugin + +from pluginlib_nova import * +configure_logging('migration') + + +def move_vhds_into_sr(session, args): + """Moves the VHDs from their copied location to the SR""" + params = pickle.loads(exists(args, 'params')) + instance_id = params['instance_id'] + + old_base_copy_uuid = params['old_base_copy_uuid'] + old_cow_uuid = params['old_cow_uuid'] + + new_base_copy_uuid = params['new_base_copy_uuid'] + new_cow_uuid = params['new_cow_uuid'] + + sr_path = params['sr_path'] + sr_temp_path = "%s/images/" % sr_path + + # Discover the copied VHDs locally, and then set up paths to copy + # them to under the SR + source_image_path = "%s/instance%d" % ('/images/', instance_id) + source_base_copy_path = "%s/%s.vhd" % (source_image_path, + old_base_copy_uuid) + source_cow_path = "%s/%s.vhd" % (source_image_path, old_cow_uuid) + + temp_vhd_path = "%s/instance%d/" % (sr_temp_path, instance_id) + new_base_copy_path = "%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid) + new_cow_path = "%s/%s.vhd" % (temp_vhd_path, new_cow_uuid) + + logging.debug('Creating temporary SR path %s' % temp_vhd_path) + os.makedirs(temp_vhd_path) + + logging.debug('Moving %s into %s' % (source_base_copy_path, temp_vhd_path)) + shutil.move(source_base_copy_path, new_base_copy_path) + + logging.debug('Moving %s into %s' % (source_cow_path, temp_vhd_path)) + shutil.move(source_cow_path, new_cow_path) + + logging.debug('Cleaning up %s' % source_image_path) + os.rmdir(source_image_path) + + # Link the COW to the base copy + logging.debug('Attaching COW to the base copy %s -> %s' % + (new_cow_path, new_base_copy_path)) + subprocess.call(shlex.split('/usr/sbin/vhd-util modify -n %s -p %s' % + (new_cow_path, new_base_copy_path))) + logging.debug('Moving VHDs into SR %s' % sr_path) + shutil.move("%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid), sr_path) + shutil.move("%s/%s.vhd" % (temp_vhd_path, new_cow_uuid), sr_path) + + logging.debug('Cleaning up temporary SR path %s' % temp_vhd_path) + os.rmdir(temp_vhd_path) + return "" + + +def transfer_vhd(session, args): + """Rsyncs a VHD to an adjacent host""" + params = pickle.loads(exists(args, 'params')) + instance_id = params['instance_id'] + host = params['host'] + vdi_uuid = params['vdi_uuid'] + sr_path = params['sr_path'] + vhd_path = "%s.vhd" % vdi_uuid + + source_path = "%s/%s" % (sr_path, vhd_path) + dest_path = '%s:%sinstance%d/' % (host, '/images/', instance_id) + + logging.debug("Preparing to transmit %s to %s" % (source_path, + dest_path)) + + ssh_cmd = '\"ssh -o StrictHostKeyChecking=no\"' + + rsync_args = shlex.split('nohup /usr/bin/rsync -av --progress -e %s %s %s' + % (ssh_cmd, source_path, dest_path)) + + logging.debug('rsync %s' % (' '.join(rsync_args, ))) + + rsync_proc = subprocess.Popen(rsync_args, stdout=subprocess.PIPE) + logging.debug('Rsync output: \n %s' % rsync_proc.communicate()[0]) + logging.debug('Rsync return: %d' % rsync_proc.returncode) + if rsync_proc.returncode != 0: + raise Exception("Unexpected VHD transfer failure") + return "" + + +if __name__ == '__main__': + XenAPIPlugin.dispatch({'transfer_vhd': transfer_vhd, + 'move_vhds_into_sr': move_vhds_into_sr, }) |
