-rwxr-xr-x  bin/nova-conductor                                51
-rw-r--r--  doc/source/conf.py                                 4
-rw-r--r--  doc/source/man/nova-conductor.rst                 45
-rw-r--r--  etc/nova/api-paste.ini                             2
-rw-r--r--  nova/compute/manager.py                            2
-rw-r--r--  nova/conductor/__init__.py (renamed from nova/virt/baremetal/__init__.py)  17
-rw-r--r--  nova/conductor/api.py                             61
-rw-r--r--  nova/conductor/manager.py                         51
-rw-r--r--  nova/conductor/rpcapi.py                          43
-rw-r--r--  nova/db/api.py                                     7
-rw-r--r--  nova/db/sqlalchemy/api.py                          7
-rw-r--r--  nova/scheduler/filters/image_props_filter.py       2
-rw-r--r--  nova/tests/baremetal/test_proxy_bare_metal.py    265
-rw-r--r--  nova/tests/baremetal/test_tilera.py               84
-rw-r--r--  nova/tests/compute/test_compute.py                 4
-rw-r--r--  nova/tests/conductor/__init__.py (renamed from nova/tests/baremetal/__init__.py)  0
-rw-r--r--  nova/tests/conductor/test_conductor.py           133
-rw-r--r--  nova/virt/baremetal/dom.py                       264
-rw-r--r--  nova/virt/baremetal/driver.py                    742
-rw-r--r--  nova/virt/baremetal/fake.py                      157
-rw-r--r--  nova/virt/baremetal/nodes.py                      41
-rw-r--r--  nova/virt/baremetal/tilera.py                    364
-rw-r--r--  nova/virt/hyperv/vmutils.py                       21
-rw-r--r--  nova/virt/libvirt/imagecache.py                    3
24 files changed, 424 insertions, 1946 deletions
diff --git a/bin/nova-conductor b/bin/nova-conductor
new file mode 100755
index 000000000..2dba8ee1b
--- /dev/null
+++ b/bin/nova-conductor
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Starter script for Nova Conductor."""
+
+import eventlet
+eventlet.monkey_patch()
+
+import os
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+
+from nova import config
+from nova.openstack.common import log as logging
+from nova import service
+from nova import utils
+
+CONF = config.CONF
+CONF.import_opt('topic', 'nova.conductor.api', group='conductor')
+
+if __name__ == '__main__':
+ config.parse_args(sys.argv)
+ logging.setup("nova")
+ utils.monkey_patch()
+ server = service.Service.create(binary='nova-conductor',
+ topic=CONF.conductor.topic,
+ manager=CONF.conductor.manager)
+ service.serve(server)
+ service.wait()
diff --git a/doc/source/conf.py b/doc/source/conf.py
index b52bcad0d..804080e79 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -154,7 +154,9 @@ man_pages = [
('man/nova-scheduler', 'nova-scheduler', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-xvpvncproxy', 'nova-xvpvncproxy', u'Cloud controller fabric',
- [u'OpenStack'], 1)
+ [u'OpenStack'], 1),
+ ('man/nova-conductor', 'nova-conductor', u'Cloud controller fabric',
+ [u'OpenStack'], 1),
]
# -- Options for HTML output --------------------------------------------------
diff --git a/doc/source/man/nova-conductor.rst b/doc/source/man/nova-conductor.rst
new file mode 100644
index 000000000..7a32730e1
--- /dev/null
+++ b/doc/source/man/nova-conductor.rst
@@ -0,0 +1,45 @@
+==============
+nova-conductor
+==============
+
+--------------------------------
+Server for the Nova Conductor
+--------------------------------
+
+:Author: openstack@lists.launchpad.net
+:Date: 2012-11-16
+:Copyright: OpenStack LLC
+:Version: 2012.1
+:Manual section: 1
+:Manual group: cloud computing
+
+SYNOPSIS
+========
+
+ nova-conductor [options]
+
+DESCRIPTION
+===========
+
+nova-conductor is a server daemon that serves the Nova Conductor service, which provides coordination and database query support for Nova.
+
+OPTIONS
+=======
+
+ **General options**
+
+FILES
+========
+
+* /etc/nova/nova.conf
+
+SEE ALSO
+========
+
+* `OpenStack Nova <http://nova.openstack.org>`__
+* `OpenStack Nova <http://nova.openstack.org>`__
+
+BUGS
+====
+
+* Nova is sourced in Launchpad so you can view current bugs at `OpenStack Nova <http://nova.openstack.org>`__
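Editor's note (not part of the diff): the daemon added above reads its settings from the [conductor] group in /etc/nova/nova.conf. A minimal sketch of inspecting the resolved options, assuming a nova tree with this change applied and mirroring what bin/nova-conductor does at startup:

    import sys

    from nova import config

    # Importing the option pulls in nova.conductor.api, which registers the
    # whole [conductor] group (topic, manager, use_local).
    config.CONF.import_opt('topic', 'nova.conductor.api', group='conductor')
    config.parse_args(sys.argv)

    print(config.CONF.conductor.topic)      # 'conductor' unless overridden
    print(config.CONF.conductor.manager)    # 'nova.conductor.manager.ConductorManager'
    print(config.CONF.conductor.use_local)  # False means talk to nova-conductor over RPC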
diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini
index 3970974c0..85603fe59 100644
--- a/etc/nova/api-paste.ini
+++ b/etc/nova/api-paste.ini
@@ -117,7 +117,7 @@ paste.app_factory = nova.api.openstack.volume.versions:Versions.factory
paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
[filter:authtoken]
-paste.filter_factory = keystone.middleware.auth_token:filter_factory
+paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 0ad3cfc77..18cc91e12 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -636,7 +636,7 @@ class ComputeManager(manager.SchedulerDependentManager):
task_state = task_states.SCHEDULING
rescheduled = self._reschedule(context, request_spec,
- instance['uuid'], filter_properties,
+ filter_properties, instance['uuid'],
self.scheduler_rpcapi.run_instance, method_args,
task_state)
diff --git a/nova/virt/baremetal/__init__.py b/nova/conductor/__init__.py
index 520feecbd..036860dbf 100644
--- a/nova/virt/baremetal/__init__.py
+++ b/nova/conductor/__init__.py
@@ -1,6 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
+# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -14,5 +12,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-# NOTE(sdague) for more convenient compute_driver names
-from nova.virt.baremetal.driver import BareMetalDriver
+from nova.conductor import api as conductor_api
+import nova.config
+import nova.openstack.common.importutils
+
+
+def API(*args, **kwargs):
+ if nova.config.CONF.conductor.use_local:
+ api = conductor_api.LocalAPI
+ else:
+ api = conductor_api.API
+ return api(*args, **kwargs)
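Editor's note (not part of the diff): the API() factory above keeps callers agnostic about whether conductor work happens in-process or over RPC. A minimal sketch of how another nova service might use it; 'instance_uuid' is assumed to belong to an existing instance:

    from nova import conductor
    from nova import context

    ctxt = context.get_admin_context()
    conductor_api = conductor.API()  # LocalAPI when use_local=True, else the RPC client
    conductor_api.instance_update(ctxt, instance_uuid, task_state='spawning')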
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
new file mode 100644
index 000000000..acb412625
--- /dev/null
+++ b/nova/conductor/api.py
@@ -0,0 +1,61 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Handles all requests to the conductor service"""
+
+from nova.conductor import manager
+from nova.conductor import rpcapi
+from nova import config
+from nova.openstack.common import cfg
+
+conductor_opts = [
+ cfg.BoolOpt('use_local',
+ default=False,
+ help='Perform nova-conductor operations locally'),
+ cfg.StrOpt('topic',
+ default='conductor',
+ help='the topic conductor nodes listen on'),
+ cfg.StrOpt('manager',
+ default='nova.conductor.manager.ConductorManager',
+ help='full class name for the Manager for conductor'),
+]
+conductor_group = cfg.OptGroup(name='conductor',
+ title='Conductor Options')
+CONF = config.CONF
+CONF.register_group(conductor_group)
+CONF.register_opts(conductor_opts, conductor_group)
+
+
+class LocalAPI(object):
+ """A local version of the conductor API that does database updates
+ locally instead of via RPC"""
+
+ def __init__(self):
+ self._manager = manager.ConductorManager()
+
+ def instance_update(self, context, instance_uuid, **updates):
+ """Perform an instance update in the database"""
+ return self._manager.instance_update(context, instance_uuid, updates)
+
+
+class API(object):
+ """Conductor API that does updates via RPC to the ConductorManager"""
+
+ def __init__(self):
+ self.conductor_rpcapi = rpcapi.ConductorAPI()
+
+ def instance_update(self, context, instance_uuid, **updates):
+ """Perform an instance update in the database"""
+ return self.conductor_rpcapi.instance_update(context, instance_uuid,
+ updates)
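Editor's note (not part of the diff): both classes above expose the same keyword-argument interface; only the transport differs. A hedged sketch of the contrast, assuming an existing 'instance_uuid':

    from nova import context
    from nova.conductor import api as conductor_api

    ctxt = context.get_admin_context()

    local = conductor_api.LocalAPI()   # runs ConductorManager in-process
    local.instance_update(ctxt, instance_uuid, vm_state='stopped')

    remote = conductor_api.API()       # requires a running nova-conductor service
    remote.instance_update(ctxt, instance_uuid, vm_state='stopped')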
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
new file mode 100644
index 000000000..3ffe82645
--- /dev/null
+++ b/nova/conductor/manager.py
@@ -0,0 +1,51 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Handles database requests from other nova services"""
+
+from nova import manager
+from nova import notifications
+from nova.openstack.common import jsonutils
+from nova.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
+ 'power_state', 'access_ip_v4', 'access_ip_v6',
+ 'launched_at', 'terminated_at', 'host',
+ 'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb',
+ 'instance_type_id',
+ ]
+
+
+class ConductorManager(manager.SchedulerDependentManager):
+ """Mission: TBD"""
+
+ RPC_API_VERSION = '1.0'
+
+ def __init__(self, *args, **kwargs):
+ super(ConductorManager, self).__init__(service_name='conductor',
+ *args, **kwargs)
+
+ def instance_update(self, context, instance_uuid, updates):
+ for key in updates:
+ if key not in allowed_updates:
+ LOG.error(_("Instance update attempted for "
+ "'%(key)s' on %(instance_uuid)s") % locals())
+ raise KeyError("unexpected update keyword '%s'" % key)
+ old_ref, instance_ref = self.db.instance_update_and_get_original(
+ context, instance_uuid, updates)
+ notifications.send_update(context, old_ref, instance_ref)
+ return jsonutils.to_primitive(instance_ref)
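Editor's note (not part of the diff): the manager only accepts the whitelisted fields in allowed_updates and raises KeyError before touching the database for anything else. A minimal sketch, assuming an admin context and an existing instance UUID:

    from nova import context
    from nova.conductor import manager as conductor_manager

    ctxt = context.get_admin_context()
    mgr = conductor_manager.ConductorManager()

    mgr.instance_update(ctxt, instance_uuid, {'vm_state': 'stopped'})  # whitelisted key
    mgr.instance_update(ctxt, instance_uuid, {'hostname': 'bad'})      # raises KeyError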
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
new file mode 100644
index 000000000..7a6508f12
--- /dev/null
+++ b/nova/conductor/rpcapi.py
@@ -0,0 +1,43 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Client side of the conductor RPC API"""
+
+from nova import config
+import nova.openstack.common.rpc.proxy
+
+CONF = config.CONF
+
+
+class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
+ """Client side of the conductor RPC API
+
+ API version history:
+
+ 1.0 - Initial version.
+ """
+
+ BASE_RPC_API_VERSION = '1.0'
+
+ def __init__(self):
+ super(ConductorAPI, self).__init__(
+ topic=CONF.conductor.topic,
+ default_version=self.BASE_RPC_API_VERSION)
+
+ def instance_update(self, context, instance_uuid, updates):
+ return self.call(context,
+ self.make_msg('instance_update',
+ instance_uuid=instance_uuid,
+ updates=updates),
+ topic=self.topic)
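Editor's note (not part of the diff): later conductor calls would follow the same pattern as instance_update above, wrapping arguments with make_msg and bumping the RPC version when the interface changes. A hypothetical sketch of that pattern; 'instance_get' is an invented method name, not part of this change:

    from nova.conductor import rpcapi


    class ExampleConductorAPI(rpcapi.ConductorAPI):
        # Invented example method, shown only to illustrate the proxy pattern.
        def instance_get(self, context, instance_uuid):
            return self.call(context,
                             self.make_msg('instance_get',
                                           instance_uuid=instance_uuid),
                             topic=self.topic)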
diff --git a/nova/db/api.py b/nova/db/api.py
index 26463cd01..5b401392a 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -937,13 +937,6 @@ def quota_class_update(context, class_name, resource, limit):
###################
-def quota_usage_create(context, project_id, resource, in_use, reserved,
- until_refresh):
- """Create a quota usage for the given project and resource."""
- return IMPL.quota_usage_create(context, project_id, resource,
- in_use, reserved, until_refresh)
-
-
def quota_usage_get(context, project_id, resource):
"""Retrieve a quota usage or raise if it does not exist."""
return IMPL.quota_usage_get(context, project_id, resource)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 9ab47a042..34fbec6d3 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -2493,13 +2493,6 @@ def quota_usage_get_all_by_project(context, project_id):
@require_admin_context
-def quota_usage_create(context, project_id, resource, in_use, reserved,
- until_refresh):
- return _quota_usage_create(context, project_id, resource, in_use,
- reserved, until_refresh)
-
-
-@require_admin_context
def _quota_usage_create(context, project_id, resource, in_use, reserved,
until_refresh, session=None):
quota_usage_ref = models.QuotaUsage()
diff --git a/nova/scheduler/filters/image_props_filter.py b/nova/scheduler/filters/image_props_filter.py
index f9ef693b3..a545ce9ff 100644
--- a/nova/scheduler/filters/image_props_filter.py
+++ b/nova/scheduler/filters/image_props_filter.py
@@ -26,7 +26,7 @@ class ImagePropertiesFilter(filters.BaseHostFilter):
"""Filter compute nodes that satisfy instance image properties.
The ImagePropertiesFilter filters compute nodes that satisfy
- any architecture, hpervisor type, or virtual machine mode properties
+ any architecture, hypervisor type, or virtual machine mode properties
specified on the instance's image properties. Image properties are
contained in the image dictionary in the request_spec.
"""
diff --git a/nova/tests/baremetal/test_proxy_bare_metal.py b/nova/tests/baremetal/test_proxy_bare_metal.py
deleted file mode 100644
index 8d6a3c261..000000000
--- a/nova/tests/baremetal/test_proxy_bare_metal.py
+++ /dev/null
@@ -1,265 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 University of Southern California
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import __builtin__
-
-import functools
-import mox
-import StringIO
-
-from nova.compute import power_state
-from nova import config
-from nova import exception
-from nova.openstack.common import jsonutils
-from nova import test
-from nova.tests import fake_utils
-
-from nova.virt.baremetal import dom
-from nova.virt.baremetal import driver
-
-# Same fake_domains is used by different classes,
-# but different fake_file is used by different classes for unit test.
-fake_domains = [{'status': 1, 'name': 'instance-00000001',
- 'memory_kb': 16777216, 'kernel_id': '1896115634',
- 'ramdisk_id': '', 'image_id': '1552326678',
- 'vcpus': 1, 'node_id': 6,
- 'mac_address': '02:16:3e:01:4e:c9',
- 'ip_address': '10.5.1.2'}]
-
-
-class DomainReadWriteTestCase(test.TestCase):
-
- def setUp(self):
- super(DomainReadWriteTestCase, self).setUp()
- self.flags(baremetal_driver='fake')
-
- def test_read_domain_with_empty_list(self):
- """Read a file that contains no domains"""
-
- self.mox.StubOutWithMock(__builtin__, 'open')
- fake_file = StringIO.StringIO('[]')
- open('/tftpboot/test_fake_dom_file', 'r').AndReturn(fake_file)
-
- self.mox.ReplayAll()
-
- domains = dom.read_domains('/tftpboot/test_fake_dom_file')
-
- self.assertEqual(domains, [])
-
- def test_read_domain(self):
- """Read a file that contains at least one domain"""
- fake_file = StringIO.StringIO('''[{"status": 1,
- "image_id": "1552326678", "vcpus": 1, "node_id": 6,
- "name": "instance-00000001", "memory_kb": 16777216,
- "mac_address": "02:16:3e:01:4e:c9", "kernel_id": "1896115634",
- "ramdisk_id": "", "ip_address": "10.5.1.2"}]''')
-
- self.mox.StubOutWithMock(__builtin__, 'open')
- open('/tftpboot/test_fake_dom_file', 'r').AndReturn(fake_file)
-
- self.mox.ReplayAll()
-
- domains = dom.read_domains('/tftpboot/test_fake_dom_file')
-
- self.assertEqual(domains, fake_domains)
-
- def test_read_no_file(self):
- """Try to read when the file does not exist
-
- This should through and IO exception"""
-
- self.mox.StubOutWithMock(__builtin__, 'open')
- open('/tftpboot/test_fake_dom_file',
- 'r').AndRaise(IOError(2, 'No such file or directory',
- '/tftpboot/test_fake_dom_file'))
-
- self.mox.ReplayAll()
-
- self.assertRaises(exception.NotFound, dom.read_domains,
- '/tftpboot/test_fake_dom_file')
-
- def assertJSONEquals(self, x, y):
- """Check if two json strings represent the equivalent Python object"""
- self.assertEquals(jsonutils.loads(x), jsonutils.loads(y))
- return jsonutils.loads(x) == jsonutils.loads(y)
-
- def test_write_domain(self):
- """Write the domain to file"""
- self.mox.StubOutWithMock(__builtin__, 'open')
- mock_file = self.mox.CreateMock(file)
- expected_json = '''[{"status": 1,
- "image_id": "1552326678", "vcpus": 1, "node_id": 6,
- "name": "instance-00000001", "memory_kb": 16777216,
- "mac_address": "02:16:3e:01:4e:c9", "kernel_id": "1896115634",
- "ramdisk_id": "", "ip_address": "10.5.1.2"}]'''
- open('/tftpboot/test_fake_dom_file', 'w').AndReturn(mock_file)
-
- # Check if the argument to file.write() represents the same
- # Python object as expected_json
- # We can't do an exact string comparison
- # because of ordering and whitespace
- mock_file.write(mox.Func(functools.partial(self.assertJSONEquals,
- expected_json)))
- mock_file.close()
-
- self.mox.ReplayAll()
-
- dom.write_domains('/tftpboot/test_fake_dom_file', fake_domains)
-
-
-class BareMetalDomTestCase(test.TestCase):
-
- def setUp(self):
- super(BareMetalDomTestCase, self).setUp()
- self.flags(baremetal_driver='fake')
- # Stub out utils.execute
- fake_utils.stub_out_utils_execute(self.stubs)
-
- def tearDown(self):
- super(BareMetalDomTestCase, self).tearDown()
-
- # Reset the singleton state
- dom.BareMetalDom._instance = None
- dom.BareMetalDom._is_init = False
-
- def test_read_domain_only_once(self):
- """Confirm that the domain is read from a file only once,
- even if the object is instantiated multiple times"""
- self.mox.StubOutWithMock(dom, 'read_domains')
- self.mox.StubOutWithMock(dom, 'write_domains')
-
- dom.read_domains('/tftpboot/test_fake_dom_file').AndReturn([])
- dom.write_domains('/tftpboot/test_fake_dom_file', [])
-
- self.mox.ReplayAll()
-
- # Instantiate multiple instances
- x = dom.BareMetalDom()
- x = dom.BareMetalDom()
- x = dom.BareMetalDom()
-
- def test_init_no_domains(self):
-
- # Create the mock objects
- self.mox.StubOutWithMock(dom, 'read_domains')
- self.mox.StubOutWithMock(dom, 'write_domains')
-
- dom.read_domains('/tftpboot/test_fake_dom_file').AndReturn([])
- dom.write_domains('/tftpboot/test_fake_dom_file', [])
-
- self.mox.ReplayAll()
-
- # Code under test
- bmdom = dom.BareMetalDom()
-
- # Expectd values
- self.assertEqual(bmdom.fake_dom_nums, 0)
-
- def test_init_remove_non_running_domain(self):
- """Check to see that all entries in the domain list are removed
- except for the one that is in the running state"""
-
- domains = [dict(node_id=1, name='i-00000001',
- status=power_state.NOSTATE),
- dict(node_id=2, name='i-00000002', status=power_state.RUNNING),
- dict(node_id=3, name='i-00000003', status=power_state.PAUSED),
- dict(node_id=5, name='i-00000004', status=power_state.SHUTDOWN),
- dict(node_id=7, name='i-00000005', status=power_state.CRASHED),
- dict(node_id=8, name='i-00000006', status=power_state.SUSPENDED),
- dict(node_id=9, name='i-00000007', status=power_state.NOSTATE)]
-
- # Create the mock objects
- self.mox.StubOutWithMock(dom, 'read_domains')
- self.mox.StubOutWithMock(dom, 'write_domains')
- dom.read_domains('/tftpboot/test_fake_dom_file').AndReturn(domains)
- dom.write_domains('/tftpboot/test_fake_dom_file', domains)
-
- self.mox.ReplayAll()
-
- # Code under test
- bmdom = dom.BareMetalDom()
-
- self.assertEqual(bmdom.domains, [{'node_id': 2,
- 'name': 'i-00000002',
- 'status': power_state.RUNNING}])
- self.assertEqual(bmdom.fake_dom_nums, 1)
-
- def test_find_domain(self):
- domain = {'status': 1, 'name': 'instance-00000001',
- 'memory_kb': 16777216, 'kernel_id': '1896115634',
- 'ramdisk_id': '', 'image_id': '1552326678',
- 'vcpus': 1, 'node_id': 6,
- 'mac_address': '02:16:3e:01:4e:c9',
- 'ip_address': '10.5.1.2'}
-
- # Create the mock objects
- self.mox.StubOutWithMock(dom, 'read_domains')
- self.mox.StubOutWithMock(dom, 'write_domains')
-
- # Expected calls
- dom.read_domains('/tftpboot/'
- 'test_fake_dom_file').AndReturn(fake_domains)
- dom.write_domains('/tftpboot/test_fake_dom_file', fake_domains)
-
- self.mox.ReplayAll()
-
- # Code under test
- bmdom = dom.BareMetalDom()
-
- # Expected values
- self.assertEquals(bmdom.find_domain('instance-00000001'), domain)
-
-
-class BareMetalTestCase(test.TestCase):
-
- test_ip = '10.11.12.13'
- test_instance = {'memory_kb': '1024000',
- 'basepath': '/some/path',
- 'bridge_name': 'br100',
- 'mac_address': '02:12:34:46:56:67',
- 'vcpus': 2,
- 'project_id': 'fake',
- 'bridge': 'br101',
- 'image_ref': '123456',
- 'instance_type_id': '5'} # m1.small
-
- def setUp(self):
- super(BareMetalTestCase, self).setUp()
- self.flags(baremetal_driver='fake')
- fake_utils.stub_out_utils_execute(self.stubs)
-
- def test_get_info(self):
- # Create the mock objects
- self.mox.StubOutWithMock(dom, 'read_domains')
- self.mox.StubOutWithMock(dom, 'write_domains')
-
- # Expected calls
- dom.read_domains('/tftpboot/'
- 'test_fake_dom_file').AndReturn(fake_domains)
- dom.write_domains('/tftpboot/test_fake_dom_file', fake_domains)
-
- self.mox.ReplayAll()
-
- # Code under test
- conn = driver.BareMetalDriver(None, True)
- # TODO(mikalstill): this is not a very good fake instance
- info = conn.get_info({'name': 'instance-00000001'})
-
- # Expected values
- self.assertEquals(info['mem'], 16777216)
- self.assertEquals(info['state'], 1)
- self.assertEquals(info['num_cpu'], 1)
- self.assertEquals(info['cpu_time'], 100)
- self.assertEquals(info['max_mem'], 16777216)
diff --git a/nova/tests/baremetal/test_tilera.py b/nova/tests/baremetal/test_tilera.py
deleted file mode 100644
index b86e6c9c1..000000000
--- a/nova/tests/baremetal/test_tilera.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import __builtin__
-import StringIO
-
-from nova import test
-from nova.virt.baremetal import tilera
-
-
-class TileraBareMetalNodesTestCase(test.TestCase):
-
- def setUp(self):
- super(TileraBareMetalNodesTestCase, self).setUp()
- self.board_info = "\n".join([
-'# board_id ip_address mac_address 00:1A:CA:00:57:90 '
-'00:1A:CA:00:58:98 00:1A:CA:00:58:50',
-'6 10.0.2.7 00:1A:CA:00:58:5C 10 16218 917 476 1 tilera_hv 1 '
-'{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64",'
- '"features":["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh",'
- '"700MHz-866MHz","4DDR2","2XAUIMAC/PHY","2GbEMAC"],'
- '"topology":{"cores":"64"}}',
-'7 10.0.2.8 00:1A:CA:00:58:A4 10 16218 917 476 1 tilera_hv 1 '
-'{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64",'
- '"features":["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh",'
- '"700MHz-866MHz","4DDR2","2XAUIMAC/PHY","2GbEMAC"],'
- '"topology":{"cores":"64"}}',
-'8 10.0.2.9 00:1A:CA:00:58:1A 10 16218 917 476 1 tilera_hv 1 '
-'{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64",'
- '"features":["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh",'
- '"700MHz-866MHz","4DDR2","2XAUIMAC/PHY","2GbEMAC"],'
- '"topology":{"cores":"64"}}',
-'9 10.0.2.10 00:1A:CA:00:58:38 10 16385 1000 0 0 tilera_hv 1 '
-'{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64",'
- '"features":["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh",'
- '"700MHz-866MHz","4DDR2","2XAUIMAC/PHY","2GbEMAC"],'
- '"topology":{"cores":"64"}}'])
-
- def tearDown(self):
- super(TileraBareMetalNodesTestCase, self).tearDown()
-
- # Reset the singleton state
- tilera.BareMetalNodes._instance = None
- tilera.BareMetalNodes._is_init = False
-
- def test_singleton(self):
- """Confirm that the object acts like a singleton.
-
- In this case, we check that it only loads the config file once,
- even though it has been instantiated multiple times"""
-
- self.mox.StubOutWithMock(__builtin__, 'open')
-
- open("/tftpboot/tilera_boards",
- "r").AndReturn(StringIO.StringIO(self.board_info))
-
- self.mox.ReplayAll()
-
- nodes = tilera.BareMetalNodes("/tftpboot/tilera_boards")
- nodes = tilera.BareMetalNodes("/tftpboot/tilera_boards")
-
- def test_get_hw_info(self):
- self.mox.StubOutWithMock(__builtin__, 'open')
-
- open("/tftpboot/tilera_boards",
- "r").AndReturn(StringIO.StringIO(self.board_info))
-
- self.mox.ReplayAll()
- nodes = tilera.BareMetalNodes()
- self.assertEqual(nodes.get_hw_info('vcpus'), 10)
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 5598da0db..662a90be4 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -5862,8 +5862,8 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
self.compute._deallocate_network(self.context,
self.instance)
- self.compute._reschedule(self.context, None, instance_uuid,
- {}, self.compute.scheduler_rpcapi.run_instance,
+ self.compute._reschedule(self.context, None, {}, instance_uuid,
+ self.compute.scheduler_rpcapi.run_instance,
method_args, task_states.SCHEDULING).AndReturn(True)
self.compute._log_original_error(exc_info, instance_uuid)
diff --git a/nova/tests/baremetal/__init__.py b/nova/tests/conductor/__init__.py
index e69de29bb..e69de29bb 100644
--- a/nova/tests/baremetal/__init__.py
+++ b/nova/tests/conductor/__init__.py
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
new file mode 100644
index 000000000..fbf7d0325
--- /dev/null
+++ b/nova/tests/conductor/test_conductor.py
@@ -0,0 +1,133 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the conductor service"""
+
+from nova.compute import instance_types
+from nova.compute import vm_states
+from nova import conductor
+from nova.conductor import api as conductor_api
+from nova.conductor import manager as conductor_manager
+from nova.conductor import rpcapi as conductor_rpcapi
+from nova import context
+from nova import db
+from nova import notifications
+from nova import test
+
+
+FAKE_IMAGE_REF = 'fake-image-ref'
+
+
+class BaseTestCase(test.TestCase):
+ def setUp(self):
+ super(BaseTestCase, self).setUp()
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id)
+
+ def _create_fake_instance(self, params=None, type_name='m1.tiny'):
+ if not params:
+ params = {}
+
+ inst = {}
+ inst['vm_state'] = vm_states.ACTIVE
+ inst['image_ref'] = FAKE_IMAGE_REF
+ inst['reservation_id'] = 'r-fakeres'
+ inst['launch_time'] = '10'
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
+ inst['host'] = 'fake_host'
+ type_id = instance_types.get_instance_type_by_name(type_name)['id']
+ inst['instance_type_id'] = type_id
+ inst['ami_launch_index'] = 0
+ inst['memory_mb'] = 0
+ inst['vcpus'] = 0
+ inst['root_gb'] = 0
+ inst['ephemeral_gb'] = 0
+ inst['architecture'] = 'x86_64'
+ inst['os_type'] = 'Linux'
+ inst.update(params)
+ return db.instance_create(self.context, inst)
+
+
+class ConductorTestCase(BaseTestCase):
+ """Conductor Manager Tests"""
+ def setUp(self):
+ super(ConductorTestCase, self).setUp()
+ self.conductor = conductor_manager.ConductorManager()
+ self.db = None
+
+ def _do_update(self, instance_uuid, **updates):
+ return self.conductor.instance_update(self.context, instance_uuid,
+ updates)
+
+ def test_instance_update(self):
+ instance = self._create_fake_instance()
+ new_inst = self._do_update(instance['uuid'],
+ vm_state=vm_states.STOPPED)
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.assertEqual(instance['vm_state'], vm_states.STOPPED)
+ self.assertEqual(new_inst['vm_state'], instance['vm_state'])
+
+ def test_instance_update_invalid_key(self):
+ # NOTE(danms): the real DB API call ignores invalid keys
+ if self.db == None:
+ self.assertRaises(KeyError,
+ self._do_update, 'any-uuid', foobar=1)
+
+
+class ConductorRPCAPITestCase(ConductorTestCase):
+ """Conductor RPC API Tests"""
+ def setUp(self):
+ super(ConductorRPCAPITestCase, self).setUp()
+ self.conductor_service = self.start_service(
+ 'conductor', manager='nova.conductor.manager.ConductorManager')
+ self.conductor = conductor_rpcapi.ConductorAPI()
+
+
+class ConductorLocalAPITestCase(ConductorTestCase):
+ """Conductor LocalAPI Tests"""
+ def setUp(self):
+ super(ConductorLocalAPITestCase, self).setUp()
+ self.conductor = conductor_api.LocalAPI()
+ self.db = db
+
+ def _do_update(self, instance_uuid, **updates):
+ # NOTE(danms): the public API takes actual keyword arguments,
+ # so override the base class here to make the call correctly
+ return self.conductor.instance_update(self.context, instance_uuid,
+ **updates)
+
+
+class ConductorAPITestCase(ConductorLocalAPITestCase):
+ """Conductor API Tests"""
+ def setUp(self):
+ super(ConductorAPITestCase, self).setUp()
+ self.conductor_service = self.start_service(
+ 'conductor', manager='nova.conductor.manager.ConductorManager')
+ self.conductor = conductor_api.API()
+ self.db = None
+
+
+class ConductorImportTest(test.TestCase):
+ def test_import_conductor_local(self):
+ self.flags(use_local=True, group='conductor')
+ self.assertTrue(isinstance(conductor.API(),
+ conductor_api.LocalAPI))
+
+ def test_import_conductor_rpc(self):
+ self.flags(use_local=False, group='conductor')
+ self.assertTrue(isinstance(conductor.API(),
+ conductor_api.API))
diff --git a/nova/virt/baremetal/dom.py b/nova/virt/baremetal/dom.py
deleted file mode 100644
index 48a1eaeeb..000000000
--- a/nova/virt/baremetal/dom.py
+++ /dev/null
@@ -1,264 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.compute import power_state
-from nova import config
-from nova import exception
-from nova.openstack.common import jsonutils
-from nova.openstack.common import log as logging
-from nova.virt.baremetal import nodes
-
-LOG = logging.getLogger(__name__)
-
-
-def read_domains(fname):
- try:
- f = open(fname, 'r')
- json = f.read()
- f.close()
- domains = jsonutils.loads(json)
- return domains
- except IOError:
- raise exception.NotFound()
-
-
-def write_domains(fname, domains):
- json = jsonutils.dumps(domains)
- f = open(fname, 'w')
- f.write(json)
- f.close()
-
-
-class BareMetalDom(object):
- """
- BareMetalDom class handles fake domain for bare metal back ends.
-
- This implements the singleton pattern.
- """
-
- _instance = None
- _is_init = False
-
- def __new__(cls, *args, **kwargs):
- """
- Returns the BareMetalDom singleton.
- """
- if not cls._instance or ('new' in kwargs and kwargs['new']):
- cls._instance = super(BareMetalDom, cls).__new__(cls)
- return cls._instance
-
- def __init__(self,
- fake_dom_file="/tftpboot/test_fake_dom_file"):
- """
- Only call __init__ the first time object is instantiated.
-
- Sets and Opens domain file: /tftpboot/test_fake_dom_file. Even though
- nova-compute service is rebooted, this file should retain the
- existing domains.
- """
- if self._is_init:
- return
- self._is_init = True
-
- self.fake_dom_file = fake_dom_file
- self.domains = []
- self.fake_dom_nums = 0
- self.baremetal_nodes = nodes.get_baremetal_nodes()
-
- self._read_domain_from_file()
-
- def _read_domain_from_file(self):
- """
- Reads the domains from a file.
- """
- try:
- self.domains = read_domains(self.fake_dom_file)
- except IOError:
- dom = []
- LOG.debug(_("No domains exist."))
- return
- msg = _("============= initial domains =========== : %s")
- LOG.debug(msg % (self.domains))
- for dom in self.domains[:]:
- if dom['status'] == power_state.BUILDING:
- LOG.debug(_("Building domain: to be removed"))
- self.destroy_domain(dom['name'])
- continue
- elif dom['status'] != power_state.RUNNING:
- LOG.debug(_("Not running domain: remove"))
- self.domains.remove(dom)
- continue
- res = self.baremetal_nodes.set_status(dom['node_id'],
- dom['status'])
- if res > 0:
- self.fake_dom_nums = self.fake_dom_nums + 1
- else:
- LOG.debug(_("domain running on an unknown node: discarded"))
- self.domains.remove(dom)
- continue
-
- LOG.debug(self.domains)
- self.store_domain()
-
- def reboot_domain(self, name):
- """
- Finds domain and deactivates (power down) bare-metal node.
-
- Activates the node again. In case of fail,
- destroys the domain from domains list.
- """
- fd = self.find_domain(name)
- if fd == []:
- msg = _("No such domain (%s)")
- raise exception.NotFound(msg % name)
- node_ip = self.baremetal_nodes.get_ip_by_id(fd['node_id'])
-
- try:
- self.baremetal_nodes.deactivate_node(fd['node_id'])
- except Exception:
- msg = _("Failed power down Bare-metal node %s")
- raise exception.NotFound(msg % fd['node_id'])
- self.change_domain_state(name, power_state.BUILDING)
- try:
- state = self.baremetal_nodes.activate_node(fd['node_id'],
- node_ip, name, fd['mac_address'], fd['ip_address'])
- self.change_domain_state(name, state)
- return state
- except Exception:
- LOG.debug(_("deactivate -> activate fails"))
- self.destroy_domain(name)
- raise
-
- def destroy_domain(self, name):
- """
- Removes domain from domains list and deactivates node.
- """
- fd = self.find_domain(name)
- if fd == []:
- LOG.debug(_("destroy_domain: no such domain"))
- msg = _("No such domain %s")
- raise exception.NotFound(msg % name)
-
- try:
- self.baremetal_nodes.deactivate_node(fd['node_id'])
-
- self.domains.remove(fd)
- msg = _("Domains: %s")
- LOG.debug(msg % (self.domains))
- self.store_domain()
- msg = _("After storing domains: %s")
- LOG.debug(msg % (self.domains))
- except Exception:
- LOG.debug(_("deactivation/removing domain failed"))
- raise
-
- def create_domain(self, xml_dict, bpath):
- """
- Adds a domain to domains list and activates an idle bare-metal node.
- """
- LOG.debug(_("===== Domain is being created ====="))
- fd = self.find_domain(xml_dict['name'])
- if fd != []:
- msg = _("Same domain name already exists")
- raise exception.NotFound(msg)
- LOG.debug(_("create_domain: before get_idle_node"))
-
- node_id = self.baremetal_nodes.get_idle_node()
- node_ip = self.baremetal_nodes.get_ip_by_id(node_id)
-
- new_dom = {'node_id': node_id,
- 'name': xml_dict['name'],
- 'memory_kb': xml_dict['memory_kb'],
- 'vcpus': xml_dict['vcpus'],
- 'mac_address': xml_dict['mac_address'],
- 'user_data': xml_dict['user_data'],
- 'ip_address': xml_dict['ip_address'],
- 'image_id': xml_dict['image_id'],
- 'kernel_id': xml_dict['kernel_id'],
- 'ramdisk_id': xml_dict['ramdisk_id'],
- 'status': power_state.BUILDING}
- self.domains.append(new_dom)
- msg = _("Created new domain: %s")
- LOG.debug(msg % (new_dom))
- self.change_domain_state(new_dom['name'], power_state.BUILDING)
-
- self.baremetal_nodes.set_image(bpath, node_id)
-
- state = power_state.NOSTATE
- try:
- state = self.baremetal_nodes.activate_node(node_id,
- node_ip, new_dom['name'], new_dom['mac_address'],
- new_dom['ip_address'], new_dom['user_data'])
- self.change_domain_state(new_dom['name'], state)
- except Exception:
- self.domains.remove(new_dom)
- self.baremetal_nodes.free_node(node_id)
- LOG.debug(_("Failed to boot Bare-metal node %s"), node_id)
- return state
-
- def change_domain_state(self, name, state):
- """
- Changes domain state by the given state and updates domain file.
- """
- l = self.find_domain(name)
- if l == []:
- msg = _("No such domain exists")
- raise exception.NotFound(msg)
- i = self.domains.index(l)
- self.domains[i]['status'] = state
- LOG.debug(_("change_domain_state: to new state %s"), str(state))
- self.store_domain()
-
- def store_domain(self):
- """
- Stores fake domains to the file.
- """
- msg = _("Stored fake domains to the file: %s")
- LOG.debug(msg % (self.domains))
- write_domains(self.fake_dom_file, self.domains)
-
- def find_domain(self, name):
- """
- Finds domain by the given name and returns the domain.
- """
- for item in self.domains:
- if item['name'] == name:
- return item
- LOG.debug(_("domain does not exist"))
- return []
-
- def list_domains(self):
- """
- Returns the instance name from domains list.
- """
- if self.domains == []:
- return []
- return [x['name'] for x in self.domains]
-
- def get_domain_info(self, instance_name):
- """
- Finds domain by the given instance_name and returns informaiton.
-
- For example, status, memory_kb, vcpus, etc.
- """
- domain = self.find_domain(instance_name)
- if domain != []:
- return [domain['status'], domain['memory_kb'],
- domain['memory_kb'],
- domain['vcpus'],
- 100]
- else:
- return [power_state.NOSTATE, '', '', '', '']
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
deleted file mode 100644
index 4d9874e7e..000000000
--- a/nova/virt/baremetal/driver.py
+++ /dev/null
@@ -1,742 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-"""
-A connection to a hypervisor through baremetal.
-
-**Related Flags**
-
-:baremetal_type: Baremetal domain type.
-:baremetal_uri: Override for the default baremetal URI (baremetal_type).
-:rescue_image_id: Rescue ami image (default: ami-rescue).
-:rescue_kernel_id: Rescue aki image (default: aki-rescue).
-:rescue_ramdisk_id: Rescue ari image (default: ari-rescue).
-:injected_network_template: Template file for injected network
-:allow_project_net_traffic: Whether to allow in project network traffic
-
-"""
-
-import hashlib
-import os
-import shutil
-
-from nova.compute import instance_types
-from nova.compute import power_state
-from nova.compute import vm_states
-from nova import config
-from nova import context as nova_context
-from nova import exception
-from nova import notifications
-from nova.openstack.common import cfg
-from nova.openstack.common import fileutils
-from nova.openstack.common import lockutils
-from nova.openstack.common import log as logging
-from nova import utils
-from nova.virt.baremetal import dom
-from nova.virt.baremetal import nodes
-from nova.virt.disk import api as disk
-from nova.virt import driver
-from nova.virt.libvirt import utils as libvirt_utils
-
-
-Template = None
-
-LOG = logging.getLogger(__name__)
-
-CONF = config.CONF
-
-baremetal_opts = [
- cfg.StrOpt('baremetal_type',
- default='baremetal',
- help='baremetal domain type'),
- ]
-
-CONF.register_opts(baremetal_opts)
-
-
-def _late_load_cheetah():
- global Template
- if Template is None:
- t = __import__('Cheetah.Template', globals(), locals(),
- ['Template'], -1)
- Template = t.Template
-
-
-class BareMetalDriver(driver.ComputeDriver):
-
- def __init__(self, virtapi, read_only):
- _late_load_cheetah()
- # Note that baremetal doesn't have a read-only connection
- # mode, so the read_only parameter is ignored
- super(BareMetalDriver, self).__init__(virtapi)
- self.baremetal_nodes = nodes.get_baremetal_nodes()
- self._wrapped_conn = None
- self._host_state = None
-
- @property
- def HostState(self):
- if not self._host_state:
- self._host_state = HostState(self)
- return self._host_state
-
- def init_host(self, host):
- pass
-
- def _get_connection(self):
- self._wrapped_conn = dom.BareMetalDom()
- return self._wrapped_conn
- _conn = property(_get_connection)
-
- def get_pty_for_instance(self, instance_name):
- raise NotImplementedError()
-
- def list_instances(self):
- return self._conn.list_domains()
-
- def destroy(self, instance, network_info, block_device_info=None,
- cleanup=True):
- while True:
- try:
- self._conn.destroy_domain(instance['name'])
- break
- except Exception as ex:
- LOG.debug(_("Error encountered when destroying instance "
- "'%(name)s': %(ex)s") %
- {"name": instance["name"], "ex": ex},
- instance=instance)
- break
-
- if cleanup:
- self._cleanup(instance)
-
- return True
-
- def _cleanup(self, instance):
- target = os.path.join(CONF.instances_path, instance['name'])
- instance_name = instance['name']
- LOG.info(_('instance %(instance_name)s: deleting instance files'
- ' %(target)s') % locals(), instance=instance)
- if CONF.baremetal_type == 'lxc':
- disk.destroy_container(self.container)
- if os.path.exists(target):
- shutil.rmtree(target)
-
- @exception.wrap_exception
- def attach_volume(self, instance_name, device_path, mountpoint):
- raise exception.Invalid("attach_volume not supported for baremetal.")
-
- @exception.wrap_exception
- def detach_volume(self, instance_name, mountpoint):
- raise exception.Invalid("detach_volume not supported for baremetal.")
-
- @exception.wrap_exception
- def snapshot(self, instance, image_id):
- raise exception.Invalid("snapshot not supported for baremetal.")
-
- @exception.wrap_exception
- def reboot(self, instance):
- timer = utils.LoopingCall(f=None)
-
- def _wait_for_reboot():
- try:
- state = self._conn.reboot_domain(instance['name'])
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: rebooted'), instance['name'],
- instance=instance)
- timer.stop()
- except Exception:
- LOG.exception(_('_wait_for_reboot failed'), instance=instance)
- timer.stop()
- timer.f = _wait_for_reboot
- return timer.start(interval=0.5).wait()
-
- @exception.wrap_exception
- def rescue(self, context, instance, network_info, rescue_password):
- """Loads a VM using rescue images.
-
- A rescue is normally performed when something goes wrong with the
- primary images and data needs to be corrected/recovered. Rescuing
- should not edit or over-ride the original image, only allow for
- data recovery.
-
- """
- self.destroy(instance, False)
-
- rescue_images = {'image_id': CONF.baremetal_rescue_image_id,
- 'kernel_id': CONF.baremetal_rescue_kernel_id,
- 'ramdisk_id': CONF.baremetal_rescue_ramdisk_id}
- self._create_image(instance, '.rescue', rescue_images,
- network_info=network_info)
-
- timer = utils.LoopingCall(f=None)
-
- def _wait_for_rescue():
- try:
- state = self._conn.reboot_domain(instance['name'])
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: rescued'), instance['name'],
- instance=instance)
- timer.stop()
- except Exception:
- LOG.exception(_('_wait_for_rescue failed'), instance=instance)
- timer.stop()
- timer.f = _wait_for_rescue
- return timer.start(interval=0.5).wait()
-
- @exception.wrap_exception
- def unrescue(self, instance, network_info):
- """Reboot the VM which is being rescued back into primary images.
-
- Because reboot destroys and re-creates instances, unresue should
- simply call reboot.
-
- """
- self.reboot(instance)
-
- def spawn(self, context, instance, image_meta, injected_files,
- admin_password, network_info, block_device_info=None):
- LOG.debug(_("<============= spawn of baremetal =============>"))
-
- def basepath(fname='', suffix=''):
- return os.path.join(CONF.instances_path,
- instance['name'],
- fname + suffix)
- bpath = basepath(suffix='')
- timer = utils.LoopingCall(f=None)
-
- xml_dict = self.to_xml_dict(instance, network_info)
- self._create_image(context, instance, xml_dict,
- network_info=network_info,
- block_device_info=block_device_info)
- LOG.debug(_("instance %s: is building"), instance['name'],
- instance=instance)
- LOG.debug(xml_dict, instance=instance)
-
- def _wait_for_boot():
- try:
- LOG.debug(_("Key is injected but instance is not running yet"),
- instance=instance)
- (old_ref, new_ref) = self.virtapi.instance_update(
- context, instance['uuid'],
- {'vm_state': vm_states.BUILDING})
- notifications.send_update(context, old_ref, new_ref)
-
- state = self._conn.create_domain(xml_dict, bpath)
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: booted'), instance['name'],
- instance=instance)
- (old_ref, new_ref) = self.virtapi.instance_update(
- context, instance['uuid'],
- {'vm_state': vm_states.ACTIVE})
- notifications.send_update(context, old_ref, new_ref)
-
- LOG.debug(_('~~~~~~ current state = %s ~~~~~~'), state,
- instance=instance)
- LOG.debug(_("instance %s spawned successfully"),
- instance['name'], instance=instance)
- else:
- LOG.debug(_('instance %s:not booted'), instance['name'],
- instance=instance)
- except Exception:
- LOG.exception(_("Baremetal assignment is overcommitted."),
- instance=instance)
- (old_ref, new_ref) = self.virtapi.instance_update(
- context, instance['uuid'],
- {'vm_state': vm_states.ERROR,
- 'power_state': power_state.FAILED})
- notifications.send_update(context, old_ref, new_ref)
-
- timer.stop()
- timer.f = _wait_for_boot
-
- return timer.start(interval=0.5).wait()
-
- def get_console_output(self, instance):
- console_log = os.path.join(CONF.instances_path, instance['name'],
- 'console.log')
-
- libvirt_utils.chown(console_log, os.getuid())
-
- fd = self._conn.find_domain(instance['name'])
-
- self.baremetal_nodes.get_console_output(console_log, fd['node_id'])
-
- fpath = console_log
-
- return libvirt_utils.load_file(fpath)
-
- @exception.wrap_exception
- def get_ajax_console(self, instance):
- raise NotImplementedError()
-
- @exception.wrap_exception
- def get_vnc_console(self, instance):
- raise NotImplementedError()
-
- @staticmethod
- def _cache_image(fetch_func, target, fname, cow=False, *args, **kwargs):
- """Wrapper for a method that creates an image that caches the image.
-
- This wrapper will save the image into a common store and create a
- copy for use by the hypervisor.
-
- The underlying method should specify a kwarg of target representing
- where the image will be saved.
-
- fname is used as the filename of the base image. The filename needs
- to be unique to a given image.
-
- If cow is True, it will make a CoW image instead of a copy.
- """
- if not os.path.exists(target):
- base_dir = os.path.join(CONF.instances_path, '_base')
- if not os.path.exists(base_dir):
- fileutils.ensure_tree(base_dir)
- base = os.path.join(base_dir, fname)
-
- @lockutils.synchronized(fname, 'nova-')
- def call_if_not_exists(base, fetch_func, *args, **kwargs):
- if not os.path.exists(base):
- fetch_func(target=base, *args, **kwargs)
-
- call_if_not_exists(base, fetch_func, *args, **kwargs)
-
- if cow:
- libvirt_utils.create_cow_image(base, target)
- else:
- libvirt_utils.copy_image(base, target)
-
- def _create_image(self, context, inst, xml, suffix='',
- disk_images=None, network_info=None,
- block_device_info=None):
- if not suffix:
- suffix = ''
-
- # syntactic nicety
- def basepath(fname='', suffix=suffix):
- return os.path.join(CONF.instances_path,
- inst['name'],
- fname + suffix)
-
- # ensure directories exist and are writable
- fileutils.ensure_tree(basepath(suffix=''))
- utils.execute('chmod', '0777', basepath(suffix=''))
-
- LOG.info(_('instance %s: Creating image'), inst['name'],
- instance=inst)
-
- if CONF.baremetal_type == 'lxc':
- container_dir = '%s/rootfs' % basepath(suffix='')
- fileutils.ensure_tree(container_dir)
-
- # NOTE(vish): No need add the suffix to console.log
- libvirt_utils.write_to_file(basepath('console.log', ''), '', 007)
-
- if not disk_images:
- disk_images = {'image_id': inst['image_ref'],
- 'kernel_id': inst['kernel_id'],
- 'ramdisk_id': inst['ramdisk_id']}
-
- if disk_images['kernel_id']:
- fname = disk_images['kernel_id']
- self._cache_image(fetch_func=libvirt_utils.fetch_image,
- context=context,
- target=basepath('kernel'),
- fname=fname,
- cow=False,
- image_id=disk_images['kernel_id'],
- user_id=inst['user_id'],
- project_id=inst['project_id'])
- if disk_images['ramdisk_id']:
- fname = disk_images['ramdisk_id']
- self._cache_image(fetch_func=libvirt_utils.fetch_image,
- context=context,
- target=basepath('ramdisk'),
- fname=fname,
- cow=False,
- image_id=disk_images['ramdisk_id'],
- user_id=inst['user_id'],
- project_id=inst['project_id'])
-
- root_fname = hashlib.sha1(str(disk_images['image_id'])).hexdigest()
- size = inst['root_gb'] * 1024 * 1024 * 1024
-
- inst_type_id = inst['instance_type_id']
- inst_type = instance_types.get_instance_type(inst_type_id)
- if inst_type['name'] == 'm1.tiny' or suffix == '.rescue':
- size = None
- root_fname += "_sm"
- else:
- root_fname += "_%d" % inst['root_gb']
-
- self._cache_image(fetch_func=libvirt_utils.fetch_image,
- context=context,
- target=basepath('root'),
- fname=root_fname,
- cow=False, # CONF.use_cow_images,
- image_id=disk_images['image_id'],
- user_id=inst['user_id'],
- project_id=inst['project_id'])
-
- # For now, we assume that if we're not using a kernel, we're using a
- # partitioned disk image where the target partition is the first
- # partition
- target_partition = None
- if not inst['kernel_id']:
- target_partition = "1"
-
- if CONF.baremetal_type == 'lxc':
- target_partition = None
-
- if inst['key_data']:
- key = str(inst['key_data'])
- else:
- key = None
- net = None
-
- nets = []
- ifc_template = open(CONF.injected_network_template).read()
- ifc_num = -1
- have_injected_networks = False
- admin_context = nova_context.get_admin_context()
- for (network_ref, mapping) in network_info:
- ifc_num += 1
-
- if not network_ref['injected']:
- continue
-
- have_injected_networks = True
- address = mapping['ips'][0]['ip']
- netmask = mapping['ips'][0]['netmask']
- address_v6 = None
- gateway_v6 = None
- netmask_v6 = None
- if CONF.use_ipv6:
- address_v6 = mapping['ip6s'][0]['ip']
- netmask_v6 = mapping['ip6s'][0]['netmask']
- gateway_v6 = mapping['gateway_v6']
- net_info = {'name': 'eth%d' % ifc_num,
- 'address': address,
- 'netmask': netmask,
- 'gateway': mapping['gateway'],
- 'broadcast': mapping['broadcast'],
- 'dns': ' '.join(mapping['dns']),
- 'address_v6': address_v6,
- 'gateway_v6': gateway_v6,
- 'netmask_v6': netmask_v6}
- nets.append(net_info)
-
- if have_injected_networks:
- net = str(Template(ifc_template,
- searchList=[{'interfaces': nets,
- 'use_ipv6': CONF.use_ipv6}]))
-
- metadata = inst.get('metadata')
- if any((key, net, metadata)):
- inst_name = inst['name']
-
- injection_path = basepath('root')
- img_id = inst['image_ref']
-
- for injection in ('metadata', 'key', 'net'):
- if locals()[injection]:
- LOG.info(_('instance %(inst_name)s: injecting '
- '%(injection)s into image %(img_id)s'),
- locals(), instance=inst)
- try:
- disk.inject_data(injection_path, key, net, metadata,
- partition=target_partition,
- use_cow=False) # CONF.use_cow_images
-
- except Exception as e:
- # This could be a windows image, or a vmdk format disk
- LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
- ' data into image %(img_id)s (%(e)s)') % locals(),
- instance=inst)
-
- def _prepare_xml_info(self, instance, network_info, rescue,
- block_device_info=None):
- # block_device_mapping = driver.block_device_info_get_mapping(
- # block_device_info)
- _map = 0
- for (_, mapping) in network_info:
- _map += 1
-
- nics = []
- # FIXME(vish): stick this in db
- inst_type_id = instance['instance_type_id']
- inst_type = instance_types.get_instance_type(inst_type_id)
-
- driver_type = 'raw'
-
- xml_info = {'type': CONF.baremetal_type,
- 'name': instance['name'],
- 'basepath': os.path.join(CONF.instances_path,
- instance['name']),
- 'memory_kb': inst_type['memory_mb'] * 1024,
- 'vcpus': inst_type['vcpus'],
- 'rescue': rescue,
- 'driver_type': driver_type,
- 'nics': nics,
- 'ip_address': mapping['ips'][0]['ip'],
- 'mac_address': mapping['mac'],
- 'user_data': instance['user_data'],
- 'image_id': instance['image_ref'],
- 'kernel_id': instance['kernel_id'],
- 'ramdisk_id': instance['ramdisk_id']}
-
- if not rescue:
- if instance['kernel_id']:
- xml_info['kernel'] = xml_info['basepath'] + "/kernel"
-
- if instance['ramdisk_id']:
- xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"
-
- xml_info['disk'] = xml_info['basepath'] + "/disk"
- return xml_info
-
- def to_xml_dict(self, instance, rescue=False, network_info=None):
- LOG.debug(_('instance %s: starting toXML method'), instance['name'],
- instance=instance)
- xml_info = self._prepare_xml_info(instance, rescue, network_info)
- LOG.debug(_('instance %s: finished toXML method'), instance['name'],
- instance=instance)
- return xml_info
-
- def get_info(self, instance):
- """Retrieve information from baremetal for a specific instance name.
-
- If a baremetal error is encountered during lookup, we might raise a
- NotFound exception or Error exception depending on how severe the
- baremetal error is.
-
- """
- _domain_info = self._conn.get_domain_info(instance['name'])
- state, max_mem, mem, num_cpu, cpu_time = _domain_info
- return {'state': state,
- 'max_mem': max_mem,
- 'mem': mem,
- 'num_cpu': num_cpu,
- 'cpu_time': cpu_time}
-
- def _create_new_domain(self, persistent=True, launch_flags=0):
- raise NotImplementedError()
-
- def get_diagnostics(self, instance_name):
- # diagnostics are not supported for baremetal
- raise NotImplementedError()
-
- def get_disks(self, instance_name):
- raise NotImplementedError()
-
- def get_interfaces(self, instance_name):
- raise NotImplementedError()
-
- def get_vcpu_total(self):
- """Get vcpu number of physical computer.
-
- :returns: the number of cpu core.
-
- """
-
- # On certain platforms, this will raise a NotImplementedError.
- try:
- return self.baremetal_nodes.get_hw_info('vcpus')
- except NotImplementedError:
- LOG.warn(_("Cannot get the number of cpu, because this "
- "function is not implemented for this platform. "
- "This error can be safely ignored for now."))
- return False
-
- def get_memory_mb_total(self):
- """Get the total memory size(MB) of physical computer.
-
- :returns: the total amount of memory(MB).
-
- """
- return self.baremetal_nodes.get_hw_info('memory_mb')
-
- def get_local_gb_total(self):
- """Get the total hdd size(GB) of physical computer.
-
- :returns:
- The total amount of HDD(GB).
- Note that this value shows a partition where
- NOVA-INST-DIR/instances mounts.
-
- """
- return self.baremetal_nodes.get_hw_info('local_gb')
-
- def get_vcpu_used(self):
-        """Get the number of vcpus in use on the physical computer.
-
-        :returns: the number of vcpus currently in use.
-
- """
- return len(self._conn.list_domains())
-
- def get_memory_mb_used(self):
-        """Get the used memory size(MB) of physical computer.
-
-        :returns: the total amount of memory(MB) currently in use.
-
- """
- return self.baremetal_nodes.get_hw_info('memory_mb_used')
-
- def get_local_gb_used(self):
-        """Get the used hdd size(GB) of physical computer.
-
- :returns:
- The total usage of HDD(GB).
- Note that this value shows a partition where
- NOVA-INST-DIR/instances mounts.
-
- """
- return self.baremetal_nodes.get_hw_info('local_gb_used')
-
- def get_hypervisor_type(self):
- """Get hypervisor type.
-
- :returns: hypervisor type (ex. qemu)
-
- """
- return self.baremetal_nodes.get_hw_info('hypervisor_type')
-
- def get_hypervisor_version(self):
- """Get hypervisor version.
-
- :returns: hypervisor version (ex. 12003)
-
- """
- return self.baremetal_nodes.get_hw_info('hypervisor_version')
-
- def get_cpu_info(self):
- """Get cpuinfo information.
-
- Obtains cpu feature from virConnect.getCapabilities,
- and returns as a json string.
-
- :return: see above description
-
- """
- return self.baremetal_nodes.get_hw_info('cpu_info')
-
- def block_stats(self, instance_name, disk):
- raise NotImplementedError()
-
- def interface_stats(self, instance_name, interface):
- raise NotImplementedError()
-
- def get_console_pool_info(self, console_type):
- #TODO(mdragon): console proxy should be implemented for baremetal,
- # in case someone wants to use it.
- # For now return fake data.
- return {'address': '127.0.0.1',
- 'username': 'fakeuser',
- 'password': 'fakepassword'}
-
- def refresh_security_group_rules(self, security_group_id):
- # Bare metal doesn't currently support security groups
- pass
-
- def refresh_security_group_members(self, security_group_id):
- # Bare metal doesn't currently support security groups
- pass
-
- def refresh_instance_security_rules(self, instance):
- # Bare metal doesn't currently support security groups
- pass
-
- def get_available_resource(self, nodename):
- """Updates compute manager resource info on ComputeNode table.
-
-        This method is called when nova-compute launches, and
- whenever admin executes "nova-manage service update_resource".
- """
-
- # Updating host information
- dic = {'vcpus': self.get_vcpu_total(),
- 'memory_mb': self.get_memory_mb_total(),
- 'local_gb': self.get_local_gb_total(),
- 'vcpus_used': self.get_vcpu_used(),
- 'memory_mb_used': self.get_memory_mb_used(),
- 'local_gb_used': self.get_local_gb_used(),
- 'hypervisor_type': self.get_hypervisor_type(),
- 'hypervisor_version': self.get_hypervisor_version(),
- 'hypervisor_hostname': CONF.host,
- 'cpu_info': self.get_cpu_info(),
- 'cpu_arch': CONF.cpu_arch}
-
- LOG.info(_('#### RLK: cpu_arch = %s ') % CONF.cpu_arch)
- return dic
-
- def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
- raise NotImplementedError()
-
- def live_migration(self, ctxt, instance_ref, dest,
- post_method, recover_method):
- raise NotImplementedError()
-
- def unfilter_instance(self, instance_ref):
- """See comments of same method in firewall_driver."""
- pass
-
- def get_host_stats(self, refresh=False):
- """Return the current state of the host. If 'refresh' is
- True, run the update first."""
- LOG.debug(_("Updating!"))
- return self.HostState.get_host_stats(refresh=refresh)
-
-
-class HostState(object):
-    """Manages information about the bare-metal host this compute
- node is running on.
- """
-
- def __init__(self, connection):
- super(HostState, self).__init__()
- self.connection = connection
- self._stats = {}
- self.update_status()
-
- def get_host_stats(self, refresh=False):
- """Return the current state of the host. If 'refresh' is
- True, run the update first.
- """
- if refresh:
- self.update_status()
- return self._stats
-
- def update_status(self):
- """
-        Retrieve status information for the host and cache it in self._stats.
- """
- LOG.debug(_("Updating host stats"))
- data = {}
- data["vcpus"] = self.connection.get_vcpu_total()
- data["vcpus_used"] = self.connection.get_vcpu_used()
- data["cpu_info"] = self.connection.get_cpu_info()
- data["cpu_arch"] = CONF.cpu_arch
- data["disk_total"] = self.connection.get_local_gb_total()
- data["disk_used"] = self.connection.get_local_gb_used()
- data["disk_available"] = data["disk_total"] - data["disk_used"]
- data["host_memory_total"] = self.connection.get_memory_mb_total()
- data["host_memory_free"] = (data["host_memory_total"] -
- self.connection.get_memory_mb_used())
- data["hypervisor_type"] = self.connection.get_hypervisor_type()
- data["hypervisor_version"] = self.connection.get_hypervisor_version()
- data["hypervisor_hostname"] = CONF.host
- self._stats = data
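
The HostState class deleted above follows a simple cache-and-refresh pattern:
the stats dict is built once at construction and only rebuilt when
get_host_stats(refresh=True) is called. A minimal sketch of the same pattern
in isolation (CachedStats and the lambda stats source are illustrative names,
not Nova code):

    class CachedStats(object):
        """Stand-in for the cache-and-refresh behaviour of HostState."""

        def __init__(self, source):
            self._source = source   # callable returning a fresh stats dict
            self._stats = {}
            self.update_status()

        def get_host_stats(self, refresh=False):
            # Serve the cached dict unless the caller asks for a refresh.
            if refresh:
                self.update_status()
            return self._stats

        def update_status(self):
            self._stats = dict(self._source())

    stats = CachedStats(lambda: {'vcpus': 4, 'memory_mb': 8192})
    print(stats.get_host_stats())               # cached result
    print(stats.get_host_stats(refresh=True))   # recomputed result
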
diff --git a/nova/virt/baremetal/fake.py b/nova/virt/baremetal/fake.py
deleted file mode 100644
index 635089366..000000000
--- a/nova/virt/baremetal/fake.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-
-def get_baremetal_nodes():
- return BareMetalNodes()
-
-
-class BareMetalNodes(object):
- """
-    This manages node information and implements a singleton.
-
-    BareMetalNodes handles machine architectures that are of interest to
-    technical computing users but have either poor or non-existent support
-    for virtualization.
- """
-
- def get_hw_info(self, field):
- """
- Returns hardware information of bare-metal node by the given field.
-
- Given field can be vcpus, memory_mb, local_gb, memory_mb_used,
- local_gb_used, hypervisor_type, hypervisor_version, and cpu_info.
- """
- return "fake"
-
- def set_status(self, node_id, status):
- """
- Sets status of the given node by the given status.
-
-        Returns True if the node is in the nodes list.
- """
- return True
-
- def get_status(self):
- """
- Gets status of the given node.
- """
- pass
-
- def get_idle_node(self):
- """
-        Gets an idle node, sets its status to 1 (RUNNING), and returns its ID.
- """
- return False
-
- def get_ip_by_id(self, id):
- """
- Returns default IP address of the given node.
- """
- return "127.0.0.1"
-
- def free_node(self, node_id):
- """
- Sets/frees status of the given node as 0 (IDLE).
- """
- return False
-
- def power_mgr(self, node_id, mode):
- """
- Changes power state of the given node.
-
- According to the mode (1-ON, 2-OFF, 3-REBOOT), power state can be
- changed. /tftpboot/pdu_mgr script handles power management of
- PDU (Power Distribution Unit).
- """
- pass
-
- def deactivate_node(self, node_id):
- """
-        Deactivates the given node by turning it off.
- """
- pass
-
- def network_set(self, node_ip, mac_address, ip_address):
- """
- Sets network configuration based on the given ip and mac address.
-
- User can access the bare-metal node using ssh.
- """
- pass
-
- def iptables_set(self, node_ip, user_data):
- """
- Sets security setting (iptables:port) if needed.
- """
- pass
-
- def check_activated(self, node_id, node_ip):
- """
- Checks whether the given node is activated or not.
- """
- pass
-
- def vmlinux_set(self, node_id, mode):
- """
- Sets kernel into default path (/tftpboot) if needed.
-
- From basepath to /tftpboot, kernel is set based on the given mode
- such as 0-NoSet, 1-SetVmlinux, or 9-RemoveVmlinux.
- """
- pass
-
- def sleep_mgr(self, time):
- """
-        Sleeps for the given time while waiting for the node to activate.
- """
- pass
-
- def ssh_set(self, node_ip):
- """
-        Sets up and runs sshd on the node.
- """
- pass
-
- def activate_node(self, node_id, node_ip, name, mac_address,
- ip_address):
- """
- Activates the given node using ID, IP, and MAC address.
- """
- pass
-
- def get_console_output(self, console_log):
- """
- Gets console output of the given node.
- """
- pass
-
- def get_image(self, bp):
- """
- Gets the bare-metal file system image into the instance path.
-
-        Nothing to do for tilera nodes: the actual image is used.
- """
- pass
-
- def set_image(self, bpath, node_id):
- """
- Sets the PXE bare-metal file system from the instance path.
-
-        This should be done after the ssh key is injected.
- """
- pass
diff --git a/nova/virt/baremetal/nodes.py b/nova/virt/baremetal/nodes.py
deleted file mode 100644
index aa817fdf9..000000000
--- a/nova/virt/baremetal/nodes.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from nova import config
-from nova import exception
-from nova.openstack.common import cfg
-from nova.virt.baremetal import fake
-from nova.virt.baremetal import tilera
-
-baremetal_opts = [
- cfg.StrOpt('baremetal_driver',
- default='tilera',
- help='Bare-metal driver runs on')
- ]
-
-CONF = config.CONF
-CONF.register_opts(baremetal_opts)
-
-
-def get_baremetal_nodes():
- d = CONF.baremetal_driver
- if d == 'tilera':
- return tilera.get_baremetal_nodes()
- elif d == 'fake':
- return fake.get_baremetal_nodes()
- else:
-        raise exception.NovaException(_("Unknown baremetal driver %s") % d)
diff --git a/nova/virt/baremetal/tilera.py b/nova/virt/baremetal/tilera.py
deleted file mode 100644
index e79bc6fb2..000000000
--- a/nova/virt/baremetal/tilera.py
+++ /dev/null
@@ -1,364 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Tilera back-end for bare-metal compute node provisioning
-
-The details of this implementation are specific to ISI's testbed. This code
-is provided here as an example of how to implement a backend.
-"""
-
-import base64
-import subprocess
-import time
-
-from nova.compute import power_state
-from nova import config
-from nova import exception
-from nova.openstack.common import cfg
-from nova.openstack.common import log as logging
-from nova import utils
-
-CONF = config.CONF
-
-tilera_opts = [
- cfg.StrOpt('tile_monitor',
- default='/usr/local/TileraMDE/bin/tile-monitor',
- help='Tilera command line program for Bare-metal driver')
- ]
-
-CONF.register_opts(tilera_opts)
-
-LOG = logging.getLogger(__name__)
-
-
-def get_baremetal_nodes():
- return BareMetalNodes()
-
-
-class BareMetalNodes(object):
- """
-    This manages node information and implements a singleton.
-
-    BareMetalNodes handles machine architectures that are of interest to
-    technical computing users but have either poor or non-existent support
-    for virtualization.
- """
-
- _instance = None
- _is_init = False
-
- def __new__(cls, *args, **kwargs):
- """
- Returns the BareMetalNodes singleton.
- """
- if not cls._instance or ('new' in kwargs and kwargs['new']):
- cls._instance = super(BareMetalNodes, cls).__new__(cls)
- return cls._instance
-
- def __init__(self, file_name="/tftpboot/tilera_boards"):
- """
-        Only initialize state the first time the object is instantiated.
-
- From the bare-metal node list file: /tftpboot/tilera_boards,
- reads each item of each node such as node ID, IP address,
- MAC address, vcpus, memory, hdd, hypervisor type/version, and cpu
- and appends each node information into nodes list.
- """
- if self._is_init:
- return
- self._is_init = True
-
- self.nodes = []
- self.BOARD_ID = 0
- self.IP_ADDR = 1
- self.MAC_ADDR = 2
- self.VCPUS = 3
- self.MEMORY_MB = 4
- self.LOCAL_GB = 5
- self.MEMORY_MB_USED = 6
- self.LOCAL_GB_USED = 7
- self.HYPERVISOR_TYPE = 8
- self.HYPERVISOR_VER = 9
- self.CPU_INFO = 10
-
- fp = open(file_name, "r")
- for item in fp:
- l = item.split()
- if l[0] == '#':
- continue
- l_d = {'node_id': int(l[self.BOARD_ID]),
- 'ip_addr': l[self.IP_ADDR],
- 'mac_addr': l[self.MAC_ADDR],
- 'status': power_state.NOSTATE,
- 'vcpus': int(l[self.VCPUS]),
- 'memory_mb': int(l[self.MEMORY_MB]),
- 'local_gb': int(l[self.LOCAL_GB]),
- 'memory_mb_used': int(l[self.MEMORY_MB_USED]),
- 'local_gb_used': int(l[self.LOCAL_GB_USED]),
- 'hypervisor_type': l[self.HYPERVISOR_TYPE],
- 'hypervisor_version': int(l[self.HYPERVISOR_VER]),
- 'cpu_info': l[self.CPU_INFO]}
- self.nodes.append(l_d)
- fp.close()
-
- def get_hw_info(self, field):
- """
- Returns hardware information of bare-metal node by the given field.
-
- Given field can be vcpus, memory_mb, local_gb, memory_mb_used,
- local_gb_used, hypervisor_type, hypervisor_version, and cpu_info.
- """
- for node in self.nodes:
- if node['node_id'] == 9:
- if field == 'vcpus':
- return node['vcpus']
- elif field == 'memory_mb':
- return node['memory_mb']
- elif field == 'local_gb':
- return node['local_gb']
- elif field == 'memory_mb_used':
- return node['memory_mb_used']
- elif field == 'local_gb_used':
- return node['local_gb_used']
- elif field == 'hypervisor_type':
- return node['hypervisor_type']
- elif field == 'hypervisor_version':
- return node['hypervisor_version']
- elif field == 'cpu_info':
- return node['cpu_info']
-
- def set_status(self, node_id, status):
- """
- Sets status of the given node by the given status.
-
-        Returns True if the node is in the nodes list.
- """
- for node in self.nodes:
- if node['node_id'] == node_id:
- node['status'] = status
- return True
- return False
-
- def get_status(self):
- """
- Gets status of the given node.
- """
- pass
-
- def get_idle_node(self):
- """
-        Gets an idle node, sets its status to 1 (RUNNING), and returns its ID.
- """
- for item in self.nodes:
- if item['status'] == 0:
- item['status'] = 1 # make status RUNNING
- return item['node_id']
- raise exception.NotFound("No free nodes available")
-
- def get_ip_by_id(self, id):
- """
- Returns default IP address of the given node.
- """
- for item in self.nodes:
- if item['node_id'] == id:
- return item['ip_addr']
-
- def free_node(self, node_id):
- """
- Sets/frees status of the given node as 0 (IDLE).
- """
- LOG.debug(_("free_node...."))
- for item in self.nodes:
- if item['node_id'] == str(node_id):
- item['status'] = 0 # make status IDLE
-
- def power_mgr(self, node_id, mode):
- """
- Changes power state of the given node.
-
- According to the mode (1-ON, 2-OFF, 3-REBOOT), power state can be
- changed. /tftpboot/pdu_mgr script handles power management of
- PDU (Power Distribution Unit).
- """
- if node_id < 5:
- pdu_num = 1
- pdu_outlet_num = node_id + 5
- else:
- pdu_num = 2
- pdu_outlet_num = node_id
- path1 = "10.0.100." + str(pdu_num)
- utils.execute('/tftpboot/pdu_mgr', path1, str(pdu_outlet_num),
- str(mode), '>>', 'pdu_output')
-
- def deactivate_node(self, node_id):
- """
-        Deactivates the given node by turning it off.
-
-        /tftpboot/fs_x directory is an NFS export for node #x
-        and /tftpboot/root_x file is a file system image of node #x.
- """
- node_ip = self.get_ip_by_id(node_id)
- LOG.debug(_("deactivate_node is called for "
- "node_id = %(id)s node_ip = %(ip)s"),
- {'id': str(node_id), 'ip': node_ip})
- for item in self.nodes:
- if item['node_id'] == node_id:
- LOG.debug(_("status of node is set to 0"))
- item['status'] = 0
- self.power_mgr(node_id, 2)
- self.sleep_mgr(5)
- path = "/tftpboot/fs_" + str(node_id)
- pathx = "/tftpboot/root_" + str(node_id)
- utils.execute('sudo', '/usr/sbin/rpc.mountd')
- try:
- utils.execute('sudo', 'umount', '-f', pathx)
- utils.execute('sudo', 'rm', '-f', pathx)
- except Exception:
- LOG.debug(_("rootfs is already removed"))
-
- def network_set(self, node_ip, mac_address, ip_address):
- """
- Sets network configuration based on the given ip and mac address.
-
- User can access the bare-metal node using ssh.
- """
- cmd = (CONF.tile_monitor +
- " --resume --net " + node_ip + " --run - " +
- "ifconfig xgbe0 hw ether " + mac_address +
- " - --wait --run - ifconfig xgbe0 " + ip_address +
- " - --wait --quit")
- subprocess.Popen(cmd, shell=True)
- #utils.execute(cmd, shell=True)
- self.sleep_mgr(5)
-
- def iptables_set(self, node_ip, user_data):
- """
- Sets security setting (iptables:port) if needed.
-
- iptables -A INPUT -p tcp ! -s $IP --dport $PORT -j DROP
- /tftpboot/iptables_rule script sets iptables rule on the given node.
- """
- if user_data != '':
- open_ip = base64.b64decode(user_data)
- utils.execute('/tftpboot/iptables_rule', node_ip, open_ip)
-
- def check_activated(self, node_id, node_ip):
- """
- Checks whether the given node is activated or not.
- """
- LOG.debug(_("Before ping to the bare-metal node"))
- tile_output = "/tftpboot/tile_output_" + str(node_id)
- grep_cmd = ("ping -c1 " + node_ip + " | grep Unreachable > " +
- tile_output)
- subprocess.Popen(grep_cmd, shell=True)
- self.sleep_mgr(5)
-
- file = open(tile_output, "r")
- out_msg = file.readline().find("Unreachable")
- utils.execute('sudo', 'rm', tile_output)
- if out_msg == -1:
- LOG.debug(_("TILERA_BOARD_#%(node_id)s %(node_ip)s is ready"),
- locals())
- return True
- else:
- LOG.debug(_("TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready,"
- " out_msg=%(out_msg)s"), locals())
- self.power_mgr(node_id, 2)
- return False
-
- def vmlinux_set(self, node_id, mode):
- """
- Sets kernel into default path (/tftpboot) if needed.
-
- From basepath to /tftpboot, kernel is set based on the given mode
- such as 0-NoSet, 1-SetVmlinux, or 9-RemoveVmlinux.
- """
-        LOG.debug(_("Nothing to do for tilera nodes: vmlinux is in CF"))
-
- def sleep_mgr(self, time_in_seconds):
- """
-        Sleeps for the given number of seconds while the node activates.
- """
- time.sleep(time_in_seconds)
-
- def ssh_set(self, node_ip):
- """
-        Sets up and runs sshd on the node.
- """
- cmd = (CONF.tile_monitor +
- " --resume --net " + node_ip + " --run - " +
- "/usr/sbin/sshd - --wait --quit")
- subprocess.Popen(cmd, shell=True)
- self.sleep_mgr(5)
-
- def activate_node(self, node_id, node_ip, name, mac_address,
- ip_address, user_data):
- """
- Activates the given node using ID, IP, and MAC address.
- """
- LOG.debug(_("activate_node"))
-
- self.power_mgr(node_id, 2)
- self.power_mgr(node_id, 3)
- self.sleep_mgr(100)
-
- try:
- self.check_activated(node_id, node_ip)
- self.network_set(node_ip, mac_address, ip_address)
- self.ssh_set(node_ip)
- self.iptables_set(node_ip, user_data)
- return power_state.RUNNING
- except Exception as ex:
- self.deactivate_node(node_id)
-            raise exception.NovaException(_("Node is in an unknown error state."))
-
- def get_console_output(self, console_log, node_id):
- """
- Gets console output of the given node.
- """
- node_ip = self.get_ip_by_id(node_id)
- log_path = "/tftpboot/log_" + str(node_id)
- kmsg_cmd = (CONF.tile_monitor +
- " --resume --net " + node_ip +
- " -- dmesg > " + log_path)
- subprocess.Popen(kmsg_cmd, shell=True)
- self.sleep_mgr(5)
- utils.execute('cp', log_path, console_log)
-
- def get_image(self, bp):
- """
- Gets the bare-metal file system image into the instance path.
-
-        Nothing to do for tilera nodes: the actual image is used.
- """
- path_fs = "/tftpboot/tilera_fs"
- path_root = bp + "/root"
- utils.execute('cp', path_fs, path_root)
-
- def set_image(self, bpath, node_id):
- """
- Sets the PXE bare-metal file system from the instance path.
-
-        This should be done after the ssh key is injected.
-        /tftpboot/fs_x directory is an NFS export for node #x.
-        /tftpboot/root_x file is a file system image of node #x.
- """
- path1 = bpath + "/root"
- pathx = "/tftpboot/root_" + str(node_id)
- path2 = "/tftpboot/fs_" + str(node_id)
- utils.execute('sudo', 'mv', path1, pathx)
- utils.execute('sudo', 'mount', '-o', 'loop', pathx, path2)
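
The BareMetalNodes class deleted above implements a singleton by overriding
__new__ and guarding __init__ with an _is_init flag, so the node list file is
parsed only once per process. A minimal sketch of that idiom on its own
(NodePool is an illustrative name, not Nova code):

    class NodePool(object):
        _instance = None
        _is_init = False

        def __new__(cls, *args, **kwargs):
            # Reuse the single instance across all constructor calls.
            if cls._instance is None:
                cls._instance = super(NodePool, cls).__new__(cls)
            return cls._instance

        def __init__(self):
            # __init__ still runs on every NodePool() call, so the flag keeps
            # the expensive setup (e.g. reading a node list file) from
            # being repeated.
            if self._is_init:
                return
            self._is_init = True
            self.nodes = []

    assert NodePool() is NodePool()
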
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
index 15c03e613..8e509cd28 100644
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -55,7 +55,6 @@ class VMUtils(object):
else:
return vms[0].ElementName
- #TODO(alexpilotti): use the reactor to poll instead of sleep
def check_job_status(self, jobpath):
"""Poll WMI job state for completion"""
job_wmi_path = jobpath.replace('\\', '/')
@@ -65,12 +64,26 @@ class VMUtils(object):
time.sleep(0.1)
job = wmi.WMI(moniker=job_wmi_path)
if job.JobState != constants.WMI_JOB_STATE_COMPLETED:
- LOG.debug(_("WMI job failed: %(ErrorSummaryDescription)s - "
- "%(ErrorDescription)s - %(ErrorCode)s") % job)
+ job_state = job.JobState
+ if job.path().Class == "Msvm_ConcreteJob":
+ err_sum_desc = job.ErrorSummaryDescription
+ err_desc = job.ErrorDescription
+ err_code = job.ErrorCode
+ LOG.debug(_("WMI job failed with status %(job_state)d. "
+ "Error details: %(err_sum_desc)s - %(err_desc)s - "
+ "Error code: %(err_code)d") % locals())
+ else:
+ (error, ret_val) = job.GetError()
+ if not ret_val and error:
+ LOG.debug(_("WMI job failed with status %(job_state)d. "
+ "Error details: %(error)s") % locals())
+ else:
+ LOG.debug(_("WMI job failed with status %(job_state)d. "
+ "No error description available") % locals())
return False
desc = job.Description
elap = job.ElapsedTime
- LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s ")
+ LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s")
% locals())
return True
diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py
index 793044477..7256dcdb0 100644
--- a/nova/virt/libvirt/imagecache.py
+++ b/nova/virt/libvirt/imagecache.py
@@ -311,6 +311,9 @@ class ImageCacheManager(object):
if not checksum_result is None:
image_bad = not checksum_result
+ # Give other threads a chance to run
+ time.sleep(0)
+
instances = []
if img_id in self.used_images:
local, remote, instances = self.used_images[img_id]